diff --git "a/1261.jsonl" "b/1261.jsonl"
new file mode 100644
--- /dev/null
+++ "b/1261.jsonl"
@@ -0,0 +1,731 @@
+{"seq_id":"300969791","text":"# from da\nfrom flask import request, session, current_app as app\n\nfrom wtforms import Form, FloatField, StringField\nfrom wtforms.validators import InputRequired, ValidationError, StopValidation\nfrom fractions import Fraction\nfrom datetime import datetime\nfrom pytz import timezone\nimport wtforms_json\n\nfrom openprocurement.auction.utils import prepare_extra_journal_fields\n\nwtforms_json.init()\n\n\ndef validate_bid_value(form, field):\n \"\"\"\n Bid must be greater than 0\n \"\"\"\n if field.data <= 0.0 and field.data != -1:\n raise ValidationError(u'Too low value')\n\n\ndef validate_bid_change_on_bidding(form, field):\n \"\"\"\n Bid must be lower than the previous bidder's bid amount minus the minimalStep amount\n \"\"\"\n stage_id = form.document['current_stage']\n if form.auction.features:\n minimal_bid = form.document['stages'][stage_id]['amount_features']\n minimal = Fraction(minimal_bid) * form.auction.bidders_coeficient[form.data['bidder_id']]\n minimal -= Fraction(form.document['minimalStep']['amount'])\n if field.data > minimal:\n raise ValidationError(u'Too high value')\n else:\n minimal_bid = form.document['stages'][stage_id]['amount']\n if field.data > (minimal_bid - form.document['minimalStep']['amount']):\n raise ValidationError(u'Too high value')\n\n\ndef validate_bidder_id_on_bidding(form, field):\n stage_id = form.document['current_stage']\n if field.data != form.document['stages'][stage_id]['bidder_id']:\n raise StopValidation(u'Not valid bidder')\n\n\nclass BidsForm(Form):\n bidder_id = StringField('bidder_id',\n [InputRequired(message=u'No bidder id'), ])\n\n bid = FloatField('bid', [InputRequired(message=u'Bid amount is required'),\n validate_bid_value])\n\n def validate_bid(self, field):\n stage_id = self.document['current_stage']\n if self.document['stages'][stage_id]['type'] == 'bids':\n validate_bid_change_on_bidding(self, field)\n else:\n raise ValidationError(u'Stage not for bidding')\n\n def validate_bidder_id(self, field):\n stage_id = self.document['current_stage']\n if self.document['stages'][stage_id]['type'] == 'bids':\n validate_bidder_id_on_bidding(self, field)\n\n\ndef form_handler():\n auction = app.config['auction']\n with auction.bids_actions:\n form = app.bids_form.from_json(request.json)\n form.auction = auction\n form.document = auction.db.get(auction.auction_doc_id)\n current_time = datetime.now(timezone('Europe/Kiev'))\n if form.validate():\n # write data\n auction.add_bid(form.document['current_stage'],\n {'amount': form.data['bid'],\n 'bidder_id': form.data['bidder_id'],\n 'time': current_time.isoformat()})\n if form.data['bid'] == -1.0:\n app.logger.info(\"Bidder {} with client_id {} canceled bids in stage {} in {}\".format(\n form.data['bidder_id'], session['client_id'],\n form.document['current_stage'], current_time.isoformat()\n ), extra=prepare_extra_journal_fields(request.headers))\n else:\n app.logger.info(\"Bidder {} with client_id {} placed bid {} in {}\".format(\n form.data['bidder_id'], session['client_id'],\n form.data['bid'], current_time.isoformat()\n ), extra=prepare_extra_journal_fields(request.headers))\n return {'status': 'ok', 'data': form.data}\n else:\n app.logger.info(\"Bidder {} with client_id {} wants to place bid {} in {} with errors {}\".format(\n request.json.get('bidder_id', 'None'), session['client_id'],\n request.json.get('bid', 'None'), current_time.isoformat(),\n repr(form.errors)\n ), extra=prepare_extra_journal_fields(request.headers))\n return {'status': 'failed', 'errors': form.errors}\n","sub_path":"openprocurement/auction/worker/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":4062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"73971414","text":"import sys, pygame\nfrom pygame.locals import *\n\ndef main():\n screen = pygame.display.set_mode((400,400))\n pygame.display.set_caption('Prueba Pygame')\n\n # create a color\n # to draw any geometric figure we have to rely on\n # line, which takes many values\n # 1st param: the first value is where the line will be drawn\n # 2nd param: the color the line will be drawn in\n # 3rd param: tuple with X and Y pos, A STRAIGHT LINE WILL BE DRAWN UNTIL IT REACHES THE END POINT\n \n while True:\n for eventos in pygame.event.get():\n pygame.draw.rect(screen,(255,0,0),(0,0,50,50))\n pygame.draw.line(screen,(255,0,0),(0,0),(400,400))\n pygame.draw.line(screen,(255,0,0),(400,0),(0,400))\n pygame.draw.line(screen,(255,0,0),(200,0),(200,400))\n pygame.draw.line(screen,(255,0,0),(0,200),(400,200))\n if eventos.type == QUIT:\n sys.exit(0)\n pygame.display.update()\n return 0\n\nif __name__ == '__main__':\n pygame.init()\n main()\n\n\"\"\"\nThe window is a matrix\nthe squares in pygame are called pixels\nwe have to rely on the coordinates\nwe have coordinates in X and in Y\nalways starting from 0\nthis is important when we are working with images\n\"\"\"","sub_path":"pygame/basico/1_movimientos_basicos.py","file_name":"1_movimientos_basicos.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"125648072","text":"\"\"\"This module provides functions which determine various observatory\nspecific policies for JWST:\n\n1. How to convert reference file basenames to fully specified paths.\n\n2. How to manage parameters for reference file Validator objects used\nin the certification of reference files. \n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\nimport os.path\nimport re\n\nfrom crds import (log, rmap, data_file, config, utils, timestamp)\nfrom . import tpn\n\n# =======================================================================\n\ntry:\n from jwst.datamodels import DataModel\n MODEL = DataModel()\nexcept Exception:\n MODEL = None\n\n# =======================================================================\n\nHERE = os.path.dirname(__file__) or \"./\"\n\n# =======================================================================\n\ndef test():\n \"\"\"Run the module doctests.\"\"\"\n import doctest\n from . 
import locate\n return doctest.testmod(locate)\n\n# =======================================================================\n\n# These two functions decouple the generic reference file certifier program \n# from observatory-unique ways of specifying and caching Validator parameters.\n\nfrom crds.jwst.tpn import get_tpninfos # reference_name_to_validator_key, mapping_validator_key defined here.\nfrom crds.jwst import TYPES, INSTRUMENTS, FILEKINDS, EXTENSIONS\n\nreference_name_to_validator_key = TYPES.reference_name_to_validator_key \n# mapping_validator_key = TYPES.mapping_validator_key\nget_row_keys = TYPES.get_row_keys\nget_row_keys_by_instrument = TYPES.get_row_keys_by_instrument\nget_item = TYPES.get_item\nsuffix_to_filekind = TYPES.suffix_to_filekind\n\n# =======================================================================\n\ndef match_context_key(key):\n \"\"\"Set the case of a context key appropriately for this project, JWST\n always uses upper case.\n \"\"\"\n return key.upper()\n\n# =======================================================================\n\ndef mapping_validator_key(mapping):\n \"\"\"For now, just use instrument based constraints.\"\"\"\n return (mapping.instrument + \"_all_ld.tpn\", mapping.name)\n\n# =======================================================================\n\nREF_EXT_RE = re.compile(r\"\\.fits|\\.r\\dh$\")\n\ndef get_file_properties(filename):\n \"\"\"Figure out (instrument, filekind, serial) based on `filename` which\n should be a mapping or FITS reference file.\n\n >> get_file_properties(\"./hst_acs_biasfile_0001.rmap\")\n ('acs', 'biasfile')\n\n >> get_file_properties(\"./hst_acs_biasfile_0001.pmap\")\n Traceback (most recent call last):\n ...\n AssertionError: Invalid .pmap filename './hst_acs_biasfile_0001.pmap'\n\n >> get_file_properties(\"test_data/s7g1700gl_dead.fits\")\n \"\"\"\n if rmap.is_mapping(filename):\n try:\n return decompose_newstyle_name(filename)[2:4]\n except Exception:\n # NOTE: load_mapping more conservative than fetch_mapping used in properties_from_mapping\n mapping = rmap.load_mapping(filename)\n return mapping.instrument, mapping.filekind\n elif config.is_reference(filename):\n result = get_reference_properties(filename)[2:4]\n else:\n try:\n result = properties_inside_mapping(filename)\n except Exception as exc:\n result = get_reference_properties(filename)[2:4]\n assert result[0] in INSTRUMENTS+[\"\"], \"Bad instrument \" + \\\n repr(result[0]) + \" in filename \" + repr(filename)\n assert result[1] in FILEKINDS+[\"\"], \"Bad filekind \" + \\\n repr(result[1]) + \" in filename \" + repr(filename)\n return result\n\ndef decompose_newstyle_name(filename):\n \"\"\"\n >> decompose_newstyle_name(\"./hst.pmap\")\n ('.', 'hst', '', '', '', '.pmap')\n\n >> decompose_newstyle_name(\"./hst_0001.pmap\")\n ('.', 'hst', '', '', '0001', '.pmap')\n\n >> decompose_newstyle_name(\"./hst_acs.imap\")\n ('.', 'hst', 'acs', '', '', '.imap')\n\n >> decompose_newstyle_name(\"./hst_acs_0001.imap\")\n ('.', 'hst', 'acs', '', '0001', '.imap')\n\n >> decompose_newstyle_name(\"./hst_acs_biasfile.rmap\")\n ('.', 'hst', 'acs', 'biasfile', '', '.rmap')\n\n >> decompose_newstyle_name(\"./hst_acs_biasfile_0001.rmap\")\n ('.', 'hst', 'acs', 'biasfile', '0001', '.rmap')\n\n >> decompose_newstyle_name(\"./hst_acs_biasfile.fits\")\n ('.', 'hst', 'acs', 'biasfile', '', '.fits')\n\n >> decompose_newstyle_name(\"./hst_acs_biasfile_0001.fits\")\n ('.', 'hst', 'acs', 'biasfile', '0001', '.fits')\n \"\"\"\n path, parts, ext = _get_fields(filename)\n 
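# 'parts' is the underscore-split basename, e.g. ['hst', 'acs', 'biasfile', '0001'] for 'hst_acs_biasfile_0001.rmap' (see the doctests above)\n 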
observatory = parts[0]\n serial = list_get(parts, 3, \"\")\n\n if ext == \".pmap\":\n assert len(parts) in [1,2], \"Invalid .pmap filename \" + repr(filename)\n instrument, filekind = \"\", \"\"\n serial = list_get(parts, 1, \"\")\n elif ext == \".imap\":\n assert len(parts) in [2,3], \"Invalid .imap filename \" + repr(filename)\n instrument = parts[1]\n filekind = \"\"\n serial = list_get(parts, 2, \"\")\n else:\n assert len(parts) in [3,4], \"Invalid filename \" + repr(filename)\n instrument = parts[1]\n filekind = parts[2]\n serial = list_get(parts, 3, \"\")\n\n # Don't include filename in these or it messes up crds.certify unique error tracking.\n\n assert instrument in INSTRUMENTS+[\"\"], \"Invalid instrument \" + repr(instrument)\n assert filekind in FILEKINDS+[\"\"], \"Invalid filekind \" + repr(filekind)\n assert re.match(r\"\\d*\", serial), \"Invalid id field \" + repr(serial)\n # extension may vary for upload temporary files.\n\n return path, observatory, instrument, filekind, serial, ext\n\ndef properties_inside_mapping(filename):\n \"\"\"Load `filename`'s mapping header to discover and \n return (instrument, filekind).\n \"\"\"\n map = rmap.fetch_mapping(filename)\n if map.filekind == \"PIPELINE\":\n result = \"\", \"\"\n elif map.filekind == \"INSTRUMENT\":\n result = map.instrument, \"\"\n else:\n result = map.instrument, map.filekind\n return result\n\ndef _get_fields(filename):\n path = os.path.dirname(filename)\n name = os.path.basename(filename)\n name, ext = os.path.splitext(name)\n parts = name.split(\"_\")\n return path, parts, ext\n\ndef list_get(l, index, default):\n try:\n return l[index]\n except IndexError:\n return default\n\nCDBS_DIRS_TO_INSTR = {\n \"/jref/\":\"acs\",\n \"/oref/\":\"stis\",\n \"/iref/\":\"wfc3\",\n \"/lref/\":\"cos\",\n \"/nref/\":\"nicmos\",\n \n \"/upsf/\":\"wfpc2\",\n \"/uref/\":\"wfpc2\",\n \"/uref_linux/\":\"wfpc2\",\n \n \"/yref/\" : \"fos\",\n \"/zref/\" : \"hrs\",\n \n}\n\ndef get_reference_properties(filename):\n \"\"\"Figure out FITS (instrument, filekind, serial) based on `filename`.\n \"\"\"\n try: # Hopefully it's a nice new standard filename, easy\n return decompose_newstyle_name(filename)\n except AssertionError: # cryptic legacy paths & names, i.e. reality\n pass\n # If not, dig inside the FITS file, slow\n return ref_properties_from_header(filename)\n\n# =======================================================================\n\ndef ref_properties_from_header(filename):\n \"\"\"Look inside FITS `filename` header to determine instrument, filekind.\n \"\"\"\n # For legacy files, just use the root filename as the unique id\n path, parts, ext = _get_fields(filename)\n serial = os.path.basename(os.path.splitext(filename)[0])\n header = data_file.get_free_header(filename, observatory=\"jwst\")\n instrument = utils.header_to_instrument(header).lower()\n assert instrument in INSTRUMENTS, \"Invalid instrument \" + repr(instrument)\n filekind = utils.get_any_of(header, [\"REFTYPE\", \"TYPE\", \"META.TYPE\", \"META.REFFILE.TYPE\"], \"UNDEFINED\").lower()\n assert filekind in FILEKINDS, \"Invalid file type \" + repr(filekind)\n return path, \"jwst\", instrument, filekind, serial, ext\n\n# =============================================================================\n\ndef reference_keys_to_dataset_keys(rmapping, header):\n \"\"\"Given a header dictionary for a reference file, map the header back to keys\n relevant to datasets. So for ACS biasfile the reference says BINAXIS1 but\n the dataset says NUMCOLS. This would convert { \"BINAXIS1\": 1024 } to {\n \"NUMCOLS\" : 1024 }.\n \n In general, rmap parkeys are matched against dataset values and are defined\n as dataset header keywords. For refactoring though, what's initially\n available are reference file keywords... which need to be mapped into the\n terms rmaps know: dataset keywords.\n \"\"\"\n header = dict(header)\n try:\n translations = rmapping.reference_to_dataset\n except AttributeError:\n pass\n else:\n # Add replacements for translations *if* the existing untranslated value\n # is poor and the translated value is better defined. This is to do\n # translations w/o replacing valid/concrete DM values with something \n # like guessed values of \"UNDEFINED\" or \"N/A\".\n for rkey in translations:\n if rkey in header:\n dkey = translations[rkey]\n dval = header.get(translations[rkey], None)\n rval = header[rkey]\n if dval in [None, \"N/A\", \"UNDEFINED\"] and rval not in [None, \"UNDEFINED\"]:\n header[dkey] = rval\n if \"USEAFTER\" in header: # and \"DATE-OBS\" not in header:\n reformatted = timestamp.reformat_useafter(rmapping, header).split()\n header[\"DATE-OBS\"] = header[\"META.OBSERVATION.DATE\"] = reformatted[0]\n header[\"TIME-OBS\"] = header[\"META.OBSERVATION.TIME\"] = reformatted[1]\n return header\n\n# =============================================================================\n\ndef condition_matching_header(rmapping, header):\n \"\"\"Normalize header values for .rmap reference insertion.\"\"\"\n return dict(header) # NOOP for JWST, may have to revisit\n\n# ============================================================================\n\nclass MissingDependencyError(Exception):\n \"\"\"A required package is missing.\"\"\"\n\ndef fits_to_parkeys(fits_header):\n \"\"\"Map a FITS header onto rmap parkeys appropriate for JWST.\"\"\"\n if MODEL is None:\n raise MissingDependencyError(\"JWST data models are not installed. 
Cannot fits_to_parkeys().\")\n parkeys = {}\n for key, value in fits_header.items():\n key, value = str(key), str(value)\n if not key.lower().startswith(\"meta.\"):\n pk = cached_dm_find_fits_keyword(key)\n if not pk:\n pk = key\n else:\n assert len(pk) == 1, \"CRDS JWST Data Model ambiguity on \" + \\\n repr(key) + \" = \" + repr(pk)\n pk = pk[0]\n else:\n pk = key\n pk = pk.upper()\n parkeys[pk] = value\n return parkeys\n\n@utils.cached\ndef cached_dm_find_fits_keyword(key):\n \"\"\"Return the SSB JWST data model path for the specified non-path keyword, nominally\n a FITS or json or ASDF bare keyword.\n \"\"\"\n return MODEL.find_fits_keyword(key.upper(), return_result=True)\n# ============================================================================\n\ndef get_env_prefix(instrument):\n \"\"\"Return the environment variable prefix (IRAF prefix) for `instrument`.\"\"\"\n return \"crds://\"\n\ndef locate_file(refname, mode=None):\n \"\"\"Given a valid reffilename in CDBS or CRDS format, return a cache path for the file.\n The aspect of this which is complicated is determining instrument and an instrument\n specific sub-directory for it based on the filename alone, not the file contents.\n \"\"\"\n if mode is None:\n mode = config.get_crds_ref_subdir_mode(observatory=\"jwst\")\n if mode == \"instrument\":\n instrument = utils.file_to_instrument(refname)\n rootdir = locate_dir(instrument, mode)\n elif mode == \"flat\":\n rootdir = config.get_crds_refpath(\"jwst\")\n else:\n raise ValueError(\"Unhandled reference file location mode \" + repr(mode))\n return os.path.join(rootdir, os.path.basename(refname))\n\ndef locate_dir(instrument, mode=None):\n \"\"\"Locate the instrument specific directory for a reference file.\"\"\"\n if mode is None:\n mode = config.get_crds_ref_subdir_mode(observatory=\"jwst\")\n else:\n config.check_crds_ref_subdir_mode(mode)\n crds_refpath = config.get_crds_refpath(\"jwst\")\n if mode == \"instrument\": # use simple names inside CRDS cache.\n rootdir = os.path.join(crds_refpath, instrument.lower())\n if not os.path.exists(rootdir):\n utils.ensure_dir_exists(rootdir + \"/locate_dir.fits\")\n elif mode == \"flat\": # use original flat cache structure, all instruments in same directory.\n rootdir = crds_refpath\n else:\n raise ValueError(\"Unhandled reference file location mode \" + repr(mode))\n return rootdir\n\n# ============================================================================\ndef load_all_type_constraints():\n \"\"\"Load all the JWST type constraint files.\"\"\"\n tpn.get_tpninfos(\"miri_flat.tpn\", \"foo.fits\") # With core schema, one type loads all\n","sub_path":"crds/jwst/locate.py","file_name":"locate.py","file_ext":"py","file_size_in_byte":13060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"46259476","text":"#!/usr/bin/python\n# coding:utf-8\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom modules.layers import Highway, Initialized_Conv1d\n\nclass Embedding(nn.Module):\n def __init__(self, wemb_dim, cemb_dim, d_model,\n dropout_w=0.1, dropout_c=0.05):\n super().__init__()\n self.conv2d = nn.Conv2d(cemb_dim, d_model, kernel_size = (1,5), padding=0, bias=True)\n nn.init.kaiming_normal_(self.conv2d.weight, nonlinearity='relu')\n self.conv1d = Initialized_Conv1d(wemb_dim + d_model, d_model, bias=False)\n self.high = Highway(2, d_model)\n self.dropout_w = dropout_w\n self.dropout_c = dropout_c\n\n def forward(self, ch_emb, wd_emb, length):\n ch_emb = 
ch_emb.permute(0, 3, 1, 2)\n ch_emb = F.dropout(ch_emb, p=self.dropout_c, training=self.training)\n ch_emb = self.conv2d(ch_emb)\n ch_emb = F.relu(ch_emb)\n ch_emb, _ = torch.max(ch_emb, dim=3)\n\n wd_emb = F.dropout(wd_emb, p=self.dropout_w, training=self.training)\n wd_emb = wd_emb.transpose(1, 2)\n emb = torch.cat([ch_emb, wd_emb], dim=1)\n emb = self.conv1d(emb)\n emb = self.high(emb)\n return emb\n","sub_path":"modules/embedding.py","file_name":"embedding.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"585368544","text":"\nclass BaseConfig:\n\n POINTS: tuple[int]\n TEAMS: tuple[str]\n POULES: tuple[str]\n TYPES: dict\n FINALS_MAPPER: dict\n\n WINST = 3\n GELIJK = 1\n VERLIES = 0\n\n MULTIPLIER = {\n WINST: 3,\n GELIJK: 2,\n VERLIES: 1\n }\n\n @classmethod\n def all_types(cls) -> list[str]:\n return [v for k in cls.TYPES for v in cls.TYPES[k]]\n\n @classmethod\n def get_points(cls, goals_home: int, goals_away: int):\n if goals_home is None or goals_away is None:\n return None\n elif goals_home == goals_away:\n return cls.GELIJK\n elif goals_home > goals_away:\n return cls.WINST\n else:\n return cls.VERLIES\n\n @classmethod\n def get_punten_spel(cls, points: int, goals: int):\n if points is None:\n return 0\n return cls.MULTIPLIER[points] * (goals + 1)\n\n\nclass EKspel2021Config(BaseConfig):\n\n POINTS = (50, 43, 38, 33, 30, 27, *range(24, 12, -2), *range(12, 0, -1))\n\n TEAMS = (\n 'België',\n 'Denemarken',\n 'Duitsland',\n 'Engeland',\n 'Finland',\n 'Frankrijk',\n 'Hongarije',\n 'Italië',\n 'Kroatië',\n 'Nederland',\n 'Noord-Macedonië',\n 'Oekraïne',\n 'Oostenrijk',\n 'Polen',\n 'Portugal',\n 'Rusland',\n 'Schotland',\n 'Slowakije',\n 'Spanje',\n 'Tsjechië',\n 'Turkije',\n 'Wales',\n 'Zweden',\n 'Zwitserland',\n )\n\n assert len(TEAMS) == len(POINTS) == 24\n\n POULES = tuple('ABCDEF')\n\n TYPES = {\n 'Poule': POULES,\n '8_FINAL': ('8F1', '8F2', '8F3', '8F4', '8F5', '8F6', '8F7', '8F8'),\n 'Q_FINAL': ('QF1', 'QF2', 'QF3', 'QF4'),\n 'S_FINAL': ('SF1', 'SF2'),\n 'FINAL': ('FINAL',)\n }\n\n FINALS_MAPPER = {\n # eighth finals\n '2A': '',\n '2B': '',\n '1A': '',\n '2C': '',\n '1C': '',\n '3DEF': '',\n '1B': '',\n '3ADEF': '',\n '2D': '',\n '2E': '',\n '1F': '',\n '3ABC': '',\n '1D': '',\n '2F': '',\n '1E': '',\n '3ABCD': '',\n\n # quarter finals\n 'WINNAAR 8F6': '',\n 'WINNAAR 8F5': '',\n 'WINNAAR 8F4': '',\n 'WINNAAR 8F2': '',\n 'WINNAAR 8F3': '',\n 'WINNAAR 8F1': '',\n 'WINNAAR 8F8': '',\n 'WINNAAR 8F7': '',\n\n # semi finals\n 'WINNAAR QF2': '',\n 'WINNAAR QF1': '',\n 'WINNAAR QF4': '',\n 'WINNAAR QF3': '',\n\n # final\n 'WINNAAR SF1': '',\n 'WINNAAR SF2': ''\n }\n\n\nclass WKspel2022Config(BaseConfig):\n\n TOTAL_GAMES = 64\n\n POINTS = (80, *range(70, 35, -5), 36, 32, 28, 25, 22, 20, *range(18, 0, -1))\n\n TEAMS = (\n \"Argentinië\",\n \"Australië\",\n \"België\",\n \"Brazilië\",\n \"Canada\",\n \"Costa Rica\",\n \"Denemarken\",\n \"Duitsland\",\n \"Ecuador\",\n \"Engeland\",\n \"Frankrijk\",\n \"Ghana\",\n \"Iran\",\n \"Japan\",\n \"Kameroen\",\n \"Kroatië\",\n \"Marokko\",\n \"Mexico\",\n \"Nederland\",\n \"Polen\",\n \"Portugal\",\n \"Qatar\",\n \"Saudi-Arabië\",\n \"Senegal\",\n \"Servië\",\n \"Spanje\",\n \"Tunesië\",\n \"Uruguay\",\n \"Verenigde Staten\",\n \"Wales\",\n \"Zuid-Korea\",\n \"Zwitserland\"\n )\n\n assert len(TEAMS) == len(POINTS) == 32\n\n POULES = tuple(\"ABCDEFGH\")\n\n TYPES = {\n \"Poule\": POULES,\n \"8_FINAL\": (\"8F1\", \"8F2\", \"8F3\", \"8F4\", \"8F5\", 
\"8F6\", \"8F7\", \"8F8\"),\n \"Q_FINAL\": (\"QF1\", \"QF2\", \"QF3\", \"QF4\"),\n \"S_FINAL\": (\"SF1\", \"SF2\"),\n \"FINAL\": (\"FINAL\", \"BRONZE\")\n }\n\n FINALS_MAPPER = {\n # Eighth finals - first and second in poule\n \"1A\": \"\",\n \"1B\": \"\",\n \"1C\": \"\",\n \"1D\": \"\",\n \"1E\": \"\",\n \"1F\": \"\",\n \"1G\": \"\",\n \"1H\": \"\",\n\n \"2A\": \"\",\n \"2B\": \"\",\n \"2C\": \"\",\n \"2D\": \"\",\n \"2E\": \"\",\n \"2F\": \"\",\n \"2G\": \"\",\n \"2H\": \"\",\n\n # Quarter finals - Winner Eight finals\n \"W8F1\": \"\",\n \"W8F2\": \"\",\n \"W8F3\": \"\",\n \"W8F4\": \"\",\n \"W8F5\": \"\",\n \"W8F6\": \"\",\n \"W8F7\": \"\",\n \"W8F8\": \"\",\n\n # Semi finals - Winner Quater Final\n \"WQF1\": \"\",\n \"WQF2\": \"\",\n \"WQF3\": \"\",\n \"WQF4\": \"\",\n\n # Final - Winner Semi Final\n \"WSF1\": \"\",\n \"WSF2\": \"\",\n\n # Bronze final - Loser Semi Final\n \"LSF1\": \"\",\n \"LSF2\": \"\"\n }\n\n\n# define config\nconfig = WKspel2022Config\n","sub_path":"src/wkspel/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":4635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"407366333","text":"import sys\nsys.path.append('utils/')\nimport tensorflow as tf\nimport tensorflow.contrib.seq2seq as seq2seq\nfrom tensorflow.python.layers import core as layers_core\nfrom model_utils import *\nfrom reward import *\nimport rlloss\n\nclass Seq2SeqModel():\n def __init__(self, config):\n\n print('The model is built for training:', config['IS_TRAIN'])\n\n self.rl_enable = config['RL_ENABLE']\n self.bleu_enable = config['BLEU_RL_ENABLE']\n self.learning_rate = tf.Variable(config['LR'], dtype=tf.float32, name='model_learning_rate', trainable=False)\n self.word_embedding_learning_rate = tf.Variable(config['WE_LR'], dtype=tf.float32, name='model_we_learning_rate', trainable=False)\n self.encoder_learning_rate = tf.Variable(config['ENCODER_LR'], dtype=tf.float32, name='model_enc_learning_rate', trainable=False)\n self.decoder_learning_rate = tf.Variable(config['DECODER_LR'], dtype=tf.float32, name='model_dec_learning_rate', trainable=False)\n if config['SPLIT_LR']:\n def tmp_func():\n self.word_embedding_learning_rate.assign( self.word_embedding_learning_rate * config['LR_DECAY'])\n self.encoder_learning_rate.assign( self.encoder_learning_rate * config['LR_DECAY'])\n self.decoder_learning_rate.assign( self.decoder_learning_rate * config['LR_DECAY'])\n self.lr_decay_op = tmp_func()\n else:\n self.lr_decay_op = self.learning_rate.assign(self.learning_rate * config['LR_DECAY'])\n\n if config['OPTIMIZER']=='Adam':\n self.optimizer = tf.train.AdamOptimizer\n elif config['OPTIMIZER']=='GD':\n self.optimizer = tf.train.GradientDescentOptimizer\n else:\n raise Exception(\"Wrong optimizer name...\")\n\n self.global_step = tf.Variable(config['GLOBAL_STEP'], dtype=tf.int32, name='model_global_step', trainable=False)\n self.batch_size = config['BATCH_SIZE']\n self.input_size = config['INPUT_VOCAB_SIZE']\n self.output_size = config['OUTPUT_VOCAB_SIZE']\n self.encoder_hidden_size = config['ENCODER_HIDDEN_SIZE']\n self.decoder_hidden_size = config['DECODER_HIDDEN_SIZE']\n self.embedding_size = config['WORD_EMBEDDING_SIZE']\n\n\n self.encoder_inputs = tf.placeholder(dtype=tf.int32, shape=(None, self.batch_size), name='encoder_inputs')\n self.encoder_inputs_length = tf.placeholder(dtype=tf.int32, shape=(self.batch_size, ), name='encoder_inputs_length')\n self.encoder_inputs_mask = tf.placeholder(dtype=tf.float32, 
shape=(self.batch_size, None), name='encoder_inputs_mask')\n self.decoder_inputs = tf.placeholder(dtype=tf.int32, shape=(None, self.batch_size), name='decoder_inputs')\n self.decoder_inputs_length = tf.placeholder(dtype=tf.int32, shape=(self.batch_size, ), name='decoder_inputs_length')\n self.decoder_inputs_mask = tf.placeholder(dtype=tf.float32, shape=(self.batch_size, None), name='decoder_inputs_mask')\n self.decoder_targets = tf.placeholder(dtype=tf.int32, shape=(None, self.batch_size), name='decoder_targets')\n self.decoder_targets_length = tf.placeholder(dtype=tf.int32, shape=(self.batch_size, ), name='decoder_targets_length')\n self.decoder_targets_mask = tf.placeholder(dtype=tf.float32, shape=(self.batch_size, None), name='decoder_targets_mask')\n self.rewards = tf.placeholder(dtype=tf.float32, shape=(self.batch_size, None, self.output_size), name='decoder_targets_mask')\n\n\n with tf.variable_scope(\"InputWordEmbedding\") as scope:\n\n self.input_word_embedding_matrix = modelInitWordEmbedding(self.input_size, self.embedding_size, name='we_input')\n\n self.encoder_inputs_embedded = modelGetWordEmbedding(self.input_word_embedding_matrix, self.encoder_inputs)\n\n # print('Embedding Trainable Variables')\n self.input_embedding_variables = scope.trainable_variables()\n # print(self.embedding_variables)\n\n with tf.variable_scope(\"OutputWordEmbedding\") as scope:\n\n self.output_word_embedding_matrix = modelInitWordEmbedding(self.output_size, self.embedding_size, name='we_output')\n self.decoder_inputs_embedded = modelGetWordEmbedding(self.output_word_embedding_matrix, self.decoder_inputs)\n\n # print('Embedding Trainable Variables')\n self.output_embedding_variables = scope.trainable_variables()\n # print(self.embedding_variables)\n\n with tf.variable_scope(\"DynamicEncoder\") as scope:\n self.encoder_cell = modelInitRNNCells(self.encoder_hidden_size, config['ENCODER_LAYERS'], config['CELL'], config['INPUT_DROPOUT'], config['OUTPUT_DROPOUT'])\n if config['BIDIRECTIONAL_ENCODER']:\n if config['VAE_ENCODER']:\n self.encoder_outputs, self.encoder_state, self.vae_loss = modelInitVAEBidirectionalEncoder(self.encoder_cell, self.encoder_inputs_embedded, self.encoder_inputs_length, encoder_type='stack')\n else:\n self.encoder_outputs, self.encoder_state = modelInitBidirectionalEncoder(self.encoder_cell, self.encoder_inputs_embedded, self.encoder_inputs_length, encoder_type='stack')\n else:\n if config['VAE_ENCODER']:\n self.encoder_outputs, self.encoder_state, self.vae_loss = modelInitVAEUndirectionalEncoder(self.encoder_cell, self.encoder_inputs_embedded, self.encoder_inputs_length)\n else:\n self.encoder_outputs, self.encoder_state = modelInitUndirectionalEncoder(self.encoder_cell, self.encoder_inputs_embedded, self.encoder_inputs_length)\n\n self.encoder_inputs_length_att = self.encoder_inputs_length\n\n if config['SAE_ENCODER']:\n # sae_h_size = 20\n # W_SAE = tf.get_variable(\"W_SAE\", shape=[config['MAX_IN_LEN'], sae_h_size])\n # self.encoder_outputs = tf.transpose(tf.reshape(tf.matmul(tf.reshape(tf.transpose(self.encoder_outputs, [1,2,0]), [-1,config['MAX_IN_LEN']]), W_SAE), [self.batch_size,self.encoder_hidden_size*2,sae_h_size]), [2,0,1])\n # print(self.encoder_outputs)\n # self.encoder_inputs_length_att = tf.convert_to_tensor([sae_h_size]*self.batch_size, dtype=tf.int32)\n\n o_1, o_2 = tf.split(self.encoder_outputs, 2, 1)\n euclidean_dis = tf.reduce_mean(tf.square(o_1-o_2),2)\n self.sae_loss = tf.reduce_mean(euclidean_dis)\n\n if config['USE_BS'] and not config['IS_TRAIN']:\n 
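# beam search decoding: tile the encoder state and outputs BEAM_WIDTH times so every beam hypothesis attends over the same encoder memory\n 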
self.encoder_state = seq2seq.tile_batch(self.encoder_state, config['BEAM_WIDTH'])\n self.encoder_outputs = tf.transpose(seq2seq.tile_batch(tf.transpose(self.encoder_outputs, [1,0,2]), config['BEAM_WIDTH']), [1,0,2])\n self.encoder_inputs_length_att = seq2seq.tile_batch(self.encoder_inputs_length_att, config['BEAM_WIDTH'])\n\n\n # print('Encoder Trainable Variables')\n self.encoder_variables = scope.trainable_variables()\n # print(self.encoder_variables)\n\n\n with tf.variable_scope(\"DynamicDecoder\") as scope:\n self.decoder_cell = modelInitRNNCells(self.decoder_hidden_size, config['DECODER_LAYERS'], config['CELL'], config['INPUT_DROPOUT'], config['OUTPUT_DROPOUT'])\n if config['ATTENTION_DECODER']:\n self.decoder_cell = modelInitAttentionDecoderCell(self.decoder_cell, self.decoder_hidden_size, self.encoder_outputs, self.encoder_inputs_length_att, att_type=config['ATTENTION_MECHANISE'], wrapper_type='whole')\n else:\n self.decoder_cell = modelInitRNNDecoderCell(self.decoder_cell)\n\n initial_state = None\n\n if config['USE_BS'] and not config['IS_TRAIN']:\n initial_state = self.decoder_cell.zero_state(batch_size=self.batch_size*config['BEAM_WIDTH'], dtype=tf.float32)\n if config['ATTENTION_DECODER']:\n cat_state = tuple([self.encoder_state] + list(initial_state.cell_state)[:-1])\n initial_state.clone(cell_state=cat_state)\n else:\n initial_state = tuple([self.encoder_state] + list(initial_state[:-1]))\n else:\n initial_state = self.decoder_cell.zero_state(batch_size=self.batch_size, dtype=tf.float32)\n\n if config['ATTENTION_DECODER']:\n cat_state = tuple([self.encoder_state] + list(initial_state.cell_state)[:-1])\n initial_state.clone(cell_state=cat_state)\n else:\n initial_state = tuple([self.encoder_state] + list(initial_state[:-1]))\n\n self.output_projection_layer = layers_core.Dense(self.output_size, use_bias=False)\n if config['IS_TRAIN']:\n self.train_outputs = modelInitDecoderForTrain(self.decoder_cell, self.decoder_inputs_embedded, self.decoder_inputs_length, initial_state, self.output_projection_layer)\n if config['USE_BS'] and not config['IS_TRAIN']:\n self.infer_outputs = modelInitDecoderForBSInfer(self.decoder_cell, self.decoder_inputs[0], self.output_word_embedding_matrix, config['BEAM_WIDTH'], config['ID_END'], config['MAX_OUT_LEN'], initial_state, self.output_projection_layer)\n else:\n self.infer_outputs = modelInitDecoderForGreedyInfer(self.decoder_cell, self.decoder_inputs[0], self.output_word_embedding_matrix, config['ID_END'], config['MAX_OUT_LEN'], initial_state, self.output_projection_layer)\n\n\n if config['IS_TRAIN']:\n self.train_loss = seq2seq.sequence_loss(logits=self.train_outputs, targets=tf.transpose(self.decoder_targets, perm=[1,0]), weights=self.decoder_targets_mask)\n if config['VAE_ENCODER'] and config['PRE_ENCODER']==None:\n self.train_loss += 0.01 * self.vae_loss\n if config['SAE_ENCODER'] and config['PRE_ENCODER']==None:\n self.train_loss += self.sae_loss\n self.rewards = tf.py_func(contentPenalty, [tf.transpose(self.encoder_inputs, perm=[1,0]), self.train_outputs, tf.constant(config['SRC_DICT'], dtype=tf.string), tf.constant(config['DST_DICT'], dtype=tf.string), tf.transpose(self.decoder_targets, perm=[1,0])], tf.float32)\n self.rewards.set_shape(self.train_outputs.get_shape())\n if config['RL_ENABLE']:\n self.train_loss_rl = tf.constant(0.0)\n else:\n self.train_loss_rl = tf.constant(0.0)\n self.rewards_bleu = tf.py_func(againstInputPenalty, [tf.transpose(self.encoder_inputs, perm=[1,0]), self.train_outputs, tf.constant(config['SRC_DICT'], 
dtype=tf.string), tf.constant(config['DST_DICT'], dtype=tf.string)], tf.float32)\n self.rewards_bleu.set_shape(self.train_outputs.get_shape())\n if config['BLEU_RL_ENABLE']:\n self.train_loss_rl_bleu = rlloss.sequence_loss_rl(logits=self.train_outputs, rewards=self.rewards_bleu, weights=self.decoder_targets_mask)\n else:\n self.train_loss_rl_bleu = tf.constant(0.0)\n self.eval_loss = seq2seq.sequence_loss(logits=self.train_outputs, targets=tf.transpose(self.decoder_targets, perm=[1,0]), weights=self.decoder_targets_mask)\n\n if config['TRAIN_ON_EACH_STEP']:\n self.final_loss = self.train_loss\n if config['RL_ENABLE']:\n self.final_loss = self.final_loss + self.train_loss_rl\n if config['BLEU_RL_ENABLE']:\n self.final_loss = self.final_loss + self.train_loss_rl_bleu\n else:\n self.final_loss = self.eval_loss\n\n # print('Decoder Trainable Variables')\n self.decoder_variables = scope.trainable_variables()\n # print(self.decoder_variables)\n\n\n\n print('All Trainable Variables:')\n if config['PRE_ENCODER']:\n self.all_trainable_variables = list(set(tf.trainable_variables()).difference(set(self.encoder_variables)).difference(set(self.input_embedding_variables)))\n else:\n self.all_trainable_variables = tf.trainable_variables()\n print(self.all_trainable_variables)\n if config['IS_TRAIN']:\n if config['SPLIT_LR']:\n self.train_op = updateBP(self.final_loss, [self.word_embedding_learning_rate, self.encoder_learning_rate, self.decoder_learning_rate], [self.embedding_variables, self.encoder_variables, self.decoder_variables], self.optimizer, norm=config['CLIP_NORM'])\n else:\n self.train_op = updateBP(self.final_loss, [self.learning_rate], [self.all_trainable_variables], self.optimizer, norm=config['CLIP_NORM'])\n self.saver = initSaver(tf.global_variables(), config['MAX_TO_KEEP'])\n self.encoder_saver = initSaver(self.encoder_variables+self.input_embedding_variables, config['MAX_TO_KEEP'])\n\n\n def make_feed(self, encoder_inputs, encoder_inputs_length, encoder_inputs_mask, decoder_inputs, decoder_inputs_length, decoder_inputs_mask, decoder_targets, decoder_targets_length, decoder_targets_mask):\n return {\n self.encoder_inputs: encoder_inputs,\n self.encoder_inputs_length: encoder_inputs_length,\n self.encoder_inputs_mask: encoder_inputs_mask,\n self.decoder_inputs: decoder_inputs,\n self.decoder_inputs_length: decoder_inputs_length,\n self.decoder_inputs_mask: decoder_inputs_mask,\n self.decoder_targets: decoder_targets,\n self.decoder_targets_length: decoder_targets_length,\n self.decoder_targets_mask: decoder_targets_mask,\n }\n def train_on_batch(self, session, encoder_inputs, encoder_inputs_length, encoder_inputs_mask, decoder_inputs, decoder_inputs_length, decoder_inputs_mask, decoder_targets, decoder_targets_length, decoder_targets_mask):\n train_feed = self.make_feed(encoder_inputs, encoder_inputs_length, encoder_inputs_mask, decoder_inputs, decoder_inputs_length, decoder_inputs_mask, decoder_targets, decoder_targets_length, decoder_targets_mask)\n if self.rl_enable:\n if self.bleu_enable:\n [_, ce_loss, rl_loss, bleu_loss] = session.run([self.train_op, self.train_loss, self.train_loss_rl, self.train_loss_rl_bleu], train_feed)\n return [ce_loss, rl_loss, bleu_loss]\n else:\n [_, ce_loss, rl_loss] = session.run([self.train_op, self.train_loss, self.train_loss_rl], train_feed)\n return [ce_loss, rl_loss]\n else:\n [_, loss] = session.run([self.train_op, self.final_loss], train_feed)\n return loss\n\n def eval_on_batch(self, session, encoder_inputs, encoder_inputs_length, 
encoder_inputs_mask, decoder_inputs, decoder_inputs_length, decoder_inputs_mask, decoder_targets, decoder_targets_length, decoder_targets_mask):\n infer_feed = self.make_feed(encoder_inputs, encoder_inputs_length, encoder_inputs_mask, decoder_inputs, decoder_inputs_length, decoder_inputs_mask, decoder_targets, decoder_targets_length, decoder_targets_mask)\n [loss, outputs] = session.run([self.eval_loss, self.infer_outputs], infer_feed)\n return loss, outputs\n def test_on_batch(self, session, encoder_inputs, encoder_inputs_length, encoder_inputs_mask, decoder_inputs, decoder_inputs_length, decoder_inputs_mask, decoder_targets, decoder_targets_length, decoder_targets_mask):\n infer_feed = self.make_feed(encoder_inputs, encoder_inputs_length, encoder_inputs_mask, decoder_inputs, decoder_inputs_length, decoder_inputs_mask, decoder_targets, decoder_targets_length, decoder_targets_mask)\n [outputs] = session.run([self.infer_outputs], infer_feed)\n return outputs\n\ndef instanceOfInitModel(sess, config):\n ret = Seq2SeqModel(config)\n sess.run(tf.global_variables_initializer())\n print('Model Initialized.')\n return ret\n","sub_path":"seq2seq_model.py","file_name":"seq2seq_model.py","file_ext":"py","file_size_in_byte":16008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"572478091","text":"import argparse\n\nfrom pydataweaver.engines import engine_list\nfrom pydataweaver.lib.defaults import VERSION\n\n# Create the parser\nparser = argparse.ArgumentParser(prog=\"pydataweaver\")\n\n# Add first level arguments\nparser.add_argument(\"-v\", \"--version\", action=\"version\", version=VERSION)\nparser.add_argument(\"-q\",\n \"--quiet\",\n help=\"suppress command-line output\",\n action=\"store_true\")\n\n# ..............................................................\n# subparsers\n# ..............................................................\n\nsubparsers = parser.add_subparsers(help=\"sub-command help\", dest=\"command\")\nhelp_parser = subparsers.add_parser(\"help\", help=\"\")\n\nls_parser = subparsers.add_parser(\"ls\", help=\"display a list all available datasets\")\ncitation_parser = subparsers.add_parser(\"citation\", help=\"view citation\")\nlicense_parser = subparsers.add_parser(\"license\", help=\"view dataset licenses\")\njoin_parser = subparsers.add_parser(\"join\",\n help=\"integrate data using a data package script\")\nupdate_parser = subparsers.add_parser(\n \"update\", help=\"download updated versions of data package scripts\")\nreset_parser = subparsers.add_parser(\"reset\", help=\"reset pydataweaver: deletes scripts\")\n\n# ..............................................................\n# subparsers with Arguments\n# ...............................................................\n\ncitation_parser.add_argument(\"dataset\",\n help=\"pydataweaver citation or dataset citation\",\n nargs=\"?\",\n default=None)\nlicense_parser.add_argument(\"dataset\",\n help=\"pydataweaver license or dataset licenses\",\n nargs=\"?\",\n default=None)\n\nls_parser.add_argument(\"-l\",\n help=\"search datasets with specific license(s)\",\n nargs=\"*\",\n default=False)\nls_parser.add_argument(\"-k\",\n help=\"search datasets with keyword(s)\",\n nargs=\"*\",\n default=False)\nls_parser.add_argument(\"-v\",\n help=\"verbose list of all datasets\",\n nargs=\"*\",\n default=False)\njoin_parser.add_argument(\"--debug\", help=\"run in debug mode\", action=\"store_true\")\njoin_subparsers = join_parser.add_subparsers(help=\"engine-specific help\", 
dest=\"engine\")\nreset_parser.add_argument(\"scope\", help=\"things to reset: scripts\", choices=[\"scripts\"])\n\nfor engine in engine_list:\n join_engine_parser = join_subparsers.add_parser(engine.abbreviation, help=engine.name)\n join_engine_parser.add_argument(\"dataset\", help=\"file name\")\n\n abbreviations = set(\"h\")\n for arg in engine.required_opts:\n arg_name, help_msg, default = arg[:3]\n potential_abbreviations = [char for char in arg_name if not char in abbreviations]\n if potential_abbreviations:\n abbreviation = potential_abbreviations[0]\n abbreviations.add(abbreviation)\n else:\n abbreviation = \"-%s\" % arg_name\n join_engine_parser.add_argument(\n \"--%s\" % arg_name,\n \"-%s\" % abbreviation,\n help=help_msg,\n nargs=\"?\",\n default=default,\n )\n","sub_path":"pydataweaver/lib/get_opts.py","file_name":"get_opts.py","file_ext":"py","file_size_in_byte":3384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"605100930","text":"import requests\nimport dryscrape\nfrom bs4 import BeautifulSoup\nimport json\nimport re\nimport sys\n\nif 'linux' in sys.platform:\n\tdryscrape.start_xvfb()\n\ndef __is_info(p):\n\tpattern = re.compile(r'(.*\\n)?(.+:.+\\n)+')\n\tr = pattern.match(p)\n\treturn True if r is not None else False\n\ndef __extract_info(p_all):\n\tresult = []\n\tfor p in p_all:\n\t\tif __is_info(p.text) is True:\n\t\t\tresult.append(p)\n\treturn result\n\ndef robot_vegetarianStore(timeStamp = '2016-03-01'):\n\tWEBSITE = 'http://clnote.tw/'\n\tresults = list()\n\treq = requests.get(WEBSITE)\n\tsoup = BeautifulSoup(req.text, 'html.parser')\n\tarticles = soup.find('div',{'id':'main', 'class':'site-main'}).find_all('article')\n\n\n\tAPI_KEY = ''\n\twith open('/home/ubuntu/documents/robot-vegetarian/geocoding_apikey.txt','r') as rfile:\n\t\tAPI_KEY = rfile.read().split('\\n')[0]\n\n\tfor idx, article in enumerate(articles):\n\t\ttitle = article.h1.text.encode('latin1').decode('utf8')\n\t\tpublishedTime = article.span.a.find('time',{'class':'entry-date published'}).text\n\t\tcontent_url = article.h1.a['href']\n\n\t\tif publishedTime <= timeStamp:\n\t\t\tcontinue\n\n\t\tprint (\"drive to article content \"+content_url)\n\t\tsession = dryscrape.Session(base_url = 'http://google.com')\n\t\tsession.visit(content_url)\n\t\tcontent = BeautifulSoup(session.body(), 'html.parser')\n\t\tp_all = content.find('div',{'class':'entry-content'}).find_all('p')\n\n\t\tinfos = __extract_info(p_all)\n\n\t\tinfo = infos[0] # Extract one for example if there are many infos\n\t\t# Extract info\n\t\tstoreName = info.strong.text\n\t\taddress = info.text.split('\\n')[1].split(':')[1][:-4]\n\t\topeningTime = info.text.split('\\n')[2].split(':')[1]\n\t\tphone = info.text.split('\\n')[3].split(':')[1]\n\t\twebsite = info.text.split('\\n')[4].split(':')[1]\n\n\n\t\tgeocoding_url = 'https://maps.googleapis.com/maps/api/geocode/json?address={0}&key={1}'.format(address,API_KEY)\n\t\tgeocode = requests.get(geocoding_url)\n\t\tjsonGeo = json.loads(geocode.text)\n\t\tlng = jsonGeo['results'][0]['geometry']['location']['lng']\n\t\tlat = jsonGeo['results'][0]['geometry']['location']['lat']\n\n\t\tprint (\"{0}\\n{1}\\n{2}\\n{3}\\n{4},{5}\\n\\n\".format(title,publishedTime,content_url,address,lng,lat))\n\t\tresults.append((title,storeName,publishedTime,info.text,lng,lat,content_url,website))\n\t\t\n\t\tif idx == 1:\n\t\t\tbreak\t\t\n\treturn results\n\n\nif __name__ == '__main__':\n\ttimeStamp = ''\n\twith open 
('/home/ubuntu/documents/robot-vegetarian/timeStamp.txt','r') as rfile:\n\t\ttimeStamp = rfile.read().split('\n')[0]\n\tprint (timeStamp)\n\tresults = robot_vegetarianStore()\n\tprint (results[0])\n\tif timeStamp < results[0][2]:\n\t\tprint ('Found new store')\n\n\n\n","sub_path":"robot.py","file_name":"robot.py","file_ext":"py","file_size_in_byte":2572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"11066747","text":"# -*- coding: UTF-8 -*- \r\n#!/usr/bin/env python\r\n# \r\n# Copyright 2010- Hui Zhang\r\n# E-mail: hui.zh012@gmail.com\r\n#\r\n# Distributed under the terms of the GPL (GNU Public License)\r\n#\r\n# you can redistribute it and/or modify it under the terms of the GNU \r\n# General Public License as published by the Free Software Foundation;\r\n# either version 2 of the License, or (at your option) any later version.\r\n#\r\n# This program is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU General Public License for more details.\r\n#\r\n# You should have received a copy of the GNU General Public License\r\n# along with this program; if not, write to the Free Software\r\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\r\n\r\nfrom datetime import date\r\nfrom zlib import decompress\r\nfrom xy.core import bytestr, utils\r\n\r\ndef d2n(d=None):\r\n if not d: d = date.today()\r\n return d.year*10000 + d.month*100 + d.day\r\n\r\ndef n2d(n):\r\n y, d = divmod(n, 10000)\r\n m, d = divmod(d, 100)\r\n return date(y, m, d)\r\n\r\ndef m2c(market):\r\n return ['sz', 'sh'].index(market.lower())\r\n\r\ndef c2m(mcode):\r\n return ['sz', 'sh'][mcode]\r\n\r\nResponseMagic = '\xb1\xcb\x74\x00'\r\n\r\nclass Cmds:\r\n #commands\r\n HeartBeat = '\x04\x00'\r\n StockCount = '\x4e\x04'\r\n StockList = '\x50\x04'\r\n FileInfo = '\xc5\x02'\r\n GetFile = '\xb9\x06'\r\n Tran_DD = '\xc5\x0f' ## per-trade detail? deal divided? /ugly english\r\n Tran_TD = '\x1d\x05' ## time-sliced detail? time divided\r\n KData = '\x2c\x05'\r\n Quote = '\x3e\x05'\r\n \r\n #magic numbers\r\n\r\n_protocol_reg = {}\r\n\r\ndef protocol(klass):\r\n _protocol_reg[klass.cmd] = klass\r\n return klass\r\n\r\ndef build_request(seqno, cmd, *args, **kwargs):\r\n return _protocol_reg[cmd].build_request(seqno, *args, **kwargs)\r\n\r\ndef parse_response(cmd, data):\r\n return _protocol_reg[cmd].parse_response(data)\r\n\r\ndef merge_requests(cmd, requests):\r\n return _protocol_reg[cmd]._merge_requests(requests)\r\n\r\ndef split_response(stream):\r\n resps = []\r\n while 1:\r\n bs = bytestr(stream)\r\n if len(stream) < 16: break # don't even have a complete header yet\r\n respmagic, compresstype, seqno, packtype, cmd, datalen, origdatalen = bs.read_struct(\"=4sBIB2sHH\")\r\n if respmagic != ResponseMagic:\r\n try:\r\n stream = stream[stream.index(ResponseMagic):]\r\n except:\r\n stream = ''\r\n continue\r\n \r\n bs.cut()\r\n if len(bs.buf) < datalen: break\r\n stream = bs.buf[datalen:]\r\n \r\n data = bs.read_s(datalen)\r\n if compresstype==0x1c:\r\n try:\r\n data = decompress(data)\r\n except:\r\n data = ''\r\n\r\n if origdatalen != len(data):\r\n data = ''\r\n \r\n resps.append((seqno, cmd, data)) \r\n return tuple(resps), stream\r\n \r\nclass BaseProtocol(object):\r\n #cmd = None #must be defined by subclass\r\n \r\n @classmethod\r\n def build_request(cls, seqno, *args, **kwargs):\r\n '''\r\n char zip; // always 0x0c: data-uncompressed\r\n uint seq_id; // seq_id for commands of the same type.\r\n char packet_type; // 00: response. 1,2,3... request count\r\n ushort len; // data length\r\n ushort len1; // data length, repeated\r\n #ushort cmd; // b4 bf: minute bars.. b5 bf: single deal\r\n '''\r\n bs = bytestr()\r\n bs.write_s(cls.cmd)\r\n cls._build_request(bs, *args, **kwargs)\r\n length = len(bs.buf)\r\n bs.pos = 0\r\n bs.write_struct(\"=BIBHH\", 0x0c, seqno, 1, length, length)\r\n return bs.buf\r\n \r\n @classmethod\r\n def parse_response(cls, resp):\r\n bs = bytestr(resp)\r\n return cls._parse_response(bs)\r\n \r\n @classmethod\r\n def _build_request(cls, bs, *args, **kwargs):\r\n raise NotImplementedError('request builder for %s' %utils.bth(cls.cmd))\r\n @classmethod\r\n def _parse_response(cls, bs):\r\n return bs.buf\r\n @classmethod\r\n def _merge_requests(cls, requests):\r\n return None\r\n\r\n@protocol\r\nclass HeartBeat(BaseProtocol):\r\n cmd = Cmds.HeartBeat\r\n @classmethod\r\n def _build_request(cls, bs):\r\n pass\r\n @classmethod\r\n def _merge_requests(cls, requests):\r\n # merge duplicate heartbeat packets into one\r\n return ((), {}), tuple([r[0] for r in requests])\r\n\r\n@protocol\r\nclass StockCount(BaseProtocol):\r\n cmd = Cmds.StockCount\r\n @classmethod\r\n def _build_request(cls, bs, market):\r\n bs.write_struct('=HI', m2c(market), d2n())\r\n @classmethod\r\n def _parse_response(cls, bs):\r\n return bs.read_struct('=H')\r\n\r\n@protocol\r\nclass StockList(BaseProtocol):\r\n cmd = Cmds.StockList\r\n @classmethod\r\n def _build_request(cls, bs, market, begin):\r\n bs.write_struct('=HH', m2c(market), begin)\r\n @classmethod\r\n def _parse_response(cls, bs):\r\n count = bs.read_struct('=H')\r\n stocks = []\r\n for i in range(count):\r\n code, unit, name, unknown, dim, price, basemap, gbbqmap = bs.read_struct('=6sH8sIBfHH')\r\n name = name.strip('\x00').decode('gbk')\r\n stocks.append((code, (name, price, basemap, gbbqmap)))\r\n return stocks\r\n\r\n@protocol\r\nclass FileInfo(BaseProtocol):\r\n cmd = Cmds.FileInfo\r\n @classmethod\r\n def _build_request(cls, bs, filename):\r\n bs.write_s(filename)\r\n 
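# pad the filename out to the server's fixed-width field (40 bytes here, judging from the padding below)\r\n 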
bs.write_s('\\x00' * (40-len(filename)))\r\n @classmethod\r\n def _parse_response(cls, bs):\r\n filelen, packtype = bs.read_struct('=IB')\r\n hexmd5 = bs.read_s()[:-1]\r\n return (filelen, hexmd5)\r\n \r\n@protocol\r\nclass FileDown(BaseProtocol):\r\n cmd = Cmds.GetFile\r\n @classmethod\r\n def _build_request(cls, bs, filename, begin, size):\r\n bs.write_struct('=II', begin, size)\r\n bs.write_s(filename)\r\n bs.write_s('\\x00' * (100-len(filename)))\r\n @classmethod\r\n def _parse_response(cls, bs):\r\n length = bs.read_struct('=I')\r\n data = bs.read_s()\r\n if len(data) == length:\r\n return data\r\n\r\n@protocol\r\nclass Quote(BaseProtocol):\r\n cmd = Cmds.Quote\r\n @classmethod\r\n def _build_request(cls, bs, *tickers):\r\n bs.write_s('\\x05\\x00\\x00\\x00\\x00\\x00\\x00\\x00')\r\n bs.write_struct('=H', len(tickers))\r\n for market, code in tickers:\r\n bs.write_struct('=B6s', market, code)\r\n @classmethod\r\n def _parse_response(cls, bs):\r\n magic = bs.read_s(2)\r\n # if magic != '\\xb1\\xcb':\r\n # print 'strange: response bs magic is %s, instead of \"B1 CB\"' % ' '.join(map(lambda x:hex(ord(x)), magic))\r\n records = []\r\n num = bs.read_struct('=H')\r\n unknowns = []\r\n for i in range(num):\r\n market, code, liveness = bs.read_struct('=B6sH')\r\n market = 'sh' if market==1 else 'sz'\r\n price = bs.read_varint()\r\n c, o, h, l = map(lambda v: v+price, bs.read_varint(4))\r\n unknowns.extend(bs.read_varint(2))\r\n vol, lastbid_vol = bs.read_varint(2)\r\n turnover = bs.read_struct('=f')\r\n inner_vol, outer_vol = bs.read_varint(2)\r\n unknowns.extend(bs.read_varint(2))\r\n bid5 = bs.read_varint(20)\r\n buy5 = [(bid5[i*4], bid5[i*4+2]) for i in range(5)]\r\n sell5 = [(bid5[i*4+1], bid5[i*4+3]) for i in range(5)]\r\n unknowns.extend(bs.read_struct('=HB'))\r\n unknowns.extend(bs.read_varint(3))\r\n unknowns.extend(bs.read_struct('=HH'))\r\n \r\n # data = dictattr()\r\n # data.market = market\r\n # data.code = code\r\n # data.liveness = liveness\r\n # data.prices = (c, o, h, l, price)\r\n # data.volums = (vol, lastbid_vol, inner_vol, outer_vol)\r\n # data.turnover = turnover\r\n # data.bid5 = (buy5, sell5)\r\n # data.unknowns = unknowns\r\n # records.append(data)\r\n records.append((code, market, price, turnover, vol,\r\n (c, o, h, l), \r\n (lastbid_vol, inner_vol, outer_vol),\r\n buy5, sell5,\r\n ))\r\n return tuple(records)\r\n","sub_path":"xyproj/src/xyearn/tdx/hqproto.py","file_name":"hqproto.py","file_ext":"py","file_size_in_byte":8481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"249757113","text":"__author__ = 'Rebekah Orth'\n\n# CIS 125 Fall 2015\n# Temperature Table\n#\n# Outputs a table of Celsius and Fahrenheit temperatures in 10 degree increments.\n\ndef main():\n for C in range(0, 101, 10):\n F= 9/5 * C + 32\n print(C, F)\n \nmain()\n","sub_path":"Temperature Table.py","file_name":"Temperature Table.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"371594875","text":"\"\"\"\n 217 - Contains Duplicate\n @author oneshan\n @version 1.0 2/5/2017\n\"\"\"\n\n\nclass Solution(object):\n def containsDuplicate(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: bool\n \"\"\"\n seen = {}\n for n in nums:\n if n in seen:\n return True\n seen[n] = None\n\n return 
False\n","sub_path":"leetcode/217_containsDuplicate.py","file_name":"217_containsDuplicate.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"47319961","text":"from faeria_puzzle.settings import CARD_DIR\nfrom maker.scripts import github\nfrom maker.scripts import utils\n\n\ndef main(need_cards, languages):\n\tdownload_database()\n\tif need_cards is True:\n\t\tdownload_cards(languages)\n\n\ndef download_database():\n\turl = \"https://raw.githubusercontent.com/abrakam/Faeria_Cards/master/CardExport/merlin_shortened.csv\"\n\tpath = \"cardbase.csv\"\n\tutils.download_file(url, path)\n\tprint(\"Updated database\")\n\n\ndef download_cards(languages):\n\towner = \"abrakam\"\n\trepo = \"Faeria_Cards\"\n\tfolder_path = \"CardExport\"\n\tfor language in languages:\n\t\tfolder_name = language\n\t\tgithub.download_folder(owner, repo, folder_path, folder_name, path=CARD_DIR)\n\t\tprint(\"Folder '{}' finished downloading\".format(language))\n","sub_path":"clilib/update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"144625495","text":"'''\nMARS Blender Tools - a Blender Add-On to work with MARS robot models\n\nFile mtexport.py\n\nCreated on 13 Feb 2014\n\n@author: Kai von Szadkowski\n\nCopy this add-on to your Blender add-on folder and activate it\nin your preferences to gain instant (virtual) world domination.\nYou may use the provided install shell script.\n\nNOTE: If you edit this script, please make sure not to use any imports\nnot supported by Blender's standard Python distribution. This is a script\nintended to be usable on its own and thus should not use external dependencies,\nespecially none of the other modules of the MARStools package.\n'''\n\nimport bpy\nimport mathutils\nimport os\nfrom datetime import datetime\nimport yaml\nimport struct\nfrom bpy.types import Operator\nfrom bpy.props import StringProperty, BoolProperty, IntProperty\nfrom marstools.mtutility import *\nimport marstools.mtdefs as mtdefs\nimport marstools.mtmarssceneexport as mtmse\nimport marstools.mtinertia as mtinertia\nimport marstools.mtrobotdictionary as mtrobotdictionary\n\ndef register():\n print(\"Registering mtexport...\")\n\ndef unregister():\n print(\"Unregistering mtexport...\")\n\nindent = ' '\nurdfHeader = '\\n'\nurdfFooter = indent+'\\n'\n\ndef exportBobj(path, obj):\n bpy.ops.object.select_all(action='DESELECT')\n obj.select = True#\n bpy.context.scene.objects.active = obj\n #TODO: make this exception-handled\n totverts = totuvco = totno = 1\n\n globalNormals = {}\n\n # ignore dupli children\n if obj.parent and obj.parent.dupli_type in {'VERTS', 'FACES'}:\n # XXX\n print(obj.name, 'is a dupli child - ignoring')\n return\n\n mesh = obj.to_mesh(bpy.context.scene, True, 'PREVIEW')\n #mesh.transform(obj.matrix_world)\n\n faceuv = len(mesh.uv_textures)\n if faceuv:\n uv_layer = mesh.uv_textures.active.data[:]\n\n if bpy.app.version[0] * 100 + bpy.app.version[1] >= 265:\n face_index_pairs = [(face, index) for index, face in enumerate(mesh.tessfaces)]\n else:\n face_index_pairs = [(face, index) for index, face in enumerate(mesh.faces)]\n\n mesh.calc_normals()\n\n me_verts = mesh.vertices[:]\n\n out = open(os.path.join(path, obj.name) + '.bobj', \"wb\")\n\n for v in mesh.vertices:\n out.write(struct.pack('ifff', 1, v.co[0], v.co[1], v.co[2]))\n\n if faceuv:\n uv = uvkey = uv_dict = f_index = 
uv_index = None\n\n uv_face_mapping = [[0, 0, 0, 0]] * len(face_index_pairs) # a bit of a waste for tri's :/\n\n uv_dict = {} # could use a set() here\n if bpy.app.version[1] >= 65:\n uv_layer = mesh.tessface_uv_textures.active.data[:]\n else:\n uv_layer = mesh.uv_textures.active.data\n for f, f_index in face_index_pairs:\n for uv_index, uv in enumerate(uv_layer[f_index].uv):\n uvkey = round(uv[0], 6), round(uv[1], 6)\n try:\n uv_face_mapping[f_index][uv_index] = uv_dict[uvkey]\n except:\n uv_face_mapping[f_index][uv_index] = uv_dict[uvkey] = len(uv_dict)\n out.write(struct.pack('iff', 2, uv[0], uv[1]))\n\n del uv, uvkey, uv_dict, f_index, uv_index\n\n for f, f_index in face_index_pairs:\n if f.use_smooth:\n for v_idx in f.vertices:\n v = me_verts[v_idx]\n noKey = roundVector(v.normal, 6)\n if noKey not in globalNormals:\n globalNormals[noKey] = totno\n totno += 1\n out.write(struct.pack('ifff', 3, noKey[0], noKey[1], noKey[2]))\n else:\n # Hard, 1 normal from the face.\n noKey = roundVector(f.normal, 6)\n if noKey not in globalNormals:\n globalNormals[noKey] = totno\n totno += 1\n out.write(struct.pack('ifff', 3, noKey[0], noKey[1], noKey[2]))\n\n for f, f_index in face_index_pairs:\n f_smooth = f.use_smooth\n # write smooth info for face?\n\n f_v_orig = [(vi, me_verts[v_idx]) for vi, v_idx in enumerate(f.vertices)]\n\n if len(f_v_orig) == 3:\n f_v_iter = (f_v_orig, )\n else:\n f_v_iter = (f_v_orig[0], f_v_orig[1], f_v_orig[2]), (f_v_orig[0], f_v_orig[2], f_v_orig[3])\n\n for f_v in f_v_iter:\n da = struct.pack('i', 4)\n out.write(da)\n\n if faceuv:\n if f_smooth: # Smoothed, use vertex normals\n for vi, v in f_v:\n out.write(struct.pack('iii', v.index + totverts, totuvco + uv_face_mapping[f_index][vi], globalNormals[roundVector(v.normal, 6)]))\n else: # No smoothing, face normals\n no = globalNormals[roundVector(f.normal, 6)]\n for vi, v in f_v:\n out.write(struct.pack('iii', v.index + totverts, totuvco + uv_face_mapping[f_index][vi], no))\n else: # No UV's\n if f_smooth: # Smoothed, use vertex normals\n for vi, v in f_v:\n out.write(struct.pack('iii', v.index + totverts, 0, globalNormals[roundVector(v.normal, 6)]))\n else: # No smoothing, face normals\n no = globalNormals[roundVector(f.normal, 6)]\n for vi, v in f_v:\n out.write(struct.pack('iii', v.index + totverts, 0, no))\n out.close()\n\ndef exportObj(path, obj):\n objname = obj.name\n obj.name = 'tmp_export_666' #surely no one will ever name an object like so\n tmpobject = createPrimitive(objname, 'box', (2.0, 2.0, 2.0))\n tmpobject.data = obj.data #copy the mesh here\n outpath = os.path.join(path, objname) + '.obj'\n bpy.ops.export_scene.obj(filepath=outpath, use_selection=True, use_normals=True)\n bpy.ops.object.select_all(action='DESELECT')\n tmpobject.select = True\n bpy.ops.object.delete()\n obj.name = objname\n\n #This is the old implementation which did not work properly (08.08.2014)\n #bpy.ops.object.select_all(action='DESELECT')\n #obj.select = True\n #outpath = os.path.join(path, obj.name) + '.obj'\n #world_matrix = obj.matrix_world.copy()\n ##inverse_local_rotation = obj.matrix_local.to_euler().to_matrix().inverted()\n ##world_scale = world_matrix.to_scale() TODO: implement scale\n ## we move the object to the world origin and revert its local rotation\n ##print(inverse_local_rotation, mathutils.Matrix.Translation((0, 0, 0)))\n ##obj.matrix_world = inverse_local_rotation.to_4x4() * mathutils.Matrix.Identity(4)\n #obj.matrix_world = mathutils.Matrix.Identity(4)\n #bpy.ops.export_scene.obj(filepath=outpath, 
axis_forward='-Z',\n # axis_up='Y', use_selection=True, use_normals=True)\n #obj.matrix_world = world_matrix\n\ndef exportModelToYAML(model, filepath):\n print(\"MARStools YAML export: Writing model data to\", filepath )\n with open(filepath, 'w') as outputfile:\n outputfile.write('#YAML dump of robot model \"'+model['modelname']+'\", '+datetime.now().strftime(\"%Y%m%d_%H:%M\")+\"\\n\\n\")\n outputfile.write(yaml.dump(model))#, default_flow_style=False)) #last parameter prevents inline formatting for lists and dictionaries\n\ndef xmlline(ind, tag, names, values):\n line = []\n line.append(indent*ind+'<'+tag)\n for i in range(len(names)):\n line.append(' '+names[i]+'=\"'+str(values[i])+'\"')\n line.append('/>\\n')\n return ''.join(line)\n\ndef l2str(items, start=-1, end=-1):\n line = []\n i = start if start >= 0 else 0\n maxi = end if end >= 0 else len(items)\n while i < maxi:\n line.append(str(items[i])+' ')\n i += 1\n return ''.join(line)[0:-1]\n\ndef writeURDFGeometry(output, element):\n output.append(indent*4+'\\n')\n if element['geometryType'] == 'box':\n output.append(xmlline(5, 'box', ['size'], [l2str(element['size'])]))\n elif element['geometryType'] == \"cylinder\":\n output.append(xmlline(5, 'cylinder', ['radius', 'length'], [element['radius'], element['height']]))\n elif element['geometryType'] == \"sphere\":\n output.append(xmlline(5, 'sphere', ['radius'], [element['radius']]))\n elif element['geometryType'] in ['capsule', 'mesh']: #capsules are not supported in URDF and are emulated using meshes\n output.append(xmlline(5, 'mesh', ['filename', 'scale'], [element['filename'], '1.0 1.0 1.0']))#TODO correct this after implementing scale properly\n output.append(indent*4+'\\n')\n\ndef exportModelToURDF(model, filepath):\n output = []\n output.append(urdfHeader)\n output.append(indent+'\\n\\n')\n #export link information\n for l in model['links'].keys():\n link = model['links'][l]\n output.append(indent*2+'\\n')\n if link['inertial'] != {} and 'mass' in link['inertial'] and 'inertia' in link['inertial']:\n output.append(indent*3+'\\n')\n if 'pose' in link['inertial']:\n output.append(xmlline(4, 'origin', ['xyz', 'rpy'], [l2str(link['inertial']['pose']['translation']), l2str(link['inertial']['pose']['rotation_euler'])]))\n output.append(xmlline(4, 'mass', ['value'], [str(link['inertial']['mass'])]))\n output.append(xmlline(4, 'inertia', ['ixx', 'ixy', 'ixz', 'iyy', 'iyz', 'izz'], ' '.join([str(i) for i in link['inertial']['inertia']])))\n output.append(indent*3+'\\n')\n #visual object\n if link['visual']:\n for v in link['visual']:\n vis = link['visual'][v]\n output.append(indent*3+'\\n')\n output.append(xmlline(4, 'origin', ['xyz', 'rpy'], [l2str(vis['pose']['translation']), l2str(vis['pose']['rotation_euler'])]))\n writeURDFGeometry(output, vis['geometry'])\n if 'material' in vis:\n if model['materials'][vis['material']]['users'] == 0: #FIXME: change back to 1 when implemented in urdfloader\n mat = model['materials'][vis['material']]\n output.append(indent*4+'\\n')\n color = mat['diffuseFront']\n output.append(indent*5+'\\n')\n if 'texturename' in mat:\n output.append(indent*5+'\\n')\n output.append(indent*4+'\\n')\n else:\n output.append(indent*4+'\\n')\n output.append(indent*3+'\\n')\n #collision object\n if link['collision']:\n for c in link['collision']:\n col = link['collision'][c]\n output.append(indent*3+'\\n')\n output.append(xmlline(4, 'origin', ['xyz', 'rpy'], [l2str(col['pose']['translation']), l2str(col['pose']['rotation_euler'])]))\n writeURDFGeometry(output, 
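Editor's note: xmlline() builds a self-closing XML tag by indexing names and values positionally, e.g. xmlline(2, 'origin', ['xyz', 'rpy'], ['0 0 0', '0 0 0']) yields '<origin xyz="0 0 0" rpy="0 0 0"/>' at the chosen indent. That makes the inertia call in exportModelToURDF above suspect: it passes a joined string as values, so values[i] picks out single characters. The intended call presumably passes a list (the inertia values below are hypothetical):

inertia = [0.1, 0.0, 0.0, 0.1, 0.0, 0.2]
line = xmlline(4, 'inertia',
               ['ixx', 'ixy', 'ixz', 'iyy', 'iyz', 'izz'],
               [str(i) for i in inertia])
# -> '<inertia ixx="0.1" ixy="0.0" ixz="0.0" iyy="0.1" iyz="0.0" izz="0.2"/>'
#    (prefixed by indent*4, terminated by a newline)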
col['geometry'])\n output.append(indent*3+'\\n')\n output.append(indent*2+'\\n\\n')\n #export joint information\n for j in model['joints']:\n joint = model['joints'][j]\n output.append(indent*2+'\\n')\n child = model['links'][joint[\"child\"]]\n output.append(xmlline(3, 'origin', ['xyz', 'rpy'], [l2str(child['pose']['translation']), l2str(child['pose']['rotation_euler'])]))\n output.append(indent*3+'\\n')\n output.append(indent*3+'\\n')\n if 'axis' in joint:\n output.append(indent*3+'\\n')\n if 'limits' in joint:\n output.append(xmlline(3, 'limit', ['lower', 'upper', 'velocity', 'effort'], [str(joint['limits'][0]), str(joint['limits'][1]), joint['maxvelocity'], joint['maxeffort']]))\n output.append(indent*2+'\\n\\n')\n #export material information\n for m in model['materials']:\n if model['materials'][m]['users'] > 0: #FIXME: change back to 1 when implemented in urdfloader\n output.append(indent*2+'\\n')\n color = model['materials'][m]['diffuseFront']\n transparency = model['materials'][m]['transparency'] if 'transparency' in model['materials'][m] else 0.0\n output.append(indent*3+'\\n')\n if 'texturename' in model['materials'][m]:\n output.append(indent*3+'\\n')\n output.append(indent*2+'\\n\\n')\n #finish the export\n output.append(urdfFooter)\n with open(filepath, 'w') as outputfile:\n outputfile.write(''.join(output))\n # problem of different joint transformations needed for fixed joints\n print(\"MARStools URDF export: Writing model data to\", filepath )\n\ndef exportModelToSMURF(model, path):\n export = {'semantics': model['groups'] != {} or model['chains'] != {},\n 'state': False,#model['state'] != {}, #TODO: handle state\n 'materials': model['materials'] != {},\n 'sensors': model['sensors'] != {},\n 'motors': model['motors'] != {},\n 'controllers': model['controllers'] != {},\n 'simulation': True#model['simulation'] != {} #TODO: make this a nice test\n }\n\n\n #create all filenames\n smurf_filename = model['modelname'] + \".smurf\"\n urdf_filename = model['modelname'] + \".urdf\"\n filenames = {'semantics': model['modelname'] + \"_semantics.yml\",\n 'state': model['modelname'] + \"_state.yml\",\n 'materials': model['modelname'] + \"_materials.yml\",\n 'sensors': model['modelname'] + \"_sensors.yml\",\n 'motors': model['modelname'] + \"_motors.yml\",\n 'controllers': model['modelname'] + \"_controllers.yml\",\n 'simulation': model['modelname'] + \"_simulation.yml\"\n }\n\n infostring = ' definition SMURF file for \"'+model['modelname']+'\", '+model[\"date\"]+\"\\n\\n\"\n\n #write model information\n print('Writing SMURF information to', smurf_filename)\n modeldata = {}\n modeldata[\"date\"] = model[\"date\"]\n modeldata[\"files\"] = [urdf_filename] + [filenames[f] for f in filenames if export[f]]\n with open(path + smurf_filename, 'w') as op:\n op.write('#main SMURF file of model \"'+model['modelname']+'\"\\n\\n')\n op.write(\"modelname: \"+model['modelname']+\"\\n\")\n op.write(yaml.dump(modeldata, default_flow_style=False))\n\n #write urdf\n exportModelToURDF(model, path + urdf_filename)\n\n #write semantics (SRDF information in YML format)\n if export['semantics']:\n with open(path + filenames['semantics'], 'w') as op:\n op.write('#semantics'+infostring)\n op.write(\"modelname: \"+model['modelname']+'\\n')\n semantics = {}\n if model['groups'] != {}:\n semantics['groups'] = model['groups']\n if model['chains'] != {}:\n semantics['chains'] = model['chains']\n op.write(yaml.dump(semantics, default_flow_style=False))\n\n #write state (state information of all joints, sensor & motor 
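Editor's note: exportModelToSMURF() writes the master .smurf file as '#' comment lines plus plain YAML ('modelname', 'date', 'files'). A sketch of reading it back, assuming PyYAML and those key names (both inferred from the writer above):

import yaml

def load_smurf(path):
    # '#' lines are YAML comments, so safe_load() sees one mapping document.
    with open(path) as f:
        data = yaml.safe_load(f)
    return data['modelname'], data.get('files', [])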
activity etc.) #TODO: implement everything but joints\n if export['state']:\n states = []\n #gather all states\n for jointname in model['joints']:\n joint = model['joints'][jointname]\n if 'state' in joint: #this should always be the case, but testing doesn't hurt\n tmpstate = joint['state'].copy()\n tmpstate['name'] = jointname\n states.append(joint['state'])\n with open(path + filenames['state'], 'w') as op:\n op.write('#state'+infostring)\n op.write(\"modelname: \"+model['modelname']+'\\n')\n op.write(yaml.dump(states))#, default_flow_style=False))\n\n #write materials, sensors, motors & controllers\n for data in ['materials', 'sensors', 'motors', 'controllers']:\n if export[data]:\n with open(path + filenames[data], 'w') as op:\n op.write('#' + data +infostring)\n op.write(yaml.dump({data: list(model[data].values())}, default_flow_style=False))\n\n #write simulation\n if export['simulation']:\n nodes = {'visual': {}, 'collision': {}}\n for link in model['links']:\n for objtype in ['visual', 'collision']:\n for objname in model['links'][link][objtype]:\n props = model['links'][link][objtype][objname]\n #for prop in ['name']: #TODO: filter these properties and purge redundant ones\n # del(props[prop])\n nodes[objtype][objname] = props\n with open(path + filenames['simulation'], 'w') as op:\n op.write('#simulation'+infostring)\n if model['simulation'] != {}:\n op.write(\"modelname: \"+model['modelname']+'\\n')\n #TODO: handle simulation-specific data\n op.write(yaml.dump(list(model['simulation'].values()), default_flow_style=False))\n op.write(\"\\nvisual:\\n\")\n op.write(yaml.dump(list(nodes['visual'].values())))\n op.write(\"\\ncollision:\\n\")\n op.write(yaml.dump(list(nodes['collision'].values())))\n\ndef exportSceneToSMURF(path):\n \"\"\"Exports all robots in a scene to separate SMURF folders.\"\"\"\n pass\n\ndef exportModelToMARS(model, path):\n \"\"\"Exports selected robot as a MARS scene\"\"\"\n mtmse.exportModelToMARS(model, path)\n\ndef securepath(path): #TODO: this is totally not error-handled!\n if not os.path.exists(path):\n os.makedirs(path)\n return os.path.expanduser(path)\n\nclass ExportModelOperator(Operator):\n \"\"\"ExportModelOperator\"\"\"\n bl_idname = \"object.mt_export_robot\"\n bl_label = \"Export the selected model(s)\"\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n export()\n return {'FINISHED'}\n\ndef export():\n #TODO: check if all selected objects are on visible layers (option bpy.ops.object.select_all()?)\n if bpy.data.worlds[0].relativePath:\n outpath = securepath(os.path.expanduser(os.path.join(bpy.path.abspath(\"//\"), bpy.data.worlds[0].path)))\n else:\n outpath = securepath(os.path.expanduser(bpy.data.worlds[0].path))\n yaml = bpy.data.worlds[0].exportYAML\n urdf = bpy.data.worlds[0].exportURDF\n smurf = bpy.data.worlds[0].exportSMURF\n mars = bpy.data.worlds[0].exportMARSscene\n meshexp = bpy.data.worlds[0].exportMesh\n objexp = bpy.data.worlds[0].useObj\n bobjexp = bpy.data.worlds[0].useBobj\n objectlist = bpy.context.selected_objects\n\n if yaml or urdf or smurf or mars:\n robot = mtrobotdictionary.buildRobotDictionary()\n if yaml:\n exportModelToYAML(robot, outpath + robot[\"modelname\"] + \"_dict.yml\")\n if mars:\n exportModelToMARS(robot, outpath + robot[\"modelname\"] + \"_mars.scene\")\n if smurf:\n exportModelToSMURF(robot, outpath)\n elif urdf:\n exportModelToURDF(robot, outpath + robot[\"modelname\"] + \".urdf\")\n selectObjects(objectlist, True)\n if meshexp:\n show_progress = bpy.app.version[0] * 100 + 
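Editor's note: securepath() above checks and creates the raw path but returns the user-expanded one, so a path beginning with '~' would create a literal '~' directory. A hedged fix, expanding first and letting makedirs tolerate existing directories:

import os

def securepath(path):
    path = os.path.expanduser(path)   # expand before touching the filesystem
    os.makedirs(path, exist_ok=True)  # also avoids the check-then-create race
    return path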
bpy.app.version[1] >= 269;\n if show_progress:\n wm = bpy.context.window_manager\n total = float(len(objectlist))\n wm.progress_begin(0, total)\n i = 1\n for obj in bpy.context.selected_objects:\n if ((obj.MARStype == 'visual' or\n obj.MARStype == 'collision') and obj['geometryType'] == 'mesh'):\n if objexp:\n exportObj(outpath, obj)\n if bobjexp:\n exportBobj(outpath, obj)\n if show_progress:\n wm.progress_update(i)\n i += 1\n if show_progress:\n wm.progress_end()\n\n","sub_path":"scripts/blender/marstools/mtexport.py","file_name":"mtexport.py","file_ext":"py","file_size_in_byte":20268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"649442658","text":"\"\"\"Low-level MediaFire API Client\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport hashlib\nimport requests\nimport logging\n\nimport six\n\nfrom six.moves.urllib.parse import urlencode\n\nfrom requests_toolbelt import MultipartEncoder\n\nfrom requests.adapters import HTTPAdapter\nfrom requests.exceptions import RequestException\n\nAPI_BASE = 'https://www.mediafire.com'\nAPI_VER = '1.3'\n\nUPLOAD_MIMETYPE = 'application/octet-stream'\nFORM_MIMETYPE = 'application/x-www-form-urlencoded'\n\n# Retries on connection errors/timeouts\nAPI_ERROR_MAX_RETRIES = 5\n\nlogger = logging.getLogger(__name__)\n\n# Each API call may have lots of parameters, so disable warning\n# pylint: disable=too-many-arguments\n\n\nclass QueryParams(dict):\n \"\"\"dict tailored for MediaFire requests.\n\n * won't store None values\n * boolean values are converted to 'yes'/'no'\n \"\"\"\n\n def __init__(self, defaults=None):\n super(QueryParams, self).__init__()\n if defaults is not None:\n for key, value in defaults.items():\n self.__setitem__(key, value)\n\n def __setitem__(self, key, value):\n \"\"\"Set dict item, handling booleans\"\"\"\n if value is not None:\n if value is True:\n value = 'yes'\n elif value is False:\n value = 'no'\n dict.__setitem__(self, key, value)\n\n\nclass MediaFireError(Exception):\n \"\"\"Base class for MediaFire-related errors\"\"\"\n pass\n\n\nclass MediaFireApiError(MediaFireError):\n \"\"\"Base class for API errors\"\"\"\n def __init__(self, message, code=None):\n \"\"\"Initialize exception\"\"\"\n self.code = code\n self.message = message\n super(MediaFireApiError, self).__init__(message, code)\n\n def __str__(self):\n \"\"\"Stringify exception\"\"\"\n return \"{}: {}\".format(self.code, self.message)\n\n\nclass MediaFireConnectionError(MediaFireError):\n \"\"\"Low level connection errors\"\"\"\n pass\n\n\nclass MediaFireApi(object): # pylint: disable=too-many-public-methods\n \"\"\"Low-level HTTP API Client\"\"\"\n\n def __init__(self):\n \"\"\"Initialize MediaFire Client\"\"\"\n\n self.http = requests.Session()\n self.http.mount('https://',\n HTTPAdapter(max_retries=API_ERROR_MAX_RETRIES))\n\n self._session = None\n self._action_tokens = {}\n\n @staticmethod\n def _build_uri(action):\n \"\"\"Build endpoint URI from action\"\"\"\n return '/api/' + API_VER + '/' + action + '.php'\n\n def _build_query(self, uri, params=None, action_token_type=None):\n \"\"\"Prepare query string\"\"\"\n\n if params is None:\n params = QueryParams()\n\n params['response_format'] = 'json'\n\n session_token = None\n\n if action_token_type in self._action_tokens:\n # Favor action token\n using_action_token = True\n session_token = self._action_tokens[action_token_type]\n else:\n using_action_token = False\n if self._session:\n session_token = self._session['session_token']\n\n if 
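Editor's note: a quick illustration of the QueryParams behaviour defined above; booleans are serialized as 'yes'/'no' and None-valued keys are dropped entirely, which is what keeps optional API parameters out of the query string:

params = QueryParams({'recursive': True, 'details': None, 'chunk': 3})
assert params == {'recursive': 'yes', 'chunk': 3}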
session_token:\n params['session_token'] = session_token\n\n # make order of parameters predictable for testing\n keys = list(params.keys())\n keys.sort()\n\n query = urlencode([tuple([key, params[key]]) for key in keys])\n\n if not using_action_token and self._session:\n secret_key_mod = int(self._session['secret_key']) % 256\n\n signature_base = (str(secret_key_mod) +\n self._session['time'] +\n uri + '?' + query).encode('ascii')\n\n query += '&signature=' + hashlib.md5(signature_base).hexdigest()\n\n return query\n\n def request(self, action, params=None, action_token_type=None,\n upload_info=None, headers=None):\n \"\"\"Perform request to MediaFire API\n\n action -- \"category/name\" of method to call\n params -- dict of parameters or query string\n action_token_type -- action token to use: None, \"upload\", \"image\"\n upload_info -- in case of upload, dict of \"fd\" and \"filename\"\n headers -- additional headers to send (used for upload)\n\n session_token and signature generation/update is handled automatically\n \"\"\"\n\n uri = self._build_uri(action)\n\n if isinstance(params, six.text_type):\n query = params\n else:\n query = self._build_query(uri, params, action_token_type)\n\n if headers is None:\n headers = {}\n\n if upload_info is None:\n # Use request body for query\n data = query\n headers['Content-Type'] = FORM_MIMETYPE\n else:\n # Use query string for query since payload is file\n uri += '?' + query\n\n if \"filename\" in upload_info:\n data = MultipartEncoder(\n fields={'file': (\n upload_info[\"filename\"],\n upload_info[\"fd\"],\n UPLOAD_MIMETYPE\n )}\n )\n headers[\"Content-Type\"] = data.content_type\n else:\n data = upload_info[\"fd\"]\n headers[\"Content-Type\"] = UPLOAD_MIMETYPE\n\n logger.debug(\"uri=%s query=%s\",\n uri, query if not upload_info else None)\n\n try:\n # bytes from now on\n url = (API_BASE + uri).encode('utf-8')\n if isinstance(data, six.text_type):\n # request's data is bytes, dict, or filehandle\n data = data.encode('utf-8')\n\n response = self.http.post(url, data=data,\n headers=headers, stream=True)\n except RequestException as ex:\n logger.exception(\"HTTP request failed\")\n raise MediaFireConnectionError(\n \"RequestException: {}\".format(ex))\n\n return self._process_response(response)\n\n def _process_response(self, response):\n \"\"\"Parse response\"\"\"\n\n forward_raw = False\n content_type = response.headers['Content-Type']\n if content_type != 'application/json':\n logger.debug(\"headers: %s\", response.headers)\n # API BUG: text/xml content-type with json payload\n # http://forum.mediafiredev.com/showthread.php?136\n if content_type == 'text/xml':\n # we never request xml, so check it quacks like JSON\n if not response.text.lstrip().startswith('{'):\n forward_raw = True\n else:\n # _process_response can't deal with non-json,\n # return response as is\n forward_raw = True\n\n if forward_raw:\n response.raise_for_status()\n return response\n\n logger.debug(\"response: %s\", response.text)\n\n # if we are here, then most likely have json\n try:\n response_node = response.json()['response']\n except ValueError:\n # promised JSON but failed\n raise MediaFireApiError(\"JSON decode failure\")\n\n if response_node.get('new_key', 'no') == 'yes':\n self._regenerate_secret_key()\n\n # check for errors\n if response_node['result'] != 'Success':\n raise MediaFireApiError(response_node['message'],\n response_node['error'])\n\n return response_node\n\n def _regenerate_secret_key(self):\n \"\"\"Regenerate secret key\n\n 
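Editor's note: a worked example of the call-signature scheme implemented in _build_query() above, with entirely hypothetical session values; the md5 input is str(secret_key % 256) + time + uri + '?' + query:

import hashlib

secret_key, time_ = 1234567, '1438366283.0'
uri = '/api/1.3/user/get_info.php'
query = 'response_format=json&session_token=abc123'

base = (str(secret_key % 256) + time_ + uri + '?' + query).encode('ascii')
signed = query + '&signature=' + hashlib.md5(base).hexdigest()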
http://www.mediafire.com/developers/core_api/1.3/getting_started/#call_signature\n \"\"\"\n # Don't regenerate the key if we have none\n if self._session and 'secret_key' in self._session:\n self._session['secret_key'] = (\n int(self._session['secret_key']) * 16807) % 2147483647\n\n @property\n def session(self):\n \"\"\"Returns current session information\"\"\"\n return self._session\n\n @session.setter\n def session(self, value):\n \"\"\"Set session token\n\n value -- dict returned by user/get_session_token\"\"\"\n\n # unset session token\n if value is None:\n self._session = None\n return\n\n if not isinstance(value, dict):\n raise ValueError(\"session info is required\")\n\n session_parsed = {}\n\n for key in [\"session_token\", \"time\", \"secret_key\"]:\n if key not in value:\n raise ValueError(\"Missing parameter: {}\".format(key))\n session_parsed[key] = value[key]\n\n for key in [\"ekey\", \"pkey\"]:\n # nice to have, but not mandatory\n if key in value:\n session_parsed[key] = value[key]\n\n self._session = session_parsed\n\n @session.deleter\n def session(self):\n \"\"\"Unset session\"\"\"\n self._session = None\n\n def set_action_token(self, type_=None, action_token=None):\n \"\"\"Set action tokens\n\n type_ -- either \"upload\" or \"image\"\n action_token -- string obtained from user/get_action_token,\n set None to remove the token\n \"\"\"\n if action_token is None:\n del self._action_tokens[type_]\n else:\n self._action_tokens[type_] = action_token\n\n def user_fetch_tos(self):\n \"\"\"user/fetch_tos\n\n http://www.mediafire.com/developers/core_api/1.3/user/#fetch_tos\n \"\"\"\n\n return self.request(\"user/fetch_tos\")\n\n def user_accept_tos(self, acceptance_token):\n \"\"\"user/accept_tos\n\n http://www.mediafire.com/developers/core_api/1.3/user/#user_top\n \"\"\"\n\n return self.request(\"user/accept_tos\", QueryParams({\n \"acceptance_token\": acceptance_token\n }))\n\n def user_get_session_token(self, app_id=None, email=None, password=None,\n ekey=None, fb_access_token=None,\n tw_oauth_token=None,\n tw_oauth_token_secret=None, api_key=None):\n \"\"\"user/get_session_token\n\n http://www.mediafire.com/developers/core_api/1.3/user/#get_session_token\n \"\"\"\n\n if app_id is None:\n raise ValueError(\"app_id must be defined\")\n\n params = QueryParams({\n 'application_id': str(app_id),\n 'token_version': 2,\n 'response_format': 'json'\n })\n\n if fb_access_token:\n params['fb_access_token'] = fb_access_token\n signature_keys = ['fb_access_token']\n elif tw_oauth_token and tw_oauth_token_secret:\n params['tw_oauth_token'] = tw_oauth_token\n params['tw_oauth_token_secret'] = tw_oauth_token_secret\n signature_keys = ['tw_oauth_token',\n 'tw_oauth_token_secret']\n elif (email or ekey) and password:\n signature_keys = []\n if email:\n signature_keys.append('email')\n params['email'] = email\n\n if ekey:\n signature_keys.append('ekey')\n params['ekey'] = ekey\n\n params['password'] = password\n signature_keys.append('password')\n else:\n raise ValueError(\"Credentials not provided\")\n\n signature_keys.append('application_id')\n\n signature = hashlib.sha1()\n for key in signature_keys:\n signature.update(str(params[key]).encode('ascii'))\n\n # Note: If the app uses a callback URL to provide its API key,\n # or if it does not have the \"Require Secret Key\" option checked,\n # then the API key may be omitted from the signature\n if api_key:\n signature.update(api_key.encode('ascii'))\n\n query = urlencode(params)\n query += '&signature=' + signature.hexdigest()\n\n return 
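Editor's note: the update in _regenerate_secret_key() is a Lehmer/Park-Miller step: the multiplier 16807 is 7**5 and the modulus 2147483647 is the Mersenne prime 2**31 - 1, so client and server can advance the shared secret in lockstep whenever a response carries new_key == 'yes':

def next_secret_key(key):
    # One step of the 'minimal standard' linear congruential generator.
    return (int(key) * 16807) % 2147483647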
self.request('user/get_session_token', params=query)\n\n def user_renew_session_token(self):\n \"\"\"user/renew_session_token:\n\n http://www.mediafire.com/developers/core_api/1.3/user/#renew_session_token\n \"\"\"\n return self.request('user/renew_session_token')\n\n def user_get_action_token(self, type_=None, lifespan=None):\n \"\"\"user/get_action_token\n\n http://www.mediafire.com/developers/core_api/1.3/user/#get_action_token\n \"\"\"\n return self.request('user/get_action_token', QueryParams({\n 'type': type_,\n 'lifespan': lifespan\n }))\n\n def user_destroy_action_token(self, action_token=None):\n \"\"\"user/destroy_action_token\n\n http://www.mediafire.com/developers/core_api/1.3/user/#destroy_action_token\n \"\"\"\n return self.request('user/destroy_action_token', QueryParams({\n 'action_token': action_token\n }))\n\n def user_get_avatar(self):\n \"\"\"user/get_avatar\n\n http://www.mediafire.com/developers/core_api/1.3/user/#get_avatar\n \"\"\"\n return self.request(\"user/get_avatar\")\n\n def user_get_info(self):\n \"\"\"user/get_info\n\n http://www.mediafire.com/developers/core_api/1.3/user/#get_info\n \"\"\"\n return self.request(\"user/get_info\")\n\n def user_get_limits(self):\n \"\"\"user/get_limits\n\n http://www.mediafire.com/developers/core_api/1.3/user/#get_limits\n \"\"\"\n return self.request(\"user/get_limits\")\n\n def user_get_settings(self):\n \"\"\"user/get_settings\n\n http://www.mediafire.com/developers/core_api/1.3/user/#get_settings\n \"\"\"\n return self.request(\"user/get_settings\")\n\n def user_set_avatar(self, action=None, quick_key=None, url=None):\n \"\"\"user/set_avatar\n\n http://www.mediafire.com/developers/core_api/1.3/user/#set_avatar\n \"\"\"\n return self.request(\"user/set_avatar\", QueryParams({\n \"action\": action,\n \"quick_key\": quick_key,\n \"url\": url\n }))\n\n def user_update(self, display_name=None, first_name=None, last_name=None,\n email=None, password=None, current_password=None,\n birth_date=None, gender=None, website=None, subdomain=None,\n location=None, newsletter=None, primary_usage=None,\n timezone=None):\n \"\"\"\n user/update\n\n http://www.mediafire.com/developers/core_api/1.3/user/#update\n \"\"\"\n return self.request(\"user/update\", QueryParams({\n \"display_name\": display_name,\n \"first_name\": first_name,\n \"last_name\": last_name,\n \"email\": email,\n \"password\": password,\n \"current_password\": current_password,\n \"birth_date\": birth_date,\n \"gender\": gender,\n \"website\": website,\n \"subdomain\": subdomain,\n \"location\": location,\n \"newsletter\": newsletter,\n \"primary_usage\": primary_usage,\n \"timezone\": timezone\n }))\n\n def folder_get_info(self, folder_key=None, device_id=None, details=None):\n \"\"\"folder/get_info\n\n http://www.mediafire.com/developers/core_api/1.3/folder/#get_info\n \"\"\"\n return self.request('folder/get_info', QueryParams({\n 'folder_key': folder_key,\n 'device_id': device_id,\n 'details': details\n }))\n\n def folder_get_content(self, folder_key=None, content_type=None,\n filter_=None, device_id=None, order_by=None,\n order_direction=None, chunk=None, details=None,\n chunk_size=None):\n \"\"\"folder/get_content\n\n http://www.mediafire.com/developers/core_api/1.3/folder/#get_content\n \"\"\"\n return self.request('folder/get_content', QueryParams({\n 'folder_key': folder_key,\n 'content_type': content_type,\n 'filter': filter_,\n 'device_id': device_id,\n 'order_by': order_by,\n 'order_direction': order_direction,\n 'chunk': chunk,\n 'details': details,\n 
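Editor's note: a sketch of the intended login flow, assuming (as the session setter above requires) that the user/get_session_token response carries session_token, time and secret_key; the app id and credentials are placeholders:

api = MediaFireApi()
token_info = api.user_get_session_token(app_id='42',
                                        email='user@example.com',
                                        password='hunter2')
api.session = token_info    # setter keeps session_token, time, secret_key
info = api.user_get_info()  # subsequent calls are now signed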
'chunk_size': chunk_size\n }))\n\n def folder_update(self, folder_key, foldername=None, description=None,\n privacy=None, privacy_recursive=None, mtime=None):\n \"\"\"folder/update\n\n http://www.mediafire.com/developers/core_api/1.3/folder/#update\n \"\"\"\n return self.request('folder/update', QueryParams({\n 'folder_key': folder_key,\n 'foldername': foldername,\n 'description': description,\n 'privacy': privacy,\n 'privacy_recursive': privacy_recursive,\n 'mtime': mtime\n }))\n\n def folder_create(self, foldername=None, parent_key=None,\n action_on_duplicate=None, mtime=None):\n \"\"\"folder/create\n\n http://www.mediafire.com/developers/core_api/1.3/folder/#create\n \"\"\"\n return self.request('folder/create', QueryParams({\n 'foldername': foldername,\n 'parent_key': parent_key,\n 'action_on_duplicate': action_on_duplicate,\n 'mtime': mtime\n }))\n\n def folder_delete(self, folder_key):\n \"\"\"folder/delete\n\n http://www.mediafire.com/developers/core_api/1.3/folder/#delete\n \"\"\"\n return self.request('folder/delete', QueryParams({\n 'folder_key': folder_key\n }))\n\n def folder_purge(self, folder_key):\n \"\"\"folder/purge\n\n http://www.mediafire.com/developers/core_api/1.3/folder/#purge\n \"\"\"\n return self.request('folder/purge', QueryParams({\n 'folder_key': folder_key\n }))\n\n def folder_move(self, folder_key_src, folder_key_dst=None):\n \"\"\"folder/move\n\n http://www.mediafire.com/developers/core_api/1.3/folder/#move\n \"\"\"\n return self.request('folder/move', QueryParams({\n 'folder_key_src': folder_key_src,\n 'folder_key_dst': folder_key_dst\n }))\n\n def upload_check(self, filename=None, folder_key=None, filedrop_key=None,\n size=None, hash_=None, path=None, resumable=None):\n \"\"\"upload/check\n\n http://www.mediafire.com/developers/core_api/1.3/upload/#check\n \"\"\"\n return self.request('upload/check', QueryParams({\n 'filename': filename,\n 'folder_key': folder_key,\n 'filedrop_key': filedrop_key,\n 'size': size,\n 'hash': hash_,\n 'path': path,\n 'resumable': resumable\n }))\n\n def upload_simple(self, fd, filename, folder_key=None, path=None,\n filedrop_key=None, action_on_duplicate=None,\n mtime=None, file_size=None, file_hash=None):\n \"\"\"upload/simple\n\n http://www.mediafire.com/developers/core_api/1.3/upload/#simple\n \"\"\"\n action = 'upload/simple'\n\n params = QueryParams({\n 'folder_key': folder_key,\n 'path': path,\n 'filedrop_key': filedrop_key,\n 'action_on_duplicate': action_on_duplicate,\n 'mtime': mtime\n })\n\n headers = QueryParams({\n 'X-Filesize': str(file_size),\n 'X-Filehash': file_hash,\n 'X-Filename': filename.encode('utf-8')\n })\n\n upload_info = {\n \"fd\": fd,\n }\n\n return self.request(action, params, action_token_type=\"upload\",\n upload_info=upload_info, headers=headers)\n\n # pylint: disable=too-many-locals\n # The API requires us to provide all of that\n def upload_resumable(self, fd, filesize, filehash, unit_hash, unit_id,\n unit_size, quick_key=None, action_on_duplicate=None,\n mtime=None, version_control=None, folder_key=None,\n filedrop_key=None, path=None, previous_hash=None):\n \"\"\"upload/resumable\n\n http://www.mediafire.com/developers/core_api/1.3/upload/#resumable\n \"\"\"\n action = 'upload/resumable'\n\n headers = {\n 'x-filesize': str(filesize),\n 'x-filehash': filehash,\n 'x-unit-hash': unit_hash,\n 'x-unit-id': str(unit_id),\n 'x-unit-size': str(unit_size)\n }\n\n params = QueryParams({\n 'quick_key': quick_key,\n 'action_on_duplicate': action_on_duplicate,\n 'mtime': mtime,\n 'version_control': 
version_control,\n 'folder_key': folder_key,\n 'filedrop_key': filedrop_key,\n 'path': path,\n 'previous_hash': previous_hash\n })\n\n upload_info = {\n \"fd\": fd,\n \"filename\": \"chunk\"\n }\n\n return self.request(action, params, action_token_type=\"upload\",\n upload_info=upload_info, headers=headers)\n # pylint: enable=too-many-locals\n\n def upload_instant(self, filename, size, hash_, quick_key=None,\n folder_key=None, filedrop_key=None, path=None,\n action_on_duplicate=None, mtime=None,\n version_control=None, previous_hash=None):\n \"\"\"upload/instant\n\n http://www.mediafire.com/developers/core_api/1.3/upload/#instant\n \"\"\"\n return self.request('upload/instant', QueryParams({\n 'filename': filename,\n 'size': size,\n 'hash': hash_,\n 'quick_key': quick_key,\n 'folder_key': folder_key,\n 'filedrop_key': filedrop_key,\n 'path': path,\n 'action_on_duplicate': action_on_duplicate,\n 'mtime': mtime,\n 'version_control': version_control,\n 'previous_hash': previous_hash\n }))\n\n def upload_poll(self, key):\n \"\"\"upload/poll\n\n http://www.mediafire.com/developers/core_api/1.3/upload/#poll_upload\n \"\"\"\n return self.request('upload/poll_upload', QueryParams({\n 'key': key\n }))\n\n def file_get_info(self, quick_key=None):\n \"\"\"file/get_info\n\n http://www.mediafire.com/developers/core_api/1.3/file/#get_info\n \"\"\"\n return self.request('file/get_info', QueryParams({\n 'quick_key': quick_key\n }))\n\n def file_get_links(self, quick_key, link_type=None):\n \"\"\"file/get_links\n\n http://www.mediafire.com/developers/core_api/1.3/file/#get_links\n \"\"\"\n return self.request('file/get_links', QueryParams({\n 'quick_key': quick_key,\n 'link_type': link_type,\n }))\n\n def file_update(self, quick_key, filename=None, description=None,\n mtime=None, privacy=None):\n \"\"\"file/update\n\n http://www.mediafire.com/developers/core_api/1.3/file/#update\n \"\"\"\n return self.request('file/update', QueryParams({\n 'quick_key': quick_key,\n 'filename': filename,\n 'description': description,\n 'mtime': mtime,\n 'privacy': privacy\n }))\n\n def file_update_file(self, quick_key, file_extension=None, filename=None,\n description=None, mtime=None, privacy=None,\n timezone=None):\n \"\"\"file/update_file\n\n http://www.mediafire.com/developers/core_api/1.3/file/#update_file\n \"\"\"\n return self.request('file/update', QueryParams({\n 'quick_key': quick_key,\n 'file_extension': file_extension,\n 'filename': filename,\n 'description': description,\n 'mtime': mtime,\n 'privacy': privacy,\n 'timezone': timezone\n }))\n\n def file_delete(self, quick_key):\n \"\"\"file/delete\n\n http://www.mediafire.com/developers/core_api/1.3/file/#delete\n \"\"\"\n return self.request('file/delete', QueryParams({\n 'quick_key': quick_key\n }))\n\n def file_move(self, quick_key, folder_key=None):\n \"\"\"file/move\n\n http://www.mediafire.com/developers/core_api/1.3/file/#move\n \"\"\"\n return self.request('file/move', QueryParams({\n 'quick_key': quick_key,\n 'folder_key': folder_key\n }))\n\n def file_purge(self, quick_key):\n \"\"\"file/purge\n\n http://www.mediafire.com/developers/core_api/1.3/file/#purge\n \"\"\"\n return self.request('file/purge', QueryParams({\n 'quick_key': quick_key\n }))\n\n def file_zip(self, keys, confirm_download=None, meta_only=None):\n \"\"\"file/zip\n\n http://www.mediafire.com/developers/core_api/1.3/file/#zip\n \"\"\"\n return self.request('file/zip', QueryParams({\n 'keys': keys,\n 'confirm_download': confirm_download,\n 'meta_only': meta_only\n }))\n\n def 
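Editor's note: upload_resumable() above sends one unit per call, so a driver loop is needed. A sketch under two assumptions: MediaFire's hashes are SHA-256, and the unit size would normally come from the upload/check response rather than being fixed here:

import hashlib
import io
import math

def upload_in_units(api, fd, filesize, unit_size=4 * 1024 * 1024, **kwargs):
    filehash = hashlib.sha256(fd.read()).hexdigest()  # whole-file hash
    fd.seek(0)
    for unit_id in range(math.ceil(filesize / unit_size)):
        unit = fd.read(unit_size)
        api.upload_resumable(io.BytesIO(unit), filesize, filehash,
                             hashlib.sha256(unit).hexdigest(),
                             unit_id, len(unit), **kwargs)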
system_get_info(self):\n \"\"\"system/get_info\n\n http://www.mediafire.com/developers/core_api/1.3/system/#get_info\n \"\"\"\n return self.request('system/get_info')\n\n def system_get_status(self):\n \"\"\"system/get_status\n\n http://www.mediafire.com/developers/core_api/1.3/system/#get_status\n \"\"\"\n return self.request('system/get_status')\n","sub_path":"mediafire/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":25118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"419120205","text":"#!C:\\Users\\groso\\AppData\\Local\\Programs\\Python\\Python37\\python.exe\n# -*- coding: utf-8 -*-\n\nimport save_db as sd\nimport pymysql\nimport json\n\n\"\"\"\nModule that reads the number of census entries taken per date, used to build the chart\n\"\"\"\n\nprint(\"Content-type: text/html; charset=UTF-8\")\nprint(\"\")\n\n# database connection\ndb = sd.AnimalitosDb(\"root\", \"\")\n\ntry:\n\n messages = db.read_mascotas()\n msg = {}\n mascotas = []\n cantidad = []\n k = 0\n for i in messages:\n\n mascotas.append(messages[k][0])\n cantidad.append(messages[k][1])\n k += 1\n\n msg[0] = mascotas\n msg[1] = cantidad\n print(json.dumps(msg))\nexcept pymysql.Error as e:\n mensaje = 'Database error: {0} {1} '.format(e.args[0], e.args[1])\n\n","sub_path":"cgi-bin/read_mascotas.py","file_name":"read_mascotas.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"634316828","text":"#Main Window\r\n#10/11/2015\r\n\r\nimport sys\r\nimport random\r\nimport math\r\n\r\nfrom PyQt4.QtGui import *\r\nfrom PyQt4.QtCore import *\r\n\r\nfrom LoginWindow import *\r\nfrom Database import *\r\nfrom MenuBar import *\r\nfrom QuadraticsSA import *\r\nfrom SaveProgress import *\r\n\r\nclass QuadraticsMultipleChoiceWidget(QWidget):\r\n def __init__(self, delegate):\r\n super().__init__()\r\n self.delegate = delegate\r\n self.window_counter = 0\r\n self.create_layout()\r\n\r\n def GeneratingValues(self):\r\n a = random.randint(1,3)\r\n b = random.randint(20,50)\r\n c = random.randint(10,30)\r\n\r\n base = math.sqrt(((b * b) - (4 * a * c)))\r\n x1 = (-b + base) / (2 * a) \r\n x2 = (-b - base) / (2 * a)\r\n\r\n #generating fake answers\r\n x1_fake = random.uniform(x1,x1-7)\r\n x1_fake_two = random.uniform(x1,(x1+8))\r\n\r\n x2_fake = random.uniform(x2,(x2 + 8))\r\n x2_fake_two = random.uniform(x2,(x2 + 10))\r\n\r\n\r\n x1 = round(x1,2)\r\n x1_fake = round(x1_fake,2)\r\n x1_fake_two = round(x1_fake_two,2)\r\n \r\n x2 = round(x2,2)\r\n x2_fake = round(x2_fake,2)\r\n x2_fake_two = round(x2_fake_two,2)\r\n print(x1)\r\n print(x2)\r\n return a,b,c,x1,x2,x1_fake,x1_fake_two,x2_fake,x2_fake_two\r\n\r\n def VerifyingAnswer(self):\r\n index1 = self.multiple_choice_combo.currentIndex()\r\n index2 = self.multiple_choice_combo_two.currentIndex()\r\n num_correct_answers = 0\r\n if (index1 == 1): # Correct answer\r\n num_correct_answers = num_correct_answers + 1\r\n if (index2 == 0):\r\n num_correct_answers = num_correct_answers + 1\r\n if (num_correct_answers == 2):\r\n #Run 2 mark dialogbox and add two marks to total score.\r\n self.delegate.current_score += 2\r\n QMessageBox.information(self,\"Message\",\"Congratulations! You got two marks. 
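Editor's note: GeneratingValues() above can never feed math.sqrt() a negative number: with a <= 3, b >= 20 and c <= 30 the discriminant is at least 20**2 - 4*3*30 = 40, so both roots are always real:

# Smallest possible discriminant under the randint ranges used above
assert 20**2 - 4*3*30 == 40  # b**2 - 4*a*c > 0 for every generated equation

Two design observations on the surrounding widget: Refresh() always places the true x1 at combo index 1 and the true x2 at index 0, which is exactly what VerifyingAnswer() checks, so the correct options sit at fixed, guessable positions; and since Refresh() is only invoked from create_layout(), the combo boxes are never repopulated after new coefficients are generated.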
You now have {0} marks\".format(self.delegate.current_score))\r\n self.a,self.b,self.c,self.x1,self.x2,self.x1_fake,self.x1_fake_two,self.x2_fake,self.x2_fake_two = self.GeneratingValues()\r\n self.message3.setText(\"{0}x² + {1}x + {2} = 0\".format(self.a, self.b, self.c))\r\n elif (num_correct_answers == 0):\r\n #Run 0 mark dialogbox\r\n QMessageBox.information(self,\"Message\",\"Oh no! You got zero marks for that question. You now have {0} marks\".format(self.delegate.current_score))\r\n self.a,self.b,self.c,self.x1,self.x2,self.x1_fake,self.x1_fake_two,self.x2_fake,self.x2_fake_two = self.GeneratingValues()\r\n self.message3.setText(\"{0}x² + {1}x + {2} = 0\".format(self.a, self.b, self.c))\r\n elif (num_correct_answers == 1):\r\n #Run 1 mark dialogbox\r\n self.delegate.current_score += 1\r\n QMessageBox.information(self,\"Message\",\"Well done, you got one mark. You now have {0} marks\".format(self.delegate.current_score))\r\n self.a,self.b,self.c,self.x1,self.x2,self.x1_fake,self.x1_fake_two,self.x2_fake,self.x2_fake_two = self.GeneratingValues()\r\n self.message3.setText(\"{0}x² + {1}x + {2} = 0\".format(self.a, self.b, self.c))\r\n self.MoveToShortAnswer()\r\n\r\n def FinishTest(self):\r\n self.window_counter += 1\r\n if self.window_counter == 5:\r\n self.button.setText(\"Submit and finish test\")\r\n self.button2.setText(\"Skip and finish test\")\r\n else:\r\n self.button.setText(\"Submit answer\")\r\n self.button2.setText(\"Skip question\")\r\n \r\n def SkipQuestion(self):\r\n self.a,self.b,self.c,self.x1,self.x2,self.x1_fake,self.x1_fake_two,self.x2_fake,self.x2_fake_two = self.GeneratingValues()\r\n self.message3.setText(\"{0}x² + {1}x + {2} = 0\".format(self.a, self.b, self.c))\r\n self.MoveToShortAnswer()\r\n\r\n def MoveToShortAnswer(self):\r\n if self.window_counter < 5:\r\n self.delegate.stackedlayout.setCurrentIndex(2)\r\n else:\r\n saveprogresswidget = SaveProgressWidget(self.delegate)\r\n saveprogresswidget.exec_()\r\n self.delegate.stackedlayout.setCurrentIndex(4)\r\n self.window_counter = 0\r\n\r\n def create_layout(self):\r\n self.a,self.b,self.c,self.x1,self.x2,self.x1_fake,self.x1_fake_two,self.x2_fake,self.x2_fake_two = self.GeneratingValues()\r\n\r\n self.multiple_choice_combo = QComboBox()\r\n self.multiple_choice_combo_two = QComboBox()\r\n self.Refresh()\r\n\r\n self.button = QPushButton(\"Submit answer\")\r\n self.button2 = QPushButton(\"Skip question\")\r\n\r\n self.button.clicked.connect(self.VerifyingAnswer)\r\n self.button2.clicked.connect(self.SkipQuestion)\r\n\r\n self.message = QTextEdit(\"Multiple choice question\")\r\n self.message.setReadOnly(True)\r\n self.message.setMaximumHeight(30)\r\n\r\n self.message2 = QTextEdit(\"Solve the following equation to 2 decimal places:\")\r\n self.message2.setReadOnly(True)\r\n self.message2.setMaximumHeight(30)\r\n self.message2.setMinimumWidth(300)\r\n\r\n self.message3 = QTextEdit(\"{0}x² + {1}x + {2} = 0\".format(self.a, self.b, self.c))\r\n self.message3.setReadOnly(True)\r\n self.message3.setMaximumHeight(40)\r\n\r\n self.initial_layout = QVBoxLayout()\r\n self.initial_layout.addWidget(self.message)\r\n self.initial_layout.addWidget(self.message2)\r\n self.initial_layout.addWidget(self.message3) \r\n self.initial_layout.addWidget(self.multiple_choice_combo)\r\n self.initial_layout.addWidget(self.multiple_choice_combo_two)\r\n self.initial_layout.addWidget(self.button)\r\n self.initial_layout.addWidget(self.button2)\r\n \r\n self.setLayout(self.initial_layout)\r\n\r\n def Refresh(self):\r\n 
self.multiple_choice_combo.clear()\r\n self.multiple_choice_combo_two.clear()\r\n self.multiple_choice_combo.addItem(str(self.x1_fake))\r\n self.multiple_choice_combo.addItem(str(self.x1))\r\n self.multiple_choice_combo.addItem(str(self.x1_fake_two))\r\n self.multiple_choice_combo_two.addItem(str(self.x2))\r\n self.multiple_choice_combo_two.addItem(str(self.x2_fake))\r\n self.multiple_choice_combo_two.addItem(str(self.x2_fake_two))\r\n","sub_path":"QuadraticsMC.py","file_name":"QuadraticsMC.py","file_ext":"py","file_size_in_byte":6254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"45042660","text":"\"\"\"\nGiven a list of one athlete's attempt results for some competition.\nWrite a function stronger(scores) that counts how many times during the session the athlete posted a better result\nthan on the previous attempt, i.e. the current value exceeds the previous one.\n\"\"\"\n\n\nscores = [10, 5, 20, 20, 4, 5, 2, 25, 1]\n\n\ndef stronger(scores):\n counter = 0\n for index in range(len(scores) - 1):\n if scores[index] < scores[index + 1]:\n counter += 1\n return counter\n\n\n\nprint(stronger(scores))\n","sub_path":"Practice_3/task_3.py","file_name":"task_3.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"415166346","text":"from usage import dataset, weights, weights_stochastic\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# separate instances by labels\nclass_0_points = dataset.loc[dataset['label'] == 0].iloc[:, [1, 2]].values\nclass_1_points = dataset.loc[dataset['label'] == 1].iloc[:, [1, 2]].values\n\n# plot data points\nplt.scatter(class_0_points[:, 0], class_0_points[:, 1], c='red', marker='s')\nplt.scatter(class_1_points[:, 0], class_1_points[:, 1], c='blue')\n\n# plot decision boundary - gradient descent\nx = np.arange(min(dataset.iloc[:, 1].values), max(dataset.iloc[:, 1].values), 0.1)\n# set the input to the sigmoid function to 0 and solve for x2\nx2 = (-weights[0]-weights[1]*x)/weights[2]\nplt.plot(x, x2)\n\n# plot decision boundary - stochastic gradient descent\nx2_s = (-weights_stochastic[0]-weights_stochastic[1]*x)/weights_stochastic[2]\nplt.plot(x, x2_s)\n\nplt.show()","sub_path":"classification/logistic_regression/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"75005324","text":"from flask_mail import Mail, Message\nimport os\nfrom flask import render_template\n\nmail_settings = {\n \"MAIL_SERVER\": 'smtp.gmail.com',\n \"MAIL_PORT\": 587,\n \"MAIL_USE_TLS\": True,\n \"MAIL_USERNAME\": \"amansinghbawa@gmail.com\",\n \"MAIL_PASSWORD\": \"Bawa7800\"\n}\n\n\nclass EmailService:\n def __init__(self, app):\n self.app = app\n app.config.update(mail_settings)\n self.mail = Mail(app)\n\n def send_mail(self, recipient, name, message):\n msg = Message(subject=f\"Query from {recipient}\",\n sender=self.app.config.get(\"MAIL_USERNAME\"),\n recipients=[self.app.config.get(\"MAIL_USERNAME\"), ], # replace with your email for testing\n body=message)\n self.mail.send(msg)\n\n msg = Message(subject=\"Baba iron and cement store - Query submitted\",\n sender=self.app.config.get(\"MAIL_USERNAME\"),\n recipients=[recipient, ] # replace with your email for testing\n # body=f\"Hi,\\nYour query is submitted we will contact you soon\\n\\nDetails:\\n{message}\\n\\nThank you,\\nBaba Iron and cement store\\n\"\n )\n msg.html 
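Editor's note: plot.py above rearranges the decision boundary analytically: sigmoid(w0 + w1*x1 + w2*x2) = 0.5 exactly when w0 + w1*x1 + w2*x2 = 0, hence x2 = (-w0 - w1*x1)/w2. A quick numeric check with a hypothetical weight vector:

import numpy as np

w = np.array([0.5, -1.0, 2.0])   # [bias, w1, w2], illustration only
x1 = np.linspace(0.0, 10.0, 5)
x2 = (-w[0] - w[1] * x1) / w[2]  # same rearrangement as in plot.py
assert np.allclose(w[0] + w[1] * x1 + w[2] * x2, 0.0)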
= render_template(\"mail/contact_us_customer.html\", **{\"name\": name,\"message\": message})\n self.mail.send(msg)\n","sub_path":"utils/email.py","file_name":"email.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"428247092","text":"from empire.server.common import helpers\n\n\ndef test_dynamic_powershell():\n with open(\n \"empire/server/data/module_source/situational_awareness/network/powerview.ps1\",\n \"r\",\n ) as file:\n script = file.read()\n new_script = helpers.generate_dynamic_powershell_script(\n script, \"Find-LocalAdminAccess\"\n )\n assert len(new_script) == 96681\n","sub_path":"empire/test/test_helpers.py","file_name":"test_helpers.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"603184484","text":"import random\n\nsize = 200000\ntable = [[0 for i in range(3)] for i in range(size)] \n\nfor i in range(size):\n\tf1 = random.uniform(0, 10)\n\tf2 = random.uniform(0, 10)\n\tif (f1 - 2) ** 2 + (f2 - 2) ** 2 <= 4 or (f1 - 7) ** 2 + (f2 - 7) ** 2 <= 4:\n\t\ttable[i][0] = \"+1\"\n\telse:\n\t\ttable[i][0] = \"-1\"\n\ttable[i][1] = f1\n\ttable[i][2] = f2\n\nwith open('TwoCircleHardTraining.txt', 'w') as training, open('TwoCircleHardTestingAll.txt', 'w') as testingall, open('TwoCircleHardTestingSignal.txt', 'w') as testingsignal, open('TwoCircleHardTestingBackground.txt', 'w') as testingbackground:\n\tfor i in range(0, size, 2):\n\t\ttraining.write(table[i][0] + \" 1:\" + str(table[i][1]) + \" 2:\" + str(table[i][2]) + \"\\n\")\n\tfor i in range(1, size, 2):\n\t\ttestingall.write(table[i][0] + \" 1:\" + str(table[i][1]) + \" 2:\" + str(table[i][2]) + \"\\n\")\n\t\tif table[i][0] == \"+1\":\n\t\t\ttestingsignal.write(table[i][0] + \" 1:\" + str(table[i][1]) + \" 2:\" + str(table[i][2]) + \"\\n\")\n\t\telse:\n\t\t\ttestingbackground.write(table[i][0] + \" 1:\" + str(table[i][1]) + \" 2:\" + str(table[i][2]) + \"\\n\")","sub_path":"Generators/TwoCircleHardGen.py","file_name":"TwoCircleHardGen.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"417044182","text":"import os\ntry:\n from urllib.parse import urljoin\nexcept ImportError:\n from urlparse import urljoin\nfrom . 
import get_html_theme_path\n\nhtml_theme_path = get_html_theme_path()\nhtml_theme = \"sunpy\"\nhtml_favicon = os.path.join(html_theme_path[0], html_theme, \"static\", \"img\", \"favicon-32.ico\")\n\ndef page_url(page):\n sunpy_website_url_base = \"http://sunpy.org/\"\n return urljoin(sunpy_website_url_base, page)\n\nhtml_theme_options = {\n 'navbar_links': [\n (\"Support Us\", page_url(\"contribute.html\"), 1),\n (\"Get Help\", page_url(\"help.html\"), 1),\n (\"SunPy Project\", page_url(\"team.html\"), 1),\n ]\n}\n","sub_path":"sunpy_sphinx_theme/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"55013597","text":"class Solution:\n def insertIntoBST(self, root, val):\n if not root:\n return TreeNode(val)\n\n if val < root.val:\n root.left = self.insertIntoBST(root.left, val)\n else:\n root.right = self.insertIntoBST(root.right, val)\n\n return root\n\n\nclass Solution:\n def insertIntoBST(self, root, val):\n if not root:\n return TreeNode(val)\n\n cur = root\n while True:\n if val < cur.val:\n if cur.left:\n cur = cur.left\n else:\n cur.left = TreeNode(val)\n break\n else:\n if cur.right:\n cur = cur.right\n else:\n cur.right = TreeNode(val)\n break\n return root\n\n\nclass Solution:\n def insertIntoBST(self, root, val):\n if not root:\n return TreeNode(val)\n prev, cur = None, root\n while cur:\n prev = cur\n if val < cur.val:\n cur = cur.left\n else:\n cur = cur.right\n if prev.val > val:\n prev.left = TreeNode(val)\n else:\n prev.right = TreeNode(val)\n return root\n","sub_path":"leetcode/py/701.py","file_name":"701.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"371538345","text":"\"\"\"\nLoad and prepare text documents for machine learning\nThis file has been modified by Muhammad Mahir Latif\nOriginally written by Patrick Coady (pcoady@alum.mit.edu)\n\n1. load_book(): Return list of words and word counter for document.\n Also basic document statistics.\n2. build_dict(): Build word -> integer dictionary\n3. doc2num(): Transform document word list to integer numpy array\n4. build_word_array(): Convenience function that runs 3 functions\n above to build an integer numpy word array from a file.\n5. save_word_array(): Save a word array and dictionary to file for\n future fast loading.\n6. load_word_array(): Load previously saved word array and dictionary.\n\"\"\"\nimport collections\nimport numpy as np\nimport pickle\nimport re\n\n\ndef load_books(filenames):\n \"\"\"\n Read files and count number of occurrences of each unique word in the\n file. Also return the document as a list of words in the same order\n as the original document.\n Notes:\n The following punctuation are treated as separate words: ;:-()&.,!?'\"\n All letters changed to lower-case\n Contractions (e.g. don't, we'll) returned as-is (i.e. ' treated as\n letter). 
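Editor's note: the three insertIntoBST variants above assume LeetCode's TreeNode class; a minimal definition so the snippets run standalone:

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right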
This could cause problems for text that uses single\n quotes (') for other purposes\n :param filenames: list of filenames (including path, if needed)\n :return: tuple:\n 0) collections.Counter() with unique word counts\n 1) list with document words in order\n 2) tuples: (number of lines read, number of words read)\n \"\"\"\n word_counter = collections.Counter()\n word_list = []\n num_lines, num_words = (0, 0)\n for filename in filenames:\n with open(filename, 'r') as f:\n for line in f.readlines():\n # TODO: check reg-exp below\n words = re.findall(\"[\\\\w']+|[;:\\-\\(\\)&.,!?\\\"]\", line.lower().strip('\\n'))\n word_counter.update(words)\n word_list.extend(words)\n num_lines += 1\n num_words += len(words)\n\n return word_counter, word_list, num_lines, num_words\n\n\ndef build_dict(word_counter, vocab_size=50000):\n \"\"\"\n Builds dictionary and reverse dictionary of most common words in word_counter.\n Number of words to include in the dictionary is set by dict_size.\n :param word_counter: collections.Counter() with keys = word and values = number of\n occurrences. Case sensitive.\n :param vocab_size: Upper limit on vocabulary size. If number of unique words\n greater than vocab_size, will take most commonly occurring words\n :return: tuple:\n 0) dictionary of words to integers (most common word is 0, next most\n common is 1, ...)\n 1) reverse dictionary of integers to words (same integer to word mapping as\n \"forward dictionary\"\n \"\"\"\n top_words = word_counter.most_common(vocab_size)\n top_words.sort(key=lambda t: -t[1])\n dictionary = dict()\n for idx, word in enumerate(map(lambda t: t[0], top_words)):\n dictionary[word] = idx\n\n return dictionary\n\n\ndef doc2num(word_list, dictionary):\n \"\"\"\n Maps list of words to np.array of integers using key/value pairs in\n dictionary. Words not found in dictionary will be mapped to len(dictionary)\n (i.e. 1 larger than biggest value in dictionary).\n :param word_list: List of words\n :param dictionary: Dictionary mapping words to integers\n :return: return numpy array of type np.int32 corresponding to integer mapping\n of words, with words not appearing in dictionary being mapped to\n largest integer in dictionary (i.e. len(dictionary)-1)\n \"\"\"\n word_array = []\n unknown_val = len(dictionary)\n for word in word_list:\n word_array.append(dictionary.get(word, unknown_val))\n\n return np.array(word_array, dtype=np.int32)\n\n\ndef build_word_array(filenames, vocab_size):\n \"\"\"\n Convenience function that runs: 1) load_books(), 2) build_dict(),\n and doc2num() in sequence and returns integer word array of documents,\n a dictionary and basic document statistics.\n :param filenames: list of file names (including path, if needed)\n :param vocab_size: Upper limit on vocabulary size. If number of unique words\n greater than vocab_size, will take most commonly occurring words\n :param gutenberg: Set flag to True for .txt files from Project Gutenberg.\n Loader will then skip Gutenberg preamble and license text at end of\n file.\n :return: 3-tuple:\n 0) numpy array of type np.int32 corresponding to integer mapping\n of words in documents. Words not in dictionary are mapped to\n largest integer in dictionary (i.e. 
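Editor's note: load_books() actually returns four flat values (its docstring still describes the line/word counts as a nested tuple), and doc2num() maps out-of-vocabulary words to len(dictionary), i.e. one past the largest dictionary value. A hypothetical end-to-end use of the helpers defined above:

counter, words, n_lines, n_words = load_books(['corpus.txt'])  # placeholder file
dictionary = build_dict(counter, vocab_size=10000)
word_array = doc2num(words, dictionary)  # np.int32; unknowns -> len(dictionary)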
len(dictionary)-1)\n 1) dictionary: word -> int dictionary\n 2) 2-tuple: (number of lines read, number of words read)\n Note: no integration coverage\n \"\"\"\n word_counter, word_list, num_lines, num_words = load_books(filenames)\n dictionary = build_dict(word_counter, vocab_size)\n word_array = doc2num(word_list, dictionary)\n num_unique_words = len(word_counter.keys())\n return word_array, dictionary, num_lines, num_words, num_unique_words\n\n\ndef save_word_array(filename, word_array, dictionary,num_lines, num_words, num_unique_words):\n \"\"\"\n Save word array and dictionary for faster load.\n :param filename: Filename, with path. Saved as python pickle file.\n :param word_array: Numpy integer word array of document\n :param dictionary: Word -> int document\n :return: None\n Note: no unit test coverage\n \"\"\"\n word_array_dict = dict()\n word_array_dict['word_array'] = word_array\n word_array_dict['dictionary'] = dictionary\n word_array_dict['num_lines'] = num_lines\n word_array_dict['num_words'] = num_words\n word_array_dict['num_unique_words'] = num_unique_words\n\n with open(filename + '.p', 'wb') as f:\n pickle.dump(word_array_dict, f)\n\n\ndef load_word_array(filename):\n \"\"\"\n Load integer word array and dictionary saved by save_word_array()\n :param filename: Same filename used with save_word_array()\n :return: 2-tuple\n 0) Numpy word array of integers (document representation)\n 1) Word -> int dictionary\n Note: no unit test coverage\n \"\"\"\n with open(filename + '.p', 'rb') as f:\n word_array_dict = pickle.load(f)\n\n return word_array_dict['word_array'], word_array_dict['dictionary'], word_array_dict['num_lines'], \\\n word_array_dict['num_words'], word_array_dict['num_unique_words']\n","sub_path":"src/docload.py","file_name":"docload.py","file_ext":"py","file_size_in_byte":6501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"268466150","text":"import torch\nimport math\nimport torch.nn.functional as F\nimport numpy as np\nfrom torch import nn\nfrom torch.autograd import Variable\nfrom transformers import RobertaTokenizer, RobertaModel\nfrom model.rnn import RNNEncoder, max_along_time, mean_along_time\nfrom model.modules import CharMatching, ContextMatching\n\nclass MMT_joint(nn.Module):\n def __init__(self, args, vocab, n_dim, image_dim, layers, dropout, num_choice=5):\n super().__init__()\n self.vocab = vocab\n V = len(vocab)\n D = n_dim\n self.hidden_dim = n_dim\n \n #video_encoder_layer = nn.TransformerEncoderLayer(d_model=300, nhead=6, dim_feedforward=1024, dropout=0.1, activation='gelu')\n #self.video_encoder = nn.TransformerEncoder(video_encoder_layer, num_layers=1)\n self.video_encoder = nn.GRU(image_dim + 21, 150, bidirectional=True, batch_first=True)\n\n multimodal_encoder_layer = nn.TransformerEncoderLayer(d_model=n_dim, nhead=6, dim_feedforward=1024, dropout=0.5, activation='gelu')\n self.transformer = nn.TransformerEncoder(multimodal_encoder_layer, num_layers=2)\n\n self.embedding = nn.Embedding(V, D)\n n_dim = args.n_dim\n image_dim = args.image_dim\n\n self.tokenizer = RobertaTokenizer.from_pretrained('roberta-base')\n self.language_model = RobertaModel.from_pretrained('roberta-base', return_dict=True) \n #for param in self.language_model.base_model.parameters():\n # param.requires_grad = False\n\n # Update config to finetune token type embeddings\n #self.language_model.config.type_vocab_size = 3 \n\n # Create a new Embeddings layer, with 2 possible segments IDs instead of 1\n 
#self.language_model.embeddings.token_type_embeddings = nn.Embedding(3, self.language_model.config.hidden_size)\n \n # Initialize it\n #self.language_model.embeddings.token_type_embeddings.weight.data.normal_(mean=0.0, std=self.language_model.config.initializer_range)\n\n '''\n # Freeze the first 10 layers\n modules = [self.language_model.encoder.layer[:10]]\n for module in modules:\n for param in module.parameters():\n param.requires_grad = False\n '''\n\n #self.cmat = ContextMatching(n_dim * 3) \n #self.lstm_raw = RNNEncoder(300, 150, bidirectional=True, dropout_p=0, n_layers=1, rnn_type=\"lstm\")\n self.lstm_script = RNNEncoder(321, 150, bidirectional=True, dropout_p=0, n_layers=1, rnn_type=\"lstm\")\n self.script_on = \"script\" in args.stream_type\n self.vbb_on = \"visual_bb\" in args.stream_type\n self.vmeta_on = \"visual_meta\" in args.stream_type\n #self.conv_pool = Conv1d(n_dim*4+1, n_dim*2)\n\n self.character = nn.Parameter(torch.randn(22, D, device=args.device, dtype=torch.float), requires_grad=True)\n self.norm1 = Norm(D)\n\n self.lang_proj = nn.Linear(768, 300)\n self.visual_proj = nn.Linear(2048, 300) \n \n #self.mh_video = nn.MultiheadAttention(300, 6) \n #self.context_gru = nn.GRU(300, 150, bidirectional=True, batch_first=True)\n self.cross1 = UtilityLayer(300)\n self.cross2 = UtilityLayer(300)\n self.cross3 = UtilityLayer(300)\n self.context_proj = nn.Linear(5*300,300)\n\n self.char_classifier = nn.Linear(300, 21)\n self.mask_classifier = nn.Linear(300, self.tokenizer.vocab_size)\n\n self.output = nn.Linear(300, 1)\n\n self.answer_rnn = nn.LSTM(300, 300, 1, batch_first=True, dropout=0)\n\n speaker_name = [ \n 'None', # index 0: unknown speaker \n 'Anna', 'Chairman', 'Deogi', 'Dokyung', 'Gitae',\n 'Haeyoung1', 'Haeyoung2', 'Heeran', 'Hun', 'Jeongsuk',\n 'Jinsang', 'Jiya', 'Kyungsu', 'Sangseok', 'Seohee', \n 'Soontack', 'Sukyung', 'Sungjin', 'Taejin', 'Yijoon'\n ]\n self.speaker_to_index = {name: index for index, name in enumerate(speaker_name)} \n self.index_to_speaker = {v: k for k, v in self.speaker_to_index.items()}\n\n if self.script_on:\n self.lstm_script = RNNEncoder(321, 150, bidirectional=True, dropout_p=0, n_layers=1, rnn_type=\"lstm\")\n self.classifier_script = nn.Sequential(nn.Linear(n_dim*2, 1), nn.Softmax(dim=1))\n self.mhattn_script = CharMatching(4, D, D)\n\n if self.vmeta_on: \n self.lstm_vmeta = RNNEncoder(321, 150, bidirectional=True, dropout_p=0, n_layers=1, rnn_type=\"lstm\")\n self.classifier_vmeta = nn.Sequential(nn.Linear(n_dim*2, 1), nn.Softmax(dim=1))\n self.mhattn_vmeta = CharMatching(4, D, D)\n\n if self.vbb_on:\n self.lstm_vbb = RNNEncoder(image_dim+21, 150, bidirectional=True, dropout_p=0, n_layers=1, rnn_type=\"lstm\")\n self.vbb_fc = nn.Sequential(\n nn.Dropout(0.5),\n nn.Linear(image_dim, n_dim),\n nn.Tanh(),\n )\n self.classifier_vbb = nn.Sequential(nn.Linear(n_dim*2, 1), nn.Softmax(dim=1))\n\n self.mhattn_vbb = CharMatching(4, D, D)\n\n\n def _to_one_hot(self, y, n_dims, mask, dtype=torch.cuda.FloatTensor):\n scatter_dim = len(y.size())\n y_tensor = y.type(torch.LongTensor).view(*y.size(), -1).cuda()\n y_tensor = y.view(*y.size(), -1).cuda()\n zeros = torch.zeros(*y.size(), n_dims).type(dtype).cuda()\n out = zeros.scatter(scatter_dim, y_tensor, 1)\n\n out_mask,_ = self.len_to_mask(mask, out.shape[1])\n out_mask = out_mask.unsqueeze(2).repeat(1, 1, n_dims)\n\n return out.masked_fill_(out_mask, 0)\n\n\n def load_embedding(self, pretrained_embedding):\n print('Load pretrained embedding ...')\n 
#self.embedding.weight.data.copy_(pretrained_embedding)\n self.embedding.weight.data.copy_(torch.from_numpy(pretrained_embedding))\n\n def len_to_mask(self, lengths, len_max):\n #len_max = lengths.max().item()\n mask = torch.arange(len_max, device=lengths.device,\n dtype=lengths.dtype).expand(len(lengths), len_max) >= lengths.unsqueeze(1)\n mask = torch.as_tensor(mask, dtype=torch.uint8, device=lengths.device)\n\n return mask, len_max\n\n def get_name(self, x, x_l):\n x_mask = x.masked_fill(x>20, 21)\n x_onehot = self._to_one_hot(x_mask, 22, x_l)\n x_sum = torch.sum(x_onehot[:,:,:21], dim=1)\n return x_sum > 0\n\n def forward(self, que, answers, **features):\n '''\n filtered_sub (B, max_sub_len)\n filtered_sub_len (B)\n filtered_speaker (B, max_sub_len)\n\n filtered_visual (B, max_v_len*3)\n filtered_visual_len (B)\n\n filtered_image (B, max_v_len, 512)\n filtered_image_len (12)\n\n que (B, max_que_len)\n que_len (B)\n\n answers (B, 5, max_ans_len)\n ans_len (B, 5)\n \n print(que.shape)\n print(answers.shape)\n for key, value in features.items():\n print(key, value.shape)\n \n\n '''\n batch_size = que.shape[0]\n\n text = features['text_masked']\n text_lengths = features['text_masked_l']\n token_type_ids = features['token_type_ids']\n #labels = features['labels']\n\n # -------------------------------- #\n outputs = self.language_model(que)\n e_q = outputs.last_hidden_state\n e_q = self.lang_proj(e_q)\n # -------------------------------- #\n e_ans = []\n for i in range(5):\n outputs = self.language_model(answers[:,i,:])\n embedded = outputs.last_hidden_state\n embedded = self.lang_proj(embedded)\n e_ans.append(embedded)\n \n if self.script_on:\n s_len = features['filtered_sub_len']\n spk = features['filtered_speaker']\n spk_onehot = self._to_one_hot(spk, 21, mask=s_len)\n # e_script was undefined here; encoding the subtitle tokens the same way as que/answers above is an assumption\n e_script = self.lang_proj(self.language_model(features['filtered_sub']).last_hidden_state)\n e_s = torch.cat([e_script, spk_onehot], dim=2)\n H_S, _ = self.lstm_script(e_s, s_len)\n\n if self.vmeta_on:\n vmeta = features['filtered_visual'].view(batch_size, -1, 3)\n vmeta_len = features['filtered_visual_len'].double()*2/3\n\n vp = vmeta[:,:,0]\n vp = vp.unsqueeze(2).repeat(1,1,2).view(batch_size, -1)\n vbe = vmeta[:,:,1:3].contiguous()\n vbe = vbe.view(batch_size, -1)\n #e_vbe = self.embedding(vbe)\n e_vbe = self.language_model(vbe).last_hidden_state\n e_vbe = self.lang_proj(e_vbe)\n # -------------------------------- #\n vp_onehot = self._to_one_hot(vp, 21, mask=vmeta_len)\n e_vbe = torch.cat([e_vbe, vp_onehot], dim=2)\n #vp_flag = [torch.matmul(vp_onehot, concat_qa[i].unsqueeze(2)) for i in range(5)]\n #vp_flag = [(vp_flag[i] > 0).type(torch.cuda.FloatTensor) for i in range(5)]\n M, _ = self.lstm_vmeta(e_vbe, vmeta_len)\n\n if self.vbb_on:\n e_vbb = features['filtered_person_full']\n vbb_len = features['filtered_person_full_len']\n\n vp = features['filtered_visual'].view(batch_size, -1, 3)[:,:,0]\n vp = vp.unsqueeze(2).view(batch_size, -1)\n vp_onehot = self._to_one_hot(vp, 21, mask=vbb_len)\n e_vbb = torch.cat([e_vbb, vp_onehot], dim=2)\n #vp_flag = [torch.matmul(vp_onehot, concat_qa[i].unsqueeze(2)) for i in range(5)]\n #vp_flag = [(vp_flag[i] > 0).type(torch.cuda.FloatTensor) for i in range(5)]\n H_B, _ = self.lstm_vbb(e_vbb, vbb_len)\n\n\n #S = H_S\n #M = H_M\n #B = H_B\n #Q = e_q\n #Q = torch.stack([q_c[i] for i in range(5)], dim=1)\n #F = features['images'].squeeze()\n #video = features['filtered_image']\n #per_person_features = self.visual_proj(features['per_person_features'])\n #video = self.visual_proj(video)\n\n attention_mask, _ = self.len_to_mask(text_lengths, text.shape[1])\n #outputs = 
self.language_model(text, token_type_ids=token_type_ids, attention_mask=attention_mask)\n text_length = text.size(1)\n # len_to_mask flags padding positions with 1, while the language model expects 1 on real tokens, so invert it\n attention_mask = 1 - attention_mask\n outputs = self.language_model(text, attention_mask=attention_mask)\n text = outputs.last_hidden_state\n text = self.lang_proj(text)\n\n # encode video frames\n video = features['filtered_person_full']\n bb_lengths = features['filtered_person_full_len']\n frame_person = features['filtered_visual'].view(batch_size, -1, 3)[:,:,0]\n frame_person = frame_person.unsqueeze(2).view(batch_size, -1)\n frame_person = self._to_one_hot(frame_person, 21, mask=bb_lengths)\n video = torch.cat([video, frame_person], dim=-1)\n video_length = video.size(1)\n #video = self.visual_proj(video) \n video, _ = self.video_encoder(video)\n \n #inpt = torch.cat([Q,sep,a,sep,e_script,sep,per_person_features], dim=1)\n inpt = torch.cat([text,video,M], dim=1)\n inpt = inpt.permute(1,0,2) # sequence first\n out = self.transformer(inpt)\n out = out.permute(1,0,2) # batch first\n context = out[:,0,:]\n\n # predict person contained in each bounding box\n char = self.char_classifier(context.unsqueeze(dim=1).repeat(1, video_length, 1))\n \n # predict masked tokens\n labels = self.mask_classifier(out[:,:text_length,:])\n\n ### DISCRIMINATIVE DECODER\n\n num_options = 5\n hidden_dim = 300\n\n # stack answers\n e_ans = torch.stack(e_ans) \n\n # run through lstm\n e_ans = e_ans.reshape(batch_size * num_options, -1, hidden_dim)\n answers, _ = self.answer_rnn(e_ans)\n answers = answers[:,-1,:]\n answers = answers.reshape(num_options, batch_size, hidden_dim)\n\n # batch first\n answers = answers.permute(1,0,2)\n\n # shape the context so it is the same as the answers\n context = context.unsqueeze(dim=1).repeat(1, num_options, 1)\n\n answers = answers.contiguous().view(batch_size * num_options, hidden_dim)\n context = context.contiguous().view(batch_size * num_options, hidden_dim)\n\n # compute scores\n scores = torch.sum(answers * context, 1)\n scores = scores.view(batch_size, num_options)\n \n return scores, char, labels \n\n\n def _generate_square_subsequent_mask(self, sz):\n mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)\n mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))\n return mask\n\n\n def processor(self, context, context_l, qa_character, q_embed, q_l, a_embed, a_l, mhattn):\n #print(context.size(), context_l, len(qa_character), q_embed.size(), q_l, len(a_embed), a_l)\n u_q = self.cmat(context, context_l, q_embed, q_l)\n u_a = torch.stack([self.cmat(context, context_l, a_embed[i], a_l[i]) for i in range(5)])\n u_ch = torch.stack([mhattn(qa_character[i], context, context_l) for i in range(5)])\n return u_q, u_a, u_ch\n\n def stream_processor(self, classifier, mhattn, ctx_flag, ctx, ctx_l,\n qa_character, q_embed, q_l, a_embed, a_l):\n \n u_q = self.cmat(ctx, ctx_l, q_embed, q_l)\n u_a = [self.cmat(ctx, ctx_l, a_embed[i], a_l[i]) for i in range(5)]\n u_ch = [mhattn(qa_character[i], ctx, ctx_l) for i in range(5)]\n\n concat_a = [torch.cat([ctx, u_q,u_a[i], u_ch[i], ctx_flag[i]], dim=-1) for i in range(5)] \n \n # ctx, u_ch[i], ctx_flag[i],\n # exp_2 : ctx, u_a[i], u_q, ctx_flag[i], u_ch[i]\n maxout = [self.conv_pool(concat_a[i], ctx_l) for i in range(5)]\n\n answers = torch.stack(maxout, dim=1)\n return answers\n\n @classmethod\n def resolve_args(cls, args, vocab):\n return cls(args, vocab, args.n_dim, args.image_dim, args.layers, args.dropout)\n\nclass Conv1d(nn.Module):\n def __init__(self, n_dim, out_dim):\n super().__init__()\n out_dim = int(out_dim/4)\n 
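# out_dim is quartered here because forward() concatenates the outputs of the four kernel-size branches below, restoring the requested width.\n 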
self.conv_k1 = nn.Conv1d(n_dim, out_dim, kernel_size=1, stride=1)\n self.conv_k2 = nn.Conv1d(n_dim, out_dim, kernel_size=2, stride=1)\n self.conv_k3 = nn.Conv1d(n_dim, out_dim, kernel_size=3, stride=1)\n self.conv_k4 = nn.Conv1d(n_dim, out_dim, kernel_size=4, stride=1)\n #self.maxpool = nn.MaxPool1d(kernel_size = )\n\n def forward(self, x, x_l):\n # x : (B, T, 5*D)\n x_pad = torch.zeros(x.shape[0],3,x.shape[2]).type(torch.cuda.FloatTensor)\n x = torch.cat([x, x_pad], dim=1)\n x1 = F.relu(self.conv_k1(x.transpose(1,2)))[:,:,:-3]\n x2 = F.relu(self.conv_k2(x.transpose(1,2)))[:,:,:-2]\n x3 = F.relu(self.conv_k3(x.transpose(1,2)))[:,:,:-1]\n x4 = F.relu(self.conv_k4(x.transpose(1,2)))\n out = torch.cat([x1, x2, x3, x4], dim=1)\n out = out.transpose(1,2)\n return max_along_time(out, x_l)\n\n\nclass Norm(nn.Module):\n def __init__(self, d_model, eps = 1e-6):\n super().__init__()\n\n self.size = d_model\n # create two learnable parameters to calibrate normalisation\n self.alpha = nn.Parameter(torch.ones(self.size))\n self.bias = nn.Parameter(torch.zeros(self.size))\n self.eps = eps\n def forward(self, x):\n norm = self.alpha * (x - x.mean(dim=-1, keepdim=True)) / (x.std(dim=-1, keepdim=True) + self.eps) + self.bias\n return norm\n\nclass UtilityBlock(nn.Module):\n \"\"\"Efficient attention mechanism for many utilities block implemented for the visual dialog task (here: three utilities).\n Args:\n hidden_dim: dimension of the feature vector. Also the dimension of the final context vector provided to the decoder (required).\n feedforward_dim: dimension of the hidden feedforward layer, implementation details from \"Attention is all you need\" (default=2048).\n n_head: the number of heads in the multihead attention layers (default=8).\n dropout: the dropout probability (default=0.1).\n \"\"\"\n def __init__(self, hidden_dim, feedforward_dim=2048, n_head=8, dropout=0.1):\n super(UtilityBlock, self).__init__()\n self.multihead_attn = nn.MultiheadAttention(hidden_dim, n_head) # dropout? separate attention modules?\n self.linear = nn.Linear(2*hidden_dim, hidden_dim)\n self.relu = nn.ReLU() # nn.ReLU takes no size argument; the old positional value was silently treated as the inplace flag\n self.dropout = nn.Dropout(dropout)\n self.norm = nn.LayerNorm([hidden_dim], elementwise_affine=False)\n\n def forward(self, target, source_a, source_b):\n \"\"\"Passes the inputs through the utility attention block. For a detailed description see the paper. Inputs are tensors for each utility. The output is the updated utility tensor.\n Args:\n target: the target utility. 
The output will be of the same shape as this target utility.\n source_a: the first source utility to attend to.\n source_b: the second source utility to attend to.\n \"\"\"\n # Permute to fit multihead attention input\n target = target.permute(1,0,2)\n source_a = source_a.permute(1,0,2)\n source_b = source_b.permute(1,0,2)\n\n # Apply multihead attention mechanism for target and multiple sources as described in the paper\n #out_t, _ = self.multihead_attn(target, target, target) # self attention for target utility\n out_a, _ = self.multihead_attn(target, source_a, source_a) # attention to source utility a\n out_b, _ = self.multihead_attn(target, source_b, source_b) # attention to source utility b\n\n # Permute back to batch-first\n target = target.permute(1,0,2)\n #out_t = out_t.permute(1,0,2)\n out_a = out_a.permute(1,0,2)\n out_b = out_b.permute(1,0,2)\n \n # Add & norm\n out_a = self.norm(out_a + target)\n out_b = self.norm(out_b + target)\n\n #out = torch.cat((out_t, out_a, out_b), dim=2) # concatenate the resulting output tensors\n out = torch.cat([out_a, out_b], dim=2) # concatenate the resulting output tensors\n out = self.relu(self.linear(out)) \n out = self.dropout(out)\n out = self.norm(out + target) # add & norm (residual target)\n return out\n\nclass UtilityLayer(nn.Module):\n \"\"\"Efficient attention mechanism for many utilities layer implemented for the visual dialog task (here: three utilities). The layer consists of three parallel utility attention blocks.\n Args:\n hidden_dim: dimension of the feature vector. Also the dimension of the final context vector provided to the decoder (required).\n feedforward_dim: dimension of the hidden feedforward layer, implementation details from \"Attention is all you need\" (default=2048).\n n_head: the number of heads in the multihead attention layers (default=8).\n dropout: the dropout probability (default=0.1).\n \"\"\"\n def __init__(self, hidden_dim, feedforward_dim=1024, n_head=5, dropout=0.1):\n super(UtilityLayer, self).__init__()\n self.utility_t = UtilityBlock(hidden_dim, feedforward_dim, n_head, dropout)\n self.utility_v = UtilityBlock(hidden_dim, feedforward_dim, n_head, dropout)\n self.utility_a = UtilityBlock(hidden_dim, feedforward_dim, n_head, dropout)\n self.norm = nn.LayerNorm(hidden_dim)\n trm_layer = nn.TransformerEncoderLayer(d_model=hidden_dim, nhead=n_head, dim_feedforward=feedforward_dim, dropout=dropout, activation='gelu')\n self.trm_t = nn.TransformerEncoder(trm_layer, num_layers=1, norm=self.norm)\n self.trm_v = nn.TransformerEncoder(trm_layer, num_layers=1, norm=self.norm)\n self.trm_a = nn.TransformerEncoder(trm_layer, num_layers=1, norm=self.norm)\n\n def forward(self, T, V, A):\n \"\"\"Passes the input utilities through the utility attention layer. Inputs are passed through their respective blocks in parallel. 
The outputs are the three updated utility tensors.\n Args:\n T, V, A: the three utility tensors; each is updated by attending to the other two.\n \"\"\"\n T_out = self.utility_t(T, V, A)\n T_out = self.trm_t(T_out)\n V_out = self.utility_v(V, T, A)\n V_out = self.trm_v(V_out)\n A_out = self.utility_a(A, T, V)\n A_out = self.trm_a(A_out)\n return T_out, V_out, A_out\n","sub_path":"code/model/MMT_joint.py","file_name":"MMT_joint.py","file_ext":"py","file_size_in_byte":20161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"83275334","text":"\n#from __future__ import division\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport sys\nimport caffe\nimport progressbar\nimport h5py\nfrom sklearn import preprocessing\n\nimport ipdb\n\nbar = progressbar.ProgressBar()\ncaffe_root = './caffe/' # this file should be run from {caffe_root}/examples (otherwise change this line)\nsys.path.insert(0, caffe_root + 'python')\ncaffe.set_device(0) # if we have multiple GPUs, pick the first one\ncaffe.set_mode_gpu()\n\nmodel_def = './deploy_single.prototxt'\nmodel_weights = './training/single_iter_20000.caffemodel'\n\nnet = caffe.Net(model_def, # defines the structure of the model\n model_weights, # contains the trained weights\n caffe.TEST) # use test mode (e.g., don't perform dropout)\n\ntransformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})\ntransformer.set_transpose('data', (2,0,1)) # move image channels to outermost dimension\ntransformer.set_channel_swap('data', (2,1,0)) # swap channels from RGB to BGR\n\n\n\ndataroot = \"/mnt/hdd/dataset/audioset/eval_spectrogram_25ms_6frame/\"\nwith open('./data_val_single.txt') as f:\n content = f.readlines()\nprobs = np.zeros((len(content),527)) \ni = 0\nlabel = []\nfor row in bar(content):\n\timg_file = row.split(' ')[0]\n\timage = caffe.io.load_image(dataroot + img_file)#image.shape = 50 96 3\n\ttransformed_image = transformer.preprocess('data', image) #transformed_image = 3 50 96\n\tnet.blobs['data'].data[...] 
= transformed_image\n\tnet.forward()\n\toutput_prob = net.blobs['score'].data[0] #(527,)\n\tprobs[i] = output_prob\n\tlabel.append(int(row.split(' ')[1]))\n\ti +=1\n\n#with h5py.File('./label_valf6.h5', 'r') as f:\n#\tlabel = f['rabel'][()]\t#label.shape (1027365, 527)\n#label = np.load('./label_valf6_short.npy','r')\n\n#blabel = MultiLabelBinarizer().fit_transform(label) \nlb = preprocessing.LabelBinarizer()\nlb.fit(label)\nblabel = lb.transform(label)\nprobi = (probs >= 0.5).astype(float)\nacc = (blabel == probi).astype(float)\n\nfor i in range(probi.shape[1]): # range (not xrange) keeps this runnable on Python 3\n\tprint(' %d th acc is %f\\n'%(i, sum(acc[:,i]) / acc.shape[0]) )\n\nprint(' total acc is %f\\n'%(sum(sum(acc)) / np.prod(acc.shape))) \n","sub_path":"inference_single.py","file_name":"inference_single.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"110041415","text":"#!/usr/bin/env python3\n#coding=utf-8\n\nimport select\nfrom socket import *\n\nSERVER_ADDR = ('127.0.0.1', 9999)\n\nconnections = {}\npoll = select.poll()\ntcp_s = socket(AF_INET, SOCK_STREAM)\ntcp_s.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1) # set SO_REUSEADDR before bind so it actually takes effect\ntcp_s.bind(SERVER_ADDR)\ntcp_s.listen()\n\n# register the listening socket with poll\npoll.register(tcp_s.fileno())\nwhile 1:\n pollList = poll.poll()\n for fd, event in pollList:\n if fd == tcp_s.fileno():\n conn, addr = tcp_s.accept()\n key = conn.fileno()\n # add the new client to the connection table\n connections.update({key: (conn, addr)})\n poll.register(conn.fileno())\n else:\n conn, addr = connections[fd]\n data = conn.recv(1024)\n if data:\n conn.send(b're: '+data)\n print(data)\n else:\n # peer closed the connection: unregister and clean up\n poll.unregister(fd)\n conn.close()\n del connections[fd]\n","sub_path":"python/网络通信/media/poll-server.py","file_name":"poll-server.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"317713859","text":"import unittest\n\nfrom rlcard.games.nolimitholdem.game import NolimitholdemGame as Game\n\nclass TestNolimitholdemMethods(unittest.TestCase):\n\n def test_get_action_num(self):\n game = Game()\n action_num = game.get_action_num()\n self.assertEqual(action_num, 103)\n\n def test_init_game(self):\n\n game = Game()\n state, player_id = game.init_game()\n test_id = game.get_player_id()\n self.assertEqual(test_id, player_id)\n self.assertIn('call', state['legal_actions'])\n self.assertIn('fold', state['legal_actions'])\n for i in range(3,100):\n self.assertIn(i, state['legal_actions'])\n\n def test_step(self):\n game = Game()\n\n # test raise\n _, player_id = game.init_game()\n init_raised = game.round.raised[player_id]\n game.step(10)\n step_raised = game.round.raised[player_id]\n self.assertEqual(init_raised+10, step_raised)\n\n # test call\n game.init_game()\n init_not_raise_num = game.round.not_raise_num\n game.step('call')\n step_not_raise_num = game.round.not_raise_num\n self.assertEqual(init_not_raise_num+1, step_not_raise_num)\n\n # test fold\n game.init_game()\n game.step('fold')\n self.assertTrue(game.round.player_folded)\n\n # test check\n game.init_game()\n game.step('call')\n game.step('check')\n self.assertEqual(game.round_counter, 1)\n\nif __name__ == '__main__':\n unittest.main()\n\n","sub_path":"tests/games/test_nolimitholdem_game.py","file_name":"test_nolimitholdem_game.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"633013895","text":"from CipherInterface import *\n\nclass 
PlayFair(CipherInterface):\n def __init__(self, key):\n self.keyMatrix = self.setKey(key)\n\n def setKey(self, key):\n alphabet = ['a', 'b', 'c', 'd', 'e',\n 'f', 'g', 'h', 'i', 'k',\n 'l', 'm', 'n', 'o', 'p',\n 'q', 'r', 's', 't', 'u',\n 'v', 'w', 'x', 'y', 'z']\n splitlist0 = list(key)\n\n splitlist1 = []\n\n for x in splitlist0:\n if x not in splitlist1:\n splitlist1.append(x)\n\n if 'j' in splitlist1:\n # Playfair merges i/j: the 5x5 grid has no 'j', so treat any 'j' in the key as 'i'\n splitlist1 = ['i' if x == 'j' else x for x in splitlist1]\n\n matrixlist = []\n\n for x in splitlist1:\n if x not in matrixlist:\n matrixlist.append(x)\n\n for x in alphabet:\n if x not in matrixlist:\n matrixlist.append(x)\n\n matrix = [[], [], [], [], []]\n listcount = 0\n row = -1\n for x in range(0, len(matrixlist), 5):\n row += 1\n for y in range(x, x+5):\n matrix[row].append(matrixlist[listcount])\n listcount += 1\n\n return matrix\n\n def encrypt(self, plaintext):\n text = list(plaintext)\n ciphertext = []\n\n i = 0\n for x in range(len(text)//2):\n if text[i] == text[i+1]:\n text.insert(i+1, 'x')\n i += 2\n\n if len(text) % 2 == 1:\n text.append('x')\n\n i = 0\n pairs = []\n for x in range(1, len(text)//2 + 1):\n pairs.append(text[i:i+2])\n i += 2\n\n for x in pairs:\n for y in range(5):\n for z in range(5):\n if self.keyMatrix[y][z] == x[0]:\n row0 = y\n col0 = z\n\n for y in range(5):\n for z in range(5):\n if self.keyMatrix[y][z] == x[1]:\n row1 = y\n col1 = z\n\n if row0 == row1:\n if col0 == 4:\n col0 = -1\n if col1 == 4:\n col1 = -1\n\n ciphertext.append(self.keyMatrix[row0][col0+1])\n ciphertext.append(self.keyMatrix[row0][col1+1])\n elif col0 == col1:\n if row0 == 4:\n row0 = -1\n if row1 == 4:\n row1 = -1\n\n ciphertext.append(self.keyMatrix[row0+1][col0])\n ciphertext.append(self.keyMatrix[row1+1][col1])\n else:\n ciphertext.append(self.keyMatrix[row0][col1])\n ciphertext.append((self.keyMatrix[row1][col0]))\n\n output = \"\"\n for x in ciphertext:\n output += x\n\n return output\n\n def decrypt(self, ciphertext):\n text = list(ciphertext)\n\n plaintext = []\n\n i = 0\n pairs = []\n for x in range(1, len(text)//2 + 1):\n pairs.append(text[i:i+2])\n i += 2\n\n for x in pairs:\n for y in range(5):\n for z in range(5):\n if self.keyMatrix[y][z] == x[0]:\n row0 = y\n col0 = z\n\n for y in range(5):\n for z in range(5):\n if self.keyMatrix[y][z] == x[1]:\n row1 = y\n col1 = z\n\n if row0 == row1:\n if col0 == 4:\n col0 = -1\n if col1 == 4:\n col1 = -1\n\n plaintext.append(self.keyMatrix[row0][col0-1])\n plaintext.append(self.keyMatrix[row0][col1-1])\n elif col0 == col1:\n if row0 == 4:\n row0 = -1\n if row1 == 4:\n row1 = -1\n\n plaintext.append(self.keyMatrix[row0-1][col0])\n plaintext.append(self.keyMatrix[row1-1][col1])\n else:\n plaintext.append(self.keyMatrix[row0][col1])\n plaintext.append((self.keyMatrix[row1][col0]))\n\n output = \"\"\n for x in plaintext:\n output += x\n\n return output\n","sub_path":"PlayFair.py","file_name":"PlayFair.py","file_ext":"py","file_size_in_byte":4182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"636488460","text":"from .r_dependencies import *\nfrom .r_base import r_base\nclass r_statistics(r_base):\n def calculate_ave_CV_R(self,data_I):\n # calculate average and CV of data\n # Call to R\n try:\n # convert lists to R objects\n data_R = robjects.FloatVector(data_I);\n\n data_ave_R = self.stats.ave(data_R);\n data_ave_O = data_ave_R.rx2(1)[0];\n\n data_var_R = self.stats.var(data_R);\n data_var = data_var_R.rx2(1)[0];\n data_CV_O = sqrt(data_var)/data_ave_O*100;\n\n return data_ave_O, data_CV_O;\n except Exception as e:\n 
print('error in R: {0}'.format(e))\n def calculate_ave_var_R(self,data_I):\n # calculate average and CV of data\n # Call to R\n try:\n # convert lists to R objects\n data_R = robjects.FloatVector(data_I);\n\n data_ave_R = self.stats.ave(data_R);\n data_ave_O = data_ave_R.rx2(1)[0];\n\n data_var_R = self.stats.var(data_R);\n data_var_O = data_var_R.rx2(1)[0];\n\n return data_ave_O, data_var_O;\n except Exception as e:\n print(e);\n exit(-1);\n def calculate_pairwiseTTest(self,data_I,pooled_sd_I = \"FALSE\", paired_I=\"TRUE\",padjusted_method_I = \"bonferroni\",alternative_I = \"two.sided\"):\n '''calculate a pairwise t-test using R's built in Stats package\n padjusted_methods: (\"holm\", \"hochberg\", \"hommel\", \"bonferroni\", \"BH\", \"BY\", \"fdr\", \"none\")\n alternative_tests: (\"greater\",\"less\",\"two.sided\")\n Note pooled_sd and paired cannot both be True\n '''\n\n #make the dataFrame\n \n #format into R matrix and list objects\n # convert data dict to matrix filling in missing values\n # with 'NA'\n sns = []\n cn = []\n for d in data_I:\n sns.append(d['sample_name_short']); \n cn.append(d['component_name']);\n sns_sorted = sorted(set(sns))\n cn_sorted = sorted(set(cn))\n concentrations = ['NA' for r in range(len(sns_sorted)*len(cn_sorted))];\n cnt = 0;\n cnt_bool = True;\n sna = []\n for c in cn_sorted:\n for s in sns_sorted:\n for d in data_I:\n if d['sample_name_short'] == s and d['component_name'] == c:\n if d['calculated_concentration']:\n concentrations[cnt] = d['calculated_concentration'];\n if cnt_bool:\n sna.append(d['sample_name_abbreviation']);\n break;\n cnt = cnt+1\n cnt_bool = False;\n if len(cn_sorted)>1:\n print('more than one component detected!')\n return None;\n # check if there were any missing values in the data set in the first place\n mv = 0;\n for c in concentrations:\n if c=='NA':\n mv += 1;\n data_pairwise = []; # default (empty) result when missing values prevent running the test\n if mv==0:\n # Call to R\n try:\n # convert lists to R matrix\n concentrations_r = '';\n for c in concentrations:\n concentrations_r = (concentrations_r + ',' + str(c));\n concentrations_r = concentrations_r[1:];\n r_statement = ('concentrations_v = c(%s)' % concentrations_r);\n ans = robjects.r(r_statement);\n # convert lists to R list\n sna_r = '';\n for c in sna:\n sna_r = (sna_r + ',' + '\"' + c + '\"');\n sna_r = sna_r[1:];\n r_statement = ('sna_v = c(%s)' % sna_r);\n ans = robjects.r(r_statement);\n # get basic stats\n mean = None; #same order as sna\n var = None;\n n = None;\n r_statement = ('tapply(concentrations_v,sna_v,mean)'); # calculate the mean\n ans = robjects.r(r_statement);\n mean = numpy.array(ans);\n r_statement = ('tapply(concentrations_v,sna_v,var)'); # calculate the variance\n ans = robjects.r(r_statement);\n var = numpy.array(ans);\n r_statement = ('tapply(concentrations_v,sna_v,length)'); # calculate the # of samples\n ans = robjects.r(r_statement);\n n = numpy.array(ans);\n #convert to Data Frame\n r_statement = ('dF = data.frame(concentrations_v,sna_v)');\n ans = robjects.r(r_statement);\n r_statement = ('names(dF) = c(\"concentrations\",\"sna\")');\n ans = robjects.r(r_statement);\n r_statement = ('attach(dF)');\n ans = robjects.r(r_statement);\n # call paired T-test without correction\n r_statement = ('pairwise.t.test(concentrations_v, sna_v, p.adjust.method = \"none\", pool.sd = %s, paired = %s, alternative = \"%s\")' %(pooled_sd_I, paired_I ,alternative_I));\n ans = robjects.r(r_statement);\n test_description = ans.rx('method')[0][0]\n pvalues = numpy.array(ans.rx('p.value')[0]);\n rownames = numpy.array(ans[2].rownames);\n colnames = 
numpy.array(ans[2].colnames);\n # call paired T-test with correction\n r_statement = ('pairwise.t.test(concentrations_v, sna_v, p.adjust.method = \"%s\", pool.sd = %s, paired = %s, alternative = \"%s\")' %(padjusted_method_I, pooled_sd_I, paired_I ,alternative_I))\n ans = robjects.r(r_statement);\n test_description = ans.rx('method')[0][0]\n pvalues_adjusted = numpy.array(ans.rx('p.value')[0]);\n pvalue_adjusted_description = ans.rx('p.adjust.method')[0][0]\n rownames_adjusted = numpy.array(ans[2].rownames);\n colnames_adjusted = numpy.array(ans[2].colnames);\n # convert array back to dict\n data_pairwise = [];\n # extract out unique sna's in order\n sna_set = [];\n for s in sna:\n if not(s in sna_set):\n sna_set.append(s);\n # extract out unique sna's in order\n for c1 in range(len(rownames)):\n for c2 in range(len(colnames)):\n if c1 != c2 and pvalues[c1,c2]!='NA':\n # extract out post hoc results\n pair = colnames[c2];\n pvalue = pvalues[c1,c2];\n pvalue_adjusted = pvalues_adjusted[c1,c2];\n #foldChange = mean[c2]/mean[c1];\n for r in range(len(cn_sorted)):\n data_tmp = {};\n data_tmp['sample_name_abbreviation_1'] = rownames[c1];\n data_tmp['sample_name_abbreviation_2'] = pair;\n data_tmp['component_name'] = cn_sorted[r];\n #data_tmp['mean'] = mean[c1];\n #data_tmp['var'] = var[c1];\n #data_tmp['n'] = n[c1];\n data_tmp['test_stat'] = None;\n data_tmp['test_description'] = test_description;\n data_tmp['pvalue'] = pvalue;\n data_tmp['pvalue_corrected'] = pvalue_adjusted;\n data_tmp['pvalue_corrected_description'] = pvalue_adjusted_description;\n #data_tmp['fold_change'] = foldChange;\n data_pairwise.append(data_tmp);\n except Exception as e:\n print(e);\n exit(-1);\n return data_pairwise; # no ANOVA is computed in this method, so only the pairwise results are returned\n def calculate_twoSampleTTest(self,data_1_I, data_2_I, alternative_I = \"two.sided\", mu_I = 0, paired_I=\"TRUE\", var_equal_I = \"TRUE\", ci_level_I = 0.95, padjusted_method_I = \"bonferroni\"):\n '''calculate a two Sample t-test using R's built in Stats package\n padjusted_methods: (\"holm\", \"hochberg\", \"hommel\", \"bonferroni\", \"BH\", \"BY\", \"fdr\", \"none\")\n alternative_tests: (\"greater\",\"less\",\"two.sided\")\n '''\n #make the dataFrame\n\n #format into R matrix and list objects\n # convert data dict to matrix filling in missing values\n # with 'NA'\n concentrations_1 = [];\n for d in data_1_I:\n if d:\n concentrations_1.append(d);\n else:\n concentrations_1.append('NA')\n concentrations_2 = [];\n for d in data_2_I:\n if d:\n concentrations_2.append(d);\n else:\n concentrations_2.append('NA')\n # Call to R\n try:\n # convert lists to R lists\n concentrations_1_r = '';\n for c in concentrations_1:\n concentrations_1_r = (concentrations_1_r + ',' + str(c));\n concentrations_1_r = concentrations_1_r[1:];\n r_statement = ('concentrations_1_v = c(%s)' % concentrations_1_r);\n ans = robjects.r(r_statement);\n concentrations_2_r = '';\n for c in concentrations_2:\n concentrations_2_r = (concentrations_2_r + ',' + str(c));\n concentrations_2_r = concentrations_2_r[1:];\n r_statement = ('concentrations_2_v = c(%s)' % concentrations_2_r);\n ans = robjects.r(r_statement);\n # call paired T-test without correction\n r_statement = ('t.test(concentrations_1_v,concentrations_2_v, alternative = \"%s\", mu = %s, paired = %s, var.equal = %s, conf.level = %s)'\\\n %(alternative_I,mu_I, paired_I, var_equal_I ,ci_level_I));\n ans = robjects.r(r_statement);\n test_stat = ans.rx2('statistic')[0]\n test_description = ans.rx2('method')[0]\n pvalue = ans.rx2('p.value')[0]\n mean = 
ans.rx2('estimate')[0]\n ci = numpy.array(ans.rx2('conf.int'))\n # adjust the p-value\n r_statement = ('p.adjust(%s, method = \"%s\")' %(pvalue,padjusted_method_I));\n ans = robjects.r(r_statement);\n pvalue_adjusted = ans[0]\n pvalue_adjusted_description = padjusted_method_I\n # extract out data\n data_tmp = {};\n data_tmp['mean'] = mean;\n data_tmp['ci_lb'] = ci[0];\n data_tmp['ci_ub'] = ci[1];\n data_tmp['ci_level'] =ci_level_I;\n data_tmp['test_stat'] = test_stat;\n data_tmp['test_description'] = test_description;\n data_tmp['pvalue'] = pvalue;\n data_tmp['pvalue_corrected'] = pvalue_adjusted;\n data_tmp['pvalue_corrected_description'] = pvalue_adjusted_description;\n except Exception as e:\n print(e);\n exit(-1);\n return data_tmp;\n def calculate_oneSampleTTest(self,data_1_I, alternative_I = \"two.sided\", mu_I = 0, paired_I=\"FALSE\", var_equal_I = \"TRUE\", ci_level_I = 0.95, padjusted_method_I = \"bonferroni\"):\n '''calculate a one sample t-test using R's built in Stats package\n padjusted_methods: (\"holm\", \"hochberg\", \"hommel\", \"bonferroni\", \"BH\", \"BY\", \"fdr\", \"none\")\n alternative_tests: (\"greater\",\"less\",\"two.sided\")\n Note: a paired test needs two samples, so paired now defaults to FALSE here (R's t.test errors otherwise)\n '''\n #make the dataFrame\n\n #format into R matrix and list objects\n # convert data dict to matrix filling in missing values\n # with 'NA'\n concentrations_1 = [];\n for d in data_1_I:\n if d:\n concentrations_1.append(d);\n else:\n concentrations_1.append('NA')\n # Call to R\n try:\n # convert lists to R lists\n concentrations_1_r = '';\n for c in concentrations_1:\n concentrations_1_r = (concentrations_1_r + ',' + str(c));\n concentrations_1_r = concentrations_1_r[1:];\n r_statement = ('concentrations_1_v = c(%s)' % concentrations_1_r);\n ans = robjects.r(r_statement);\n # call paired T-test without correction\n r_statement = ('t.test(concentrations_1_v,alternative = \"%s\", mu = %s, paired = %s, var.equal = %s, conf.level = %s)'\\\n %(alternative_I,mu_I, paired_I, var_equal_I ,ci_level_I));\n ans = robjects.r(r_statement);\n test_stat = ans.rx2('statistic')[0]\n test_description = ans.rx2('method')[0]\n pvalue = ans.rx2('p.value')[0]\n #mean = ans.rx2('estimate')[0]\n ci = numpy.array(ans.rx2('conf.int'))\n # adjust the p-value\n r_statement = ('p.adjust(%s, method = \"%s\")' %(pvalue,padjusted_method_I));\n ans = robjects.r(r_statement);\n pvalue_adjusted = ans[0]\n pvalue_adjusted_description = padjusted_method_I\n # get basic stats\n mean = None; #same order as sna\n var = None;\n n = None;\n r_statement = ('mean(concentrations_1_v)'); # calculate the mean\n ans = robjects.r(r_statement);\n mean = ans[0];\n r_statement = ('var(concentrations_1_v)'); # calculate the variance\n ans = robjects.r(r_statement);\n var = ans[0];\n r_statement = ('length(concentrations_1_v)'); # calculate the # of samples\n ans = robjects.r(r_statement);\n n = ans[0];\n\n # convert array back to dict\n data_tmp = {};\n data_tmp['mean'] = mean;\n data_tmp['var'] = var;\n data_tmp['cv'] = sqrt(var)/abs(mean)*100 #glog normalization will have negative values\n data_tmp['n'] = n;\n data_tmp['ci_lb'] = ci[0];\n data_tmp['ci_ub'] = ci[1];\n data_tmp['ci_level'] =ci_level_I;\n data_tmp['test_stat'] = test_stat;\n data_tmp['test_description'] = test_description;\n data_tmp['pvalue'] = pvalue;\n data_tmp['pvalue_corrected'] = pvalue_adjusted;\n data_tmp['pvalue_corrected_description'] = pvalue_adjusted_description;\n except Exception as e:\n print(e);\n return None;\n #exit(-1);\n return 
data_tmp;","sub_path":"calculate_utilities/r_statistics.py","file_name":"r_statistics.py","file_ext":"py","file_size_in_byte":14400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"244354758","text":"#!/usr/bin/env python\n#import roslib; roslib.load_manifest('smach_tutorials')\nimport rospy\nimport smach\nimport smach_ros\nimport time\nfrom smach import Concurrence\n\nclass Foo(smach.State):\n\tdef __init__(self):\n\t\tsmach.State.__init__(self, outcomes = ['outcome7'])\n\t\tself.counter = 0\n\n\n\tdef execute(self, userdata):\n\t\trospy.loginfo('Executing state FOO')\n\t\trospy.loginfo('Counter = %f'%self.counter) \n\t\tif self.counter < 8:\n\t\t\tself.counter += 1\n\t\t\ttime.sleep(1)\n\t\t\treturn 'still processing'\n\t\telse:\n\t\t\treturn 'outcome7'\n\n\nclass Boo(smach.State):\n\tdef __init__(self):\n\t\tsmach.State.__init__(self, outcomes =['outcome2','still_processing'])\n\t\tself.counter = 0\n\tdef execute(self, userdata):\n\t\trospy.loginfo('Executing state BOO')\n\t\trospy.loginfo('Counter = %f'%self.counter) \n\t\tif self.counter < 6:\n\t\t\tself.counter = self.counter + 1\n\t\t\ttime.sleep(1)\n\t\t\treturn 'still_processing'\n\t\t\n\t\t\t#print 'EXECUTED'\n\t\telse:\n\t\t\treturn 'outcome2'\n\n\n\nclass Hoo(smach.State):\n\tdef __init__(self):\n\t\tsmach.State.__init__(self, outcomes =['outcome3','still_processing'])\n\t\tself.counter = 0\n\tdef execute(self, userdata):\n\t\trospy.loginfo('Executing state HOO')\n\t\trospy.loginfo('Counter = %f'%self.counter) \n\t\tif self.counter < 6:\n\t\t\tself.counter = self.counter + 1\n\t\t\ttime.sleep(1)\n\t\t\treturn 'still_processing'\n\t\t\n\t\t\t#print 'EXECUTED'\n\t\telse:\n\t\t\treturn 'outcome3'\n\nclass Idle(smach.State):\n\tdef __init__(self):\n\t\tsmach.State.__init__(self, outcomes =['outcome1'])\n\t\t#self.counter = 0\n\tdef execute(self, userdata):\n\t\trospy.loginfo('Executing state IDLE')\n\t\t#rospy.loginfo('Counter = %f'%self.counter)\n\t\ttime.sleep(3)\n\t\treturn 'outcome1'\n\n#class Con2(smach.Concurrence):\n\t#sm_con2 = smach.Concurrence(outcomes = ['outcome8','outcome7'],\n\t\t\t\t\t\t\t\t\t\t#default_outcome = 'outcome7',\n\t\t\t\t\t\t\t\t\t\t#outcome_map = {'outcome8':{'HOO':'outcome10','GOO':'outcome9'}})\n#\tdef __init__(self):\n#\t\tsmach.Concurrence.__init__(self, outcomes = ['outcome8','outcome7'],\n#\t\t\t\t\t\t\t\t\t\t default_outcome = 'outcome7',\n#\t\t\t\t\t\t\t\t\t\t outcome_map = {'outcome8':{'HOO':'outcome9',\n#\t\t\t\t\t\t\t\t\t\t \t\t\t\t\t\t\t'GOO':'outcome10'}})\n#\t\t\t\t\t\t\t\t\t\t \t\t\t\t#'outcome7':{'HOO':'still processing',\n\t\t\t\t\t\t\t\t\t\t \t\t\t\t#'GOO':'still processing'}})\n\t#d\tsmach.StateMachine('CON2',sm_con2,transitions = {'outcome7':'CON2','outcome8':'outcome5'})\n#\t\tself.counter = 0\n\n\t#def execute(self, parent_ud = smach.UserData()):\n\n\t\n\n\t#def execute(self,userdata):\n\t#\ttime.sleep(1)\n\t\t#get_children(self)\n\t\t\n\t#\tif self.counter < 10:\n\t#\t\tself.counter = self.counter + 1\n\t#\t\treturn 'still processing'\n\t#\telse:\n\n\t#\treturn 'processing'\n\n #class SM(smach.StateMachine):\n #\tdef __init__(self):\n #\t\tsmach.StateMachine.__init__(self, outcomes = ['outcome6'])\n\n# \t\t#smach.StateMachine.add('GOO',Goo(), transitions = {'outcome10':'outcome8'})\n\n# class SM_2(smach.StateMachine):\n# \tdef __init__(self):\n# \t\tsmach.StateMachine.__init__(self,outcomes = ['outcome5','outcome6'])\n\n\ndef main():\n\trospy.init_node('smach_example_state_machine')\n\n\t#sm = smach.StateMachine(outcomes= 
['outcome6'])\n\tsm = smach.StateMachine(outcomes = ['outcome6'])\n\twith sm:\n\t\tsmach.StateMachine.add('IDLE',Idle(), transitions = {'outcome1':'CON'})\n\n\t\tsm_con = smach.Concurrence(outcomes = ['outcome5','outcome4'],\n\t\t\t\t\t\t\t\t default_outcome = 'outcome5',\n\t\t\t\t\t\t\t\t outcome_map = {'outcome4':{'FOO':'outcome7',\n\t\t\t\t\t\t\t\t \t\t\t\t \t\t\t 'SM_SUB':'outcome10'},\n\t\t\t\t\t\t\t\t \t\t\t\t 'outcome5':{'SM_SUB':'bounce'}})\n\n\t\tsm_sub = smach.StateMachine(outcomes = ['outcome10','bounce'])\n\n\t\twith sm_con:\n\t\t\t\n\n\t\t\tsmach.Concurrence.add('FOO',Foo())\n\t\t\tsmach.Concurrence.add('SM_SUB',sm_sub)\n\t\t\t\n\t\t\twith sm_sub:\n\n\t\t\t\tsmach.StateMachine.add('BOO',Boo(), transitions = {'outcome2':'HOO','still_processing':'bounce'})\n\t\t\t\tsmach.StateMachine.add('HOO',Hoo(), transitions = {'outcome3':'outcome10','still_processing':'bounce'})\n\n\t\t\t\n\t\tsmach.StateMachine.add('CON', sm_con, transitions={'outcome5':'CON','outcome4':'outcome6'})\n\n\n\t\t#\n\n\tsis = smach_ros.IntrospectionServer('server_name', sm, '/SM_ROOT')\n\tsis.start()\n\t#sm.execute()\n\t#sm_con.execute()\n\toutcome = sm.execute()\n\n\trospy.spin()\n\tsis.stop()\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"src/template_sm/src/smach_concurrence_demo_4.py","file_name":"smach_concurrence_demo_4.py","file_ext":"py","file_size_in_byte":4070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"221341546","text":"# def create_dictionary():\n# new_dictionary = {\"day\": \"between sunrise and sunset\",\n# \"night\": \"when the moon is out\"\n# }\n# return new_dictionary\n\n\ndef create_patient():\n new_patient = {\n \"first name\":\"Smith\",\n \"age\":60,\n \"married\":False,\n \"test_results\":[0,16,23,2.3]\n }\n #test_one = new_patient[\"test results\"][1]\n #print(test_one)\n return new_patient\n\ndef save_Json(patient):\n import json\n filename = \"patient_data.txt\"\n out_file = open(filename,'w')\n json.dump(patient,out_file)\n out_file.close()\n\n# def read_dictionary(my_dict):\n# my_key = \"day\"\n# y = my_dict[my_key]\n# print(\"The definition of {} is {}\".format(my_key,y))\n# return y\n#\n# def add_info(my_dict):\n# my_dict[\"lunch\"] = \"The meal I eat in the middle of the day\"\n# my_dict[\"day\"] = \"when I am not sleeping\"\n# return my_dict\n\n\nif __name__ == \"__main__\":\n # x = create_dictionary()\n # read_dictionary(x)\n # print(x)\n # x = add_info(x)\n # print(x)\n # print(type(x))\n # z = x.get(\"dinner\")\n # print(z)\n x = create_patient()\n save_Json(x)","sub_path":"dictionary.py","file_name":"dictionary.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"359988061","text":"#AL Interpreter\n#Python 3.6.4\nimport sys\nimport math\ninput_memory = []\nlabel_table={}\nsymbol_table={}\n\n#function to Check the syntax of the labels\ndef VerifySyntax_LBL(token):\n if token.isalnum() and token.isupper() and token[0].isalpha():\n return\n else:\n print('Error: Wrong Syntax {}. Check Documentation'.format(token))\n sys.exit()\n#function to Check Operands syntax\ndef VerifySyntax_OPD(token):\n if( token.isalnum() and token.isupper() and token[0].isalpha()) or token.isnumeric() or (token[:1]=='-' and token[1:].isnumeric()):\n return\n else:\n print('Error: Wrong Syntax {}. 
Check Documentation'.format(token))\n sys.exit()\n\ndef truncate(variable):\n ## In this function we only keep track of the 10 right digits\n ## 9999999999 + 1 would lead to 10000000000 which is a 0\n ## This won't be handled as the user should not enter such values\n ## So an overflow won't lead to an error but a wrong output\n trunc = 1\n if variable < 0:\n trunc = -1\n variable = trunc*(abs(variable)%(10**10))\n return variable\n\n#function that returns value from either a numeric or a symbol\ndef symbol_or_variable(opn):\n if opn.isnumeric() or (opn[:1]=='-' and opn[1:].isnumeric()):\n return int(opn)\n else:\n if opn in symbol_table:\n return int(symbol_table[opn])\n else:\n print(\"{} was not defined\".format(opn))\n sys.exit() # undefined symbols are fatal, matching the other error paths in this interpreter\n\n#function to format the lines by removing spaces\ndef clean_line(line):\n line = ''.join(line.split())\n return str(line)\n\n#function that takes a full operation then returns the tokens\ndef parse_operation(operation):\n op = str(operation[5:9]).rstrip()\n VerifySyntax_LBL(op)\n opn1 = str(operation[10:14]).rstrip()\n opn2 = str(operation[15:19]).rstrip()\n opn3 = str(operation[20:24]).rstrip()\n VerifySyntax_OPD(opn1)\n VerifySyntax_OPD(opn2)\n VerifySyntax_OPD(opn3)\n return op,opn1,opn2,opn3\n\n#Instruction Set\ndef ADD(opn1,opn2,opn3,program_counter,input_pointer):\n opn1 = symbol_or_variable(opn1)\n opn2 = symbol_or_variable(opn2)\n Sum = opn1 + opn2\n if abs(Sum) > 9999999999:\n print(\"Data Overflow/Underflow @ {}, result will be truncated...\".format(program_counter))\n Sum = truncate(Sum)\n Sum = round(Sum)\n symbol_table[opn3] = Sum\n return program_counter,input_pointer\n\ndef SUB(opn1,opn2,opn3,program_counter,input_pointer):\n opn1 = symbol_or_variable(opn1)\n opn2 = symbol_or_variable(opn2)\n sub = opn1 - opn2\n if abs(sub) > 9999999999:\n print(\"Data Overflow/Underflow @ {}, result will be truncated...\".format(program_counter))\n sub = truncate(sub)\n sub = round(sub)\n symbol_table[opn3] = sub\n return program_counter,input_pointer\n\ndef READ(opn1,opn2,opn3,program_counter,input_pointer):\n if(input_pointer>=len(input_memory)):\n print(\"Error: No input left to read @{}\".format(program_counter))\n sys.exit()\n elif(abs(int(input_memory[input_pointer]))>9999999999):\n print(\"Data Overflow/Underflow @ {}, result will be truncated...\".format(program_counter))\n read_input = truncate(int(input_memory[input_pointer])) # truncate() does arithmetic, so convert the raw input string first\n symbol_table[opn3] = read_input\n else:\n symbol_table[opn3] = input_memory[input_pointer]\n input_pointer += 1\n return program_counter,input_pointer\n\ndef WRIT(opn1,opn2,opn3,program_counter,input_pointer):\n opn1 = symbol_or_variable(opn1)\n print(\"Output: {}\".format(opn1))\n return program_counter,input_pointer\n\ndef ASGN(opn1,opn2,opn3,program_counter,input_pointer):\n opn1 = symbol_or_variable(opn1)\n symbol_table[opn3] = opn1\n return program_counter,input_pointer\n\ndef MULT(opn1,opn2,opn3,program_counter,input_pointer):\n opn1 = symbol_or_variable(opn1)\n opn2 = symbol_or_variable(opn2)\n mult = opn1 * opn2\n if abs(mult) > 9999999999:\n print(\"Data Overflow/Underflow @ {}, result will be truncated...\".format(program_counter))\n mult = truncate(mult)\n mult = round(mult)\n symbol_table[opn3] = mult\n return program_counter,input_pointer\n\ndef DIV(opn1,opn2,opn3,program_counter,input_pointer):\n opn1 = symbol_or_variable(opn1)\n opn2 = symbol_or_variable(opn2)\n div = opn1/opn2\n if abs(div) > 9999999999:\n print(\"Data Overflow/Underflow @ {}, result will be 
truncated...\".format(program_counter))\n div = truncate(div)\n div = round(div)\n symbol_table[opn3] = div\n return program_counter,input_pointer\n\ndef SQR(opn1,opn2,opn3,program_counter,input_pointer):\n sqr = symbol_or_variable(opn1)**2\n if abs(sqr) > 9999999999:\n print(\"Data Overflow/Underflow @ {}, result will be truncated...\".format(program_counter))\n sqr = truncate(sqr)\n sqr = round(sqr)\n symbol_table[opn3] = sqr\n return program_counter,input_pointer\n\ndef SQRT(opn1,opn2,opn3,program_counter,input_pointer):\n Sqrt = math.sqrt(symbol_or_variable(opn1))\n if abs(Sqrt) > 9999999999:\n print(\"Data Overflow/Underflow @ {}, result will be truncated...\".format(program_counter))\n Sqrt = truncate(Sqrt)\n Sqrt = round(Sqrt)\n symbol_table[opn3] = Sqrt\n return program_counter,input_pointer\n\n\ndef EQL(opn1,opn2,opn3,program_counter,input_pointer):\n opn1 = symbol_or_variable(opn1)\n opn2 = symbol_or_variable(opn2)\n if(opn1==opn2):\n program_counter = label_table[opn3]\n else:\n program_counter +=1\n return program_counter,input_pointer\n\ndef NEQ(opn1,opn2,opn3,program_counter,input_pointer):\n opn1 = symbol_or_variable(opn1)\n opn2 = symbol_or_variable(opn2)\n if(opn1!=opn2):\n program_counter = label_table[opn3]\n else:\n program_counter +=1\n return program_counter,input_pointer\n\ndef GTEQ(opn1,opn2,opn3,program_counter,input_pointer):\n opn1 = symbol_or_variable(opn1)\n opn2 = symbol_or_variable(opn2)\n if(opn1>=opn2):\n program_counter = label_table[opn3]\n else:\n program_counter +=1\n return program_counter,input_pointer\n\ndef LT(opn1,opn2,opn3,program_counter,input_pointer):\n opn1 = symbol_or_variable(opn1)\n opn2 = symbol_or_variable(opn2)\n if(opn1= (len(program_memory)):\n break\n op, opn1, opn2, opn3 = parse_operation(program_memory[program_counter])\n if(op in operations_dict):\n program_counter,input_pointer = operations_dict[op](opn1,opn2,opn3,program_counter,input_pointer)\n else:\n print('operation doesn\\'t exist @ {} {}'.format(program_counter, op))\n sys.exit()\n if(op in ['EQL', 'NEQ', 'GTEQ', 'LT', 'ITJP']): # in case of branching, continue so the program counter is not incremented\n continue\n program_counter += 1\n","sub_path":"AL_Interpreter.py","file_name":"AL_Interpreter.py","file_ext":"py","file_size_in_byte":10239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"164343163","text":"from typing import *\nfrom object_detection.model_loader import WatchMode\nfrom fish.config import *\n\n\npseudo_threshold = 0.05\npseudo_iou_threshold = 0.35\n\n# train\nuse_amp = True\n\n# data\nbatch_size = 2\n\n# model\nbackbone_id = 7\nchannels = 64\nbox_depth = 1\nfpn_depth = 1\nlr = 1e-4\nout_ids: List[int] = [6, 7]\n\nmetric: Tuple[str, WatchMode] = (\"score\", \"max\")\npretrained = True\n\n\n# criterion\ntopk = 30\nbox_weight = 1\ncls_weight = 2\nconfidence_threshold = 0.001\niou_threshold = 0.31\npre_box_limit = 10000\n\nanchor_ratios = [1.0]\nanchor_scales = [1.0]\nnum_anchors = len(anchor_ratios) * len(anchor_scales)\nanchor_size = 1\n\nout_dir = f\"/store/efficientdet-{backbone_id}-{num_anchors}-{channels}-{''.join([str(i) for i in out_ids])}\"\n","sub_path":"fish/effdet/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"351619058","text":"from aiohttp import web\nimport psycopg2.errors\nfrom urllib.parse import urlencode\n\nfrom .config import db_block, 
web_routes\n\n@web_routes.post('/action/xuanke/add')\nasync def xuanke_add(request):\n params = await request.post()\n xstu_sn = params.get(\"stu_sn\")\n plan_sn = params.get(\"plan_sn\")\n plan_xueqi = params.get(\"plan_xueqi\")\n \n with db_block() as db:\n db.execute(\"\"\"\n INSERT INTO xuanke (xstu_sn, plan_sn) \n VALUES ( %(xstu_sn)s, %(plan_sn)s)\n \"\"\", dict(xstu_sn=xstu_sn, plan_sn = plan_sn))\n \n\n return web.HTTPFound(location=\"/xuanke\")\n\n@web_routes.post('/action/xuanke/delete/{xstu_sn}/{plan_sn}')\nasync def action_xuanke_delete(request):\n \n xstu_sn = request.match_info.get(\"xstu_sn\")\n plan_sn = request.match_info.get(\"plan_sn\")\n \n with db_block() as db:\n db.execute(\"\"\"\n DELETE FROM xuanke\n WHERE xstu_sn = %(xstu_sn)s AND plan_sn = %(plan_sn)s\n \"\"\", dict(xstu_sn=xstu_sn, plan_sn = plan_sn))\n return web.HTTPFound(location=\"/xuanke\")\n ","sub_path":"serv/xuanke_actions.py","file_name":"xuanke_actions.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"437604960","text":"\"\"\"\r\n\r\nUsage:\r\n main.py [--data=<data> --folder=<folder>] [--batch-size=<bsz>] [--bptt=<bptt>]\r\n [--embed=<embed>] [--hidden=<hidden>] [--cell=<cell>] [--num-layers=<nlayers>] [--resume=<path>]\r\n [--dropout-embed=<p>] [--dropout-input=<p>] [--dropout-hidden=<p>]\r\n [--dropconnect=<p>] [--dropout-other=<p>]\r\n [--epochs=<epochs>] [--lr=<lr>] [--l2=<l2>] [--ar=<ar>] [--tar=<tar>]\r\n [--clip=<clip>] [--device=<device>]\r\n [--seed=<seed>]\r\n\r\nData Options:\r\n --data=<data> Dataset to use [default: ptb]\r\n --folder=<folder> Directory to load dataset [default: ../../data/ptb]\r\n --batch-size=<bsz> Training batch size [default: 80]\r\n --bptt=<bptt> Base length of variable truncated BPTT [default: 70]\r\n\r\nModel Options:\r\n --embed=<embed> Word embedding size [default: 400]\r\n --hidden=<hidden> Recurrent hidden layer size [default: 1150]\r\n --cell=<cell> Recurrent cell type [default: LSTM]\r\n --num-layers=<nlayers> Number of recurrent hidden layers [default: 3]\r\n --resume=<path> Path to resume model parameters\r\n\r\nModel Dropout Options:\r\n --dropout-embed=<p> Dropout rate in embedding layer [default: 0.1]\r\n --dropout-input=<p> Dropout rate of embedding input [default: 0.65]\r\n --dropout-hidden=<p> Dropout rate of hidden output of each recurrent layer [default: 0.3]\r\n --dropconnect=<p> DropConnect rate of weights of recurrent layers [default: 0.5]\r\n --dropout-other=<p> Dropout rate of other parts [default: 0.4]\r\n\r\nTraining Options:\r\n --epochs=<epochs> Number of epochs to train [default: 8000]\r\n --lr=<lr> Learning rate [default: 30]\r\n --l2=<l2> L2 regularization scale [default: 1.2e-6]\r\n --ar=<ar> Activation regularization scale [default: 2]\r\n --tar=<tar> Temporal activation regularization scale [default: 1]\r\n --clip=<clip> Gradient clipping scale [default: 0.25]\r\n --device=<device> GPU device to run\r\n\r\nRandom Options:\r\n --seed=<seed> Random seed [default: 1]\r\n\r\n\"\"\"\r\nimport numpy as np\r\nimport torch\r\nimport copy\r\n\r\nimport nnlearn.dataset as Dataset\r\nimport nnlearn.dataset.transform as Transform\r\nimport nnlearn.model as Model\r\nimport nnlearn.optimizer as Optimizer\r\nimport nnlearn.learner as Learner\r\nimport nnlearn.callback as Callback\r\n\r\nfrom splitcross import SplitCrossEntropyLoss\r\n\r\n\r\n# Hook Function Loading Penn Treebank Dataset\r\ndef PennTreebank(learner, folder, bptt, batch_size):\r\n transform = Transform.TokenToTensor()\r\n\r\n train_data = Dataset.BPTTDataLoader(\r\n Dataset.PennTreebank(folder, mode='train', transform=transform),\r\n bptt=bptt, batch_size=batch_size, shuffle=False)\r\n valid_data = Dataset.BPTTDataLoader(\r\n 
Dataset.PennTreebank(folder, mode='valid', transform=transform),\r\n bptt=bptt, batch_size=10, shuffle=False)\r\n test_data = Dataset.BPTTDataLoader(\r\n Dataset.PennTreebank(folder, mode='test', transform=transform),\r\n bptt=bptt, batch_size=1, shuffle=False)\r\n\r\n # update number of tokens for model initialization\r\n learner.model_args['num_tokens'] = len(train_data.dataset.dictionary)\r\n\r\n return train_data, valid_data, test_data\r\n\r\n# Hook Function Using NT-ASGD\r\nclass NTASGD(Callback.Callback): # nnlearn.callback is imported as a module; Callback.Callback is the assumed class name inside it\r\n def epoch_midterm(self):\r\n \"\"\"Operation between training an epoch and evaluation of that epoch\"\"\"\r\n # switch parameters to averaged weights from NT-ASGD initial point\r\n self.param_buffer = {}\r\n for param_group in self.learner.optimizer.param_groups:\r\n if 't0' in param_group:\r\n for param in param_group['params']: # param_groups are dicts; iterate their 'params' entry, not the keys\r\n self.param_buffer[param] = param.data.clone()\r\n param.data = self.learner.optimizer.state[param]['ax'].clone()\r\n else:\r\n pass\r\n\r\n def epoch_end(self):\r\n \"\"\"Operation after the end of a whole epoch\"\"\"\r\n # switch parameters back to continue training\r\n for param in self.learner.model.parameters():\r\n if param in self.param_buffer:\r\n param.data = self.param_buffer[param].clone()\r\n else:\r\n pass\r\n\r\n # switch to use ASGD if necessary (no copy in buffer means no averaging)\r\n loss_buffer = self.learner.loss_buffer['valid']\r\n is_asgd = isinstance(self.learner.optimizer, Optimizer.ASGD)\r\n is_averaged = (len(self.param_buffer) > 0)\r\n is_nonmono = len(loss_buffer) > self.learner.train_args['nonmono'] + 1\r\n # MAYBE ERROR OF ORIGINAL CODE\r\n # ============================\r\n # It should compare current loss with n steps before (including current)\r\n # rather than all but n steps before.\r\n # ```\r\n # history = loss_buffer[-self.learner.train_args['nonmono']:]\r\n # ```\r\n # Force to fit original design.\r\n if is_nonmono: # guard with is_nonmono so the history slice is never empty (min([]) would raise)\r\n current = loss_buffer[-1]\r\n history = loss_buffer[:-self.learner.train_args['nonmono'] - 1]\r\n is_better = (current > min(history))\r\n else:\r\n is_better = False\r\n if (not is_asgd) and (not is_averaged) and is_nonmono and is_better:\r\n self.learner.logger.info('Switch To NT-ASGD')\r\n self.learner.optimizer = Optimizer.ASGD(\r\n self.learner.model.parameters(), t0=0, lambd=0., **self.learner.optimizer_args)\r\n else:\r\n pass\r\n\r\n# Main Process\r\ndef main(args):\r\n # parse arguments\r\n dataset = args['--data']\r\n folder = args['--folder']\r\n bptt = args['--bptt']\r\n bsz = args['--batch-size']\r\n num_embed = args['--embed']\r\n num_hidden = args['--hidden']\r\n cell = args['--cell']\r\n num_layers = args['--num-layers']\r\n resume = args['--resume']\r\n drop_embed = args['--dropout-embed']\r\n drop_input = args['--dropout-input']\r\n drop_hidden = args['--dropout-hidden']\r\n dropconnect = args['--dropconnect']\r\n drop_other = args['--dropout-other']\r\n num_epochs = args['--epochs']\r\n lr = args['--lr']\r\n l2 = args['--l2']\r\n ar = args['--ar']\r\n tar = args['--tar']\r\n clip = args['--clip']\r\n device = args['--device']\r\n seed = args['--seed']\r\n\r\n learner = Learner.LanguageAWDLearner(\r\n device=device, random_seed=seed,\r\n dataset_hook=PennTreebank, dataset_args=dict(\r\n folder=folder, bptt=bptt, 
batch_size=bsz),\r\n model=Model.LangAWD, model_args=dict(\r\n input=num_embed, hidden=num_hidden, recurrent=cell, num_layers=num_layers,\r\n dropout=dict(\r\n embed=drop_embed, input=drop_input, hidden=drop_hidden,\r\n weight=dropconnect, other=drop_other)),\r\n criterion=SplitCrossEntropyLoss, criterion_args=dict(\r\n hidden_size=num_embed, splits=[], verbose=False),\r\n optimizer=Optimizer.SGD, optimizer_args=dict(lr=lr, weight_decay=l2),\r\n metrics=['ppl'],\r\n callbacks=[NTASGD],\r\n train_args=dict(alpha=ar, beta=tar, clip=clip, bptt=bptt,\r\n nonmono=5)) # assumed: NTASGD reads train_args['nonmono']; 5 is the usual AWD-LSTM non-monotone window\r\n learner.fit(num_epochs=num_epochs, eval_init=False, eval_train=False)\r\n\r\nif __name__ == '__main__':\r\n import docopt\r\n from schema import Schema, Use, And, Or\r\n args = docopt.docopt(__doc__, version='NNLearn Package Test (AWD-LSTM + (PennTreebank + WikiText))')\r\n requirements = {\r\n '--batch-size' : And(Use(int), lambda x: x > 0,\r\n error='Training batch size should be integer > 0'),\r\n '--bptt' : And(Use(int), lambda x: x > 0,\r\n error='Base length of variable truncated BPTT should be integer > 0'),\r\n '--embed' : And(Use(int), lambda x: x > 0,\r\n error='Word embedding size should be integer > 0'),\r\n '--hidden' : And(Use(int), lambda x: x > 0,\r\n error='Recurrent hidden layer size should be integer > 0'),\r\n '--num-layers' : And(Use(int), lambda x: x > 0,\r\n error='Number of recurrent hidden layers should be integer > 0'),\r\n '--dropout-embed' : And(Use(float), lambda x: (x > 0) & (x < 1),\r\n error='Dropout rate should be float in (0, 1)'),\r\n '--dropout-input' : And(Use(float), lambda x: (x > 0) & (x < 1),\r\n error='Dropout rate should be float in (0, 1)'),\r\n '--dropout-hidden' : And(Use(float), lambda x: (x > 0) & (x < 1),\r\n error='Dropout rate should be float in (0, 1)'),\r\n '--dropconnect' : And(Use(float), lambda x: (x > 0) & (x < 1),\r\n error='Dropout rate should be float in (0, 1)'),\r\n '--dropout-other' : And(Use(float), lambda x: (x > 0) & (x < 1),\r\n error='Dropout rate should be float in (0, 1)'),\r\n '--epochs' : And(Use(int), lambda x: x > 0,\r\n error='Number of epochs should be integer > 0'),\r\n '--lr' : And(Use(float), lambda x: x > 0,\r\n error='Learning rate should be float > 0'),\r\n '--l2' : And(Use(float), lambda x: x > 0,\r\n error='L2 regularization scale should be float > 0'),\r\n '--ar' : And(Use(float), lambda x: x > 0,\r\n error='Activation regularization scale should be float > 0'),\r\n '--tar' : And(Use(float), lambda x: x > 0,\r\n error='Temporal activation regularization should be float > 0'),\r\n '--clip' : And(Use(float), lambda x: x > 0,\r\n error='Gradient clipping scale should be float > 0'),\r\n '--device' : Or(None, And(Use(int), lambda x: x >= 0),\r\n error='GPU device should be integer >= 0'),\r\n '--seed' : And(Use(int), lambda x: x > 0,\r\n error='Random seed should be integer > 0'),\r\n object : object,\r\n }\r\n args = Schema(requirements).validate(args)\r\n main(args)\r\n","sub_path":"validate/validate-3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"283623863","text":"# GNU MediaGoblin -- federated, autonomous media hosting\n# Copyright (C) 2011, 2012 MediaGoblin contributors. 
See AUTHORS.\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n\nfrom functools import wraps\n\nfrom urlparse import urljoin\nfrom werkzeug.exceptions import Forbidden, NotFound\nfrom werkzeug.urls import url_quote\n\nfrom mediagoblin import mg_globals as mgg\nfrom mediagoblin.db.models import MediaEntry, User\nfrom mediagoblin.tools.response import redirect, render_404\n\n\ndef require_active_login(controller):\n \"\"\"\n Require an active login from the user.\n \"\"\"\n @wraps(controller)\n def new_controller_func(request, *args, **kwargs):\n if request.user and \\\n request.user.status == u'needs_email_verification':\n return redirect(\n request, 'mediagoblin.user_pages.user_home',\n user=request.user.username)\n elif not request.user or request.user.status != u'active':\n next_url = urljoin(\n request.urlgen('mediagoblin.auth.login',\n qualified=True),\n request.url)\n\n return redirect(request, 'mediagoblin.auth.login',\n next=next_url)\n\n return controller(request, *args, **kwargs)\n\n return new_controller_func\n\ndef active_user_from_url(controller):\n \"\"\"Retrieve User() from URL pattern and pass in as url_user=...\n\n Returns a 404 if no such active user has been found\"\"\"\n @wraps(controller)\n def wrapper(request, *args, **kwargs):\n user = User.query.filter_by(username=request.matchdict['user']).first()\n if user is None:\n return render_404(request)\n\n return controller(request, *args, url_user=user, **kwargs)\n\n return wrapper\n\n\ndef user_may_delete_media(controller):\n \"\"\"\n Require user ownership of the MediaEntry to delete.\n \"\"\"\n @wraps(controller)\n def wrapper(request, *args, **kwargs):\n uploader_id = kwargs['media'].uploader\n if not (request.user.is_admin or\n request.user.id == uploader_id):\n raise Forbidden()\n\n return controller(request, *args, **kwargs)\n\n return wrapper\n\n\ndef user_may_alter_collection(controller):\n \"\"\"\n Require user ownership of the Collection to modify.\n \"\"\"\n @wraps(controller)\n def wrapper(request, *args, **kwargs):\n creator_id = request.db.User.find_one(\n {'username': request.matchdict['user']}).id\n if not (request.user.is_admin or\n request.user.id == creator_id):\n raise Forbidden()\n\n return controller(request, *args, **kwargs)\n\n return wrapper\n\n\ndef uses_pagination(controller):\n \"\"\"\n Check request GET 'page' key for wrong values\n \"\"\"\n @wraps(controller)\n def wrapper(request, *args, **kwargs):\n try:\n page = int(request.GET.get('page', 1))\n if page < 0:\n return render_404(request)\n except ValueError:\n return render_404(request)\n\n return controller(request, page=page, *args, **kwargs)\n\n return wrapper\n\n\ndef get_user_media_entry(controller):\n \"\"\"\n Pass in a MediaEntry based off of a url component\n \"\"\"\n @wraps(controller)\n def wrapper(request, *args, **kwargs):\n user = User.query.filter_by(username=request.matchdict['user']).first()\n if not user:\n raise NotFound()\n\n media = None\n\n # 
might not be a slug, might be an id, but whatever\n media_slug = request.matchdict['media']\n\n # if it starts with id: it actually isn't a slug, it's an id.\n if media_slug.startswith(u'id:'):\n try:\n media = MediaEntry.query.filter_by(\n id=int(media_slug[3:]),\n state=u'processed',\n uploader=user.id).first()\n except ValueError:\n raise NotFound()\n else:\n # no magical id: stuff? It's a slug!\n media = MediaEntry.query.filter_by(\n slug=media_slug,\n state=u'processed',\n uploader=user.id).first()\n\n if not media:\n # Didn't find anything? Okay, 404.\n raise NotFound()\n\n return controller(request, media=media, *args, **kwargs)\n\n return wrapper\n\n\ndef get_user_collection(controller):\n \"\"\"\n Pass in a Collection based off of a url component\n \"\"\"\n @wraps(controller)\n def wrapper(request, *args, **kwargs):\n user = request.db.User.find_one(\n {'username': request.matchdict['user']})\n\n if not user:\n return render_404(request)\n\n collection = request.db.Collection.find_one(\n {'slug': request.matchdict['collection'],\n 'creator': user.id})\n\n # Still no collection? Okay, 404.\n if not collection:\n return render_404(request)\n\n return controller(request, collection=collection, *args, **kwargs)\n\n return wrapper\n\n\ndef get_user_collection_item(controller):\n \"\"\"\n Pass in a CollectionItem based off of a url component\n \"\"\"\n @wraps(controller)\n def wrapper(request, *args, **kwargs):\n user = request.db.User.find_one(\n {'username': request.matchdict['user']})\n\n if not user:\n return render_404(request)\n\n collection_item = request.db.CollectionItem.find_one(\n {'id': request.matchdict['collection_item'] })\n\n # Still no collection item? Okay, 404.\n if not collection_item:\n return render_404(request)\n\n return controller(request, collection_item=collection_item, *args, **kwargs)\n\n return wrapper\n\n\ndef get_media_entry_by_id(controller):\n \"\"\"\n Pass in a MediaEntry based off of a url component\n \"\"\"\n @wraps(controller)\n def wrapper(request, *args, **kwargs):\n media = MediaEntry.query.filter_by(\n id=request.matchdict['media_id'],\n state=u'processed').first()\n # Still no media? 
Okay, 404.\n if not media:\n return render_404(request)\n\n given_username = request.matchdict.get('user')\n if given_username and (given_username != media.get_uploader.username):\n return render_404(request)\n\n return controller(request, media=media, *args, **kwargs)\n\n return wrapper\n\n\ndef get_workbench(func):\n \"\"\"Decorator, passing in a workbench as kwarg which is cleaned up afterwards\"\"\"\n\n @wraps(func)\n def new_func(*args, **kwargs):\n with mgg.workbench_manager.create() as workbench:\n return func(*args, workbench=workbench, **kwargs)\n\n return new_func\n","sub_path":"decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":7301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"451557457","text":"# chapter3-4\r\n\r\n################\r\n###---Functions---###\r\n################\r\n\r\ndef mul(a,b):\r\n '''Function that performs multiplication''' # set the docstring (description of the function)\r\n return a*b\r\n\r\nprint(mul(3,4))\r\n# print(help(mul)) # show the docstring\r\n\r\n# give a parameter a default value\r\ndef convert_jou(jou, unit=\"江戸間\"):\r\n if unit == \"江戸間\":\r\n base = 0.88 * 1.76\r\n elif unit == \"京間\":\r\n base = 0.955 * 1.91\r\n elif unit == \"中京間\":\r\n base = 0.91 * 1.82\r\n m2 = jou * base\r\n s = \"{1} jou in {0} is {2} m^2\".format(unit,jou,m2)\r\n print(s)\r\n\r\n\r\n# call the function\r\nconvert_jou(6, \"江戸間\")\r\nconvert_jou(6)\r\n\r\n# variable-length (variadic) arguments\r\ndef sumArgs(*args):\r\n v = 0\r\n for n in args:\r\n v += n\r\n return v\r\n\r\nprint(sumArgs(1,2,3))\r\nprint(sumArgs(1,2,3,4,5,6))\r\n","sub_path":"chapter3/chapter3-4.py","file_name":"chapter3-4.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"457687116","text":"# load libraries\r\nimport trimesh\r\nimport torch\r\nimport json\r\nimport os\r\nfrom tqdm import tqdm as tqdm\r\nimport matplotlib.pyplot as plt\r\n\r\nimport numpy as np\r\nfrom types import SimpleNamespace\r\nfrom utils import render\r\nfrom shapenet_dataloader import ShapeNetMesh, FixedPointsCachedDataset\r\nfrom shapeflow.layers.deformation_layer import NeuralFlowDeformer\r\nfrom shapenet_embedding import LatentEmbedder\r\nimport shapeflow.utils.train_utils as utils\r\nfrom torch.utils.data import DataLoader\r\nimport pickle\r\nimport time\r\n\r\n\r\ndef export_obj_cpu(filename, pc, colors=None, random_trans=[0,0,0]):\r\n # random_trans = random.uniform(1, 2)\r\n with open('%s'%(filename), 'w') as f:\r\n for i,p in enumerate(pc):\r\n x,y,z = p\r\n x += random_trans[0]\r\n y += random_trans[1]\r\n z += random_trans[2]\r\n r,g,b = [1,0,0]\r\n if colors is not None:\r\n r,g,b = colors[i]\r\n f.write('v {:.4f} {:.4f} {:.4f} \\\r\n {:.4f} {:.4f} {:.4f} \\n'.format(x, y, z, r, g, b))\r\n\r\n\r\n# choice of checkpoint to load\r\nrun_dir = \"/media/andy/Elements/Shapeflow_data/runs/pretrained_ckpt\"\r\ncheckpoint = \"checkpoint_latest.pth.tar_deepdeform_100.pth.tar\"\r\ndevice = torch.device(\"cuda\")\r\n\r\n\r\n# load training args\r\nargs = SimpleNamespace(**json.load(open(os.path.join(run_dir, 'params.json'), 'r')))\r\n\r\n# setup model\r\ndeformer = NeuralFlowDeformer(latent_size=args.lat_dims, f_width=args.deformer_nf, s_nlayers=2, \r\n s_width=5, method=args.solver, nonlinearity=args.nonlin, arch='imnet',\r\n adjoint=args.adjoint, rtol=args.rtol, atol=args.atol, via_hub=True,\r\n no_sign_net=(not args.sign_net), symm_dim=(2 if args.symm else None))\r\nlat_params = torch.nn.Parameter(torch.randn(4746, args.lat_dims)*1e-1, requires_grad=True)\r\ndeformer.add_lat_params(lat_params)\r\ndeformer.to(device)\r\n\r\n# 
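Added sanity check (hedged; not from the original script): the latent table\r\n# must line up with the deformer's latent size, so right after\r\n# deformer.add_lat_params(lat_params) one could assert:\r\n#\r\n# assert lat_params.shape == (4746, args.lat_dims)\r\n#\r\n# 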
load checkpoint\r\nresume_dict = torch.load(os.path.join(run_dir, checkpoint))\r\nstart_ep = resume_dict[\"epoch\"]\r\nglobal_step = resume_dict[\"global_step\"]\r\ntracked_stats = resume_dict[\"tracked_stats\"]\r\ndeformer.load_state_dict(resume_dict[\"deformer_state_dict\"])\r\nsample_points = 300\r\n# dataloader\r\ndata_root = args.data_root.replace('shapenet_watertight', 'shapenet_simplified')\r\nmesh_dataset = ShapeNetMesh(data_root=data_root, split=\"train\", category='chair', \r\n normals=False)\r\npoint_dataset = FixedPointsCachedDataset(\"/media/andy/Elements/Shapeflow_data/data/shapenet_pointcloud/train/03001627.pkl\", npts=sample_points)\r\n\r\n\r\n# take a sample point cloud from a shape\r\np = pickle.load(open(\"/media/andy/Elements/Shapeflow_data/data/shapenet_pointcloud/val/03001627.pkl\", \"rb\"))\r\nname = list(p.keys())[2]\r\ninput_points = p[name]\r\nmesh_gt = trimesh.load(\"/media/andy/Elements/Shapeflow_data/data/shapenet_simplified/val/03001627/bcc73b8ff332b4df3d25ee35360a1f4d/model.ply\")\r\n\r\n# view point\r\neye_1 = [.8, .4, .5]\r\neye_2 = [.3, .4, .9]\r\ncenter = [0, 0, 0]\r\nup = [0, 1, 0]\r\n\r\ndef rgb2rgba(rgb):\r\n \"\"\"remove white background.\"\"\"\r\n rgb = rgb.copy() / 255.\r\n alpha = np.linalg.norm(1-rgb, axis=-1) != 0\r\n alpha = alpha.astype(np.float32)[..., None]\r\n rgba = np.concatenate([rgb, alpha], axis=-1)\r\n return rgba\r\n\r\n# subsample points\r\npoint_subsamp = mesh_gt.sample(sample_points)\r\nexport_obj_cpu('inputs_fullpc.obj',mesh_gt.sample(2048),random_trans=[-3,0,0])\r\n\r\n# img_mesh, _, _, _ = render.render_trimesh(mesh_gt, eye_1, center, up, light_intensity=3)\r\n# img_pt_sub, _, _, _ = render.render_trimesh(trimesh.PointCloud(point_subsamp), \r\n# eye_1, center, up, light_intensity=3, point_size=8)\r\n# # virtual scan (view 2) and unproject depth\r\n# _, scan_depth, world2cam, cam2img = render.render_trimesh(mesh_gt, eye_2, center, up, res=(112, 112))\r\n# points_unproj = render.unproject_depth_img(scan_depth, cam2img, world2cam)\r\n# img_pt_dep, _, _, _ = render.render_trimesh(trimesh.PointCloud(points_unproj), \r\n# eye_1, center, up, light_intensity=3, point_size=5)\r\n\r\n# size_per_fig = 8\r\n# fig, axes = plt.subplots(figsize=(size_per_fig*4, size_per_fig), ncols=4)\r\n# axes[0].imshow(rgb2rgba(img_mesh))\r\n# axes[0].axis('off')\r\n# # axes[0].set_title(\"Ground Truth Mesh\")\r\n\r\n# axes[1].imshow(rgb2rgba(img_pt_sub))\r\n# axes[1].axis('off')\r\n# # axes[1].set_title(\"Sparse Point Samples\")\r\n\r\n# d = scan_depth.copy()\r\n# d[scan_depth==0] = np.nan\r\n# axes[2].imshow(d, cmap='coolwarm')\r\n# axes[2].axis('off')\r\n# # axes[2].set_title(\"Depth Scan\")\r\n\r\n# axes[3].imshow(rgb2rgba(img_pt_dep))\r\n# axes[3].axis('off')\r\n# # axes[3].set_title(\"Scanned Points (view 1)\")\r\n\r\n# plt.show()\r\n\r\nembedder = LatentEmbedder(point_dataset, mesh_dataset, deformer, topk=5)\r\n\r\n# inputs = input_points[:2048] \r\n# inputs = points_unproj\r\ninputs = mesh_gt.sample(sample_points) + np.random.randn(sample_points, 3) * 0.005\r\nprint(inputs.shape)\r\nexport_obj_cpu('inputs_subsampled.obj',inputs,random_trans=[-1.5,0,0])\r\n\r\ninput_pts = torch.tensor(inputs)[None].to(device)\r\nlat_codes_pre, lat_codes_post = embedder.embed(input_pts, matching=\"two_way\", verbose=True, lr=1e-2, embedding_niter=30, finetune_niter=30, bs=8, seed=1)\r\n\r\n# retrieve, save results\r\ndeformed_meshes, orig_meshes, dist = embedder.retrieve(lat_codes_post, tar_pts=inputs, matching=\"two_way\")\r\n\r\nasort = np.argsort(dist)\r\ndist = 
[dist[i] for i in asort]\r\ndeformed_meshes_ = [deformed_meshes[i] for i in asort]\r\norig_meshes_ = [orig_meshes[i] for i in asort]\r\n\r\n# pick_idx = np.argmin(dist)\r\nfor pick_idx in range(5):\r\n v, f = deformed_meshes_[pick_idx]\r\n mesh = trimesh.Trimesh(v, f)\r\n vo, fo = orig_meshes_[pick_idx]\r\n mesh_o = trimesh.Trimesh(vo, fo)\r\n # img_orig, _, _, _ = render.render_trimesh(mesh_o.copy(), eye_1, center, up, res=(512,512), light_intensity=8)\r\n colors = np.zeros_like(inputs[:sample_points]); colors[:, 1] = 1.;\r\n export_obj_cpu(\"latent-opt_deformed_%d.obj\"%(pick_idx), v,random_trans=[pick_idx*1.5,0,0])\r\n export_obj_cpu(\"latent-opt_orig_%d.obj\"%(pick_idx), vo,random_trans=[pick_idx*1.5,2,0])\r\n # img_def, _, _, _ = render.render_trimesh([mesh.copy(),\r\n # ],#trimesh.PointCloud(inputs[:512], colors=colors)], \r\n # eye_1, center, up, res=(512,512), light_intensity=8,\r\n # point_size=5)\r\n # img_gt, _, _, _ = render.render_trimesh(mesh_gt.copy(), eye_1, center, up, res=(512,512), light_intensity=8)\r\n # fig, axes = plt.subplots(ncols=3, nrows=1, figsize=(24, 8))\r\n # best = \" (best)\" if pick_idx == np.argmin(dist) else \"\"\r\n # axes[0].imshow(rgb2rgba(img_orig))\r\n # axes[0].axis('off')\r\n # axes[0].set_title(\"Retrieved Shape\"+best)\r\n # axes[1].imshow(rgb2rgba(img_def))\r\n # axes[1].axis('off')\r\n # axes[1].set_title(\"Deformed Shape\"+best)\r\n # axes[2].imshow(rgb2rgba(img_gt))\r\n # axes[2].axis('off')\r\n # axes[2].set_title(\"GT Shape\"+best)\r\n # plt.axis('off')\r\n # plt.show()\r\n\r\nlat_codes_ = torch.tensor(lat_codes_post).to(embedder.device)\r\nlat_src = torch.zeros_like(lat_codes_)\r\nlat_src_tar = torch.stack([lat_src, lat_codes_], dim=1)\r\n_ = embedder.deformer.net.update_latents(lat_src_tar)\r\n\r\n# create query grid\r\nr0, r1, r2 = 6, 11, 6\r\nb = mesh.bounding_box.bounds\r\ns = 0.05\r\nxyz_grid = torch.stack(torch.meshgrid(torch.linspace(b[0,0]-s, b[1,0]+s, r0),\r\n torch.linspace(b[0,1]-s, b[1,1]+s, r1),\r\n torch.linspace(b[0,2]-s, b[1,2]+s, r2)), dim=-1)\r\nxyz_pt = xyz_grid.reshape(1, -1, 3).to(embedder.device)\r\nvel = embedder.deformer.net(torch.tensor(0.5), xyz_pt)\r\nvel_np = vel.detach().cpu().numpy().reshape(r0, r1, r2, 3)\r\nxyz_np = xyz_pt.detach().cpu().numpy().reshape(r0, r1, r2, 3)\r\n\r\n# from mpl_toolkits.mplot3d import Axes3D\r\n# from matplotlib import cm\r\n# import matplotlib.pyplot as plt\r\n# import numpy as np\r\n\r\n# def set_axes_equal(ax):\r\n# '''Make axes of 3D plot have equal scale so that spheres appear as spheres,\r\n# cubes as cubes, etc.. 
This is one possible solution to Matplotlib's\r\n# ax.set_aspect('equal') and ax.axis('equal') not working for 3D.\r\n\r\n# Input\r\n# ax: a matplotlib axis, e.g., as output from plt.gca().\r\n# '''\r\n\r\n# x_limits = [-.2, .2] # ax.get_xlim3d()\r\n# y_limits = [-.2, .2] # ax.get_ylim3d()\r\n# z_limits = [-.5, .5] # ax.get_zlim3d()\r\n\r\n# x_range = abs(x_limits[1] - x_limits[0])\r\n# x_middle = np.mean(x_limits)\r\n# y_range = abs(y_limits[1] - y_limits[0])\r\n# y_middle = np.mean(y_limits)\r\n# z_range = abs(z_limits[1] - z_limits[0])\r\n# z_middle = np.mean(z_limits)\r\n\r\n# # The plot bounding box is a sphere in the sense of the infinity\r\n# # norm, hence I call half the max range the plot radius.\r\n# plot_radius = 0.5*max([x_range, y_range, z_range])\r\n\r\n# ax.set_xlim3d([x_middle - plot_radius, x_middle + plot_radius])\r\n# ax.set_ylim3d([y_middle - plot_radius, y_middle + plot_radius])\r\n# ax.set_zlim3d([z_middle - plot_radius, z_middle + plot_radius])\r\n\r\n# fig = plt.figure(figsize=(10, 10))\r\n# ax = fig.gca(projection='3d')\r\n# ax.view_init(elev=30, azim=-30)\r\n\r\n# v = mesh.copy().vertices\r\n# xyz = xyz_np.reshape(-1, 3)\r\n# uvw = vel_np.reshape(-1, 3)\r\n\r\n# ax.plot_trisurf(v[:, 0], v[:, 2], v[:, 1], triangles=mesh.faces, color=np.ones(3), linewidth=0.2)\r\n# ax.quiver(xyz[:, 0], xyz[:, 2], xyz[:, 1],\r\n# uvw[:, 0], uvw[:, 2], uvw[:, 1],\r\n# length=0.05, color=\"black\", normalize=True)\r\n\r\n# ax.set_axis_off()\r\n# set_axes_equal(ax)\r\n# # plt.savefig(\"flow.pdf\")\r\n\r\n","sub_path":"ShapeFlow/latent_optimization.py","file_name":"latent_optimization.py","file_ext":"py","file_size_in_byte":9719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"345608988","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Aug 5 10:10:27 2018\r\n\r\n@author: haoyu\r\n\"\"\"\r\n\r\nimport pickle \r\nimport numpy as np\r\n\r\ndef create_img_submission_file(orig_sar_img_arr,proc_sar_img_arr,x_vec,y_vec):\r\n submission = {}\r\n submission['orig_sar_img'] = np.array(orig_sar_img_arr)\r\n submission['proc_sar_img'] = np.array(proc_sar_img_arr)\r\n submission['x_axis'] = x_vec\r\n submission['y_axis'] = y_vec\r\n with open('group_3_SAR_img.pkl', 'wb') as handle:\r\n pickle.dump(submission, handle, protocol=pickle.HIGHEST_PROTOCOL)\r\n\r\n\r\n","sub_path":"img_submission.py","file_name":"img_submission.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"198393345","text":"import tensorflow as tf\nimport numpy as np\nimport itertools\n\nfrom tensorflow.python.training import moving_averages\n\n\n######## LAYERS ########\ndef dense(input_data, output_dim, name):\n \"\"\"NN fully connected layer.\"\"\"\n input_dim = input_data.get_shape().as_list()[-1]\n with tf.variable_scope(name): \n W = tf.get_variable(\"W\", [input_dim, output_dim],\n initializer=tf.contrib.layers.xavier_initializer()) \n b = tf.get_variable(\"b\", [output_dim], initializer=tf.constant_initializer(0))\n return tf.matmul(input_data, W, name=\"matmul\") + b\n
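\n# Added illustrative usage (not from the original file; shapes and names are\n# made-up): for a batch of 32 vectors with 128 features,\n#\n# x = tf.placeholder(tf.float32, [32, 128])\n# h = dense(x, 64, \"fc1\") # -> Tensor of shape [32, 64]\n#\n# output_dim fixes the width of W, while input_dim is inferred from x itself.\n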
def batch_normalization(input_data, is_train, name='BatchNormalization'):\n \"\"\"NN batch normalization layer.\"\"\"\n x = input_data\n BN_DECAY = 0.9997\n BN_EPSILON = 0.001\n x_shape = x.get_shape()\n params_shape = x_shape[-1:]\n with tf.variable_scope(name):\n axis = list(range(len(x_shape) - 1))\n beta = tf.get_variable('beta',\n params_shape,\n initializer=tf.zeros_initializer)\n gamma = tf.get_variable('gamma',\n params_shape,\n initializer=tf.ones_initializer)\n moving_mean = tf.get_variable('moving_mean',\n params_shape,\n initializer=tf.zeros_initializer,\n trainable=False)\n moving_variance = tf.get_variable('moving_variance',\n params_shape,\n initializer=tf.ones_initializer,\n trainable=False)\n\n # These ops will only be performed when training.\n mean, variance = tf.nn.moments(x, axis)\n update_moving_mean = moving_averages.assign_moving_average(moving_mean,\n mean, BN_DECAY)\n update_moving_variance = moving_averages.assign_moving_average(\n moving_variance, variance, BN_DECAY)\n tf.add_to_collection('update_ops', update_moving_mean)\n tf.add_to_collection('update_ops', update_moving_variance)\n\n mean, variance = tf.cond(\n is_train, lambda: (mean, variance),\n lambda: (moving_mean, moving_variance))\n\n x = tf.nn.batch_normalization(x, mean, variance, beta, gamma, BN_EPSILON)\n\n return x\n\ndef dense_relu_batch(input_data, N, H, is_train, name):\n \"\"\"NN dense relu batch layer.\"\"\"\n with tf.variable_scope(name):\n # dense() infers the input width from input_data itself, so only the\n # output width H is passed (N stays in the signature for callers)\n affine = dense(input_data, H, \"dense\")\n bn = batch_normalization(affine, is_train, \"batch\")\n return tf.nn.relu(bn, \"relu\")\n\ndef dense_relu(input_data, N, H, name):\n \"\"\"NN dense relu layer\"\"\"\n with tf.variable_scope(name):\n affine = dense(input_data, H, \"dense\")\n return tf.nn.relu(affine, \"relu\")\n\ndef multi_dense_relu_batch(input_data, N, Hs, is_train, name):\n \"\"\"NN multi dense relu batch layer.\"\"\"\n with tf.variable_scope(name):\n output = input_data\n for i, H in enumerate(itertools.izip([N] + Hs, Hs)):\n output = dense_relu_batch(output, H[0], H[1], is_train, \"fc_\" + str(i))\n return output\n\ndef conv2d(input_data, filter_size, stride, name):\n \"\"\"NN 2D convolutional layer.\"\"\"\n with tf.variable_scope(name):\n W = tf.get_variable(\"W\", filter_size,\n initializer=tf.contrib.layers.xavier_initializer_conv2d())\n conv = tf.nn.conv2d(input_data, W,\n [1, stride, stride, 1], \"SAME\", name=\"conv2d\")\n biases = tf.get_variable(\"b\", shape=filter_size[-1])\n bias = tf.reshape(tf.nn.bias_add(conv, biases),\n conv.get_shape().as_list())\n\n return bias\n\ndef conv_words(input_data, window_size, num_filters, name):\n \"\"\"NN convolution over window_size words across entire embedding dimension\"\"\"\n batch_size, sentence_length, embedding_dim = input_data.get_shape().as_list()\n input_data = tf.reshape(input_data,\n [batch_size, sentence_length, embedding_dim, 1])\n with tf.variable_scope(name):\n filter_size = [window_size, embedding_dim, 1, num_filters]\n W = tf.get_variable(\"W\", filter_size,\n initializer=tf.contrib.layers.xavier_initializer())\n conv = tf.nn.conv2d(input_data, W, [1,1,1,1], padding='VALID')\n biases = tf.get_variable(\"b\", shape=filter_size[-1])\n bias = tf.reshape(tf.nn.bias_add(conv, biases),\n conv.get_shape().as_list())\n return bias\n\ndef maxpool2d(input_data, stride, name):\n \"\"\"NN 2D max pooling layer.\"\"\"\n with tf.variable_scope(name):\n filter_size = [1, stride, stride, 1]\n return tf.nn.max_pool(input_data, filter_size,\n filter_size, \"SAME\", name=\"max_pool\")\n\ndef conv2d_relu_batch(input_data, filter_size, stride, is_train, name):\n with tf.variable_scope(name):\n conv = conv2d(input_data, filter_size, stride, \"conv2d\")\n bn = batch_normalization(conv, is_train, \"batch\")\n return tf.nn.relu(bn, \"relu\")\n\ndef conv2d_relu(input_data, filter_size, stride, name):\n with tf.variable_scope(name):\n conv = conv2d(input_data, filter_size, stride, \"conv2d\")\n return tf.nn.relu(conv, \"relu\")\n
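\n# Added note (hedged): softmax_loss below returns the sum of everything in\n# the 'losses' collection, so a regularizer registered elsewhere via\n# tf.add_to_collection('losses', reg_term) is folded into the total.\n# Illustrative call site (names made-up):\n#\n# logits = dense(h, num_classes, \"logits\")\n# total_loss = softmax_loss(logits, labels)\n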
\"conv2d\")\n return tf.nn.relu(conv, \"relu\")\n\ndef softmax_loss(logits, labels):\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits,\n labels, name='cross_entropy')\n cross_entropy_mean = tf.reduce_mean(\n cross_entropy, name='mean_cross_entropy')\n tf.add_to_collection('losses', cross_entropy_mean)\n return tf.add_n(tf.get_collection('losses'), name='total_loss')\n","sub_path":"classifiers/cnn_model/src/layer_utils.py","file_name":"layer_utils.py","file_ext":"py","file_size_in_byte":5313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"30557785","text":"from django.template.loader import render_to_string\n\nfrom django.http import HttpResponseRedirect\nfrom django.http import JsonResponse\nfrom django.core import urlresolvers\n\nfrom django.db.models import Q\n\nfrom .forms import GuestForm, RsvpFormSet\nfrom .models import Guest\n\ndef get_guest(request):\n if 'guest_id' not in request.session:\n return None\n return Guest.filter(id=request.session['guest_id']).first()\n\ndef rsvp_form(request):\n guest = Guest.objects.get(id=request.session['guest_id'])\n if guest.group:\n guests = guest.group.guest_set.filter(~Q(id=guest.id)).order_by(\"last_name\", \"first_name\")\n guests = [guest] + list(guests)\n else:\n guests = [guest]\n\n if request.method == 'POST':\n formset = RsvpFormSet(request.POST)\n # Only allow submitter to change allowed guests\n for form in formset:\n form.fields['guest'].queryset = guests\n if form.is_valid():\n for form in formset:\n form.full_clean()\n guest = form.clean()['guest']\n guest.attending = form.clean()['attending']\n guest.email = form.clean()['email']\n guest.save()\n\n return JsonResponse({\n 'redirect': '/thanks'\n })\n else:\n for form in formset:\n form.initial = {'guest': Guest.objects.get(id=form['guest'].value())}\n else:\n formset = RsvpFormSet(initial=[{'guest': g, 'email': g.email, 'attending': g.attending} for g in guests])\n\n\n\n return JsonResponse({\n 'content': render_to_string('rsvp/rsvp_form.html', {'rsvp_formset': formset, 'action': urlresolvers.reverse('rsvp-form')}, request=request)\n })\n\ndef guest_form(request):\n if 'guest_id' in request.session:\n return HttpResponseRedirect(urlresolvers.reverse('rsvp-form'))\n\n if request.method == 'POST':\n form = GuestForm(request.POST)\n if form.is_valid():\n guests = Guest.objects.filter(\n first_name__iexact=form.cleaned_data['first_name'],\n last_name__iexact=form.cleaned_data['last_name'])\n\n if len(guests) < 1:\n form.add_error(\"__all__\", \"No guest matches name, please ensure it is spelt the same as your invitation\")\n else:\n guest = guests.first()\n request.session['guest_id'] = guest.id\n\n if form.is_valid():\n return HttpResponseRedirect(urlresolvers.reverse('rsvp-form'))\n else:\n return JsonResponse({\n 'content': render_to_string('rsvp/form.html', {'form': form, 'action': urlresolvers.reverse('guest-form')}, request=request)\n })\n\n form = GuestForm()\n\n return JsonResponse({\n 'content': render_to_string('rsvp/form.html', {'form': form, 'action': urlresolvers.reverse('guest-form')}, request=request)\n })\n","sub_path":"api/wedding_api/rsvp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"612751637","text":"# python3\r\nimport sys\r\n\r\n\r\ndef build_suffix_array(s):\r\n \"\"\"\r\n Build suffix array of the string text and\r\n return a list result of the same 
length as the text\r\n such that the value result[i] is the index (0-based)\r\n in text where the i-th lexicographically smallest\r\n suffix of text starts.\r\n BuildSuffixArray(S)\r\n order ← SortCharacters(S)\r\n class ← ComputeCharClasses(S, order)\r\n L ← 1\r\n while L < |S|:\r\n order ← SortDoubled(S, L, order, class)\r\n class ← UpdateClasses(order, class, L)\r\n L ← 2L\r\n return order \r\n \"\"\"\r\n n = len(s)\r\n order = sort_char(s)\r\n cl = compute_char_classes(s, order)\r\n l = 1\r\n while l < n:\r\n order = sort_doubles(s, l, order, cl)\r\n cl = update_classes(order, cl, l)\r\n l *= 2\r\n return order\r\n\r\ndef update_classes(new_order, cl, l):\r\n \"\"\"given the renewed order of the doubled suffix, compute the new buckets\r\n UpdateClasses(newOrder, class, L)\r\n n ← |newOrder|\r\n newClass ← array of size n\r\n newClass[newOrder[0]] ← 0\r\n for i from 1 to n − 1:\r\n cur ← newOrder[i], prev ← newOrder[i − 1]\r\n mid ← (cur + L)(mod n), midPrev ← (prev + L)(mod n)\r\n if class[cur] ̸= class[prev] or class[mid] ̸= class[midPrev]:\r\n newClass[cur] ← newClass[prev] + 1\r\n else:\r\n newClass[cur] ← newClass[prev]\r\n return newClass\r\n\r\n \"\"\"\r\n n = len(new_order)\r\n new_cl = [None] * n\r\n new_cl[new_order[0]] = 0\r\n for i in range(1, n):\r\n cur, prev = new_order[i], new_order[i - 1]\r\n mid, mid_prev = (cur + l) % n, (prev + l) % n # in case idx out of range n\r\n if cl[cur] != cl[prev] or cl[mid] != cl[mid_prev]:\r\n new_cl[cur] = new_cl[prev] + 1\r\n else:\r\n new_cl[cur] = new_cl[prev]\r\n return new_cl\r\n\r\n\r\ndef sort_doubles(s, l, order, cl):\r\n \"\"\"given the order of the l-length suffix of s, compute the order of the doubled-l-length suffix\r\n\r\n SortDoubled(S, L, order, class)\r\n position ← zero array of size |S|\r\n newOrder ← array of size |S|\r\n for i from 0 to |S| − 1:\r\n position[class[i]] ← position[class[i]] + 1\r\n for j from 1 to |S| − 1:\r\n position[j] ← position[j] + position[j − 1]\r\n for i from |S| − 1 down to 0:\r\n start ← (order[i] − L + |S|) mod |S|\r\n cl ← class[start]\r\n position[cl] ← position[cl] − 1\r\n newOrder[position[cl]] ← start\r\n return newOrder\r\n \"\"\"\r\n n = len(s)\r\n position = [0] * n\r\n new_order = [None] * n\r\n for i in range(n):\r\n position[cl[i]] = position[cl[i]] + 1 # initialize the number of every cl as 1\r\n for j in range(1, n):\r\n position[j] = position[j] + position[j - 1] # count the occurances\r\n for i in range(n-1, -1, -1):\r\n start = (order[i] - l + n) % n # shift the suffix by length L counter_clockwise, so that we can sort the doubled suffix by its second half\r\n c_start = cl[start] # count sort the first half\r\n position[c_start] = position[c_start] - 1 \r\n new_order[position[c_start]] = start\r\n return new_order\r\n\r\n\r\n\r\ndef compute_char_classes(s, order):\r\n \"\"\" compute equivalant classes(buckets) of the characters in string s\r\n ComputeCharClasses(S, order)\r\n class ← array of size |S|\r\n class[order[0]] ← 0\r\n for i from 1 to |S| − 1:\r\n if S[order[i]] ̸= S[order[i − 1]]:\r\n class[order[i]] = class[order[i − 1]] + 1\r\n else:\r\n class[order[i]] = class[order[i − 1]]\r\n return class\r\n \"\"\"\r\n n = len(s)\r\n cl = [None] * n\r\n cl[order[0]] = 0\r\n for i in range(1, n):\r\n if ord(s[order[i]]) != ord(s[order[i - 1]]):\r\n cl[order[i]] = cl[order[i - 1]] + 1\r\n else:\r\n cl[order[i]] = cl[order[i - 1]]\r\n return cl\r\n\r\ndef sort_char(s):\r\n \"\"\"count sort by character's integer code\r\n Output: list of index sorted by s[index]'s integer code\r\n 
>>> sort_char('bca')\r\n [2, 0, 1]\r\n\r\n SortCharacters(S)\r\n order ← array of size |S|\r\n position ← zero array of size |Σ|\r\n for i from 0 to |S| − 1:\r\n position[S[i]] ← position[S[i]] + 1\r\n for j from 1 to |Σ| − 1:\r\n position[j] ← position[j] + position[j − 1]\r\n for i from |S| − 1 down to 0:\r\n c ← S[i]\r\n position[c] ← position[c] − 1\r\n order[position[c]] ← i\r\n return order\"\"\"\r\n n = len(s)\r\n order = [None] * n\r\n position = [0] * 256\r\n for i in range(n):\r\n position[ord(s[i])] = position[ord(s[i])] + 1\r\n for j in range(256):\r\n position[j] = position[j] + position[j - 1]\r\n for i in range(n-1, -1, -1):\r\n c = ord(s[i])\r\n position[c] = position[c] - 1\r\n order[position[c]] = i\r\n return order\r\n\r\n\r\ndef find_occurrences(text, patterns):\r\n occs = set()\r\n text = text + '$'\r\n\r\n suffix_array = build_suffix_array(text)\r\n # print(suffix_array)\r\n \r\n for pattern in patterns:\r\n result = pattern_matching(text, pattern, suffix_array)\r\n# print(result)\r\n for i in result:\r\n occs.add(i)\r\n \r\n return occs\r\n\r\ndef pattern_matching(text, pattern, suffix_array):\r\n \"\"\"binary search the starting position + ending position of the pattern on the (sorted) suffix array\r\n fact 1: the occurrences of P in S = all suffixes of S having P as a prefix\r\n fact 2: For any prefix A, all suffixes of S that have prefix A are \r\n contiguous entries in the suffix array\r\n\r\n \"\"\" \r\n n = len(text)\r\n lo = 0\r\n hi = n\r\n while lo < hi:\r\n mid = (lo + hi) // 2\r\n if pattern > text[suffix_array[mid]:]:\r\n lo = mid + 1\r\n else:\r\n hi = mid\r\n start = lo\r\n hi = n\r\n m = len(pattern)\r\n while lo < hi:\r\n mid = (lo + hi) // 2\r\n if pattern < text[suffix_array[mid]:suffix_array[mid]+m]: \r\n hi = mid\r\n else:\r\n lo = mid + 1 \r\n end = hi\r\n # print(start, end)\r\n if start > end:\r\n return None\r\n else:\r\n return [suffix_array[i] for i in range(start, end)]\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n text = sys.stdin.readline().strip()\r\n pattern_count = int(sys.stdin.readline().strip())\r\n patterns = sys.stdin.readline().strip().split()\r\n occs = find_occurrences(text, patterns)\r\n print(\" \".join(map(str, occs)))","sub_path":"04_algo_on_str/W3_4/suffix_array_matching.py","file_name":"suffix_array_matching.py","file_ext":"py","file_size_in_byte":6426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"89957105","text":"from .singleton_driver import SingleDriver\nfrom .utils.json_utils import JsonUtils\nfrom .logger import logger\n\n\nclass Browser:\n URL = JsonUtils('config.json').get_data('link')\n IMPLICITLY_WAIT = JsonUtils('config.json').get_data('implicitly_wait')\n\n @staticmethod\n def get_browser():\n browser = SingleDriver().get_driver()\n return browser\n\n @staticmethod\n def open_url(url=None):\n if url is None:\n url = Browser.URL\n logger.info('Try to get driver')\n browser = Browser.get_browser()\n logger.info(f'Try to open url \"{url}\"')\n browser.get(url)\n browser.implicitly_wait(Browser.IMPLICITLY_WAIT)\n\n @staticmethod\n def get_current_url():\n browser = Browser.get_browser()\n return browser.current_url\n\n @staticmethod\n def get_cookies():\n browser = Browser.get_browser()\n logger.info('Getting cookies')\n return browser.get_cookies()\n\n @staticmethod\n def get_screenshot(name):\n browser = Browser.get_browser()\n browser.get_screenshot_as_file(name)\n\n @staticmethod\n def get_cookie(name):\n browser = Browser.get_browser()\n 
\n @staticmethod\n def get_cookie(name):\n browser = Browser.get_browser()\n logger.info(f'Getting cookie named as \"{name}\"')\n return browser.get_cookie(name)\n\n @staticmethod\n def add_cookie(cookie):\n browser = Browser.get_browser()\n logger.info('Adding cookies')\n browser.add_cookie(cookie)\n\n @staticmethod\n def change_cookie(name_cookie_to_change, new_value):\n browser = Browser.get_browser()\n logger.info(f'Changing cookies \"{name_cookie_to_change}\" to new value \"{new_value}\"')\n cookie_to_change = browser.get_cookie(name_cookie_to_change)\n cookie_to_change['value'] = new_value\n browser.add_cookie(cookie_to_change)\n\n @staticmethod\n def delete_cookie(name):\n browser = Browser.get_browser()\n logger.info(f'Deleting cookie named as \"{name}\"')\n browser.delete_cookie(name)\n\n @staticmethod\n def delete_all_cookies():\n browser = Browser.get_browser()\n logger.info('Deleting all cookies')\n browser.delete_all_cookies()\n\n @staticmethod\n def maximize():\n browser = SingleDriver().get_driver()\n logger.info('Maximize window')\n browser.maximize_window()\n\n @staticmethod\n def refresh():\n browser = SingleDriver().get_driver()\n logger.info('Refresh window')\n browser.refresh()\n\n @staticmethod\n def quit_browser():\n driver = SingleDriver()\n browser = driver.get_driver()\n browser.quit()\n driver.del_driver()\n\n @staticmethod\n def switch_to_top():\n browser = SingleDriver().get_driver()\n logger.info('Switching to default frame')\n browser.switch_to.default_content()\n\n @staticmethod\n def switch_to_frame(element):\n browser = SingleDriver().get_driver()\n logger.info('Switching to IFrame')\n browser.switch_to.frame(element.find_element())\n\n @staticmethod\n def go_to_other_tab():\n browser = SingleDriver().get_driver()\n if len(browser.window_handles) > 1: # check the number of open tabs, not the handle string's length\n logger.info('Switching to other tab')\n browser.switch_to.window(browser.window_handles[1])\n\n @staticmethod\n def go_to_main_tab():\n browser = SingleDriver().get_driver()\n logger.info('Switching to main tab')\n browser.switch_to.window(browser.window_handles[0])\n\n @staticmethod\n def confirm_alert():\n logger.info('Switching to alert and confirm')\n Browser.get_browser().switch_to.alert.accept()\n\n @staticmethod\n def dismiss_alert():\n logger.info('Switching to alert and dismiss')\n Browser.get_browser().switch_to.alert.dismiss()\n\n @staticmethod\n def get_text_from_alert():\n logger.info('Getting text from alert')\n return Browser.get_browser().switch_to.alert.text\n\n @staticmethod\n def input_text_into_alert(text):\n logger.info(f'Prompting \"{text}\" into alert')\n Browser.get_browser().switch_to.alert.send_keys(text)\n","sub_path":"framework/browser.py","file_name":"browser.py","file_ext":"py","file_size_in_byte":4089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"295573738","text":"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n\nimport logging\nimport threading\nimport time\nimport socket\nimport random\n\nimport rflib.ipc.IPC as IPC\nimport rflib.ipc.MongoIPC as MongoIPC\nfrom rflib.ipc.RFProtocol import *\nfrom rflib.ipc.RFProtocolFactory import RFProtocolFactory\nfrom rflib.defs import *\nfrom rflib.types.Match import *\nfrom rflib.types.Action import *\nfrom rflib.types.Option import *\n\n\nclass RFMonitor(RFProtocolFactory, IPC.IPCMessageProcessor):\n \"\"\"Monitors all the controller instances for failure\n\n Attributes:\n controllers: A dictionary mapping controller address and\n port to controller role and number of devices\n it is connected to.\n monitors: A dictionary mapping controllers to monitor objects\n responsible for scheduling tests.\n eligible_masters: A dictionary mapping controllers to the maximum\n count of devices they are connected to.\n\n \"\"\"\n
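\n # Illustrative shape of the bookkeeping dicts (added comment; addresses\n # and counts are invented):\n # controllers = {\"10.0.0.1:6633\": {\"role\": \"master\", \"count\": 3}}\n # eligible_masters = {\"10.0.0.1:6633\": 3}\n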
 def __init__(self, *arg, **kwargs):\n self.controllers = dict()\n self.monitors = dict()\n self.eligible_masters = dict()\n self.controllerLock = threading.Lock()\n self.ipc = MongoIPC.MongoIPCMessageService(MONGO_ADDRESS,\n MONGO_DB_NAME,\n RFMONITOR_ID,\n threading.Thread,\n time.sleep)\n self.ipc.listen(RFMONITOR_RFPROXY_CHANNEL, self, self, False)\n self.log = logging.getLogger(\"rfmonitor\")\n self.log.setLevel(logging.INFO)\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n ch.setFormatter(logging.Formatter(logging.BASIC_FORMAT))\n self.log.addHandler(ch)\n self.test_controllers()\n\n def process(self, _from, to, channel, msg):\n \"\"\"Process messages sent by controllers.\n\n Types of messages being handled:\n CONTROLLER_REGISTER -- Register Controller details with RFMonitor.\n\n \"\"\" \n type_ = msg.get_type()\n address = msg.get_ct_addr()\n port = msg.get_ct_port()\n role = msg.get_ct_role()\n if type_ == CONTROLLER_REGISTER:\n self.controllerLock.acquire()\n try:\n if ((address + ':' + str(port)) not in \n self.controllers):\n self.controllers[address + ':' + str(port)] = {\n 'role': role,\n 'count': 1\n }\n self.log.info(\"A %s controller at %s:%s is up\",\n role, address, port)\n else:\n self.controllers[msg.get_ct_addr() + ':'\n + str(msg.get_ct_port())]['count'] += 1\n controller_count = self.controllers[address + ':' \n + str(port)]['count']\n\n if not self.eligible_masters:\n self.eligible_masters[address + ':' + str(port)] = \\\n controller_count\n else:\n maximum_controller_count = self.eligible_masters.values()[0]\n if maximum_controller_count < controller_count:\n self.eligible_masters = {}\n self.eligible_masters[address + ':' + str(port)] = \\\n controller_count\n elif maximum_controller_count == controller_count:\n self.eligible_masters[address + ':' + str(port)] = \\\n controller_count\n\n finally:\n self.controllerLock.release()\n\n def test_controllers(self):\n \"\"\"Invoke test on all the controllers\"\"\"\n while True:\n #Extract all the keys from self.controllers first so that \n #the main thread does not block the IPC thread\n self.controllerLock.acquire()\n try:\n controllers = self.controllers.keys()\n finally:\n self.controllerLock.release()\n for controller in controllers:\n host, port = controller.split(':')\n port = int(port)\n if controller in self.monitors:\n monitor = self.monitors[controller]\n #check if scheduled time has passed\n if monitor.timeout < time.time():\n self.test(host, port)\n monitor.schedule_test()\n else:\n continue\n else:\n monitor = Monitor(host, port, callback_time=5000)\n self.monitors[controller] = monitor\n\n def test(self, host, port):\n \"\"\"Test if a controller is up.\n\n Keyword Arguments:\n host -- host ip address at which controller is listening.\n port -- port at which the controller is listening at `host` address.\n\n \"\"\"\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.settimeout(1)\n result = s.connect_ex((host, port))\n\n if result != 0:\n self.log.info(\"Controller listening at %s:%s died\", host, port)\n self.handle_controller_death(host, port)\n s.close()\n
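\n # Added note: connect_ex() returns 0 on success and an errno value\n # otherwise (e.g. 111/ECONNREFUSED on Linux when nothing is listening),\n # which is what triggers handle_controller_death() below.\n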
\n def handle_controller_death(self, host, port):\n \"\"\"Remove all entries corresponding to a controller and\n elect new master if master controller is dead\n\n Keyword Arguments:\n host -- host ip address at which controller was listening.\n port -- port at which the controller was listening at `host` address.\n\n \"\"\"\n master = False\n self.controllerLock.acquire()\n try:\n if self.controllers[host + ':' + str(port)]['role'] == \"master\":\n master = True\n self.controllers.pop(host + ':' + str(port), None)\n self.monitors.pop(host + ':' + str(port), None)\n self.eligible_masters.pop(host + ':' + str(port), None)\n finally:\n self.controllerLock.release()\n if master:\n self.elect_new_master()\n\n def elect_new_master(self):\n \"\"\"Elect new master controller and inform to rfproxy\"\"\"\n master_key = random.randint(0, len(self.eligible_masters)-1)\n new_master = self.eligible_masters.keys()[master_key]\n self.log.info(\"The new master is %s\", new_master)\n host, port = new_master.split(\":\")\n msg = ElectMaster(ct_addr=host, ct_port=port)\n self.ipc.send(RFMONITOR_RFPROXY_CHANNEL, str(0), msg)\n\n\nclass Monitor(object):\n \"\"\"Monitors each controller individually\"\"\"\n def __init__(self, host, port, callback_time=1000):\n \"\"\"Initialize Monitor\n\n Keyword Arguments:\n host -- host ip address at which controller is listening.\n port -- port at which the controller is listening at `host` address.\n test -- callback function to be called periodically.\n callback_time -- time interval (in milliseconds) at which `test` is run.\n\n \"\"\"\n super(Monitor, self).__init__()\n self.host = host\n self.port = port\n self.callback_time = callback_time\n self.timeout = time.time()\n self.schedule_test()\n\n def schedule_test(self):\n \"\"\"Schedule the next test\"\"\"\n current_time = time.time()\n if self.timeout <= current_time:\n self.timeout += self.callback_time/1000.00\n\n\nif __name__ == \"__main__\":\n description = 'RFMonitor monitors RFProxy instances for failure'\n epilog = 'Report bugs to: https://github.com/routeflow/RouteFlow/issues'\n RFMonitor()\n","sub_path":"rfserver/rfmonitor.py","file_name":"rfmonitor.py","file_ext":"py","file_size_in_byte":7702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"80389065","text":"# Copyright 2008-2018 Univa Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport mock\nimport pytest\n\nfrom tortuga.db.models.node import Node\nfrom tortuga.db.models.softwareProfile import SoftwareProfile\nfrom tortuga.exceptions.operationFailed import OperationFailed\nfrom tortuga.node.nodeManager import NodeManager\nfrom .osUtilityMock import get_os_object_factory\n\n\n@mock.patch('tortuga.os_utility.osUtility.getOsObjectFactory',\n side_effect=get_os_object_factory)\nclass TestNodeManager:\n\n def test_simple_validate_delete_nodes_request(\n self, get_os_object_factory_mock): \\\n # pylint: disable=unused-argument\n \"\"\"\n Simple delete of multiple nodes in same software profile\n \"\"\"\n\n swprofile = SoftwareProfile(name='swprofile1', lockedState='Unlocked')\n\n nodes = [\n Node(name='compute-01', softwareprofile=swprofile),\n Node(name='compute-02', softwareprofile=swprofile),\n ]\n\n NodeManager()._NodeManager__validate_delete_nodes_request(nodes, False)\n
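\n # Note (added): the _NodeManager__... spelling reaches the private\n # __validate_delete_nodes_request method through Python's name mangling;\n # plain NodeManager().__validate_delete_nodes_request would not resolve\n # from outside the class.\n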
\n def test_validate_delete_nodes_request_alt(\n self, get_os_object_factory_mock): \\\n # pylint: disable=unused-argument\n \"\"\"\n Simple delete of multiple nodes with one profile locked and one not\n \"\"\"\n\n swprofile1 = SoftwareProfile(name='swprofile1', lockedState='Unlocked')\n swprofile2 = SoftwareProfile(name='swprofile2', lockedState='SoftLocked')\n\n nodes = [\n Node(name='compute-01', softwareprofile=swprofile1),\n Node(name='compute-02', softwareprofile=swprofile2),\n ]\n\n with pytest.raises(OperationFailed):\n NodeManager()._NodeManager__validate_delete_nodes_request(\n nodes, False)\n\n def test_simple_validate_delete_nodes_request_alt(\n self, get_os_object_factory_mock): \\\n # pylint: disable=unused-argument\n \"\"\"\n Delete from soft locked software profile without force\n \"\"\"\n\n nodes = [\n Node(name='compute-01',\n softwareprofile=SoftwareProfile(name='swprofile1',\n lockedState='SoftLocked')),\n ]\n\n with pytest.raises(OperationFailed):\n NodeManager()._NodeManager__validate_delete_nodes_request(\n nodes, False)\n\n\n def test_simple_validate_delete_nodes_request_alt_with_force(\n self, get_os_object_factory_mock): \\\n # pylint: disable=unused-argument\n \"\"\"\n Delete from soft locked software profile with force\n \"\"\"\n\n nodes = [\n Node(name='compute-01',\n softwareprofile=SoftwareProfile(name='swprofile1',\n lockedState='SoftLocked')),\n ]\n\n NodeManager()._NodeManager__validate_delete_nodes_request(nodes, True)\n\n\n def test_simple_validate_delete_nodes_request_alt2(\n self, get_os_object_factory_mock): \\\n # pylint: disable=unused-argument\n \"\"\"\n Delete from hard locked software profile\n \"\"\"\n\n nodes = [\n Node(name='compute-01',\n softwareprofile=SoftwareProfile(name='swprofile1',\n lockedState='HardLocked')),\n ]\n\n with pytest.raises(OperationFailed):\n NodeManager()._NodeManager__validate_delete_nodes_request(\n nodes, False)\n","sub_path":"src/installer/tests/test_NodeManager.py","file_name":"test_NodeManager.py","file_ext":"py","file_size_in_byte":3987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"137808488","text":"#!/usr/bin/python\n#\n# Copyright 2018-2021 Polyaxon, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nimport sys\n\nimport click\n\nfrom polyaxon_sdk import V1ComponentHub, V1ComponentVersion\nfrom polyaxon_sdk.rest import ApiException\nfrom urllib3.exceptions import HTTPError\n\nfrom polyaxon import settings\nfrom polyaxon.cli.dashboard import get_dashboard_url\nfrom polyaxon.cli.errors import handle_cli_error\nfrom polyaxon.cli.options import (\n OPTIONS_COMPONENT_HUB,\n OPTIONS_COMPONENT_VERSION,\n OPTIONS_OWNER,\n)\nfrom polyaxon.cli.utils import get_entity_details\nfrom polyaxon.client import PolyaxonClient\nfrom polyaxon.constants.globals import DEFAULT_HUB, NO_AUTH\nfrom polyaxon.env_vars.getters import get_component_info\nfrom polyaxon.exceptions import PolyaxonException\nfrom polyaxon.logger import clean_outputs\nfrom 
polyaxon.polyaxonfile import get_specification\nfrom polyaxon.schemas.cli.client_config import ClientConfig\nfrom polyaxon.utils.formatting import (\n Printer,\n dict_tabulate,\n dict_to_tabulate,\n get_meta_response,\n list_dicts_to_tabulate,\n)\nfrom polyaxon.utils.query_params import get_query_params\nfrom polyaxon.utils.validation import validate_tags\n\n\ndef get_current_or_public_client():\n if settings.CLI_CONFIG.is_ce:\n return PolyaxonClient(config=ClientConfig(), token=NO_AUTH)\n\n return PolyaxonClient()\n\n\ndef get_specification_details(specification):\n if specification.inputs:\n Printer.print_header(\"Component inputs:\")\n objects = list_dicts_to_tabulate([i.to_dict() for i in specification.inputs])\n dict_tabulate(objects, is_list_dict=True)\n\n if specification.outputs:\n Printer.print_header(\"Component outputs:\")\n objects = list_dicts_to_tabulate([o.to_dict() for o in specification.outputs])\n dict_tabulate(objects, is_list_dict=True)\n\n Printer.print_header(\"Content:\")\n click.echo(specification.to_dict())\n\n\ndef get_component_version_details(response):\n content = response.content\n response = dict_to_tabulate(\n response.to_dict(), humanize_values=True, exclude_attrs=[\"content\"]\n )\n\n Printer.print_header(\"Component info:\")\n dict_tabulate(response)\n\n if content:\n specification = get_specification(data=content)\n get_specification_details(specification)\n else:\n Printer.print_warning(\n \"This component version does not have any polyaxonfile content!\"\n )\n\n\ndef get_info(component: str = None, version: str = None, use_local_owner: bool = False):\n if not any([component, version]):\n Printer.print_error(\n \"A component or a component version is required.\", sys_exit=True\n )\n if all([component, version]):\n Printer.print_error(\n \"Only a component or a component version is required, not both.\",\n sys_exit=True,\n )\n\n if component:\n entity = component\n entity_name = \"component\"\n is_version = False\n else:\n entity = version\n entity_name = \"component version\"\n is_version = True\n\n try:\n owner, component_hub, component_version = get_component_info(\n entity, use_local_owner\n )\n return owner, component_hub, component_version, is_version\n except PolyaxonException as e:\n handle_cli_error(\n e,\n message=\"Could not resolve the {} from the value `{}`.\".format(\n entity_name, entity\n ),\n sys_exit=True,\n )\n\n\n@click.group()\n@clean_outputs\ndef hub():\n \"\"\"Commands for component hub.\"\"\"\n\n\n@hub.command()\n@click.option(\n \"--name\", type=str, help=\"The component hub name, e.g. 
'kaniko' or 'acme/kaniko'.\"\n)\n@click.option(\"--description\", type=str, help=\"Description of the component.\")\n@click.option(\"--tags\", type=str, help=\"Tags of the component, comma separated values.\")\n@click.option(\n \"--public\", is_flag=True, help=\"Set the visibility of the component to public.\"\n)\n@clean_outputs\ndef create(name, description, tags, public):\n \"\"\"Create a new component.\n\n Example:\n\n \\b\n $ polyaxon hub create --name=kaniko --description=\"Tool to build container images\"\n\n \\b\n $ polyaxon hub create --name=owner/name --description=\"Component description\"\n \"\"\"\n if not name:\n Printer.print_error(\n \"Please provide a name to create a component hub.\",\n command_help=\"hub create\",\n sys_exit=True,\n )\n owner, hub_name, _, _ = get_info(name, None, True)\n\n tags = validate_tags(tags)\n\n if not owner or not hub_name:\n Printer.print_error(\n \"Please provide a valid component name with --name=owner/hub-name. \"\n )\n sys.exit(1)\n\n try:\n hub_config = V1ComponentHub(\n name=hub_name, description=description, tags=tags, is_public=public\n )\n polyaxon_client = PolyaxonClient()\n _hub = polyaxon_client.component_hub_v1.create_component_hub(owner, hub_config)\n except (ApiException, HTTPError) as e:\n handle_cli_error(\n e, message=\"Could not create component hub `{}`.\".format(hub_name)\n )\n sys.exit(1)\n\n Printer.print_success(\n \"Component hub `{}` was created successfully.\".format(_hub.name)\n )\n click.echo(\n \"You can view this component hub on Polyaxon UI: {}\".format(\n get_dashboard_url(subpath=\"{}/hub/{}\".format(owner, _hub.name))\n )\n )\n\n\n@hub.command()\n@click.option(\n \"-f\",\n \"--file\",\n \"polyaxonfile\",\n type=click.Path(exists=True),\n help=\"The component spec version to push.\",\n)\n@click.option(\n \"--name\",\n type=str,\n help=\"The component version name, e.g. 'kaniko' or 'kaniko:1.2' \"\n \"or 'acme/kaniko:latest' or 'acme/kaniko:dev'.\",\n)\n@click.option(\"--description\", type=str, help=\"Description of the version.\")\n@click.option(\"--tags\", type=str, help=\"Tags of the version, comma separated values.\")\n@clean_outputs\ndef push(polyaxonfile, name, description, tags):\n \"\"\"Push a new component version.\n If the name corresponds to an existing component version, it will be updated.\n\n Example:\n\n \\b\n $ polyaxon hub push -f polyaxonfile.yaml --name=kaniko:latest --description=\"Tool to build container images\"\n\n \\b\n $ polyaxon hub push -f polyaxonfile.yaml --name=owner/name:v1 --description=\"Component description\"\n \"\"\"\n if not name:\n Printer.print_error(\n \"Please provide a name to create a component version.\",\n command_help=\"hub push\",\n sys_exit=True,\n )\n owner, hub_name, version, is_version = get_info(None, name, True)\n tags = validate_tags(tags)\n\n if not polyaxonfile or not os.path.isfile(polyaxonfile):\n Printer.print_error(\n \"Please provide a path to a polyaxonfile to create a component version.\",\n command_help=\"hub push\",\n sys_exit=True,\n )\n try:\n plx_file = get_specification(data=polyaxonfile)\n except Exception as e:\n handle_cli_error(e, message=\"Polyaxonfile is not valid.\")\n sys.exit(1)\n\n if not owner or not hub_name or not version:\n Printer.print_error(\n \"Please provide a valid component version with --name=owner/hub-name:version. 
\"\n )\n sys.exit(1)\n\n polyaxon_client = PolyaxonClient()\n try:\n polyaxon_client.component_hub_v1.get_component_version(owner, hub_name, version)\n to_update = True\n except (ApiException, HTTPError):\n to_update = False\n\n if to_update:\n if not click.confirm(\n \"A component version {}/{}:{} already exists. \"\n \"Do you want to push force this version?\".format(owner, hub_name, version)\n ):\n click.echo(\"Existing without pushing component version.\")\n sys.exit(1)\n\n try:\n hub_config = V1ComponentVersion(\n name=version,\n description=description,\n tags=tags,\n content=plx_file.to_dict(dump=True),\n )\n if to_update:\n _version = polyaxon_client.component_hub_v1.update_component_version(\n owner,\n hub_name,\n version,\n hub_config,\n )\n else:\n _version = polyaxon_client.component_hub_v1.create_component_version(\n owner,\n hub_name,\n hub_config,\n )\n except (ApiException, HTTPError) as e:\n handle_cli_error(\n e, message=\"Could not create component version `{}`.\".format(hub_name)\n )\n sys.exit(1)\n\n Printer.print_success(\n \"Component version `{}` was created successfully.\".format(_version.name)\n )\n click.echo(\n \"You can view this component version on Polyaxon UI: {}\".format(\n get_dashboard_url(\n subpath=\"{}/hub/{}/versions/{}\".format(owner, hub_name, _version.name)\n )\n )\n )\n\n\n@hub.command()\n@click.option(*OPTIONS_OWNER[\"args\"], **OPTIONS_OWNER[\"kwargs\"])\n@click.option(*OPTIONS_COMPONENT_HUB[\"args\"], **OPTIONS_COMPONENT_HUB[\"kwargs\"])\n@click.option(\n \"--query\",\n \"-q\",\n type=str,\n help=\"To filter the component hub/versions based on this query spec.\",\n)\n@click.option(\n \"--sort\",\n \"-s\",\n type=str,\n help=\"To order the component hub/versions based on the sort spec.\",\n)\n@click.option(\"--limit\", type=int, help=\"To limit the list of component hub/versions.\")\n@click.option(\n \"--offset\", type=int, help=\"To offset the list of component hub/versions.\"\n)\n@clean_outputs\ndef ls(owner, component, query, sort, limit, offset):\n \"\"\"List component hub/versions by owner or owner/component.\"\"\"\n if owner and component:\n Printer.print_error(\n \"Only an owner or a component is required, not both.\", sys_exit=True\n )\n if component:\n owner, component_hub, component_version, is_version = get_info(component, None)\n else:\n owner = owner or DEFAULT_HUB\n component_hub = None\n if not owner:\n Printer.print_error(\n \"Please provide a valid owner --owner/-o or a component --component/-c.\"\n )\n sys.exit(1)\n\n def list_versions():\n component_info = \" \".format(owner, component_hub)\n try:\n polyaxon_client = get_current_or_public_client()\n params = get_query_params(\n limit=limit, offset=offset, query=query, sort=sort\n )\n response = polyaxon_client.component_hub_v1.list_component_versions(\n owner, component_hub, **params\n )\n except (ApiException, HTTPError) as e:\n message = \"Could not get list of component version.\"\n handle_cli_error(e, message=message)\n sys.exit(1)\n\n meta = get_meta_response(response)\n if meta:\n Printer.print_header(\"Versions for {}\".format(component_info))\n Printer.print_header(\"Navigation:\")\n dict_tabulate(meta)\n else:\n Printer.print_header(\"No version found for {}\".format(component_info))\n\n objects = list_dicts_to_tabulate(\n [o.to_dict() for o in response.results],\n humanize_values=True,\n exclude_attrs=[\n \"uuid\",\n \"readme\",\n \"description\",\n \"owner\",\n \"owner\",\n \"role\",\n \"settings\",\n \"content\",\n \"live_state\",\n ],\n )\n if objects:\n 
Printer.print_header(\"Component versions:\")\n dict_tabulate(objects, is_list_dict=True)\n\n def list_components():\n try:\n polyaxon_client = get_current_or_public_client()\n params = get_query_params(\n limit=limit, offset=offset, query=query, sort=sort\n )\n response = polyaxon_client.component_hub_v1.list_component_hubs(\n owner, **params\n )\n except (ApiException, HTTPError) as e:\n message = \"Could not get list of components.\"\n handle_cli_error(e, message=message)\n sys.exit(1)\n\n meta = get_meta_response(response)\n if meta:\n Printer.print_header(\"Components for owner {}\".format(owner))\n Printer.print_header(\"Navigation:\")\n dict_tabulate(meta)\n else:\n Printer.print_header(\"No component hub found for owner {}\".format(owner))\n\n objects = list_dicts_to_tabulate(\n [o.to_dict() for o in response.results],\n humanize_values=True,\n exclude_attrs=[\n \"uuid\",\n \"readme\",\n \"description\",\n \"owner\",\n \"role\",\n \"settings\",\n \"live_state\",\n ],\n )\n if objects:\n Printer.print_header(\"Components:\")\n dict_tabulate(objects, is_list_dict=True)\n\n if component:\n list_versions()\n else:\n list_components()\n\n\n@hub.command()\n@click.option(*OPTIONS_COMPONENT_HUB[\"args\"], **OPTIONS_COMPONENT_HUB[\"kwargs\"])\n@click.option(*OPTIONS_COMPONENT_VERSION[\"args\"], **OPTIONS_COMPONENT_VERSION[\"kwargs\"])\n@clean_outputs\ndef get(component, version):\n \"\"\"Get info for a component hub by name, or owner/hub_name,\n or component version by name, name:tag, owner/name:tag.\n\n Examples:\n\n To get a default component hub:\n\n \\b\n $ polyaxon hub get -h tensorboard\n\n To get by specific owner/name\n\n \\b\n $ polyaxon hub get -p owner/my-component\n \"\"\"\n owner, component_hub, component_version, is_version = get_info(component, version)\n\n try:\n polyaxon_client = get_current_or_public_client()\n if is_version:\n response = polyaxon_client.component_hub_v1.get_component_version(\n owner, component_hub, component_version\n )\n get_component_version_details(response)\n else:\n response = polyaxon_client.component_hub_v1.get_component_hub(\n owner, component_hub\n )\n response.owner = owner\n get_entity_details(response, \"Component hub\")\n except (ApiException, HTTPError) as e:\n handle_cli_error(\n e,\n message=\"Could not get `{}`.\".format(\n component_version if is_version else component_hub\n ),\n sys_exit=True,\n )\n\n\n@hub.command()\n@click.option(*OPTIONS_COMPONENT_HUB[\"args\"], **OPTIONS_COMPONENT_HUB[\"kwargs\"])\n@click.option(*OPTIONS_COMPONENT_VERSION[\"args\"], **OPTIONS_COMPONENT_VERSION[\"kwargs\"])\n@clean_outputs\ndef delete(component, version):\n \"\"\"Delete a component hub or a component version.\"\"\"\n owner, component_hub, component_version, is_version = get_info(\n component, version, True\n )\n full_entity = (\n \"{}/{}:{}\".format(owner, component_hub, component_version)\n if is_version\n else \"{}/{}\".format(owner, component_hub)\n )\n\n if not click.confirm(\n \"Are sure you want to delete component {} `{}`\".format(\n \"version\" if is_version else \"hub\", full_entity\n )\n ):\n click.echo(\"Existing without deleting component hub.\")\n sys.exit(1)\n\n try:\n polyaxon_client = PolyaxonClient()\n if is_version:\n polyaxon_client.component_hub_v1.delete_component_version(\n owner, component_hub, component_version\n )\n else:\n polyaxon_client.component_hub_v1.delete_component_hub(owner, component_hub)\n Printer.print_success(\n \"Component {} `{}` was delete successfully\".format(\n \"version\" if is_version else \"hub\", 
full_entity\n )\n )\n except (ApiException, HTTPError) as e:\n handle_cli_error(\n e,\n message=\"Could not delete component {} `{}`.\".format(\n \"version\" if is_version else \"hub\", full_entity\n ),\n )\n sys.exit(1)\n\n\n@hub.command()\n@click.option(*OPTIONS_COMPONENT_HUB[\"args\"], **OPTIONS_COMPONENT_HUB[\"kwargs\"])\n@click.option(*OPTIONS_COMPONENT_VERSION[\"args\"], **OPTIONS_COMPONENT_VERSION[\"kwargs\"])\n@click.option(\n \"--name\",\n type=str,\n help=\"Name of the component hub, must be unique for the same user.\",\n)\n@click.option(\"--description\", type=str, help=\"Description of the component hub.\")\n@click.option(\n \"--tags\", type=str, help=\"Tags of the run, comma separated values (optional).\"\n)\n@click.option(\n \"--private\",\n type=bool,\n help=\"Set the visibility of the component hub to private/public.\",\n)\n@clean_outputs\ndef update(component, version, name, description, tags, private):\n \"\"\"Update component hub.\n\n Uses /docs/core/cli/#caching\n\n Example:\n\n \\b\n $ polyaxon hub update foobar --description=\"Image Classification with DL using TensorFlow\"\n\n \\b\n $ polyaxon hub update mike1/foobar --description=\"Image Classification with DL using TensorFlow\"\n\n \\b\n $ polyaxon hub update --tags=\"foo, bar\"\n \"\"\"\n owner, component_hub, component_version, is_version = get_info(\n component, version, True\n )\n full_entity = (\n \"{}/{}:{}\".format(owner, component_hub, component_version)\n if is_version\n else \"{}/{}\".format(owner, component_hub)\n )\n\n update_dict = {}\n if name:\n update_dict[\"name\"] = name\n\n if description:\n update_dict[\"description\"] = description\n\n tags = validate_tags(tags)\n if tags:\n update_dict[\"tags\"] = tags\n\n if private is not None:\n update_dict[\"is_public\"] = not private\n\n if not update_dict:\n Printer.print_warning(\n \"No argument was provided to update the component {}.\".format(\n \"version\" if is_version else \"hub\"\n )\n )\n sys.exit(1)\n\n try:\n polyaxon_client = PolyaxonClient()\n if is_version:\n response = polyaxon_client.component_hub_v1.patch_component_version(\n owner, component_hub, component_version, body=update_dict\n )\n Printer.print_success(\"Component version updated.\")\n get_component_version_details(response)\n else:\n response = polyaxon_client.component_hub_v1.patch_component_hub(\n owner, component_hub, body=update_dict\n )\n Printer.print_success(\"Component updated.\")\n get_entity_details(response, \"Component hub\")\n except (ApiException, HTTPError) as e:\n handle_cli_error(\n e,\n message=\"Could not update component {} `{}`.\".format(\n \"version\" if is_version else \"hub\", full_entity\n ),\n )\n sys.exit(1)\n\n\n@hub.command()\n@click.option(*OPTIONS_COMPONENT_HUB[\"args\"], **OPTIONS_COMPONENT_HUB[\"kwargs\"])\n@click.option(*OPTIONS_COMPONENT_VERSION[\"args\"], **OPTIONS_COMPONENT_VERSION[\"kwargs\"])\n@click.option(\n \"--yes\",\n \"-y\",\n is_flag=True,\n default=False,\n help=\"Automatic yes to prompts. 
\"\n 'Assume \"yes\" as answer to all prompts and run non-interactively.',\n)\n@click.option(\n \"--url\",\n is_flag=True,\n default=False,\n help=\"Print the url of the dashboard for this component hub.\",\n)\n@clean_outputs\ndef dashboard(component, version, yes, url):\n \"\"\"Open this operation's dashboard details in browser.\"\"\"\n owner, component_hub, component_version, is_version = get_info(component, version)\n subpath = (\n \"{}/hub/{}/versions?version={}\".format(owner, component_hub, component_version)\n if is_version\n else \"{}/hub/{}\".format(owner, component_hub)\n )\n\n hub_url = get_dashboard_url(subpath=subpath, use_cloud=settings.CLI_CONFIG.is_ce)\n if url:\n Printer.print_header(\"The dashboard is available at: {}\".format(hub_url))\n sys.exit(0)\n if not yes:\n click.confirm(\n \"Dashboard page will now open in your browser. Continue?\",\n abort=True,\n default=True,\n )\n click.launch(hub_url)\n","sub_path":"core/polyaxon/cli/hub.py","file_name":"hub.py","file_ext":"py","file_size_in_byte":20627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"311826265","text":"#!/usr/bin/python3\r\n \r\nimport scapy.all as scapy\r\nfrom scapy.layers.inet import IP as IP\r\nfrom scapy.layers.inet import UDP as UDP\r\nfrom scapy.layers.dns import DNS as DNS\r\nfrom scapy.layers.dns import DNSQR\r\nfrom scapy.all import sniff\r\n \r\ndef start_sniff():\r\n print (\"Server listening...: \")\r\n sniff(iface='eth0', filter='udp port 53',count=4, prn=packet_build)\r\n \r\ncounter = []\r\npacketBytes=[]\r\ndef packet_build(p):\r\n ip_layer = p.getlayer(IP)\r\n src_ip = ip_layer.src\r\n udp_layer = p.getlayer(UDP)\r\n src_port = udp_layer.sport\r\n udp_layer.dport\r\n \r\n dns_layer = p.getlayer(DNS)\r\n dns_id = dns_layer.id\r\n \r\n dnsqr_layer = p[DNSQR].qname\r\n \r\n if dns_id == 2:\r\n counter.append(1)\r\n coun = len(counter)\r\n print(f\"Received_Packets No. 
{coun}, From IP : {src_ip}, on Port : {src_port}.\")\r\n data_decoded = dnsqr_layer.decode('utf-8').replace(\".\", \"\")\r\n f = open ('/root/Downloads/Scapy/received/data.txt', 'a')\r\n f.write(data_decoded)\r\n \r\nstart_sniff()","sub_path":"receiver.py","file_name":"receiver.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"283923436","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n__author__ = 'Yee_172'\n__date__ = '2017/10/27'\n\n\nimport sys\nimport urllib.request\n\n\nheaders = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}\n\nurl = input('Your website: ')\nN = int(input('Brush time: '))\n\nreq = urllib.request.Request(url=url, headers=headers)\nfor i in range(N):\n urllib.request.urlopen(req)\n sys.stdout.write('%08.5f' % ((i + 1) * 100 / N) + '%\\r')\n sys.stdout.flush()\n","sub_path":"Website_Flux_Brush.py","file_name":"Website_Flux_Brush.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"572475584","text":"#!python\n\nimport zipfile\nimport urllib\nimport time\nimport glob\nimport os\nimport os.path\nfrom os.path import join\nfrom os.path import isdir\n\ndef zipdir(zip, dir, prefix='', pat='*'):\n cwd = os.getcwd()\n os.chdir(dir)\n for f in glob.glob( pat ):\n if isdir( join(dir, f) ):\n zipdir(zip, join(dir, f), join(prefix, f), pat)\n else:\n zip.write(f, join(prefix, f))\n os.chdir(cwd)\n\nbdir = 'build/'\nhost = \"http://localhost/\"\nfiles = 'index.html,p21.html,p22.html,p23.html,p31.html,p32.html,p321.html,p322.html,p330.html,p331.html,p332.html,p333.html,p41.html,p42.html,p51.html,p52.html'.split(',')\n\nzipName = \"{}/{}.zip\".format(bdir, time.strftime('%Y%m%d_%H_%M_%S') )\nzip = zipfile.ZipFile(zipName, 'w')\n\nfor i in files:\n url = host + i\n #print 'fetching {}'.format(url)\n file = bdir+i\n fo = urllib.urlopen(url)\n zip.writestr( i, fo.read().replace('/static/page/', '') )\n\nfor d in 'js,img,css'.split(','):\n zipdir(zip, 'page/static/page/'+d, d )\nzip.close()\n\n\n\n","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"132335580","text":"from mebo.mebo import Mebo\nimport time\n# replace with IP of your mebo. You can probably get it from your router. Autodiscovery is coming\nm = Mebo(ip='192.219.13.81')\n# supported directions ('n', 'ne', 'e', 'se', 's', 'sw', 'w', 'nw',)\n# velocity is how fast the wheels turn (yes, it's technically speed, but originally I had a sign on velocity.\n# Then I discovered that there were cardinal direction api calls and had to change it\nwhile(1):\n m.move('n', speed=255, duration=1000)\n time.sleep(20)\n# dur is the value taken by the API. I'll clean it up soon - values < 1000 ms don't work\nm.claw.open(dur=1000)","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"180436459","text":"# Copyright 2021 AI Singapore. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pandas as pd\nimport plotly.graph_objects as go\nimport plotly.express as px\nimport dash_table\n\n\ndef plot_probabilities_spread_pattern(df_specific_label: pd.DataFrame):\n '''\n Display scatter plot for probabilities comparison on correct data point vs miss-predicted data point\n for each class label\n\n Arguments:\n df_specific_label (:obj:`~pd.DataFrame`):\n dataframe of 1 specific label of 1 model type, output from int_miss_predictions\n\n Returns:\n :obj:`~plotly.graph_objects.Figure`:\n figure displaying scatter plot outlining probabilities comparison on correct data point vs miss-predicted data point \\\n for each class label\n '''\n label = list(df_specific_label.columns)[1]\n model_name = df_specific_label['model'].values[0]\n\n fig = px.scatter(df_specific_label,\n x=list(df_specific_label.index),\n y=df_specific_label[label],\n color='pred_state',\n category_orders={\"pred_state\": [\"correct\", \"miss-predict\"]},\n color_discrete_sequence=px.colors.qualitative.D3)\n\n fig.update_layout(\n title=f'Class {label}<br>[ {model_name} ]<br><br>',\n title_x=0.6,\n yaxis_title=f\"Probability is_class_{label} \",\n yaxis_showgrid=False,\n xaxis_title=\"data_point index\",\n xaxis_showgrid=False,\n legend_title=\"\", \n legend=dict(orientation=\"h\", yanchor=\"bottom\", y=1.03, xanchor=\"right\", x=0.8), \n width=250,\n height=600,\n margin=dict(t=170, b=0, l=12, r=12, pad=10))\n\n fig.update_yaxes(range=[0, 1])\n fig.update_xaxes(rangemode=\"tozero\")\n fig.add_hline(y=0.5, line_dash=\"dot\")\n\n # iterate through all traces, to ensure all label-class have consistent format\n for i in range(len(fig.data)):\n if fig.data[i]['legendgroup'] == 'correct':\n fig.data[i]['marker']['color'] = '#1f77b4'\n fig.data[i]['hovertemplate'] = \"Index %{x}<br>\" + \"[ correct ]<br><br>\" + \\\n \"probability: %{y:.4f}<br>\" + \"<extra></extra>\"\n\n elif fig.data[i]['legendgroup'] == 'miss-predict':\n fig.data[i]['marker']['color'] = '#FF7F0E'\n fig.data[i]['hovertemplate'] = \"Index %{x}<br>\" + \"[ miss-predict ]<br><br>\" + \\\n \"probability: %{y:.4f}<br>\" + \"<extra></extra>\"\n return fig\n\n\ndef plot_simple_probs_spread_overview(df_label_state: pd.DataFrame):\n '''\n Display data table listing simple stats on ss, %correct, % wrong, accuracy for each label class\n\n Arguments:\n df_label_state (:obj:`~pd.DataFrame`):\n dataframe containing info on simple stats, output from int_miss_predictions\n\n Returns:\n :obj:`~dash_table.DataTable`:\n table object outlining simple stats on ss, %correct, % wrong, accuracy for each label class\n '''\n fig = dash_table.DataTable(\n id='table', \n columns=[{'id': c, 'name': c} for c in df_label_state.columns], \n style_cell={'font-family': 'verdana', \n 'font-size': '14px', \n 'border': 'none', \n 'minWidth': '100px'},\n style_header={'display': 'none'},\n style_table={'width': '550', 'margin': 'auto'},\n style_data={'lineHeight': '15px'},\n style_data_conditional=[{'if': {'column_id': 'index'}, 'textAlign': 'left'},\n {'if': {'column_id': 'state_value'}, 'textAlign': 'right'}],\n data=df_label_state.to_dict('records'))\n return fig\n\n\ndef plot_prediction_offset_overview(df: pd.DataFrame):\n '''\n Display scatter plot for overview on prediction offset values\n\n Arguments:\n df (:obj:`~pd.DataFrame`):\n dataframe containing calculated offset values, output from int_miss_predictions\n\n Returns:\n :obj:`~plotly.graph_objects.Figure`:\n figure displaying scatter plot outlining overview on prediction offset values by index\n '''\n pred_cols = [col for col in df.columns if 'yPred_' in col]\n offset_cols = [col for col in df.columns if 'offset_' in col]\n corrected_legend_names = [col.replace('yPred_', '') for col in pred_cols]\n df.insert(0, 'index', list(df.index))\n\n fig = px.scatter(df, x='index', y=offset_cols[0], custom_data=['index'], color_discrete_sequence=px.colors.qualitative.D3)\n fig.data[0].name = corrected_legend_names[0]\n fig.update_traces(showlegend=True, hovertemplate=\"Data Index : %{x}<br>Prediction Offset : %{y}\")\n\n if len(pred_cols) > 1: # Bimodal\n fig.add_trace(go.Scatter(\n x=df['index'], \n y=df[offset_cols[1]], \n name=corrected_legend_names[1], \n mode='markers',\n marker=dict(color='#FF7F0E'),\n hovertemplate=\"Data Index : %{x}<br>Prediction Offset : %{y}\"))\n\n # add reference baseline [mainly to have baseline included in legend]\n fig.add_trace(go.Scatter(\n x=[0, len(df)], \n y=[0] * 2, \n name=\"Baseline [Prediction - Actual]\", \n visible=True, \n hoverinfo='skip',\n mode='lines',\n line=dict(color=\"green\", dash=\"dot\")))\n # reference baseline [mainly for the dotted line in graph, but no legend generated]\n fig.add_hline(y=0, line_dash=\"dot\")\n\n fig.update_layout(\n title='Prediction Offset Overview by Datapoint Index', \n xaxis_title='Datapoint Index', \n yaxis_title='Offset from baseline', \n title_x=0.5,\n legend=dict(orientation=\"h\", yanchor=\"bottom\", y=1.02, xanchor=\"right\", x=1), \n width=1000,\n height=550,\n margin=dict(t=110), \n clickmode='event+select')\n\n return fig\n","sub_path":"src/rarity/visualizers/miss_predictions.py","file_name":"miss_predictions.py","file_ext":"py","file_size_in_byte":6469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"324565556","text":"def grayCode(x):\n return bin(x ^ (x>>1))\n\ndef binaryCode(x):\n grayCodeStr = str(bin(x))[2:]\n tempStr = '0'\n binaryCodeStr = \"\"\n for i in range(0,len(grayCodeStr)):\n binaryCodeStr = binaryCodeStr + str(int(grayCodeStr[i]) ^ int(tempStr))\n tempStr = binaryCodeStr[-1]\n return bin(binStrToBin(binaryCodeStr))\n \ndef binStrToBin(x):\n binVal = 0b0\n x = x[::-1]\n for i in range(0, len(x)):\n binVal = binVal + (int(x[i])*2**i)\n return int(binVal)\n\nprint(\"1.To Gray Code\")\nprint(\"2.To Binary Code\")\nchoice = int(input())\nprint()\n\nif choice == 1:\n print(\"Binary Code: \", end='')\n binary = input()\n print(\"Gray Code: \" + str(grayCode(binStrToBin(binary))))\nelse:\n print(\"Gray Code: \", end='')\n gray = binStrToBin(input())\n print(\"Binary Code: \" + str(binaryCode(gray)))","sub_path":"Code/Pers/Python/greyCode.py","file_name":"greyCode.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"557537337","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OdooBro - odoobro.contact@gmail.com\n#\n##############################################################################\n\nimport logging\n\nfrom openerp import api, models\n_logger = logging.getLogger('openerp')\n\n\nclass UpdateFunctionData(models.TransientModel):\n _name = \"update.function.data\"\n\n @api.model\n def update_sale_config_settings(self):\n _logger.info(\"===== START: UPDATE SALE CONFIG SETTINGS =====\")\n # For group\n config_data = {\n 'sale_pricelist_setting': 'formula',\n 'group_pricelist_item': True,\n 'group_sale_pricelist': True,\n 'group_product_pricelist': False\n }\n SaleConfig = self.env['sale.config.settings']\n fs = dict(SaleConfig._fields)\n vals = SaleConfig.default_get(fs)\n vals.update(config_data)\n sale_config = SaleConfig.create(vals)\n sale_config.execute()\n _logger.info(\"===== END: UPDATE SALE CONFIG SETTINGS =====\")\n return True\n","sub_path":"loyalty_card/data/update_function_data.py","file_name":"update_function_data.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"438564481","text":"# import libraries\nimport requests\nimport time\nimport re # regular expressions\n# target website\nurl = \"http://www.jingcaiyuedu.com/novel/GLSmM4.html\"\n# set headers to mimic a browser, to avoid being refused by some sites\nheaders = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; 
x64; rv:67.0) Gecko/20100101 Firefox/67.0\",}\n# send the GET request\nresponse = requests.get(url,headers=headers)\n# convert the page encoding to utf-8\nresponse.encoding = 'utf-8'\n# page source\nhtml = response.text\n# re.findall: get the novel's name\ntitle = re.findall(r'',html)[0]\n# print the novel's name\nprint(title)\n# get each chapter's info (chapter urls)\ndl = re.findall(r'<dl>.*?</dl>',html,re.S)[1]\naill = re.findall(r'href=\"(.*?)\">(.*?)<',dl)\n# create a file to save the novel's content\nf = open(f\"{title}.txt\",'w',encoding=\"utf-8\")\n# loop over the chapters and download each one\nfor i in aill:\n# anti-scraping delay\n time.sleep(1)\n# chapter url and name\n book_url,book_name = i\n# correct chapter url: http://www.jingcaiyuedu.com/novel/GLSmM4/1.html\n# build the correct chapter url\n book_url = f\"http://www.jingcaiyuedu.com{book_url}\"\n# fetch the chapter\n book_response = requests.get(book_url,headers=headers)\n book_response.encoding = 'utf-8'\n book_html = book_response.text\n if len(re.findall(r'(.*?)',book_html,re.S)) == 0:\n print(book_name + 'NULL')\n continue\n# extract the chapter content\n book_content = re.findall(r'(.*?)',book_html,re.S)[0]\n# clean the extracted data\n book_content = book_content.replace(' ','')\n# replace the spaces in the content with nothing\n book_content = book_content.replace(' ','')\n# replace the ' ' parts of the content with nothing\n book_content = book_content.replace('<br>','')\n# replace the <br> parts of the content with nothing\n book_content = book_content.replace('<br/>','')\n# replace the <br/> parts of the content with nothing\n# write out\n f.write(f\"{book_name}\\n\")\n print(book_name)\n f.write(f\"{book_content}\\n\")\n f.write(\"\\n\")\n print(book_url)\n","sub_path":"ghost_soldier.py","file_name":"ghost_soldier.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"343113319","text":"\"\"\"\nwebassets configuration for indyfoodtruck.web\nSee http://webassets.readthedocs.org/\n\"\"\"\n\nfrom django_assets import Bundle, register\nfrom indyfoodtruck.asset_filters import LessFilter\nfrom indyfoodtruck.__version__ import __version__\n\nJS_BUNDLE = Bundle(\n 'web/js/indyfoodtruck.coffee',\n filters='coffeescript,yui_js',\n output='web/js/indyfoodtruck-%s.min.js' % __version__)\n\nCSS_BUNDLE = Bundle(\n 'web/css/indyfoodtruck.less',\n filters=(LessFilter, 'yui_css'),\n output='web/css/indyfoodtruck-%s.min.css' % __version__)\n\nregister('indyfoodtruck.css', CSS_BUNDLE)\nregister('indyfoodtruck.js', JS_BUNDLE)\n","sub_path":"indyfoodtruck/web/assets.py","file_name":"assets.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"579918455","text":"from datetime import datetime\nfrom types import StringType, FunctionType\n\nurgencies = {\n 'add':'++',\n 'sub':'--',\n 'in':'<-',\n 'out':'->',\n 'notice':'!!',\n 'sys':'**'\n}\n\ndef debug(item=None, time=datetime.now(), urgency='sub'):\n if urgency not in urgencies:\n raise Exception('Not a valid logging urgency')\n\n if type(item) == FunctionType:\n def wrapper(*args):\n resp = item(*args)\n debug_print(resp, time, urgency)\n return resp\n return wrapper\n elif type(item) == StringType:\n debug_print(item, time, urgency)\n\ndef debug_print(message, time, urgency):\n \"\"\"\n debug_print(\"My foo!\", datetime.now(), 'out') # results in\n \" -> [15:21.33] My foo!\"\n \"\"\"\n timeformat = \"%H:%M.%S\" \n out = \" %s [%s] %s\" % (urgencies[urgency], time.strftime(timeformat), message)\n\n log = cache.get_cache()\n log.append((datetime.now(), out))\n\n # Don't let log go past LOGSIZE\n if (len(log) == settings.LOGSIZE):\n log.pop(0)\n\n self.c.log = log\n\ndef singleton(cls):\n instances = {}\n def getinstance():\n if cls not in instances:\n instances[cls] = cls()\n return instances[cls]\n return getinstance\n\n","sub_path":"winnie/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"261183447","text":"\nfrom b42fd.numerical_model.model import get_state_space\nfrom b42fd.analytical_model.analytical_full import Analytical_Eigenmotion\nfrom b42fd.validation.fuelmass import data\nfrom b42fd.analytical_model.time import TimeTool\nfrom b42fd.helpers import load_data\nimport numpy as np\n\n\n# t_phugoid=53*60+57\n# t_spm =60*60+35\n# t_dutchroll=60*60 #there is another dutch roll in yawing direction. not sure if we had to use it. Time=61*60\n# t_ape_roll=59*60+10\n# t_ape_spiral=62*60+20\n\n\nt_phugoid=53*60\nt_spm =58*60\nt_dutchroll=60*60 #there is another dutch roll in yawing direction. not sure if we had to use it. 
Time=61*60\nt_ape_roll=57*60\nt_ape_spiral=62*60\n\nA_s_phugoid, A_a_phugoid = get_state_space(t_phugoid)[0], get_state_space(t_phugoid)[4]\nA_s_spm, A_a_spm = get_state_space(t_spm)[0], get_state_space(t_spm)[4]\nA_s_dutchroll, A_a_dutchroll = get_state_space(t_dutchroll)[0], get_state_space(t_dutchroll)[4]\nA_s_ape_roll, A_a_ape_roll = get_state_space(t_ape_roll)[0], get_state_space(t_ape_roll)[4]\nA_s_ape_spiral, A_a_ape_spiral = get_state_space(t_ape_spiral)[0], get_state_space(t_ape_spiral)[4]\n\neigs = np.linalg.eig\nprint(eigs(A_s_phugoid)[0])\nprint(eigs(A_s_spm)[0])\nprint(eigs(A_a_dutchroll)[0])\nprint(eigs(A_a_ape_roll)[0])\nprint(eigs(A_a_ape_spiral)[0])\n\n\nm_pax = np.array([95, 102, 89, 82, 66, 81, 69, 85, 96]) # passenger weights in kg\nM_e = 9165 * 0.453592 # empty aircraft weight in kg\nM_u = 2640 * 0.453592 # mass of fuel\n\n# stationary mesurements results\nCmde = -1.491241347862329\nCma = -0.6746091811758155\n\nCLa = 4.371485054942859\nCD0 = 0.016\ne = 0.6\n\nshort_period= Analytical_Eigenmotion(data, \"short period motion\", t=t_spm,M_u=M_u, m_pax=m_pax,CLa=CLa, CD0=CD0, e=e, Cma=Cma)\nphugoid= Analytical_Eigenmotion(data, \"phugoid oscillation\", t=t_phugoid, M_u=M_u,m_pax=m_pax,CLa=CLa, CD0=CD0, e=e, Cma=Cma)\ndutch_roll= Analytical_Eigenmotion(data, \"dutch roll\", t=t_phugoid, M_u=M_u, m_pax=m_pax, CLa=CLa, CD0=CD0, e=e, Cma=Cma)\naperiodic_roll= Analytical_Eigenmotion(data, \"aperiodic roll\", t=t_ape_roll, M_u=M_u,m_pax=m_pax, CLa=CLa, CD0=CD0, e=e, Cma=Cma)\naperiodic_spiral=Analytical_Eigenmotion(data, \"aperiodic spiral\", t=t_ape_spiral, M_u=M_u, m_pax=m_pax, CLa=CLa, CD0=CD0, e=e, Cma=Cma)\nprint(\"\")\nprint(phugoid.eigvalues)\nprint(short_period.eigvalues)\nprint(dutch_roll.eigvalues)\nprint(aperiodic_roll.eigvalues)\nprint(aperiodic_spiral.eigvalues)\n","sub_path":"b42fd/verification/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"592093696","text":"from typing import Callable\n\nfrom jschon.exceptions import CatalogueError\nfrom jschon.json import AnyJSONCompatible, JSON\nfrom jschon.jsonschema import Keyword, JSONSchema, Scope\n\n__all__ = [\n 'FormatKeyword',\n 'FormatValidator',\n]\n\nFormatValidator = Callable[[AnyJSONCompatible], None]\n\n\nclass FormatKeyword(Keyword):\n key = \"format\"\n\n def __init__(self, parentschema: JSONSchema, value: str):\n super().__init__(parentschema, value)\n\n from jschon.catalogue import Catalogue\n try:\n self.validator: FormatValidator = Catalogue.get_format_validator(value)\n except CatalogueError:\n self.validator = None\n\n def evaluate(self, instance: JSON, scope: Scope) -> None:\n scope.annotate(instance, self.key, self.json.value)\n if self.validator is not None:\n try:\n self.validator(instance.value)\n except ValueError as e:\n scope.fail(instance, f'The instance is invalid against the \"{self.json.value}\" format: {e}')\n else:\n scope.noassert()\n","sub_path":"jschon/vocabulary/format.py","file_name":"format.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"559888820","text":"#https://www.hackerrank.com/challenges/ctci-bfs-shortest-reach/problem\nimport sys\nclass Vertex():\n def __init__(self,node):\n self.id = node\n self.neighbors = []\n def addNeighbor(self,node1):\n v = Vertex(node1)\n self.neighbors.append(v)\n v.neighbors.append(self)\n \nfrom collections import 
deque\nclass Graph():\n \n def __init__(self,n):\n self.vertices = {x : [] for x in range(n)}\n self.edges = []\n self.edgeLength = 6\n \n def connect(self,v1,v2):\n if v1!=v2:\n self.edges.append((v1,v2))\n self.vertices[v1].append(v2)\n self.vertices[v2].append(v1)\n \n def find_all_distances(self,start):\n q = deque();\n q.append(start);\n \n distances = [-1 for x in range (len(self.vertices))]\n distances[start] = 0\n \n while(q):\n v = q.popleft();\n for each in self.vertices[v]:\n if distances[each] == -1:\n distances[each] = distances[v]+self.edgeLength\n q.append(each)\n \n return distances\n \n \n \n\nt = int(input()) #No. of queries\nfor i in range(t): # i will be 0,1,..t-1\n n,m = [int(value) for value in input().split()]\n graph = Graph(n)\n for i in range(m):\n x,y = [int(x) for x in input().split()]\n graph.connect(x-1,y-1) \n s = int(input())\n d = graph.find_all_distances(s-1)\n for each in d:\n if each != 0:\n sys.stdout.write(str(each) + \" \")\n print()\n \n'''\nSample Input\n\n2\n4 2\n1 2\n1 3\n1\n3 1\n2 3\n2\nSample Output\n\n6 6 -1\n-1 6\n'''","sub_path":"python/Graphs/BFSShortestpath.py","file_name":"BFSShortestpath.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"4344267","text":"from django.urls import path\nfrom .import views\n\nurlpatterns=[\n path('',views.main,name=\"main\"),\n path('index/',views.index,name=\"index\"),\n path('Problem///',views.Problem,name=\"Problem\"),\n path('Profile/',views.Profile,name=\"Profile\"),\n path('login/',views.login,name=\"login\"),\n path('register/',views.register,name=\"register\"),\n path('explore/',views.explore,name= \"explore\"),\n\n path('questionsList//',views.questionsList,name=\"questionsList\"),\n\n\n \n\n]\n\n\n","sub_path":"JudgeSystemProject/JudgeSystemApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"194096019","text":"#important imports\nimport torch\nfrom torchvision import datasets,transforms\n#data loading\ntr = transforms.Compose([transforms.ToTensor(),transforms.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5))])\ntrainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=tr )\ntrainLoader = torch.utils.data.DataLoader(trainset,batch_size = 64,shuffle=True)\ntestset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/',download=True, train = False, transform=tr)\ntestLoader = torch.utils.data.DataLoader(testset,batch_size = 64,shuffle = True)\n\nfrom torch import nn\nimport torch.nn.functional as F\nclass model(nn.Module):\n def __init__(self,inputSize,outputSize,hiddenLayers,dropOut):\n super().__init__()\n self.hiddenlayer= nn.ModuleList([nn.Linear(inputSize,hiddenLayers[0])])\n layers = zip(hiddenLayers[:-1],hiddenLayers[1:])\n self.hiddenlayer.extend([nn.Linear(h1,h2) for h1,h2 in layers])\n self.output = nn.Linear(hiddenLayers[-1],outputSize)\n self.dropout = nn.Dropout(p=dropOut)\n def forward(self,x):\n x=x.view(x.shape[0],-1)\n for l in self.hiddenlayer:\n x = F.relu(l(x))\n x = self.dropout(x)\n x = (F.log_softmax(self.output(x),dim=1))\n return x\n\nneuNet = model(784,10,[256,128,64],0.2)\nprint(neuNet)\n\nfrom torch import optim\noptimizer = optim.Adam(neuNet.parameters(),lr = 0.003)\n\ncriterion = nn.NLLLoss()\n\nfor e in range(1):\n for images,labels in trainLoader:\n optimizer.zero_grad()\n loss = criterion(neuNet(images),labels)\n 
loss.backward()\n optimizer.step()\n print(\"epoch {0}/{1} completed\".format(e+1,5))\nprint(\"end of the training\")\n\nprint(\"the model parameters {0}\".format(neuNet.state_dict().keys()))\n\ntorch.save(neuNet.state_dict(),'checkpoint.pth')\n\nstate_dict = torch.load('checkpoint.pth')\nmodel = neuNet.load_state_dict(state_dict)\n\nprint(model)\nprint(neuNet)\n","sub_path":"savingModel.py","file_name":"savingModel.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"418623392","text":"def build(self, to, reply_to=None, cc=None, bcc=None):\n if (self.headers is None):\n headers = {\n \n }\n else:\n headers = self.headers.copy()\n if (options.get('mail.enable-replies') and ('X-Sentry-Reply-To' in headers)):\n reply_to = headers['X-Sentry-Reply-To']\n else:\n reply_to = set((reply_to or ()))\n reply_to.discard(to)\n reply_to = ', '.join(reply_to)\n if reply_to:\n headers.setdefault('Reply-To', reply_to)\n message_id = make_msgid(get_from_email_domain())\n headers.setdefault('Message-Id', message_id)\n subject = self.subject\n if (self.reply_reference is not None):\n reference = self.reply_reference\n subject = ('Re: %s' % subject)\n else:\n reference = self.reference\n if isinstance(reference, Group):\n (thread, created) = GroupEmailThread.objects.get_or_create(email=to, group=reference, defaults={\n 'project': reference.project,\n 'msgid': message_id,\n })\n if (not created):\n headers.setdefault('In-Reply-To', thread.msgid)\n headers.setdefault('References', thread.msgid)\n msg = EmailMultiAlternatives(subject=subject.splitlines()[0], body=self.__render_text_body(), from_email=self.from_email, to=(to,), cc=(cc or ()), bcc=(bcc or ()), headers=headers)\n html_body = self.__render_html_body()\n if html_body:\n msg.attach_alternative(html_body.decode('utf-8'), 'text/html')\n return msg","sub_path":"Data Set/bug-fixing-5/92518b3b397083b47de75e1d3be0278ddb3481a5--fix.py","file_name":"92518b3b397083b47de75e1d3be0278ddb3481a5--fix.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"421881219","text":"# -*- coding: utf-8 -*-\n# Copyright (C) 2004-2013 Mag. Christian Tanzer. All rights reserved\n# Glasauergasse 32, A--1130 Wien, Austria. 
tanzer@swing.co.at\n# ****************************************************************************\n#\n# This module is licensed under the terms of the BSD 3-Clause License\n# .\n# ****************************************************************************\n#\n#++\n# Name\n# TFL.SDG.C.Array\n#\n# Purpose\n# C array declaration\n#\n# Revision Dates\n# 9-Aug-2004 (CT) Creation\n# 12-Aug-2004 (MG) Format changed\n# 21-Sep-2004 (CT) `c_format` changed (use `head` instead of `front` for\n# `description`)\n# 23-Sep-2004 (CT) `c_format` changed (total revamp of `x_forms` for\n# `initializers`)\n# 23-Sep-2004 (MG) `vaps_channel_format` added\n# 27-Oct-2004 (MG) Calculate the default of `bounds` based on the length\n# of `init` (to be backward compatible)\n# 16-Nov-2004 (MG) Multidimension array support added\n# 16-Sep-2005 (MG) Changed to support `fmt` again\n# 19-Oct-2006 (CED) Length check added\n# 31-Jul-2007 (MG) Add description to `h_format`\n# 18-Oct-2007 (MZO) [25170] `init_comments` added\n# 6-Dec-2007 (CT) Imports fixed\n# 26-Feb-2012 (MG) `__future__` imports added\n# ««revision-date»»···\n#--\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom _TFL import TFL\nfrom _TFL.pyk import pyk\n\nimport _TFL._SDG._C._Decl_\nimport _TFL._SDG._C.Expression\nimport _TFL._SDG._C.Struct\nimport _TFL._SDG._C.Type\nimport _TFL._SDG._C.Var\n\nfrom _TFL.predicate import un_nested\n\nclass Array (TFL.SDG.C._Var_) :\n \"\"\"C array declaration\"\"\"\n\n Ancestor = TFL.SDG.C._Var_\n\n init_arg_defaults = dict \\\n ( bounds = \"\"\n , fmt = \"%s\"\n , per_row = 0\n )\n\n _autoconvert = dict \\\n ( bounds = lambda s, k, v : s._convert_bounds (v)\n )\n _bounds = property (lambda s : \"][\".join (s.bounds))\n _common_format = \"\".join \\\n ( ( Ancestor._common_head\n , \"\"\" [%(::._bounds:)s]\"\"\"\n )\n )\n h_format = \"\".join \\\n ( ( _common_format\n , \";%(:head= :*description:)s\"\n )\n )\n\n c_format = \"\".join \\\n ( ( _common_format\n , \"\"\"%(:front= =%(NL)s%(base_indent)s:*initializers:)s\"\"\"\n , \";\"\n , \"\"\"%(:head= :*description:)s\"\"\"\n )\n )\n\n vaps_channel_format = \"\"\"\n %(name)s %(::._bounds:)s %(::*type:)s\n \"\"\"\n ### to be able to use `Ancestor._common_format` which references `struct`\n struct = None\n\n def __init__ \\\n (self, type, name\n , bounds = None\n , init = ()\n , init_comments = ()\n , ** kw\n ) :\n if bounds is None :\n bounds = len (init)\n if isinstance (bounds, int) :\n assert len (init) <= bounds, (bounds, init)\n self.__super.__init__ \\\n (type, name, bounds = bounds, init = init, ** kw)\n if self.init :\n self.initializers = self._setup_initializers \\\n (self.init, init_comments = init_comments)\n # end def __init__\n\n def _convert_bounds (self, bounds) :\n if isinstance (bounds, pyk.string_types + pyk.int_types) :\n bounds = (bounds, )\n return [str (b) for b in bounds]\n # end def _convert_bounds\n\n def _setup_initializers \\\n ( self\n , init_list\n , description = None\n , init_comments = ()\n ) :\n result = TFL.SDG.C.Init_Comp (description = description)\n t = self._struct or self.type\n if isinstance (t, (TFL.SDG.C.Struct, TFL.SDG.C.Array)) :\n Init = t._setup_initializers\n else :\n if len (self.bounds) <= 1 :\n fmt = self.fmt.replace (\"%%\", \"%\")\n Init = lambda v, ** kw : \\\n TFL.SDG.C.Init_Atom (fmt % (v, ), ** kw)\n kw = dict (format = self.fmt)\n else :\n return self._apply_array_level (init_list, description or \"\")\n if not init_comments :\n init_comments = [None] * len (init_list)\n for 
k, (v, comment) in enumerate (zip (init_list, init_comments)) :\n d = \"[%s]\" % k\n if comment :\n d = \"%s %s\" % (d, comment)\n result.add (Init (v, description = d))\n return result\n # end def _setup_initializers\n\n def _apply_array_level (self, init_list, level) :\n result = TFL.SDG.C.Init_Comp (description = \"%s\" % (level, ))\n for k, v in enumerate (init_list) :\n desc = \"%s[%d]\" % (level, k)\n if isinstance (v, (tuple, list)) :\n result.add (self._apply_array_level (v, desc))\n else :\n result.add ( TFL.SDG.C.Init_Atom (v, description = desc))\n return result\n # end def _apply_array_level\n\n# end class Array\n\nif __name__ != \"__main__\" :\n TFL.SDG.C._Export (\"*\")\n### __END__ TFL.SDG.C.Array\n","sub_path":"Functions/venv/lib/python3.6/site-packages/_TFL/_SDG/_C/Array.py","file_name":"Array.py","file_ext":"py","file_size_in_byte":5273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"534087353","text":"import unittest\nfrom services.organization import Organization\n\nclass TestOrganizationSrv(unittest.TestCase):\n def __init__(self, methodName=\"\"):\n super(TestOrganizationSrv, self).__init__(methodName)\n self.organizationSrv = Organization()\n self.organizationSrv.data = [{\n \"_id\": 100,\n \"domain_names\": [\"dm100\", \"dm102\", \"dm103\"],\n \"tags\": [\"tag1\", \"tag11\", \"tag111\"],\n \"name\": \"organization 1\"\n },\n {\n \"_id\": 200,\n \"domain_names\": [\"dm201\", \"dm202\", \"dm203\"],\n \"tags\": [\"tag2\", \"tag22\", \"tag222\"],\n \"name\": \"organization 2\"\n },\n {\n \"_id\": 300,\n \"domain_names\": [\"dm301\", \"dm302\", \"dm303\"],\n \"tags\": [\"tag3\", \"tag33\", \"tag333\"],\n \"name\": \"organization 3\"\n }]\n\n def test_get_orgs_by_id(self):\n \n self.organizationSrv.get_orgs_by_id()\n org = self.organizationSrv.orgs_by_ids[200]\n self.assertIsNotNone(org)\n self.assertEqual(org[\"name\"], \"organization 2\")\n\n\n \n def test_find_organizations(self):\n organizations = self.organizationSrv.finds(\"domain_names\", \"dm301\")\n self.assertTrue(len(organizations) == 1)\n self.assertEqual(organizations[0][\"name\"], \"organization 3\")\n self.assertEqual(organizations[0][\"_id\"], 300)\n\n organizations = self.organizationSrv.finds(\"tags\", \"tag22\")\n self.assertTrue(len(organizations) == 1)\n self.assertEqual(organizations[0][\"name\"], \"organization 2\")\n self.assertEqual(organizations[0][\"_id\"], 200)\n\n organizations = self.organizationSrv.finds(\"tags\", \"tag444\")\n self.assertTrue(len(organizations) == 0)\n \n def test_list_fields(self):\n fields = self.organizationSrv.fields\n self.assertTrue(\"_id\" in fields)\n self.assertTrue(\"tags\" in fields)\n self.assertTrue(\"domain_names\" in fields)\n","sub_path":"test/test_organization_srv.py","file_name":"test_organization_srv.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"179093169","text":"import matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\nimg = mpimg.imread('lab1.jpg')\nimgplot = plt.imshow(img)\nplt.show()\nred, blue, green = img.copy(), img.copy(), img.copy()\nred[:,:, 1], red[:, :, 2] = 0, 0\nplt.imshow(red)\nplt.show()\nblue[:, :, 0], blue[:, :, 1] = 0, 0\nplt.imshow(blue)\nplt.show()\ngreen[:, :, 0], green[:, :, 2] = 0, 0\nplt.imshow(green)\nplt.show()\nlum_img = img[:,:,0]\nimgplot = plt.imshow(lum_img, clim=(0.0, 50.0))\nplt.show()\nlum_img1 = img[:,:,0]\nimgplot = 
plt.imshow(lum_img, clim=(0.0, 255.0))\nplt.show()\nimgplot = plt.imshow(np.dot(img[...,:3], [0.33, 0.33, 0.33]), cmap=\"gray\")\nplt.show()","sub_path":"Lab_5/Lab_5.3.py","file_name":"Lab_5.3.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"324081070","text":"__author__ = 'XChen'\nimport pandas as pd\nimport numpy as np\nfrom scipy import stats\nimport scipy as sp\nimport matplotlib.pyplot as plt\nimport MySQLdb\n#import pymysql\nfrom sklearn.linear_model import LinearRegression\n\n\nsdax = MySQLdb.connect(host='sdax-retail-dev.cd50doqou1ik.eu-west-1.rds.amazonaws.com', port=3306, user='wil', passwd='821027Si$', db='sdax_dev')\ncur = sdax.cursor()\n\n# execute SQL query using execute() method.\ncur.execute (\"select JAN_2012,FEB_2012,MAR_2012,APR_2012,MAY_2012,JUN_2012,JUL_2012,AUG_2012,SEP_2012,OCT_2012,NOV_2012,DEC_2012,JAN_2013,FEB_2013,MAR_2013,APR_2013,MAY_2013,JUN_2013,JUL_2013,AUG_2013,SEP_2013,OCT_2013,NOV_2013,DEC_2013,JAN_2014,FEB_2014,MAR_2014,APR_2014,MAY_2014,JUN_2014,JUL_2014,AUG_2014,SEP_2014,OCT_2014,NOV_2014,DEC_2014,JAN_2015,FEB_2015,MAR_2015,APR_2015,MAY_2015,JUN_2015 from NIELSEN_With_Zero\")\n\ndf = cur.fetchall()\nlengthDF = len(df)\nexampleDF = df[8822][:]\n\n##### Plot the example time series\n# fig = plt.figure(figsize=(12,8))\n# ax = fig.add_subplot(111)\n# plt.plot(exampleDF)\n# plt.title('Sales example')\n# plt.xlabel('Month')\n# plt.ylabel('Sales')\n# plt.show()\n# legend = ax.legend(loc = 'upper left')\n# legend.prop.set_size(20)\n\n\n# for row in df:\n# print(row[0], row[1])\n\n\n# disconnect from server\ncur.close()\nsdax.close()\n\n######Process the data\ndef Process_data(DF, window, horizon):\n \"\"\"\n Input:\n to_forecast, univariate time series organized as numpy array\n window, number of items to use in the forecast window\n horizon, horizon of the forecast\n Output:\n X, a matrix where each row contains a forecast window\n y, the target values for each row of X\n \"\"\"\n shape = DF.shape[:-1] + (DF.shape[-1] - window + 1, window)\n strides = DF.strides + (DF.strides[-1],)\n X = np.lib.stride_tricks.as_strided(DF,\n shape=shape,\n strides=strides)\n Y = np.array([X[i+horizon][-1] for i in range(len(X)-horizon)])\n return X[:-horizon], Y\n\ndef mape(Esti, Gtrue):\n \"\"\" returns the mean absolute percentage error \"\"\"\n idx = Gtrue != 0.0\n return 100*np.mean(np.abs(Esti[idx]-Gtrue[idx])/Gtrue[idx])\n\nk = 18 # number of previous observations to use\nh = 1 # forecast horizon\nvectorPred = [None] * 11\n\nfor ii in range(0,10):\n exDF = np.asarray(df[ii][:])\n X,Y = Process_data(exDF, k, h)\n\n m = len(exDF)-k-h # number of samples to take in account\n regressor = LinearRegression(normalize=True)\n regressor.fit(X[:m], Y[:m])\n predResult = regressor.predict(X[m:])\n flagP = sum(exDF[37:])\n if flagP == 0:\n predResult = 0.00000\n else:\n continue\n\n # print('The error of line is %0.2f%%' % mape(regressor.predict(X[m:]),Y[m:]))\n vectorPred[ii] = predResult\n\n\n# import statsmodels.api as sm\n# arma_res = sm.tsa.ARMA(exDF, order=(2,2)).fit()\n# preds, stderr, ci = arma_res.forecast(1)\n\n\n# plt.figure(figsize=(8,6))\n# plt.plot(Y, label='True data', color='#377EB8', linewidth=2)\n# plt.plot(regressor.predict(X),\n# '--', color='#EB3737', linewidth=3, label='Prediction')\n# plt.plot(Y[:m], label='True data', color='#3700B8', linewidth=2)\n# # plt.xticks(arange(len(dates))[1::4],dates[1::4], rotation=45)\n# plt.legend(loc='upper left')\n# # 
ylabel('beer consumed (millions of litres)')\n# plt.show()\n\n\nvectorP = np.asarray(vectorPred)\nnp.savetxt(\"Nielsen_OneStepPred_With_Zero_B.csv\", vectorP, delimiter=\",\")\n\n\n","sub_path":"PredictionCode/Nielsen_With_Zero.py","file_name":"Nielsen_With_Zero.py","file_ext":"py","file_size_in_byte":3441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"626010742","text":"#coding=utf8\nfrom TexasSyncSocket import TexasSyncSocket\nimport TexasConfig\n\n#加万能豆\ndef CMD_AddPowerGameCoin(strUserName, nCoin):\n print(\"CMD_AddPowerGameCoin user:\" + strUserName + \",coin:\" + str(nCoin) + \" ...\")\n s = TexasSyncSocket()\n if s.Connect(TexasConfig.DBSERVER_HOST, TexasConfig.DBSERVER_PORT) == False:\n return False\n ret = s.PowerGameCoinChange(strUserName, nCoin)\n print(ret)\n s.Close()\n\n if ret[0] != 0:\n return False\n print(\"CMD_AddPowerGameCoin user:\" + strUserName + \",coin:\" + str(nCoin) + \" OK\")\n return True\n\nstrUser = \"chaim0415\"\nCMD_AddPowerGameCoin(strUser, 8000000)\n","sub_path":"addpowercoin.py","file_name":"addpowercoin.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"438564481","text":"#!/usr/bin/env python\nimport sys\nfrom distutils.core import setup\n\nversion = '0.1.2'\n\nkwargs = {\n 'name': 'pydnstest',\n 'version': version,\n 'description': 'DNS toolkit',\n 'long_description':\n \"\"\"pydnstest is a DNS software testing library.\n It supports parsing and running Unbound-like test scenarios,\n and setting up a mock DNS server. It's based on dnspython.\"\"\",\n 'author': 'Marek Vavrusa',\n 'author_email': 'marek@vavrusa.com',\n 'license': 'BSD',\n 'url': 'https://github.com/CZ-NIC/deckard',\n 'packages': ['pydnstest'],\n 'install_requires': [\n 'dnspython',\n 'jinja2'\n ],\n 'classifiers': [\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"Programming Language :: Python\",\n \"Topic :: Internet :: Name Service (DNS)\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n}\n\nsetup(**kwargs)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"566907894","text":"#!/usr/bin/env python3\n\nimport json\nimport random\n\nimport pandas as pd\nimport numpy as np\n\nfrom eval_ddos_block_sim import block_traffic_sim_both, block_sim_both_plot, block_traffic_sim_friend_tier1, \\\n block_sim_friend_tier1_plot\nfrom eval_ddos_route_sim import route_sim_multiprocess as route_sim\nfrom eval_ddos_utils import load_as_graph, get_stub_as_list, get_as_country, asn_lookup, get_vaild_dns_lists, \\\n get_non_stub_as_list\n\n\ndef generate_stat(stat_file, target_num = 10, dns_server_num = 3000, target_country_set=None, dns_country_set=None):\n stubs = get_stub_as_list()\n\n if target_country_set:\n asn_country, country_asn_set = get_as_country()\n select_country_as = set()\n for i in target_country_set:\n select_country_as |= country_asn_set[i]\n target_stubs = set(stubs) & select_country_as\n else:\n target_stubs = set(stubs)\n\n target_stubs = random.sample(target_stubs, target_num)\n print(\"target_stubs\", target_stubs)\n\n nameservers = pd.read_csv(\"data/nameservers.csv\", dtype=str)\n\n country_col = nameservers.columns[2]\n ip_col = nameservers.columns[0]\n dns_ip_list = []\n for i, row in nameservers.iterrows():\n ip = 
str(row[ip_col])\n if \":\" in ip: # ipv6\n continue\n if dns_country_set:\n country = str(row[country_col])\n if country not in dns_country_set:\n continue\n dns_ip_list.append(ip)\n print(\"dns ip list length\", len(dns_ip_list))\n\n g = load_as_graph()\n\n stat={}\n for asn in target_stubs:\n a = stat[asn] = {}\n random.shuffle(dns_ip_list)\n i = 0\n count = 0\n while count < dns_server_num:\n ip = dns_ip_list[i]\n asn = asn_lookup(ip)\n if asn and asn in g.nodes:\n a[ip] = {\"vol\": 1.0, \"as\":asn}\n count += 1\n i+=1\n if i>=len(dns_ip_list):\n break\n print(\"select dns list length\",len(a))\n\n vaild_dns_lists = get_vaild_dns_lists()\n for k,v in stat.items():\n vaild_dns_list = vaild_dns_lists.get(k,[])\n for dns in vaild_dns_list:\n if dns in v:\n v[dns][\"inwhitelist\"] = True\n print(\"inwhitelist\", dns)\n\n json.dump(stat, open(stat_file, 'w'), indent=4)\n print(\"dumped\")\n\n\ndef main():\n common = \"gen-stat-20World-10000World-01301200\"\n\n stat_file = \"result/%s.json\" % common\n sim_route_file = 'result/%s-sim-route.json' % common\n sim_block_file_both = 'result/%s-sim-block.csv' % common\n\n #generate_stat(stat_file, 20, 10000, None, None)\n #route_sim(stat_file, sim_route_file)\n #block_traffic_sim_both(sim_route_file, sim_block_file_both, np.linspace(0.05, 0.5, 19) , 50, incremental=200)\n #print(len(get_non_stub_as_list()))\n block_sim_both_plot(sim_block_file_both, fig_save=True, name_prefix=common + \"-\")\n\ndef main1():\n common = \"result/gen2-stat-500AS-5000DNS-01301900\"\n\n stat_file = \"%s.json\" % common\n sim_route_file = '%s-sim-route.json' % common\n sim_block_file = '%s-sim-block.csv' % common\n\n # generate_stat(stat_file, 500, 5000, None, None)\n # route_sim(stat_file, sim_route_file)\n # block_traffic_sim_friend_tier1(sim_route_file, sim_block_file, mp=True)\n block_sim_friend_tier1_plot(sim_block_file, fig_save=True, figpath_prefix=common)\n\n\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"eval_ddos.py","file_name":"eval_ddos.py","file_ext":"py","file_size_in_byte":3423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"522476109","text":"from copy import deepcopy\nfrom pathlib import Path\n\nfrom scrapli_replay.server.collector import ScrapliCollector\n\nimport scrapli\nfrom scrapli.driver.core.cisco_nxos.base_driver import PRIVS\n\nTEST_DATA_DIR = f\"{Path(scrapli.__file__).parents[1]}/tests/test_data\"\n\n\ndef main():\n privs = deepcopy(PRIVS)\n privs.pop(\"exec\")\n privs[\"privilege_exec\"].previous_priv = \"\"\n privs[\"privilege_exec\"].escalate = \"\"\n privs[\"privilege_exec\"].escalate_prompt = \"\"\n\n scrapli_kwargs = {\n \"host\": \"localhost\",\n \"port\": 22022,\n \"ssh_config_file\": False,\n \"auth_strict_key\": False,\n \"auth_username\": \"vrnetlab\",\n \"auth_password\": \"VR-netlab9\",\n \"auth_secondary\": \"VR-netlab9\",\n \"platform\": \"cisco_nxos\",\n \"privilege_levels\": privs,\n \"timeout_ops\": 120.0,\n \"timeout_socket\": 120.0,\n \"timeout_transport\": 120.0,\n \"comms_ansi\": True,\n }\n\n collector = ScrapliCollector(\n channel_inputs=[\"show version\", \"show run\"],\n interact_events=[\n [\n (\"clear logg onboard\", \"Do you want to continue? 
(y/n) [n]\", False),\n (\"y\", \"switch#\", False),\n ]\n ],\n paging_indicator=\"--More--\",\n paging_escape_string=\"q\",\n collector_session_filename=f\"{TEST_DATA_DIR}/mock_server_sessions/nxos.yaml\",\n **scrapli_kwargs,\n )\n\n collector.open()\n collector.collect()\n collector.close()\n collector.dump()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"tests/integration/collect/nxos.py","file_name":"nxos.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"631642884","text":"import threading\nimport random\nimport logging\n\nlogging.basicConfig(level=__debug__,format='[%(levelname)s] (%(threadName)-10s) %(message)s')\nexitFlag = False\nlst = [\"apple\",\"mango\",\"grapes\",\"orange\",\"pineapple\"]\ndef print_list():\n global exitFlag\n if not exitFlag:\n with queue:\n if not len(lst) == 0:\n name = lst.pop(0)\n logging.debug(\"printing name %s\",name)\n # logging.debug(\"\")\n # print name\n else:\n logging.debug(\"setting flag exitFlag to True\")\n exitFlag = True\n\nqueue = threading.Lock()\n\nwhile not exitFlag:\n for i in range(3):\n logging.debug(\"Starting thread %d\",i)\n t = threading.Thread(name=\"thread-\"+str(i),target=print_list)\n t.start()\n\nmain_thread = threading.currentThread()\nif exitFlag:\n for i in threading.enumerate():\n if i is not main_thread:\n i.join()\n logging.debug(\"exited\")","sub_path":"python/threads/thread_sync.py","file_name":"thread_sync.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"288579631","text":"from flask import Flask, jsonify, request\nimport os\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_marshmallow import Marshmallow \nfrom flask_jwt_extended import JWTManager, create_access_token, get_jwt_identity, jwt_required\n\n\n\n\napp = Flask(__name__)\nbasedir = os.path.abspath(os.path.dirname(__file__))\napp.config['SQLALCHEMY_DATABASE_URI'] = \"sqlite:///\" + os.path.join(basedir, 'recipes.db')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['JWT_SECRET_KEY'] = 'super jwt key'\ndb = SQLAlchemy(app)\nma = Marshmallow(app)\njwt = JWTManager(app)\n\n# Flask CLI \n@app.cli.command('create_database')\ndef create_database():\n db.create_all()\n\n print('Database tables created')\n\n@app.cli.command('seed_db')\ndef seed_database():\n user1 = User(email='admin@gmail.com', username='adminuser', is_admin=True)\n user2 = User(email='testuser@gmail.com', username='testuser', is_admin=False)\n recipe1 = Recipe(name='Roast_Chicken', protein='chicken', ingredients='chicken, onions, carrots, celery, garlic, oil, lemon, fresh herbs')\n recipe2 = Recipe(name='Creamy_Beef_Pasta', protein='beef', ingredients='beef, garlic, basil, oregano, salt, pepper, flour, tomato sauce, beef broth, pasta, heavy cream, cheddar cheese')\n db.session.add_all([user1, user2, recipe1, recipe2])\n db.session.commit()\n print('Database seeded')\n\n@app.cli.command('destroy_database')\ndef destroy_databse():\n db.drop_all()\n print('Database tables destroyed')\n\n\n# Database Models\n\nclass User(db.Model):\n __tablename__ = 'users'\n\n id = db.Column(db.Integer, primary_key=True)\n email = db.Column(db.String, unique=True)\n username = db.Column(db.String) #should be unique\n is_admin = db.Column(db.Boolean)\n\nclass Recipe(db.Model):\n __tablename__ = 'recipes'\n\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String, 
unique=True)\n protein = db.Column(db.String)\n ingredients = db.Column(db.String)\n\n\n# Marshmallow Schema\n\nclass UserSchema(ma.SQLAlchemySchema):\n class Meta:\n model = User\n\n email = ma.auto_field()\n username = ma.auto_field()\n links = ma.auto_field()\n\n # Hyperlinks\n links = ma.Hyperlinks(\n {\n 'self': ma.URLFor(\"get_user_detail\", values=dict(username=\"\")),\n \"collection\": ma.URLFor(\"get_all_users\"),\n }\n )\n\nclass RecipeSchema(ma.SQLAlchemySchema):\n class Meta:\n model = Recipe\n\n name = ma.auto_field()\n protein = ma.auto_field()\n ingredients = ma.auto_field()\n links = ma.auto_field()\n\n # Hyperlinks\n links = ma.Hyperlinks(\n {\n 'self': ma.URLFor(\"get_recipe_detail\", values=dict(recipe_name=\"\")),\n \"collection\": ma.URLFor(\"get_all_recipes\"),\n }\n )\n\n# Define the ability to serialize objects\nuser_schema = UserSchema()\nrecipe_schema = RecipeSchema()\n\n# Define the ability to serialize a collection of objects\nusers_schema = UserSchema(many=True)\nrecipes_schema = RecipeSchema(many=True)\n\n\n\n# Main Code\n@app.route('/recipes/', methods=['GET'])\ndef get_all_recipes():\n \"\"\"\n GET: Returns a list of all recipes \n No authentification is required to access the recipes \n \"\"\"\n recipes = Recipe.query.all()\n return jsonify(recipes_schema.dump(recipes))\n\n@app.route('/recipes/', methods=['GET'])\ndef get_recipe_detail(recipe_name:str):\n \"\"\"\n GET: Returns recipe data for the given recipe\n \"\"\"\n recipe = Recipe.query.filter_by(name=recipe_name).first()\n return recipe_schema.dump(recipe)\n\n@app.route('/recipes/', methods=['POST'])\n@jwt_required()\ndef add_recipe():\n \"\"\"\n POST: Adds a new recipe to the database\n User needs to be logged in to access this route. Furthermore the user has to be an admin\n \"\"\"\n current_user = get_jwt_identity()\n user = User.query.filter_by(username=current_user).first()\n if user.is_admin:\n name = request.json.get('name')\n recipe = Recipe.query.filter_by(name=name).first()\n if recipe:\n return jsonify({\"message\": \"Recipe already exists!\"}), 409\n ingredients = request.json.get('ingredients')\n protein = request.json.get('protein')\n recipe = Recipe(name=name, ingredients=ingredients, protein=protein)\n db.session.add(recipe)\n db.session.commit()\n return jsonify({\"message\": \"New recipe added\"})\n \n\n return jsonify({\"message\": \"Unauthorised\"}), 403\n\n\n@app.route('/recipes/', methods=['POST'])\n@jwt_required()\ndef put_recipe_detail(recipe_name:str):\n \"\"\"\n POST: Modifies recipe data for the given recipe\n User needs to be authenticated to access this endpoint\n Furthermore the user needs to be admin\n \"\"\"\n #current user is the username of the user\n current_user = get_jwt_identity()\n user = User.query.filter_by(username=current_user).first()\n if user.is_admin:\n recipe = Recipe.query.filter_by(name=recipe_name).first()\n if recipe: \n recipe.name = request.json.get('name')\n recipe.ingredients = request.json.get('ingredients')\n recipe.protein = request.json.get('protein')\n db.session.add(recipe)\n db.session.commit()\n return jsonify({\"message\": \"Recipe updated\"})\n return jsonify({\"message\": \"Recipe does not exist\"}), 404\n\n return jsonify({\"message\": \"Unauthorised\"}), 403\n\n\n@app.route('/recipes/', methods=['DELETE'])\n@jwt_required()\ndef delete_recipe_detail(recipe_name:str):\n \"\"\"\n DELETE: Deletes the given \n User needs to be authenticated to access this endpoint\n Furthermore the user needs to be admin\n\n \"\"\"\n current_user = 
get_jwt_identity()\n user = User.query.filter_by(username=current_user).first()\n if user.is_admin:\n recipe = Recipe.query.filter_by(name=recipe_name).first()\n db.session.delete(recipe)\n db.session.commit()\n return jsonify({\"message\": \"Recipe Deleted\"})\n\n return jsonify({\"message\": \"Unauthorised\"}), 403\n\n@app.route('/users/', methods=['GET'])\n@jwt_required()\ndef get_all_users():\n \"\"\"\n GET: Returns a list of all users\n User needs to be logged in to access this endpoint \n Furthermore the user needs to be admin\n \"\"\"\n current_user = User.query.filter_by(username=get_jwt_identity()).first()\n\n if current_user.is_admin:\n users_list = User.query.all()\n return jsonify(users_schema.dump(users_list))\n return jsonify({\"message\": \"Unauthorised\"}), 403\n\n@app.route('/users/', methods=['POST'])\n@jwt_required()\ndef add_new_user():\n \"\"\"\n POST: Adds a new user to the database\n User needs to be logged in to access this endpoint \n Furthermore the user needs to be admin\n \"\"\"\n current_user = User.query.filter_by(username=get_jwt_identity()).first()\n if current_user.is_admin:\n email = request.json.get('email')\n user = User.query.filter_by(email=email).first()\n if user:\n return jsonify({\"message\": \"Email already registered!\"}), 409\n username = request.json.get('username')\n is_admin = bool(request.json.get('is_admin'))\n user = User(username=username, email=email, is_admin=is_admin)\n db.session.add(user)\n db.session.commit()\n return jsonify({\"message\": \"New user added\"})\n \n\n return jsonify({\"message\": \"Unauthorised\"}), 403\n\n\n\n@app.route('/users/', methods=['GET'])\n@jwt_required()\ndef get_user_detail(username:str):\n \"\"\"\n GET: Returns user data for the given user\n User needs to be logged in to access this endpoint\n Furthermore a user can only access their own data. 
Admin user can access all user data\n \"\"\"\n user = User.query.filter_by(username=username).first()\n current_user = User.query.filter_by(username=get_jwt_identity()).first()\n if user:\n if current_user.username == user.username or current_user.is_admin:\n return user_schema.dump(user)\n return jsonify({\"message\": \"Unauthorised\"}), 403\n return jsonify({\"message\": \"Bad Request\"}), 400\n\n@app.route('/users/', methods=['POST'])\n@jwt_required()\ndef update_user_detail(username:str):\n \"\"\"\n PUT: Modifies user data for the given user\n\n \"\"\"\n user = User.query.filter_by(username=username).first()\n current_user = User.query.filter_by(username=get_jwt_identity()).first()\n if current_user.is_admin:\n email = request.json.get('email')\n username = request.json.get('username')\n is_admin = request.json.get('is_admin')\n user = User.query.filter_by(username=username).first()\n if not user:\n return jsonify({\"message\": \"Bad Request\"}), 400\n \n user.username = username\n user.email = email\n if is_admin == \"1\":\n user.is_admin = True\n else:\n user.is_admin = False\n print(user.username, user.email, user.is_admin)\n db.session.add(user)\n db.session.commit()\n return jsonify({\"message\": \"User updated\"})\n return jsonify({\"message\": \"Unauthorised\"}), 403\n \n\n@app.route('/users/', methods=['DELETE'])\n@jwt_required()\ndef delete_user(username:str):\n \"\"\"\n DELETE: Deletes the given user\n Only admin can delete users\n\n \"\"\"\n current_user = User.query.filter_by(username=get_jwt_identity()).first()\n\n if current_user.is_admin:\n user = User.query.filter_by(username=username).first()\n if user:\n db.session.delete(user)\n db.session.commit()\n return jsonify({\"message\": \"User deleted\"})\n return jsonify({\"message\": \"Bad Request\"}), 400\n print('User deleted')\n return jsonify({\"message\": \"Unauthorised\"}), 403\n\n\n# Create a registration route for creating new users. If the email address is already\n# registered, return 409 (conflict)\n@app.route('/register', methods=['POST'])\ndef register():\n email = request.json.get('email')\n username = request.json.get('username')\n\n user = User.query.filter_by(email=email).first()\n if user:\n return jsonify({'message': 'Email address already registered!'}), 409\n new_user = User(email=email, username=username, is_admin=False)\n db.session.add(new_user)\n db.session.commit()\n return jsonify({'message': 'User created successfully'}), 201 \n\n\n# Create a route to authenticate your users and return JWTs. 
The\n# create_access_token() function is used to actually generate the JWT.\n# for simplicity sake users only need to provide email and username to login\n@app.route(\"/login\", methods=[\"POST\"])\ndef login():\n submitted_email = request.json.get(\"email\")\n submitted_username = request.json.get(\"username\")\n user = User.query.filter_by(email=submitted_email).first()\n if user:\n if user.username == submitted_username:\n access_token = create_access_token(identity=submitted_username)\n return jsonify(access_token=access_token)\n \n return jsonify({\"message\": \"Bad username or email\"}), 401\n\n\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n app.run()\n\n\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":11083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"242677427","text":"import os\nimport sys\nimport pickle\n\nimport numpy as np\n\nclass TimitMFCC:\n LETTERS = [\n \"\", \"_\", \"'\", \"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\", \"k\", \"l\", \"m\",\n \"n\", \"o\", \"p\", \"q\", \"r\", \"s\", \"t\", \"u\", \"v\", \"w\", \"x\", \"y\", \"z\",\n ]\n\n MFCC_DIM = 26\n\n class Dataset:\n def __init__(self, data, shuffle_batches, seed=42):\n self._data = {}\n self._data[\"mfcc\"] = data[\"mfcc\"]\n self._data[\"letters\"] = [letters.astype(np.int32) + 1 for letters in data[\"letters\"]]\n self._size = len(self._data[\"mfcc\"])\n\n self._shuffler = np.random.RandomState(seed) if shuffle_batches else None\n\n @property\n def data(self):\n return self._data\n\n @property\n def size(self):\n return self._size\n\n def batches(self, size=None):\n permutation = self._shuffler.permutation(self._size) if self._shuffler else np.arange(self._size)\n while len(permutation):\n batch_size = min(size or np.inf, len(permutation))\n batch_perm = permutation[:batch_size]\n permutation = permutation[batch_size:]\n\n batch = {}\n for key, values in self._data.items():\n max_length = max(len(values[i]) for i in batch_perm)\n batch[key] = np.zeros([batch_size, max_length, *values[batch_perm[0]].shape[1:]], values[batch_perm[0]].dtype)\n batch[key + \"_len\"] = np.zeros([batch_size], dtype=np.int32)\n\n for i, index in enumerate(batch_perm):\n batch[key][i][:len(values[index])] = values[index]\n batch[key + \"_len\"][i] = len(values[index])\n yield batch\n\n def __init__(self, path=\"timit_mfcc.pickle\"):\n if not os.path.exists(path):\n print(\"The Timit dataset is not public, you need to manually download it\", file=sys.stderr)\n sys.exit(1)\n\n with open(path, \"rb\") as timit_mfcc_file:\n data = pickle.load(timit_mfcc_file)\n\n for dataset in [\"train\", \"dev\", \"test\"]:\n setattr(self, dataset, self.Dataset(data[dataset], shuffle_batches=dataset == \"train\"))\n","sub_path":"deep-learning/rnn/timit_mfcc.py","file_name":"timit_mfcc.py","file_ext":"py","file_size_in_byte":2238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"449251636","text":"from django import forms\nfrom django.urls import reverse_lazy\nfrom rest_framework.fields import ChoiceField\n\nfrom BioPyApp.models import Batch, Class, Event, Process, Variable\nfrom BioPyApp.widgets import CRUDWidgetWrapper\n\nunfolding_method_options = (\n (\"idx\",\"Indexed\"),\n (\"ts\",\"Timeseries\"),\n (\"bw_tv\",\"Batch-wise (TxV)\"),\n (\"bw_vt\",\"Batch-wise (VxT)\"),\n (\"vw_bt\",\"Variable-wise (BxT)\"),\n (\"vw_tb\",\"Variable-wise (TxB)\"),\n (\"tw_bv\",\"Time-wise (BxV)\"),\n 
(\"tw_vb\",\"Time-wise (VxB)\"),\n)\nunfolding_axis_options = ((\"x\",\"X Axis (Column)\"),(\"y\",\"Y Axis (Row)\"))\ntime_reference_options = [(\"elapsed\",\"Elapsed\"),(\"timestamp\",\"Absolute\")]\ncompression_options = (('sparse',\"Sparse\"),('dense','Dense'))\nfile_format_options=[\"parquet\",\"pickle\",\"csv\",\"hdf\",\"xlsx\",\"json\",\"feather\",\"stata\",\"msgpack\"]\n\n\nclass SelectVariablePredictorsForm(forms.Form):\n\n predictors = forms.MultipleChoiceField()\n \n def __init__(self, *args, **kwargs):\n batches = kwargs.pop('batches')\n super(SelectVariablePredictorsForm,self).__init__(*args, **kwargs)\n preds=Variable.objects.filter(batch__in=batches).values_list('name',flat=True).distinct()\n choices = [(p,p) for p in preds ]\n self.fields['predictors'].choices = choices\n self.fields['predictors'].widget = CRUDWidgetWrapper(\n self.fields['predictors'].widget\n ,reverse_lazy('create_variable'),None,None)\n\nclass SelectEventPredictorsForm(forms.Form):\n\n predictors = forms.MultipleChoiceField()\n \n def __init__(self, *args, **kwargs):\n batches = kwargs.pop('batches')\n super(SelectEventPredictorsForm,self).__init__(*args, **kwargs)\n preds=Event.objects.filter(batch__in=batches).values_list('name',flat=True).distinct()\n choices = [(p,p) for p in preds]\n self.fields['predictors'].choices = choices\n self.fields['predictors'].widget = CRUDWidgetWrapper(\n self.fields['predictors'].widget\n ,reverse_lazy('create_event'),None,None)\n\nclass SelectClassPredictorsForm(forms.Form):\n\n predictors = forms.MultipleChoiceField()\n \n def __init__(self, *args, **kwargs):\n batches = kwargs.pop('batches')\n super(SelectClassPredictorsForm,self).__init__(*args, **kwargs)\n preds=Class.objects.filter(batch__in=batches).values_list('name',flat=True).distinct()\n choices = [(p,p) for p in preds]\n self.fields['predictors'].choices = choices\n self.fields['predictors'].widget = CRUDWidgetWrapper(\n self.fields['predictors'].widget\n ,reverse_lazy('create_class'),None,None)\n\nclass SelectSingleProcessDataframeOptionsForm(forms.Form):\n unfolding_method = forms.ChoiceField(choices=unfolding_method_options)\n unfolding_axis = forms.ChoiceField(choices=unfolding_axis_options)\n time_reference = forms.ChoiceField(choices=time_reference_options)\n compression = forms.ChoiceField(choices=compression_options)\n file_format = forms.ChoiceField(choices=zip(file_format_options,[o.title() for o in file_format_options]))\n\nclass MultiProcessForm(forms.Form):\n pass\n","sub_path":"BioPyApp/forms/dataframe.py","file_name":"dataframe.py","file_ext":"py","file_size_in_byte":3121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"347105331","text":"import numpy as np\nimport random\nimport bisect\n\n\nclass TSPACO:\n def __init__(self, distance_matrix, initialization_value, evaporation_rate, intensification_value, alpha=1., beta=1.,\n ant_number=30, n_best_to_intensify=1, pher_min=None, pher_max=None):\n \"\"\" Initializes all needed parameters for the TSP-ACO \"\"\"\n\n self.distance_matrix = np.asarray(distance_matrix)\n self.cities = self.distance_matrix.shape[0]\n self.initialization_value = initialization_value\n self.evaporation_rate = evaporation_rate\n self.intensification_value = intensification_value\n self.alpha = alpha\n self.beta = beta\n self.ant_number = ant_number\n self.n_best_to_intensify = n_best_to_intensify\n # minimal and maximal values for pheromones which didn't turn out to be so great\n self.pher_min = pher_min\n 
self.pher_max = pher_max\n        # never changes, so calculating once in the beginning saves time\n        # divide, except where distance is 0; there the value of the \"out\" array is kept\n        self.heuristic_matrix = np.divide(1, self.distance_matrix, out=np.zeros_like(self.distance_matrix, dtype=float),\n                                          where=self.distance_matrix != 0)\n        self.pheromone_matrix = None\n        self.initialize()\n\n    def run(self, iterations=1):\n        \"\"\" Runs the algorithm for the given number of iterations \"\"\"\n        best_paths_lengths = np.zeros(iterations)\n        best_paths = np.zeros((iterations, self.cities), dtype=np.int64)\n        for i in range(iterations):\n            if (i+1) % 100 == 0:\n                print(f\"Iteration {i+1}\")\n            paths = self.construct_solutions()\n            self.evaporate()\n            # calculate indices for best paths, then take N and intensify them\n            path_qualities = np.argsort([self.objective_function(path) for path in paths])\n            cur_best_paths = paths[path_qualities[:self.n_best_to_intensify]]\n            self.intensify(cur_best_paths)\n            best_paths[i] = cur_best_paths[0]\n            best_paths_lengths[i] = self.objective_function(cur_best_paths[0])\n        return best_paths, best_paths_lengths\n\n    def objective_function(self, solution):\n        \"\"\" Returns the length of the given solution \"\"\"\n        return sum(self.distance_matrix[solution[i], solution[(i + 1) % len(solution)]] for i in range(len(solution)))\n\n    def initialize(self):\n        \"\"\" Initializes the pheromone matrix with the initialization value\"\"\"\n        self.pheromone_matrix = np.full_like(self.distance_matrix, self.initialization_value, dtype=float)\n\n    def construct_solutions(self):\n        \"\"\" Calculates a solution for every ant and returns them \"\"\"\n\n        # calculate weights for each city to go to any other city. It's not a probability because normalization\n        # is omitted (the denominator of the equation), which means values are not between 0 and 1.\n        # This saves time and more importantly means the values don't need to be recalculated every time\n        # a column gets set to 0 when the corresponding city is not up for selection anymore.\n        city_weights = self.pheromone_matrix.copy()\n        if self.alpha == 1 and self.beta == 1:\n            city_weights *= self.heuristic_matrix\n        elif self.alpha != 1 or self.beta != 0:\n            city_weights = city_weights ** self.alpha * self.heuristic_matrix ** self.beta\n\n        # calculate path for each ant\n        paths = np.zeros((self.ant_number, self.cities), dtype=int)\n        for k in range(self.ant_number):\n            cities_left = city_weights.copy()\n            # random starting city\n            i = random.randrange(0, self.cities)\n            paths[k, 0] = i\n            # we want to do as little as possible in the innermost loop, so we make an array of random numbers now\n            # instead of calling random every time inside, which generates a lot of overhead\n            rands = np.random.rand(self.cities)\n            for cnt in range(1, self.cities):\n                # set \"probability\" to go from any city to current city to 0 since it can't go back there\n                cities_left[:, i] = 0\n                # to get a random number considering weights we calculate the cumulative sum of the weights, which\n                # basically means we get ascending numbers, but the step size/difference to the last entry differs,\n                # corresponding to how likely this particular entry should be.\n                # We then multiply a random number in [0, 1) by the upper limit of the cumulative sum and\n                # find the point/index at which this number would be inserted to keep the cumulative sum ordered\n                # (which is what bisect does), which then makes for a weighted random selection of a city\n                cum_sum = np.cumsum(cities_left[i])\n                i = bisect.bisect_right(cum_sum, cum_sum[-1] * 
rands[cnt])\n paths[k, cnt] = i\n return paths\n\n def evaporate(self):\n \"\"\" Evaporation by a fixed percentage \"\"\"\n self.pheromone_matrix *= (1 - self.evaporation_rate)\n if self.pher_min is not None:\n self.pheromone_matrix[self.pheromone_matrix < self.pher_min] = self.pher_min\n\n def intensify(self, paths):\n \"\"\" Intensification by a fixed value \"\"\"\n for path in paths:\n for i in range(self.cities):\n self.pheromone_matrix[path[i], path[(i + 1) % self.cities]] += self.intensification_value\n if self.pher_max is not None:\n self.pheromone_matrix[self.pheromone_matrix > self.pher_max] = self.pher_max\n","sub_path":"Final/TSPACO.py","file_name":"TSPACO.py","file_ext":"py","file_size_in_byte":5662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"486493477","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import Tensor\nfrom torch_geometric.nn import global_add_pool\n\nimport pandas as pd\nfrom tqdm import tqdm\nfrom torch_geometric.data import DataLoader\nimport numpy as np\nimport random\n\nclass TripletLossRegression(nn.Module):\n \"\"\"\n anchor, positive, negative are node-level embeddings of a GNN before they are sent to a pooling layer,\n and hence are expected to be matrices.\n anchor_gt, positive_gt, and negative_gt are ground truth tensors that correspond to the ground-truth\n values of the anchor, positive, and negative respectively.\n \"\"\"\n\n def __init__(self, margin: float = 0.0, eps=1e-6):\n super(TripletLossRegression, self).__init__()\n self.margin = margin\n self.eps = eps\n\n def forward(self, anchor_batch, negative_batch, positive_batch,\n anchor: Tensor, negative: Tensor, positive: Tensor,\n anchor_gt: Tensor, negative_gt: Tensor, positive_gt: Tensor) -> Tensor:\n anchor = global_add_pool(anchor, anchor_batch)\n\n positive = global_add_pool(positive, positive_batch)\n\n negative = global_add_pool(negative, negative_batch)\n\n pos_distance = torch.linalg.norm(positive - anchor, dim=1)\n negative_distance = torch.linalg.norm(negative - anchor, dim=1)\n\n coeff = torch.div(torch.abs(negative_gt - anchor_gt) , (torch.abs(positive_gt - anchor_gt) + self.eps))\n loss = F.relu((pos_distance - coeff * negative_distance) + self.margin)\n return torch.mean(loss)\n\n\n\"\"\"\ndynamic triplet dataset based on error\n\"\"\"\n# def createTripletLoader(model, train_loader, dataset, errorThres = 5) -> (anchor_loader, pos_loader, neg_loader)\ndef createTripletLoader(device, model, train_loader, dataset, args, errorThres = 5):\n\n # 2. get losses for training dataset\n y_true, y_pred = [], []\n for step, batch in enumerate(tqdm(train_loader, desc=\"Sampling Triplets\")):\n # put batch to cuda\n batch = batch.to(device)\n\n # get prediction\n pred = model(batch).view(-1, )\n pred = pred.detach().cpu().tolist()\n y_pred.extend(pred)\n\n # get labels\n label = batch.y.detach().cpu().tolist()\n y_true.extend(label)\n \n if step == 100:\n break\n\n\n # 3. convert to dataframe\n trainDF = pd.DataFrame(zip(y_pred, y_true), columns=[\"y_pred\", \"y_true\"])\n trainDF[\"error\"] = (trainDF[\"y_pred\"] - trainDF[\"y_true\"]).apply(lambda x: abs(x))\n # bin y_pred\n trainDF[\"y_class\"] = trainDF[\"y_true\"].apply(lambda x: int(np.floor(x)))\n\n # 4. 
pick data with error threshold < k\n    highErrorDF = trainDF[trainDF.error > errorThres]\n    lowErrorDF = trainDF[trainDF.error < errorThres]\n    # create [anchorID, posId, negID]\n    triplets = []\n    # get number of data\n    ndata = len(y_pred)\n    for i, row in tqdm(list(highErrorDF.iterrows())):\n        i_class = row[\"y_class\"]\n\n        # 4a. set i to be pos, find anchor and neg samples\n        # set default to be random\n        tripA = [random.randint(0, ndata - 1), random.randint(0, ndata - 1), random.randint(0, ndata - 1)]\n        tripA[1] = i\n        # find anchor by sampling from lowErrorDF of same class\n        tripA[0] = lowErrorDF[lowErrorDF.y_class == i_class].sample(1).index.item()\n        # find negative by sampling from lowErrorDF of other class\n        tripA[2] = lowErrorDF[lowErrorDF.y_class != i_class].sample(1).index.item()\n        triplets.append(tripA)\n\n        # 4b. set i to be neg, find anchor and pos samples\n        # set default to be random\n        tripB = [random.randint(0, ndata - 1), random.randint(0, ndata - 1), random.randint(0, ndata - 1)]\n        tripB[2] = i\n        # find anchor by sampling from lowErrorDF of a different class (i acts as the negative here)\n        tripB[0] = lowErrorDF[lowErrorDF.y_class != i_class].sample(1).index.item()\n        # find positive by sampling from lowErrorDF of other class\n        tripB[1] = lowErrorDF[lowErrorDF.y_class != i_class].sample(1).index.item()\n        triplets.append(tripB)\n\n    if len(triplets) == 0:\n        raise Exception(\"no triplets found\")\n    \n    # 5. create anchor, pos and neg IDs\n    triplets = np.array(triplets)\n    anchorIDs = list(triplets[:, 0])\n    posIDs = list(triplets[:, 1])\n    negIDs = list(triplets[:, 2])\n\n    # 6. create triplet loaders\n    anchor_loader = DataLoader(dataset[anchorIDs], batch_size=args.batch_size, shuffle=True, num_workers = args.num_workers)\n    positive_loader = DataLoader(dataset[posIDs], batch_size=args.batch_size, shuffle=True, num_workers = args.num_workers)\n    negative_loader = DataLoader(dataset[negIDs], batch_size=args.batch_size, shuffle=True, num_workers = args.num_workers)\n\n    return anchor_loader, positive_loader, negative_loader","sub_path":"examples/lsc/pcqm4m/loss_functions/TripletLossRegression.py","file_name":"TripletLossRegression.py","file_ext":"py","file_size_in_byte":4771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"297654901","text":"#!/usr/bin/env python3\n\ninput = open('day07.txt').read().strip().split(',')\ninput = [int(x) for x in input]\n\n\ndef part1(input):\n    low = min(input)\n    high = max(input)\n    least = None\n    for pos in range(low, high+1):\n        fuel = sum(abs(pos - x) for x in input)\n        if least is None or least > fuel:\n            least = fuel\n    return least\n\n\ndef part2(input):\n    low = min(input)\n    high = max(input)\n    least = None\n    for pos in range(low, high+1):\n        fuel = sum((abs(pos - x) + 1) * abs(pos - x) / 2 for x in input)\n        if least is None or least > fuel:\n            least = fuel\n    return int(least)\n\n\nif __name__ == '__main__':\n    print('--- Day 7: The Treachery of Whales ---')\n    print(f'Part 1: {part1(input)}')\n    print(f'Part 2: {part2(input)}')\n","sub_path":"2021/day07.py","file_name":"day07.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"6714546","text":"from tamil_segmenter_modified import stem\n#import Stemmer\n\n# SECTION: helper methods\n\nextra_chars = set(['ஂ', 'ா', 'ி', 'ீ', 'ு', 'ூ', 'ெ', 'ே', 'ை', 'ொ', 'ோ', 'ௌ', '்', 'ௗ'])\n\n# splitting the word into natural Tamil characters rather than\n# the Unicode characters\ndef listify(word):\n    l = list(word)\n    i = 0\n    while i < 
len(l):\n if l[i] in extra_chars:\n l[i-1] = l[i-1] + l[i]\n l.pop(i)\n else:\n i += 1\n return l\n\n# source: https://www.geeksforgeeks.org/longest-common-substring-dp-29/\n# This code is contributed by Soumen Ghosh\n\n# This is needed here to deal with both prefix and suffix morphemes, and also the weird consonant changes where\n# morphemes\n\n# Returns length of longest common \n# substring of X[0..m-1] and Y[0..n-1] \ndef LCSubStr(X, Y, m, n): \n \n # Create a table to store lengths of \n # longest common suffixes of substrings. \n # Note that LCSuff[i][j] contains the \n # length of longest common suffix of \n # X[0...i-1] and Y[0...j-1]. The first \n # row and first column entries have no \n # logical meaning, they are used only \n # for simplicity of the program. \n \n # LCSuff is the table with zero \n # value initially in each cell \n LCSuff = [[0 for k in range(n+1)] for l in range(m+1)] \n \n # To store the length of \n # longest common substring \n result = 0\n # to track where the longest common substring is in X\n end_in_X = -1\n end_in_Y = -1\n \n # Following steps to build \n # LCSuff[m+1][n+1] in bottom up fashion \n for i in range(m + 1): \n for j in range(n + 1): \n if (i == 0 or j == 0): \n LCSuff[i][j] = 0\n elif (X[i-1] == Y[j-1]): \n LCSuff[i][j] = LCSuff[i-1][j-1] + 1\n if LCSuff[i][j] > result:\n result = LCSuff[i][j]\n end_in_X = i\n end_in_Y = j\n else: \n LCSuff[i][j] = 0\n return result, end_in_X, end_in_Y\n\ndef get_ngrams(wordL, k):\n ret = []\n for i in range(len(wordL) + 1 - k):# range(1-k, len(wordL)):\n ret.append(''.join(wordL[i:i+k]))\n # ret.append(word[max(i, 0) : min(i+k, len(word)-1)])\n #if len(ret) == 0 and len(wordL) > 0: # word is shorter than n-gram window\n # ret.append(''.join(wordL))\n return ret\n\n# SECTION: \"atomisation\" methods that are used in fastText\n\ndef trivial_atoms(word):\n return [word]\n\ndef stem_only(word):\n st = stem(word, morphemes=False)\n if len(st) > 0:\n return [st]\n else:\n return [word]\n\ndef skipgram_atoms(word, minL=5, maxL=5):\n bigword = '<' + word + '>'\n wordL = list(bigword)\n\n ret = []\n for window in range(minL, maxL+1):\n for substr in get_ngrams(wordL, window):\n ret.append(substr)\n\n if len(bigword) > maxL:\n ret.append(bigword)\n\n return ret\n\n\"\"\" test_words = ['abcabc']\nfor w in test_words:\n print(' '.join(skipgram_atoms(w, 3, 6))) \"\"\"\n\n# SECTION: actual segmentation methods\n\n# all of these take a word as input and output something of the form\n# [list of prefix atoms, list of stem atoms, list of suffix atoms]\n\n# pre-stem, stem, after-stem\ndef basic_split(word, use_listify=True):\n st = stem(word, morphemes=False)\n if use_listify:\n wordL = listify(word)\n stL = listify(st)\n else:\n wordL = list(word)\n stL = list(st)\n\n if len(stL) > 0:\n length, endW, endS = LCSubStr(wordL, stL, len(wordL), len(stL))\n else: # in case the stemmer says the word is stemless\n length = 0\n st2, prefixout2, suffixout2 = stem(word, morphemes=True)\n if len(prefixout2) == 0: # the whole word is suffix\n endW = 0\n else: # the whole word is prefix\n endW = len(wordL)\n\n prefixL = wordL[: endW - length]\n suffixL = wordL[endW :]\n\n if len(prefixL) > 0:\n prefixout = [''.join(prefixL)]\n else:\n prefixout = []\n\n if len(st) > 0:\n stout = [st]\n else:\n stout = []\n\n if len(suffixL) > 0:\n suffixout = [''.join(suffixL)]\n else:\n suffixout = []\n \n # stem_in_word = ''.join(wordL[endW-length : endW])\n return [prefixout, stout, suffixout]\n\n# same as above but also splitting either part into 
n-grams\n# word: the word to be stemmed\n# minL, maxL: the range of lengths of n-grams taken\n# use_listify: self-explanatory\n# to_affix: whether to apply n-grams to the prefix and suffix\n# to_stem: whether to apply n-grams to the stem\n\n# in Bojanowski et al: minL = 3, maxL = 6, to_affix = to_stem = True essentially\n# slight differences: they include specialised begin and end characters\n# they also include n-grams across stem-affix boundaries, which we will not have here\n# but this is a strictly good thing for us\ndef ngram_split(word, minL, maxL, use_listify=True, to_affix=True, to_stem=True):\n st = stem(word, morphemes=False)\n if use_listify:\n wordL = listify(word)\n stL = listify(st)\n else:\n wordL = list(word)\n stL = list(st)\n \n if len(stL) > 0:\n length, endW, endS = LCSubStr(wordL, stL, len(wordL), len(stL))\n else: # in case the stemmer says the word is stemless\n length = 0\n st2, prefixout2, suffixout2 = stem(word, morphemes=True)\n if len(prefixout2) == 0: # the whole word is suffix\n endW = 0\n else: # the whole word is prefix\n endW = len(wordL)\n\n prefixL = wordL[: endW - length]\n suffixL = wordL[endW :]\n prefixout = []\n suffixout = []\n stout = []\n\n if to_affix:\n for window in range(minL, maxL+1):\n for gram in get_ngrams(prefixL, window):\n prefixout.append(gram)\n for gram in get_ngrams(suffixL, window):\n suffixout.append(gram)\n if len(prefixout) == 0 and len(prefixL) > 0:\n prefixout.append(''.join(prefixL))\n if len(suffixout) == 0 and len(suffixL) > 0:\n suffixout.append(''.join(suffixL))\n else:\n if len(prefixL) > 0:\n prefixout.append(''.join(prefixL))\n if len(suffixL) > 0:\n suffixout.append(''.join(suffixL))\n\n if to_stem:\n for window in range(minL, maxL+1):\n for gram in get_ngrams(stL, window):\n stout.append(gram)\n if len(stout) == 0 and len(st) > 0:\n stout.append(st)\n elif len(st) > 0:\n stout.append(st)\n\n return [prefixout, stout, suffixout]\n\n# splitting into morphemes\n# including an option for still applying n-grams over the stem\n# minL, maxL only matter if this is done\ndef morpheme_split(word, minL=5, maxL=5, use_listify=False, to_stem=False):\n st, prefixout, suffixout = stem(word, morphemes=True)\n if use_listify:\n stL = listify(st)\n else:\n stL = list(st)\n\n stout = []\n if to_stem:\n for window in range(minL, maxL+1):\n for gram in get_ngrams(stL, window):\n stout.append(gram)\n # if the stem is too short to be added then we should add it\n # if the stem is too long then we should also add it\n if (len(stout) == 0 or len(stL) > maxL) and len(st) > 0:\n stout.append(st)\n elif len(st) > 0:\n stout.append(st)\n \n ret = []\n for stuff in prefixout:\n ret.append(stuff)\n for stuff in stout:\n ret.append(stuff)\n for stuff in suffixout:\n ret.append(stuff)\n return ret\n\n\"\"\" words = ['மகன்தான்', 'வர', 'வர்', 'தர', 'வந்த', 'தந்த', 'வந்தவர்', 'தந்தவர்', 'வந்தவர்கள்', 'தந்தவர்கள்', 'அமைய']\nfor w in words:\n print(\"Word: \" + w)\n print(\"Stem: \" + stem(w, morphemes=False))\n print(\"Morphemes with whole letters\")\n print(morpheme_split(w, minL=1, maxL=3, use_listify=True, to_stem=True))\n print(\"Morphemes with partial letters\")\n print(morpheme_split(w, minL=3, maxL=5, use_listify=False, to_stem=True)) \"\"\"","sub_path":"pytorch-word2vec-master/word2atoms.py","file_name":"word2atoms.py","file_ext":"py","file_size_in_byte":8068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"171947755","text":"#encoding=utf-8\n\nfrom django.shortcuts import render\nfrom 
blogs.models import Tag, Category, BaseModel\nfrom common.helpers import paged_items, ok_json\nfrom common.pc_m import judge_pc_or_mobile\nfrom blogs.models import Category, Article\n\n\ndef blogs(request):\n nav_bar = \"blog\"\n user_agt = judge_pc_or_mobile(request.META.get(\"HTTP_USER_AGENT\"))\n cat_id = int(request.GET.get(\"cat_id\", 0))\n blog_cat_list = Category.objects.all()\n blog_list = Article.objects.filter(is_active=True).order_by(\"-id\")\n if cat_id not in [\"0\", 0, \"\"]:\n blog_list = blog_list.filter(category__id=cat_id)\n if user_agt is False:\n blog_list = paged_items(request, blog_list)\n return render(request, 'web/pages/blog/blog.html', locals())\n else:\n blog_list = paged_items(request, blog_list)\n return render(request, 'web/pages/blog/blog.html', locals())\n\n\ndef blog_detail(request, id):\n nav_bar = \"blog\"\n blog_dtl = Article.objects.filter(id=id).first()\n blog_dtl.views += 1\n blog_dtl.save()\n user_agt = judge_pc_or_mobile(request.META.get(\"HTTP_USER_AGENT\"))\n if user_agt is False:\n return render(request, 'web/pages/blog/blog_detail.html', locals())\n else:\n return render(request, 'web/pages/blog/blog_detail.html', locals())","sub_path":"blogs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"535207929","text":"import facebook\n\ntoken = '1172179256135929|1MatpThkY-qP-6zo_9Xe0LI-qCo'\ndetail = \"checkins,category,category_list,name,id,location\"\nmin_checkins = 20\n\ndef setCheckin(value):\n min_checkins = value\n\nclass Place():\n def __init__(self, name, id):\n self.name = name\n self.id = id\n self.checkins = 0\n self.root = None\n self.key = None\n self.location = {}\n self.link = None\n\nclass Result():\n def __init__(self, key):\n self.root = \"Facebook\"\n self.key = key\n self.latlng = list()\n self.place = {}\n\ndef request(lat,lng,distance,key):\n graph = facebook.GraphAPI(token)\n location_facebook = graph.request('search', {'q': str(key), 'type': 'place', 'center': str(lat)+','+str(lng),'limit': str(1000), 'distance': str(distance) , 'fields': detail})\n \n fb = Result(key)\n \n for i in range(len(location_facebook['data'])):\n for j in range(len(location_facebook['data'][i]['category_list'])):\n if location_facebook['data'][i].get('checkins','None') == 'None':\n break\n if location_facebook['data'][i]['checkins'] >= min_checkins and location_facebook['data'][i]['category'] != 'City':\n object = Place(location_facebook['data'][i]['name'].encode('utf8'),location_facebook['data'][i]['id'])\n object.checkins = location_facebook['data'][i]['checkins']\n object.location = location_facebook['data'][i]['location']\n object.root = \"Facebook\"\n object.key = key\n \n fb.latlng.insert(len(fb.latlng), [float(object.location['latitude']), float(object.location['longitude'])])\n fb.place.update({str(object.location['latitude'])+\",\"+str(object.location['longitude']): object})\n break\n return fb\n","sub_path":"ApiFacebook/ApiFacebook.py","file_name":"ApiFacebook.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"55163435","text":"import os\nimport sqlite3\nimport tqdm\nfrom tqdm import trange\n\n\ndef datewriter(tag, type, start, end, word, path, filename):\n with open(path + '/' + filename + \".ann\", 'a') as f:\n newline = [tag, type, str(start), str(end), word]\n newline = tag+'\\t'+type+' '+str(start)+' 
'+str(end)+'\t'+word+'\n'\n        f.write(newline)\n\n\ndef dataopener(dbpath, path, dataname):\n    connection = sqlite3.connect(dbpath)\n    database = connection.cursor()\n\n    #!start code block\n    database.execute(\"select id from \" + dataname)\n    ID = database.fetchall()\n    ID = [x[0] for x in ID]\n\n    # ?They assume the sql table is in the format below\n    # ?id|tag|filename|type|start|end|word\n    print(\"Start writing ann file\")\n    for x in ID:\n        database.execute(\n            \"select * from \" + dataname + \" where id == %d\" % (x))\n        dataline = database.fetchall()[0]\n        tag = dataline[1]\n        filename = dataline[2]\n        filename = os.path.splitext(filename)[0]\n        type = dataline[3]\n        start = dataline[4]\n        end = dataline[5]\n        word = dataline[6]\n        datewriter(tag, type, start, end, word, path, filename)\n    print(\"Finished transferring data into ann file\")\n    #!end code block\n\n    database.close()\n    connection.commit()\n    connection.close()\n\n    return \"All Done\"\n","sub_path":"TianChI/COMP1/src/dataFormter.py","file_name":"dataFormter.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"250504220","text":"def parse(data):\n\treturn {x:data.count(x) for x in data}\n\ndef answer(data,n):\n\tcounts = parse(data)\n\tbanlist = []\n\treturndata = []\n\t\n\tfor number in counts:\n\t\tif counts[number] > n:\n\t\t\tbanlist.append(number)\n\t\n\tfor number in data:\n\t\tif number not in banlist:\n\t\t\treturndata.append(number)\n\tprint(returndata)\n\nanswer([1,2,3], 6)","sub_path":"Test-1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"256008066","text":"import socket\nimport sys\nimport time\ndef domain2ip(domain_file):\n    domains = open(domain_file,'r').readlines()\n    for domain in domains:\n        domain = domain.strip()\n        try:\n            ip = socket.gethostbyname(domain)\n        except:\n            ip = '127.0.0.1'\n        print(ip,domain)\n\nif __name__ == '__main__':\n    domain2ip(sys.argv[1])\n","sub_path":"domain2ip.py","file_name":"domain2ip.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"204165899","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\nfrom .items import SiteData\nimport html2text\n\nclass CawpspiderPipeline(object):\n\tdef process_item(self, sitedata, spider):\n\t\th=html2text.HTML2Text()\n\t\th.ignore_images=True\n\t\th.re_space=True\n\t\th.skip_internal_links=True\n\t\th.ignore_links=True\n\t\th.single_line_break=True\n\t\tcleantext=h.handle(sitedata['text'])\n\t\tsitedata['text']=cleantext\n\t\treturn sitedata\n","sub_path":"CAWPr/CAWPspider/CAWPspider/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"471335668","text":"\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\n\nfrom profileapp.forms import LoginForm, RegisterForm, UserForm, UserProfileForm\nfrom profileapp.models import UserProfile\nfrom utils.helpers import paginate\nfrom cities.models 
import Country, Region\n\n\ndef index(request):\n    return render(request, 'profileapp/index.html')\n\n\ndef about(request):\n    return render(request, 'profileapp/about.html')\n\n\ndef regions(request, id):\n    country = Country.objects.get(id=id)\n    regions = Region.objects.filter(country=country).order_by('name')\n    result = \"\"\n    for region in regions:\n        result += \"<option value='%s'>%s</option>\" % (region.id, region.name,)\n    return HttpResponse(result)\n\n\ndef user_login(request):\n    errors = []\n    nxt = request.GET.get('next', 'profileapp:index')\n    form = LoginForm(request.POST or None)\n    if form.is_valid():\n        user = authenticate(\n            username=form.cleaned_data.get('username'),\n            password=form.cleaned_data.get('password')\n        )\n        if user is not None:\n            login(request, user)\n            return redirect(nxt)\n        errors.append(\"Incorrect username or password\")\n    args = {'form': form, 'error': errors}\n    return render(request, 'profileapp/login.html', args)\n\n\ndef user_logout(request):\n    if request.user.is_authenticated:\n        logout(request)\n    return redirect('profileapp:index')\n\n\ndef user_register(request):\n    form = RegisterForm(request.POST or None)\n    if form.is_valid():\n        form.save()\n        user = authenticate(\n            username=form.cleaned_data.get('username'),\n            password=form.cleaned_data.get('password')\n        )\n        if user is not None:\n            login(request, user)\n            return redirect('profileapp:index')\n        return redirect('profileapp:login')\n    args = {'form': form}\n    return render(request, 'profileapp/register.html', args)\n\n\n@login_required\ndef user(request, pk):\n    user = User.objects.get(pk=pk)\n    is_current = False\n    if request.user == user:\n        is_current = True\n    args = {\n        'is_current': is_current, 'user': user\n    }\n    return render(request, 'profileapp/user.html', args)\n\n\n@login_required\ndef users(request):\n    args = {\n        'users': paginate(\n            User.objects.all().exclude(id=request.user.id),\n            int(request.GET.get('page', 1))\n        )\n    }\n    return render(request, 'profileapp/users.html', args)\n\n\n@login_required\ndef user_edit(request):\n    form = UserForm(request.POST or None, instance=request.user)\n    if form.is_valid():\n        request.user.first_name = form.cleaned_data.get('first_name')\n        request.user.last_name = form.cleaned_data.get('last_name')\n        request.user.username = form.cleaned_data.get('username')\n        request.user.email = form.cleaned_data.get('email')\n        request.user.save()\n        return redirect('profileapp:user', request.user.id)\n    args = {'form': form}\n    return render(request, 'profileapp/user_edit.html', args)\n\n\n@login_required\ndef profile_edit(request):\n    try:\n        profile = UserProfile.objects.get(user=request.user)\n    except UserProfile.DoesNotExist:\n        profile = None\n    form = UserProfileForm(request.POST or None)\n    if form.is_valid():\n        gender = form.cleaned_data.get('gender')\n        about = form.cleaned_data.get('about')\n        phone = form.cleaned_data.get('phone')\n        country = form.cleaned_data.get('country')\n        Region = form.cleaned_data.get('Region')\n        if profile:\n            profile.gender = gender\n            profile.about = about\n            profile.phone = phone\n            profile.country = country\n            profile.Region = Region\n            profile.user = request.user\n            profile.save()\n        else:\n            UserProfile(\n                gender=gender, phone=phone, country=country, Region=Region,\n                about=about, user=request.user\n            ).save()\n        return redirect('profileapp:user', request.user.pk)\n    if profile:\n        form = UserProfileForm(instance=profile)\n    args = {'profile': profile, 'form': form}\n    return render(request, 'profileapp/profile_edit.html', args)\n\n\n@login_required\ndef user_delete(request):\n    if 
request.user.is_authenticated:\n        request.user.delete()\n        logout(request)\n    return redirect('profileapp:index')\n","sub_path":"profileapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"251898852","text":"# This file should be in /front-end/django-project/sim_worker/celery.py as well as in /worker/sim_worker/celery.py\n# If you modify one, please copy/paste the modification in the other one\n\nfrom celery import Celery\n\napp = Celery('sim_worker',\n             broker='redis://128.3.144.76:6379/0',\n             backend='redis://128.3.144.76:6379/0',\n             include=['sim_worker.tasks'])\n\n# Optional configuration, see the application user guide.\napp.conf.update(\n    CELERY_TASK_RESULT_EXPIRES=3600,\n    CELERY_TRACK_STARTED=True,\n)\n\nif __name__ == '__main__':\n    app.start()\n","sub_path":"front_end/django-project/sim_worker/celery.py","file_name":"celery.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"346809860","text":"from flask import render_template, flash, redirect, url_for, abort\nfrom flask.ext.login import current_user, login_required\n\nfrom app import db\nfrom app.models import Group, Contact\nfrom . import main\nfrom .forms import SearchForm, NewGroupForm, EditGroupForm, NewContactForm, EditContactForm\n\n\n@main.route('/')\ndef index():\n    return render_template('index.html')\n\n\n@main.route('/search', methods=['GET', 'POST'])\n@login_required\ndef search():\n    form = SearchForm()\n    if form.validate_on_submit():\n        return redirect(url_for('.search_results', query=form.search.data))\n    return render_template('search.html', form=form)\n\n\n@main.route('/search/results/<query>')\n@login_required\ndef search_results(query):\n    contacts = Contact.query.search(query).filter_by(user_id=current_user.get_id()).all()\n    return render_template('search_results.html', query=query, contacts=contacts)\n\n\n@main.route('/new/group', methods=['GET', 'POST'])\n@login_required\ndef new_group():\n    form = NewGroupForm()\n    if form.validate_on_submit():\n        group = Group(group_name=form.name.data, user_id=current_user.get_id())\n        db.session.add(group)\n        db.session.commit()\n        flash('Group has been created.')\n        return redirect(url_for('.list_groups'))\n    return render_template('groups/new_group.html', form=form)\n\n\n@main.route('/list/groups')\n@login_required\ndef list_groups():\n    groups = Group.query.filter_by(user_id=current_user.get_id()).order_by(Group.group_name).all()\n    total_groups = Group.query.filter(Group.user_id == current_user.get_id()).count()\n    return render_template('groups/list_groups.html', groups=groups, total_groups=total_groups)\n\n\n@main.route('/edit/group/<id>', methods=['GET', 'POST'])\n@login_required\ndef edit_group(id):\n    group = Group.query.get_or_404(id)\n    if current_user != group.user:\n        abort(403)\n    form = EditGroupForm()\n    if form.validate_on_submit():\n        group.group_name = form.name.data\n        db.session.add(group)\n        db.session.commit()\n        flash('Group name has been updated.')\n        return redirect(url_for('.list_groups'))\n    form.name.data = group.group_name\n    return render_template('groups/edit_group.html', form=form)\n\n\n@main.route('/details/group/<id>')\n@login_required\ndef group_details(id):\n    group = Group.query.get_or_404(id)\n    contacts = Contact.query.join(Group).filter(Contact.group_id == id).order_by(Contact.contact_name).all()\n    if current_user != group.user:\n        abort(403)\n    return 
render_template('groups/group_details.html', group=group, contacts=contacts)\n\n\n@main.route('/delete/group/<id>')\n@login_required\ndef delete_group(id):\n    group = Group.query.get_or_404(id)\n    if current_user != group.user:\n        abort(403)\n    db.session.delete(group)\n    db.session.commit()\n    flash('Group has been deleted.')\n    return redirect(url_for('.list_groups'))\n\n\n@main.route('/new/contact', methods=['GET', 'POST'])\n@login_required\ndef new_contact():\n    if Group.query.filter_by(user_id=current_user.get_id()).first() is None:\n        flash('Create a group first before creating a new contact.')\n        return redirect(url_for('.new_group'))\n    form = NewContactForm()\n    if form.validate_on_submit():\n        contact = Contact(contact_name=form.name.data, contact_nickname=form.nickname.data,\n                          contact_company=form.company.data, contact_job_title=form.job_title.data,\n                          contact_email=form.email.data, contact_phone=form.phone.data,\n                          contact_address=form.address.data, contact_birthday=form.birthday.data,\n                          contact_notes=form.notes.data, group_id=form.group.data, user_id=current_user.get_id())\n        db.session.add(contact)\n        db.session.commit()\n        flash('Contact has been created.')\n        return redirect(url_for('.list_contacts'))\n    return render_template('contacts/new_contact.html', form=form)\n\n\n@main.route('/list/contacts')\n@login_required\ndef list_contacts():\n    contacts = Contact.query.join(Group).filter_by(user_id=current_user.get_id()).order_by(Contact.contact_name).all()\n    total_contacts = Contact.query.filter(Contact.user_id == current_user.get_id()).count()\n    return render_template('contacts/list_contacts.html', contacts=contacts, total_contacts=total_contacts)\n\n\n@main.route('/edit/contact/<id>', methods=['GET', 'POST'])\n@login_required\ndef edit_contact(id):\n    contact = Contact.query.get_or_404(id)\n    if current_user != contact.user:\n        abort(403)\n    form = EditContactForm()\n    if form.validate_on_submit():\n        contact.contact_name = form.name.data\n        contact.contact_nickname = form.nickname.data\n        contact.contact_company = form.company.data\n        contact.contact_job_title = form.job_title.data\n        contact.contact_email = form.email.data\n        contact.contact_phone = form.phone.data\n        contact.contact_address = form.address.data\n        contact.contact_birthday = form.birthday.data\n        contact.contact_notes = form.notes.data\n        contact.group_id = form.group.data\n        db.session.add(contact)\n        db.session.commit()\n        flash('Contact has been updated.')\n        return redirect(url_for('.list_contacts'))\n    form.name.data = contact.contact_name\n    form.nickname.data = contact.contact_nickname\n    form.company.data = contact.contact_company\n    form.job_title.data = contact.contact_job_title\n    form.email.data = contact.contact_email\n    form.phone.data = contact.contact_phone\n    form.address.data = contact.contact_address\n    form.birthday.data = contact.contact_birthday\n    form.notes.data = contact.contact_notes\n    form.group.data = contact.group_id\n    return render_template('contacts/edit_contact.html', form=form)\n\n\n@main.route('/details/contact/<id>')\n@login_required\ndef contact_details(id):\n    contact = Contact.query.get_or_404(id)\n    if current_user != contact.user:\n        abort(403)\n    return render_template('contacts/contact_details.html', contact=contact)\n\n\n@main.route('/delete/contact/<id>')\n@login_required\ndef delete_contact(id):\n    contact = Contact.query.get_or_404(id)\n    if current_user != contact.user:\n        abort(403)\n    db.session.delete(contact)\n    db.session.commit()\n    flash('Contact has been deleted.')\n    return 
redirect(url_for('.list_contacts'))\n","sub_path":"app/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"489850879","text":"from django.core.management.base import BaseCommand\nfrom eveuniverse.models import EveType\nfrom eveuniverse.tasks import update_or_create_eve_object\n\nfrom allianceauth.services.hooks import get_extension_logger\nfrom app_utils.logging import LoggerAddTag\n\nfrom ... import __title__, constants\nfrom ...models import EveOreType\nfrom . import get_input\n\nlogger = LoggerAddTag(get_extension_logger(__name__), __title__)\n\n\nclass Command(BaseCommand):\n    help = \"Preloads data like ore types from ESI.\"\n\n    def handle(self, *args, **options):\n        self.stdout.write(\"Loading all ore types from ESI. This can take a while.\")\n        ore_types_count = EveOreType.objects.count()\n        self.stdout.write(\n            f\"You currently have {ore_types_count} ore types in your database.\"\n        )\n        self.stdout.write()\n        user_input = get_input(\"Are you sure you want to proceed? (y/N)?\")\n\n        if user_input.lower() == \"y\":\n            self.stdout.write(\"Tasks for loading ore types have been started.\")\n            update_or_create_eve_object.delay(\n                model_name=\"EveCategory\",\n                id=constants.EVE_CATEGORY_ID_ASTEROID,\n                include_children=True,\n                enabled_sections=[\n                    EveType.Section.DOGMAS,\n                    EveType.Section.TYPE_MATERIALS,\n                ],\n            )\n            self.stdout.write(self.style.SUCCESS(\"Done\"))\n        else:\n            self.stdout.write(self.style.WARNING(\"Aborted\"))\n","sub_path":"moonmining/management/commands/moonmining_load_eve.py","file_name":"moonmining_load_eve.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"489934227","text":"from algorithms.JiGen.src.models import resnet\nfrom algorithms.JiGen.src.models import caffenet\n\nnets_map = {\n    'caffenet': caffenet.caffenet,\n    'resnet18': resnet.resnet18,\n    'resnet50': resnet.resnet50\n}\n\ndef get_model(name):\n    if name not in nets_map:\n        raise ValueError('Name of model unknown %s' % name)\n\n    def get_model_fn(**kwargs):\n        return nets_map[name](**kwargs)\n\n    return get_model_fn","sub_path":"algorithms/JiGen/src/models/model_factory.py","file_name":"model_factory.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"297481231","text":"import leather\n\ncolumn_data = [\n    ('Hello', 3),\n    ('How', 5),\n    ('Are', 9),\n    ('You', 4)\n]\n\nline_data = [\n    ('Hello', 1),\n    ('How', 5),\n    ('Are', 4),\n    ('You', 3)\n]\n\ndot_data = [\n    ('Hello', 3),\n    ('How', 5),\n    ('Are', 9),\n    ('You', 4)\n]\n\nchart = leather.Chart('Mixed shapes')\nchart.add_columns(column_data)\nchart.add_lines(line_data)\nchart.add_dots(dot_data)\nchart.to_svg('examples/charts/mixed_shapes.svg')\n","sub_path":"examples/mixed_shapes.py","file_name":"mixed_shapes.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"654494025","text":"import numpy as np ##Import numpy\nimport cv2 ##Import computer vision library\n\n\nvideo= cv2.VideoCapture(0) ##Create a new video object, connected to the first webcam\n\nframe_width = int(video.get(3))\nframe_height = int(video.get(4))\nout = cv2.VideoWriter('prueba1_.avi',cv2.VideoWriter_fourcc('M','J','P','G'),10,(frame_width,frame_height))\n\nfin = False\ncentro_x = 0\n\nwhile(1):\n    
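# Per-frame pipeline (summary of the steps below): blur -> HSV -> inRange mask ->\n    # image moments; the centroid comes out as cx = m10/m00, cy = m01/m00, the standard\n    # OpenCV moments arithmetic.\n    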
okay,image = video.read() #Save video frame on image and status in okay\n\n    if okay: #if frame image is completed\n        blur = cv2.GaussianBlur(image,(5,5),0) ##GaussianBlur for filtering signal\n        hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV) ##BGR to HSV for easier identification of object color\n\n        #Select lower and upper color detection range\n        lower_color = np.array([40,70,70])\n        upper_color = np.array([80,200,200])\n\n        #save the color-filtered hsv image in mask\n        mask = cv2.inRange(hsv,lower_color,upper_color)\n        mask = cv2.GaussianBlur(mask,(5,5),0)\n        output = cv2.bitwise_and(image, image, mask = mask)\n\n        moments = cv2.moments(mask)\n        m00 = moments['m00']\n        centro_x,centro_y = -1,-1\n        if m00 != 0:\n            centro_x = int(moments['m10']/m00)\n            centro_y = int(moments['m01']/m00)\n\n        if centro_x != -1 and centro_y != -1:\n            ctr = (centro_x,centro_y)\n            cv2.circle(image,ctr,5,(255,0,0),4)\n\n        #cv2.circle(output,(300,250),50,(0,255,0))\n        cv2.putText(image,\"X=\"+str(centro_x)+\", Y=\"+str(centro_y),(5,470),cv2.FONT_ITALIC,.4,(255,255,255),1,cv2.LINE_AA)\n        cv2.putText(image,\"Instituto Tecnologico de Ciudad Guzman\",(316,450),cv2.FONT_ITALIC,.5,(255,255,255),1,cv2.LINE_AA)\n        cv2.putText(image,\"Ball and Plate Project - Ing. Electronica\",(310,470),cv2.FONT_ITALIC,.5,(255,255,255),1,cv2.LINE_AA)\n        #cv2.imshow(\"Images\",np.hstack([image,output]))\n        cv2.imshow(\"Images\",image)\n        out.write(image)\n        if cv2.waitKey(1) & 0xFF == ord('b'):\n            fin = True\n            break\nout.release()\nvideo.release()\ncv2.destroyAllWindows()\n","sub_path":"object-detection/test/color_tracking.py","file_name":"color_tracking.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"241370090","text":"import dpp\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.distributions as td\nfrom copy import deepcopy\ntorch.set_default_tensor_type(torch.cuda.FloatTensor)\n\n\n### Config\n\nseed = 0\nnp.random.seed(seed)\ntorch.manual_seed(seed)\n\n## General data config\ndataset_name = 'synth/hawkes2' # other: [ 'yelp_toronto', 'wikipedia', 'mooc', 'stack_overflow', 'lastfm',\n                               #         'reddit', 'synth/poisson', 'synth/renewal', 'synth/self_correcting',\n                               #         'synth/hawkes1', 'synth/hawkes2']\n\nsplit = 'whole_sequences' # How to split the sequences (other 'each_sequence' -- split every seq. 
into train/val/test)\n\n## General model config\nuse_history = True        # Whether to use RNN to encode history\nhistory_size = 64         # Size of the RNN hidden vector\nrnn_type = 'RNN'          # Which RNN cell to use (other: ['GRU', 'LSTM'])\nuse_embedding = False     # Whether to use sequence embedding (should use with 'each_sequence' split)\nembedding_size = 32       # Size of the sequence embedding vector\n                          # IMPORTANT: when using split = 'whole_sequences', the model will only learn embeddings\n                          # for the training sequences, and not for validation / test\ntrainable_affine = False  # Train the final affine layer\n\n## Decoder config\ndecoder_name = 'LogNormMix'  # other: ['RMTPP', 'FullyNeuralNet', 'Exponential', 'SOSPolynomial', 'DeepSigmoidalFlow']\nn_components = 64            # Number of components for a mixture model\nhypernet_hidden_sizes = []   # Number of units in MLP generating parameters ([] -- affine layer, [64] -- one layer, etc.)\n\n## Flow params\n# Polynomial\nmax_degree = 3  # Maximum degree value for Sum-of-squares polynomial flow (SOS)\nn_terms = 4     # Number of terms for SOS flow\n# DSF / FullyNN\nn_layers = 2    # Number of layers for Deep Sigmoidal Flow (DSF) / Fully Neural Network flow (Omi et al., 2019)\nlayer_size = 64 # Number of mixture components / units in a layer for DSF and FullyNN\n\n## Training config\nregularization = 1e-5  # L2 regularization parameter\nlearning_rate = 1e-3   # Learning rate for Adam optimizer\nmax_epochs = 1000      # For how many epochs to train\ndisplay_step = 50      # Display training statistics after every display_step\npatience = 50          # After how many consecutive epochs without improvement of val loss to stop training\n\n\n\n### Data\n\nprint('Loading data...')\nif '+' not in dataset_name:\n    dataset = dpp.data.load_dataset(dataset_name)\nelse:\n    # If '+' in dataset_name, load all the datasets together and concatenate them\n    # For example, dataset_name='synth/poisson+synth/renewal' loads poisson and renewal datasets\n    dataset_names = [d.strip() for d in dataset_name.split('+')]\n    dataset = dpp.data.load_dataset(dataset_names.pop(0))\n    for d in dataset_names:\n        dataset += dpp.data.load_dataset(d)\n\n# Split into train/val/test, on each sequence or assign whole sequences to different sets\nif split == 'each_sequence':\n    d_train, d_val, d_test = dataset.train_val_test_split_each(seed=seed)\nelif split == 'whole_sequences':\n    d_train, d_val, d_test = dataset.train_val_test_split_whole(seed=seed)\nelse:\n    raise ValueError(f'Unsupported dataset split {split}')\n\n# Calculate mean and std of the input inter-event times and normalize only input\nmean_in_train, std_in_train = d_train.get_mean_std_in()\nstd_out_train = 1.0\nd_train.normalize(mean_in_train, std_in_train, std_out_train)\nd_val.normalize(mean_in_train, std_in_train, std_out_train)\nd_test.normalize(mean_in_train, std_in_train, std_out_train)\n\n# Break down long train sequences for faster batch training and create torch DataLoaders\nd_train.break_down_long_sequences(128)\ncollate = dpp.data.collate\ndl_train = torch.utils.data.DataLoader(d_train, batch_size=64, shuffle=True, collate_fn=collate)\ndl_val = torch.utils.data.DataLoader(d_val, batch_size=1, shuffle=False, collate_fn=collate)\ndl_test = torch.utils.data.DataLoader(d_test, batch_size=1, shuffle=False, collate_fn=collate)\n\n# Set the parameters for affine normalization layer depending on the decoder (see Appendix D.3 in the paper)\nif decoder_name in ['RMTPP', 'FullyNeuralNet', 'Exponential']:\n    _, std_out_train = d_train.get_mean_std_out()\n    mean_out_train = 
0.0\nelse:\n    mean_out_train, std_out_train = d_train.get_log_mean_std_out()\n\n\n\n### Model setup\nprint('Building model...')\n\n# General model config\ngeneral_config = dpp.model.ModelConfig(\n    use_history=use_history,\n    history_size=history_size,\n    rnn_type=rnn_type,\n    use_embedding=use_embedding,\n    embedding_size=embedding_size,\n    num_embeddings=len(dataset),\n)\n\n# Decoder specific config\ndecoder = getattr(dpp.decoders, decoder_name)(general_config,\n                                              n_components=n_components,\n                                              hypernet_hidden_sizes=hypernet_hidden_sizes,\n                                              max_degree=max_degree,\n                                              n_terms=n_terms,\n                                              n_layers=n_layers,\n                                              layer_size=layer_size,\n                                              shift_init=mean_out_train,\n                                              scale_init=std_out_train,\n                                              trainable_affine=trainable_affine)\n\n# Define model\nmodel = dpp.model.Model(general_config, decoder)\nmodel.use_history(general_config.use_history)\nmodel.use_embedding(general_config.use_embedding)\n\n# Define optimizer\nopt = torch.optim.Adam(model.parameters(), weight_decay=regularization, lr=learning_rate)\n\n\n### Training\nprint('Starting training...')\n\n# Function that calculates the loss for the entire dataloader\ndef get_total_loss(loader):\n    loader_log_prob, loader_lengths = [], []\n    for input in loader:\n        loader_log_prob.append(model.log_prob(input).detach())\n        loader_lengths.append(input.length.detach())\n    return -model.aggregate(loader_log_prob, loader_lengths)\n\nimpatient = 0\nbest_loss = np.inf\nbest_model = deepcopy(model.state_dict())\ntraining_val_losses = []\n\nfor epoch in range(max_epochs):\n    model.train()\n    for input in dl_train:\n        opt.zero_grad()\n        log_prob = model.log_prob(input)\n        loss = -model.aggregate(log_prob, input.length)\n        loss.backward()\n        opt.step()\n\n    model.eval()\n    loss_val = get_total_loss(dl_val)\n    training_val_losses.append(loss_val.item())\n\n    if (best_loss - loss_val) < 1e-4:\n        impatient += 1\n        if loss_val < best_loss:\n            best_loss = loss_val.item()\n            best_model = deepcopy(model.state_dict())\n    else:\n        best_loss = loss_val.item()\n        best_model = deepcopy(model.state_dict())\n        impatient = 0\n\n    if impatient >= patience:\n        print(f'Breaking due to early stopping at epoch {epoch}')\n        break\n\n    if (epoch + 1) % display_step == 0:\n        print(f\"Epoch {epoch+1:4d}, loss_train_last_batch = {loss:.4f}, loss_val = {loss_val:.4f}\")\n\n### Evaluation\n\nmodel.load_state_dict(best_model)\nmodel.eval()\n\npdf_loss_train = get_total_loss(dl_train)\npdf_loss_val = get_total_loss(dl_val)\npdf_loss_test = get_total_loss(dl_test)\n\nprint(f'Time NLL\\n'\n      f' - Train: {pdf_loss_train:.4f}\\n'\n      f' - Val:   {pdf_loss_val.item():.4f}\\n'\n      f' - Test:  {pdf_loss_test.item():.4f}')\n","sub_path":"code/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"162885898","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/xpython/pyvm2.py\n# Compiled at: 2020-05-08 06:20:17\n\"\"\"A pure-Python Python bytecode interpreter.\"\"\"\nfrom __future__ import print_function, division\nimport linecache, logging, operator, sys, six\nfrom six.moves import reprlib\nfrom xdis import PYTHON3, PYTHON_VERSION, IS_PYPY, op_has_argument\nfrom xdis.util import code2num, CO_NEWLOCALS\nfrom xdis.op_imports import get_opcode_module\nfrom xpython.pyobj import Frame, Block, traceback_from_frame\nfrom xpython.byteop import get_byteop\nPY2 = not PYTHON3\nlog = 
logging.getLogger(__name__)\nif PYTHON3:\n byteint = lambda b: b\nelse:\n byteint = ord\nrepr_obj = reprlib.Repr()\nrepr_obj.maxother = 120\nrepper = repr_obj.repr\n\nclass VMError(Exception):\n \"\"\"For raising errors in the operation of the VM.\"\"\"\n pass\n\n\nclass VMRuntimeError(Exception):\n \"\"\"RuntimeError in operation of the VM.\"\"\"\n pass\n\n\nclass VirtualMachine(object):\n\n def __init__(self, python_version=PYTHON_VERSION, vmtest_testing=False, is_pypy=IS_PYPY):\n self.frames = []\n self.frame = None\n self.return_value = None\n self.last_exception = None\n self.last_traceback_limit = None\n self.last_traceback = None\n self.version = python_version\n self.is_pypy = is_pypy\n self.vmtest_testing = vmtest_testing\n self.last_exception = None\n self.fn2native = {}\n self.in_exception_processing = False\n self.VMError = VMError\n int_vers = int(python_version * 10)\n version_info = (int_vers // 10, int_vers % 10)\n self.opc = get_opcode_module(version_info)\n self.byteop = get_byteop(self, python_version, is_pypy)\n return\n\n def top(self):\n \"\"\"Return the value at the top of the stack, with no changes.\"\"\"\n return self.frame.stack[(-1)]\n\n def pop(self, i=0):\n \"\"\"Pop a value from the stack.\n\n Default to the top of the stack, but `i` can be a count from the top\n instead.\n\n \"\"\"\n return self.frame.stack.pop(-1 - i)\n\n def push(self, *vals):\n \"\"\"Push values onto the value stack.\"\"\"\n self.frame.stack.extend(vals)\n\n def popn(self, n):\n \"\"\"Pop a number of values from the value stack.\n\n A list of `n` values is returned, the deepest value first.\n\n \"\"\"\n if n:\n ret = self.frame.stack[-n:]\n self.frame.stack[(-n):] = []\n return ret\n else:\n return []\n\n def peek(self, n):\n \"\"\"Get a value `n` entries down in the stack, without changing the stack.\"\"\"\n return self.frame.stack[(-n)]\n\n def push_block(self, type, handler=None, level=None):\n if level is None:\n level = len(self.frame.stack)\n self.frame.block_stack.append(Block(type, handler, level))\n return\n\n def pop_block(self):\n return self.frame.block_stack.pop()\n\n def top_block(self):\n return self.frame.block_stack[(-1)]\n\n def jump(self, jump):\n \"\"\"Move the bytecode pointer to `jump`, so it will execute next.\"\"\"\n self.frame.f_lasti = jump\n\n def make_frame(self, code, callargs={}, f_globals=None, f_locals=None):\n log.debug('make_frame: code=%r, callargs=%s, f_globals=%r, f_locals=%r', code, repper(callargs), (\n type(f_globals), id(f_globals)), (\n type(f_locals), id(f_locals)))\n if f_globals is not None:\n f_globals = f_globals\n if f_locals is None:\n f_locals = f_globals\n elif self.frames:\n f_globals = self.frame.f_globals\n if f_locals is None:\n f_locals = {}\n else:\n f_globals = f_locals = {'__builtins__': __builtins__, \n '__name__': '__main__', \n '__doc__': None, \n '__package__': None}\n if code.co_flags & CO_NEWLOCALS:\n f_locals = {'__locals__': {}}\n f_locals.update(callargs)\n frame = Frame(code, f_globals, f_locals, self.frame)\n log.debug('%r', frame)\n return frame\n\n def push_frame(self, frame):\n self.frames.append(frame)\n self.frame = frame\n\n def pop_frame(self):\n self.frames.pop()\n if self.frames:\n self.frame = self.frames[(-1)]\n else:\n self.frame = None\n return\n\n def print_frames(self):\n \"\"\"Print the call stack for debugging. 
Note that the\n format exactly the same as in traceback.print_tb()\n \"\"\"\n for f in self.frames:\n filename = f.f_code.co_filename\n lineno = f.line_number()\n print(' File \"%s\", line %d, in %s' % (filename, lineno, f.f_code.co_name))\n linecache.checkcache(filename)\n line = linecache.getline(filename, lineno, f.f_globals)\n if line:\n print(' ' + line.strip())\n\n def resume_frame(self, frame):\n frame.f_back = self.frame\n log.debug('resume_frame: %r', frame)\n val = self.run_frame(frame)\n frame.f_back = None\n return val\n\n def run_code(self, code, f_globals=None, f_locals=None):\n \"\"\"run code using f_globals and f_locals in our VM\"\"\"\n frame = self.make_frame(code, f_globals=f_globals, f_locals=f_locals)\n try:\n val = self.run_frame(frame)\n except Exception:\n if self.vmtest_testing:\n raise\n if self.last_traceback:\n self.last_traceback.print_tb()\n print('%s' % self.last_exception[0].__name__, end='')\n exc_value = self.last_exception[1]\n tail = ': %s' % ('\\n').join(self.last_exception[1].args) if self.last_exception[1].args else ''\n print(tail)\n raise VMRuntimeError\n\n if self.frames:\n raise VMError('Frames left over!')\n if self.frame and self.frame.stack:\n raise VMError('Data left on stack! %r' % self.frame.stack)\n return val\n\n def instruction_info(self, byteName, arguments, opoffset):\n frame = self.frame\n code = frame.f_code\n return '%d: %s %s\\n\\t%s in %s:%s' % (\n opoffset,\n byteName,\n arguments,\n code.co_name,\n code.co_filename,\n frame.f_lineno)\n\n def unwind_block(self, block):\n if block.type == 'except-handler':\n offset = 3\n else:\n offset = 0\n while len(self.frame.stack) > block.level + offset:\n self.pop()\n\n if block.type == 'except-handler':\n tb, value, exctype = self.popn(3)\n self.last_exception = (exctype, value, tb)\n\n def parse_byte_and_args(self):\n \"\"\" Parse 1 - 3 bytes of bytecode into\n an instruction and optionally arguments.\"\"\"\n f = self.frame\n f_code = f.f_code\n co_code = f_code.co_code\n extended_arg = 0\n while True:\n opoffset = f.f_lasti\n line_number = self.linestarts.get(opoffset, None)\n byteCode = byteint(co_code[opoffset])\n byteName = self.opc.opname[byteCode]\n f.f_lasti += 1\n arg = None\n arguments = []\n if op_has_argument(byteCode, self.opc):\n if PYTHON_VERSION >= 3.6:\n intArg = code2num(co_code, f.f_lasti) | extended_arg\n f.f_lasti += 1\n if byteCode == self.opc.EXTENDED_ARG:\n extended_arg = intArg << 8\n continue\n else:\n extended_arg = 0\n else:\n intArg = code2num(co_code, f.f_lasti) + code2num(co_code, f.f_lasti + 1) * 256 + extended_arg\n f.f_lasti += 2\n if byteCode == self.opc.EXTENDED_ARG:\n extended_arg = intArg * 65536\n continue\n else:\n extended_arg = 0\n if byteCode in self.opc.CONST_OPS:\n arg = f_code.co_consts[intArg]\n elif byteCode in self.opc.FREE_OPS:\n if intArg < len(f_code.co_cellvars):\n arg = f_code.co_cellvars[intArg]\n else:\n var_idx = intArg - len(f.f_code.co_cellvars)\n arg = f_code.co_freevars[var_idx]\n elif byteCode in self.opc.NAME_OPS:\n arg = f_code.co_names[intArg]\n elif byteCode in self.opc.JREL_OPS:\n arg = f.f_lasti + intArg\n elif byteCode in self.opc.JABS_OPS:\n arg = intArg\n elif byteCode in self.opc.LOCAL_OPS:\n arg = f_code.co_varnames[intArg]\n else:\n arg = intArg\n arguments = [\n arg]\n elif PYTHON_VERSION >= 3.6:\n f.f_lasti += 1\n break\n\n return (\n byteName, arguments, opoffset, line_number)\n\n def log(self, byteName, arguments, opoffset, line_number):\n \"\"\" Log arguments, block stack, and data stack for each opcode.\"\"\"\n 
if line_number is not None:\n op = 'Line %4d, ' % line_number\n else:\n op = ' '\n op += '%3d: %s' % (opoffset, byteName)\n if arguments:\n op += ' %r' % (arguments[0],)\n indent = ' ' * (len(self.frames) - 1)\n stack_rep = repper(self.frame.stack)\n block_stack_rep = repper(self.frame.block_stack)\n log.debug(' %sframe.stack: %s' % (indent, stack_rep))\n log.debug(' %sblocks : %s' % (indent, block_stack_rep))\n log.info('%s%s' % (indent, op))\n return\n\n def dispatch(self, byteName, arguments, opoffset):\n \"\"\" Dispatch by bytename to the corresponding methods.\n Exceptions are caught and set on the virtual machine.\"\"\"\n why = None\n self.in_exception_processing = False\n try:\n if byteName.startswith('UNARY_'):\n self.unaryOperator(byteName[6:])\n elif byteName.startswith('BINARY_'):\n self.binaryOperator(byteName[7:])\n elif byteName.startswith('INPLACE_'):\n self.inplaceOperator(byteName[8:])\n elif 'SLICE+' in byteName:\n self.sliceOperator(byteName)\n else:\n if hasattr(self.byteop, byteName):\n bytecode_fn = getattr(self.byteop, byteName, None)\n if not bytecode_fn:\n raise VMError('Unknown bytecode type: %s\\n\\t%s' % (\n self.instruction_info(byteName, arguments, opoffset),\n byteName))\n why = bytecode_fn(*arguments)\n except:\n self.last_exception = sys.exc_info()\n if not self.in_exception_processing:\n if self.last_exception[0] != SystemExit:\n log.info('exception in the execution of instruction:\\n\\t%s' % self.instruction_info(byteName, arguments, opoffset))\n self.last_traceback = traceback_from_frame(self.frame)\n self.in_exception_processing = True\n why = 'exception'\n\n return why\n\n def manage_block_stack(self, why):\n \"\"\" Manage a frame's block stack.\n Manipulate the block stack and data stack for looping,\n exception handling, or returning.\"\"\"\n assert why != 'yield'\n block = self.frame.block_stack[(-1)]\n if block.type == 'loop' and why == 'continue':\n self.jump(self.return_value)\n why = None\n return why\n else:\n self.pop_block()\n self.unwind_block(block)\n if block.type == 'loop' and why == 'break':\n why = None\n self.jump(block.handler)\n return why\n if self.version < 3.0:\n if block.type == 'finally' or block.type == 'setup-except' and why == 'exception' or block.type == 'with':\n if why == 'exception':\n exctype, value, tb = self.last_exception\n self.push(tb, value, exctype)\n else:\n if why in ('return', 'continue'):\n self.push(self.return_value)\n self.push(why)\n why = None\n self.jump(block.handler)\n return why\n else:\n if why == 'exception' and block.type in ('setup-except', 'finally'):\n self.push_block('except-handler')\n exctype, value, tb = self.last_exception\n self.push(tb, value, exctype)\n self.push(tb, value, exctype)\n why = None\n self.jump(block.handler)\n return why\n if block.type == 'finally':\n if why in ('return', 'continue'):\n self.push(self.return_value)\n self.push(why)\n why = None\n self.jump(block.handler)\n return why\n return why\n\n def run_frame(self, frame):\n \"\"\"Run a frame until it returns (somehow).\n\n Exceptions are raised, the return value is returned.\n\n \"\"\"\n self.push_frame(frame)\n self.f_code = self.frame.f_code\n self.linestarts = dict(self.opc.findlinestarts(self.f_code, dup_lines=True))\n opoffset = 0\n while True:\n byteName, arguments, opoffset, line_number = self.parse_byte_and_args()\n if log.isEnabledFor(logging.INFO):\n self.log(byteName, arguments, opoffset, line_number)\n why = self.dispatch(byteName, arguments, opoffset)\n if why == 'exception':\n if not 
self.in_exception_processing:\n if self.last_exception[0] != SystemExit:\n log.info('exception in the execution of instruction:\\n\\t%s' % self.instruction_info(byteName, arguments, opoffset))\n self.last_traceback = traceback_from_frame(self.frame)\n self.in_exception_processing = True\n if why == 'reraise':\n why = 'exception'\n if why != 'yield':\n while why and frame.block_stack:\n why = self.manage_block_stack(why)\n\n if why:\n break\n\n self.pop_frame()\n if why == 'exception':\n if self.last_exception and self.last_exception[0]:\n six.reraise(*self.last_exception)\n else:\n raise VMError('Borked exception recording')\n self.in_exception_processing = False\n return self.return_value\n\n UNARY_OPERATORS = {'POSITIVE': operator.pos, \n 'NEGATIVE': operator.neg, \n 'NOT': operator.not_, \n 'CONVERT': repr, \n 'INVERT': operator.invert}\n\n def unaryOperator(self, op):\n x = self.pop()\n self.push(self.UNARY_OPERATORS[op](x))\n\n BINARY_OPERATORS = {'POWER': pow, \n 'MULTIPLY': operator.mul, \n 'DIVIDE': getattr(operator, 'div', lambda x, y: None), \n 'FLOOR_DIVIDE': operator.floordiv, \n 'TRUE_DIVIDE': operator.truediv, \n 'MODULO': operator.mod, \n 'ADD': operator.add, \n 'SUBTRACT': operator.sub, \n 'SUBSCR': operator.getitem, \n 'LSHIFT': operator.lshift, \n 'RSHIFT': operator.rshift, \n 'AND': operator.and_, \n 'XOR': operator.xor, \n 'OR': operator.or_}\n if PYTHON_VERSION >= 3.5:\n BINARY_OPERATORS['MATRIX_MULTIPLY'] = operator.matmul\n\n def binaryOperator(self, op):\n x, y = self.popn(2)\n self.push(self.BINARY_OPERATORS[op](x, y))\n\n def inplaceOperator(self, op):\n x, y = self.popn(2)\n if op == 'POWER':\n x **= y\n elif op == 'MULTIPLY':\n x *= y\n elif op in ('DIVIDE', 'FLOOR_DIVIDE'):\n x //= y\n elif op == 'TRUE_DIVIDE':\n x /= y\n elif op == 'MODULO':\n x %= y\n elif op == 'ADD':\n x += y\n elif op == 'SUBTRACT':\n x -= y\n elif op == 'LSHIFT':\n x <<= y\n elif op == 'RSHIFT':\n x >>= y\n elif op == 'AND':\n x &= y\n elif op == 'XOR':\n x ^= y\n elif op == 'OR':\n x |= y\n elif op == 'MATRIX_MULTIPLY':\n operator.imatmul(x, y)\n else:\n raise VMError('Unknown in-place operator: %r' % op)\n self.push(x)\n\n def sliceOperator(self, op):\n start = 0\n end = None\n op, count = op[:-2], int(op[(-1)])\n if count == 1:\n start = self.pop()\n elif count == 2:\n end = self.pop()\n elif count == 3:\n end = self.pop()\n start = self.pop()\n l = self.pop()\n if end is None:\n end = len(l)\n if op.startswith('STORE_'):\n l[start:end] = self.pop()\n elif op.startswith('DELETE_'):\n del l[start:end]\n else:\n self.push(l[start:end])\n return","sub_path":"pycfiles/x_python-1.1.0-py2.7/pyvm2.py","file_name":"pyvm2.py","file_ext":"py","file_size_in_byte":17448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"135549437","text":"# 회귀분석 모델 : 자동차 연비 예측\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport tensorflow as tf\r\nfrom tensorflow.keras import layers\r\n\r\ndataset = pd.read_csv(\"https://raw.githubusercontent.com/pykwon/python/master/testdata_utf8/auto-mpg.csv\")\r\nprint(dataset.head(3))\r\ndel dataset['car name']\r\nprint(dataset.head(3))\r\npd.set_option('display.max_columns', 100)\r\nprint(dataset.corr())\r\ndataset.drop(['cylinders', 'acceleration', 'model year','origin'], axis='columns', inplace=True)\r\nprint()\r\nprint(dataset.head(2))\r\ndataset['horsepower'] = dataset['horsepower'].apply(pd.to_numeric, errors='coerce') # data중에 ? 
가 있어 형변환 하면 NaN이 생김\r\nprint(dataset.info())\r\nprint(dataset.isna().sum()) # 6\r\ndataset = dataset.dropna()\r\n\r\nimport seaborn as sns\r\n# sns.pairplot(dataset[['mpg','displacement','horsepower','weight']], diag_kind='kde')\r\n# plt.show()\r\n\r\n# train / test\r\ntrain_dataset = dataset.sample(frac=0.7, random_state=123)\r\ntest_dataset = dataset.drop(train_dataset.index)\r\nprint(train_dataset.shape) # (274, 4)\r\nprint(test_dataset.shape) # (118, 4)\r\n\r\n# prepare for standardization (applying the formula by hand)\r\ntrain_stat = train_dataset.describe()\r\n# print(train_stat)\r\ntrain_stat.pop('mpg')\r\ntrain_stat = train_stat.transpose()\r\nprint(train_stat)\r\n\r\n# label : mpg\r\ntrain_labels = train_dataset.pop('mpg')\r\nprint(train_labels[:2])\r\ntest_labels = test_dataset.pop('mpg')\r\nprint(test_labels[:2])\r\n\r\ndef st_func(x):\r\n    return ((x - train_stat['mean']) / train_stat['std'])\r\n\r\n# print(st_func(10))\r\n# print(train_dataset[:3])\r\n# print(st_func(train_dataset[:3]))\r\nst_train_data = st_func(train_dataset) # train feature\r\nst_test_data = st_func(test_dataset) # test feature\r\n# ------------ datasets for the model are now ready -------------------\r\n\r\n# Model\r\ndef build_model():\r\n    network = tf.keras.Sequential([\r\n        layers.Dense(units=64, input_shape = [3], activation='linear'),\r\n        layers.Dense(64, activation='linear'), # relu\r\n        layers.Dense(1, activation='linear'),\r\n    ])\r\n    # opti = tf.keras.optimizers.RMSprop(0.01)\r\n    opti = tf.keras.optimizers.Adam(0.01)\r\n    network.compile(optimizer = opti, loss='mean_squared_error',\\\r\n                    metrics=['mean_absolute_error','mean_squared_error'])\r\n    return network\r\n    \r\nprint(build_model().summary())\r\nmodel = build_model()\r\n# the model can also be run once before fit() as a sanity check\r\nprint(model.predict(st_train_data[:1]))\r\n\r\n# training\r\nepochs = 5000\r\n\r\n# early stopping\r\nearly_stop = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3)\r\n\r\n# early_stop was created above but never used; pass it to fit() so it takes effect\r\nhistory = model.fit(st_train_data, train_labels, batch_size=32,\\\r\n                    epochs=epochs, validation_split=0.2, verbose=1,\\\r\n                    callbacks=[early_stop])\r\ndf = pd.DataFrame(history.history)\r\nprint(df.head(3))\r\nprint(df.columns)\r\n\r\n# visualization\r\ndef plot_history(history):\r\n    hist = pd.DataFrame(history.history)\r\n    hist['epoch'] = history.epoch\r\n    plt.figure(figsize = (8, 12))\r\n    \r\n    plt.subplot(2, 1, 1)\r\n    plt.xlabel('epoch')\r\n    plt.ylabel('Mean Abs Error[MPG]')\r\n    plt.plot(hist['epoch'], hist['mean_absolute_error'], label='train error')\r\n    plt.plot(hist['epoch'], hist['val_mean_absolute_error'], label='val error')\r\n    plt.legend()\r\n    \r\n    plt.subplot(2, 1, 2)\r\n    plt.xlabel('epoch')\r\n    plt.ylabel('Mean Squared Error[MPG]')\r\n    plt.plot(hist['epoch'], hist['mean_squared_error'], label='train error')\r\n    plt.plot(hist['epoch'], hist['val_mean_squared_error'], label='val error')\r\n    plt.legend()\r\n    plt.show()\r\n    \r\nplot_history(history)\r\n    \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"python_tensorflow/tf_test2/ke11cars.py","file_name":"ke11cars.py","file_ext":"py","file_size_in_byte":3608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"249760719","text":"#!/usr/bin/python\n# ! 
_*_coding:utf-8_*_\n__author__ = 'ycl'\nimport os,sys,base.config\n\n# directory that holds sign_apk.jar and the signing key files\nsign_jar_dir_path = base.config.sign_apk_dir+os.sep\n\ndef load_cmd(path):\n    par_dir,filename = os.path.split(path)\n    sign_path = par_dir+os.sep+\"sign_\"+filename\n    cmd_sign = \"java -jar \"+sign_jar_dir_path+\"signapk.jar \"+sign_jar_dir_path+\"testkey.x509.pem \"+sign_jar_dir_path+\"testkey.pk8 \"+\\\n    path+\" \"+sign_path\n    return cmd_sign\n\nif __name__=='__main__':\n    input_path = sys.argv[1]\n    # input_path = \"/Users/shuuseiyang/Desktop/creak/mpe/mpe.apk\"\n    sign_cmd = load_cmd(input_path)\n    os.popen(sign_cmd)\n\n    # raw_input(\"Press Enter to continue\")","sub_path":"creak/sign_apk.py","file_name":"sign_apk.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"147224176","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n#from pyvirtualdisplay import Display\nfrom bs4 import BeautifulSoup\nimport time\nimport datetime\nimport traceback\nfrom selenium.webdriver.chrome.options import Options\nfrom influx import select, insert \n\n\nurl = 'https://web.kma.go.kr/eng/weather/forecast/current_korea.jsp'\n\ndef correct_url(url): \n\tif not url.startswith(\"http://\") and not url.startswith(\"https://\"):\n\t\turl = \"http://\" + url\n\treturn url\n\ndef scrollDown(browser, numberOfScrollDowns):\n\tbody = browser.find_element_by_tag_name(\"body\")\n\twhile numberOfScrollDowns >=0:\n\t\tbody.send_keys(Keys.PAGE_DOWN)\n\t\tnumberOfScrollDowns -= 1\n\t\ttime.sleep(0.3)\n\treturn browser\n\ndef crawl_url(url, run_headless=False):\n\twhile (1):\n\t\ttry:\n\t\t\turl = correct_url(url)\n\t\t\tchrome_options = Options()\n\t\t\tchrome_options.add_argument(\"--headless\")\t\n\t\t\tchrome_options.add_argument('--no-sandbox')\n\t\t\tchrome_options.add_argument('--disable-dev-shm-usage')\n\t\t\tbrowser = webdriver.Chrome(chrome_options=chrome_options)\n\t\t\tbrowser.get(url)\n\t\t\t#time.sleep(1)\n\t\t\t\n\t\t\tcurr_time = str(datetime.datetime.now())\n\t\t\tprint (str(curr_time))\n\t\t\t\n\t\t\tcontent = browser.page_source\n\t\t\tsoup = BeautifulSoup(content, 'html.parser')\t# explicit parser avoids bs4's parser-guessing warning\n\t\t\t\n\t\t\tlocations = soup.findAll('tr')\n\t\t\tlocation_infos = []\n\n\t\t\tfor location in locations:\n\t\t\t\tinfos = location.findAll(\"td\")\n\t\t\t\t\n\t\t\t\t\n\t\t\t\tif (infos != []):\n\t\t\t\t\tstation, weather, visibility, \\\n\t\t\t\t\tcloud, temp, wind_dir, wind_speed, \\\n\t\t\t\t\thum, _, air_pressure \t\t\t\t\t\t= infos\n\n\t\t\t\t\tinsert(infos)\n\n\t\t\tbrowser.quit()\n\t\t\ttime.sleep(60)\t\t# fetch one reading per minute\n\t\t\t\n\t\texcept:\n\t\t\ttraceback.print_exc()\n\t\nif __name__=='__main__':\n\tcrawl_url(url)\n\t\n\t\n","sub_path":"crawl_influxdb/crawl_temperature_influx.py","file_name":"crawl_temperature_influx.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"481037018","text":"import urllib\nimport csv\nimport os.path as pth\nimport os\nimport xml.etree.ElementTree as ET\n\n\nfiles = []\ndataset_dir = \"/home/jupyter/dataset/train\"\ntry:\n    for filename in os.listdir(dataset_dir):\n        if not filename.endswith('.xml'):\n            continue\n\n        path = os.path.join(dataset_dir, filename)\n        if pth.isfile(path):\n            tree = ET.parse(path)\n            root = tree.getroot()\n            for file_name in root.iter('filename'):\n                file_name.text = filename[:-3] + \"jpg\"\n            for width in root.iter('width'):\n                width.text = 
str(round(int(width.text)/2))\n for height in root.iter('height'):\n height.text = str(round(int(height.text)/2))\n for xmin in root.iter('xmin'):\n xmin.text = str(round(int(xmin.text)/2))\n for ymin in root.iter('ymin'):\n ymin.text = str(round(int(ymin.text)/2))\n for xmax in root.iter('xmax'):\n xmax.text = str(round(int(xmax.text)/2))\n for ymax in root.iter('ymax'):\n ymax.text = str(round(int(ymax.text)/2))\n print(root)\n tree.write(path)\nexcept IOError:\n print(\"File 'annotations.csv' is no exist\")\n\n","sub_path":"dataset_preprocess/edit_anotations.py","file_name":"edit_anotations.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"506818285","text":"# -*- coding: utf-8 -*-\n\n# coding: utf-8\n\n# # Mask R-CNN Demo\n#\n# A quick intro to using the pre-trained model to detect and segment objects.\n\n# In[1]:\n\n\nimport os\nimport sys\nimport random\nimport math\nimport numpy as np\nimport skimage.io\nimport matplotlib\nimport matplotlib.pyplot as plt\n\n# Root directory of the project\nfrom IPython.core.display import JSON\n\nROOT_DIR = os.path.abspath(\"./\")\n\n# Import Mask RCNN\nsys.path.append(ROOT_DIR) # To find local version of the library\nfrom mrcnn import utils\nimport mrcnn.model as modellib\nfrom mrcnn import visualize\n# Import COCO config\n# sys.path.append(os.path.join(ROOT_DIR, \"./coco/\")) # To find local version\nimport coco\n\n# get_ipython().run_line_magic('matplotlib', 'inline')\n\n# Directory to save logs and trained model\nMODEL_DIR = os.path.join(ROOT_DIR, \"logs\")\n\n# Local path to trained weights file\nCOCO_MODEL_PATH = os.path.join(ROOT_DIR, \"mask_rcnn_coco.h5\")\n# Download COCO trained weights from Releases if needed\nif not os.path.exists(COCO_MODEL_PATH):\n utils.download_trained_weights(COCO_MODEL_PATH)\n\n\nclass InferenceConfig(coco.CocoConfig):\n # Set batch size to 1 since we'll be running inference on\n # one image at a time. 
Batch size = GPU_COUNT * IMAGES_PER_GPU\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n\n\nconfig = InferenceConfig()\nconfig.display()\n\n# ## Create Model and Load Trained Weights\n\n# In[3]:\n\n\n# Create model object in inference mode.\nmodel = modellib.MaskRCNN(mode=\"inference\", model_dir=MODEL_DIR, config=config)\n\n# Load weights trained on MS-COCO\nmodel.load_weights(COCO_MODEL_PATH, by_name=True)\n\nclass_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',\n 'bus', 'train', 'truck', 'boat', 'traffic light',\n 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',\n 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',\n 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',\n 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',\n 'kite', 'baseball bat', 'baseball glove', 'skateboard',\n 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',\n 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',\n 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',\n 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',\n 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',\n 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',\n 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',\n 'teddy bear', 'hair drier', 'toothbrush']\n\nimport time\nfrom skimage.measure import find_contours\n\ndef handleImage(image):\n # Load a random image from the images folder\n # file_names = \"./image/test1.jpeg\"\n\n # 把数据存入data中。最后转成json\n data = {}\n # 存储图像的大小,在安卓设备绘制遮罩的时候需要根据这个大小进行缩放\n data[\"shape\"] = [image.shape[0], image.shape[1]]\n # 一张图可能有多个人物,一个人物会有多个轮廓,用数组存\n data['contours'] = []\n # Run detection\n results = model.detect([image], verbose=1)\n\n # Visualize results\n r = results[0]\n\n # 以下代码修改自visualize.display_instances方法\n # 获取id\n class_ids = r['class_ids']\n masks = r['masks']\n boxes = r['rois']\n # Number of instances\n N = boxes.shape[0]\n\n\n\n\n # 转换轮廓数据为坐标数据\n def getContourData(contour):\n contourData = []\n for point in contour:\n contourData.append([point[0], point[1]])\n return contourData\n\n for i in range(N):\n class_id = class_ids[i]\n label = class_names[class_id]\n # 如果不是人,跳过\n if (label != 'person'):\n continue\n # Bounding box\n if not np.any(boxes[i]):\n # Skip this instance. Has no bbox. 
Likely lost in image cropping.\n continue\n\n # Mask\n mask = masks[:, :, i]\n\n # Mask Polygon\n # Pad to ensure proper polygons for masks that touch image edges.\n padded_mask = np.zeros(\n (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)\n padded_mask[1:-1, 1:-1] = mask\n contours = find_contours(padded_mask, 0.5)\n # for verts in contours:\n # 遍历轮廓\n for contour in contours:\n # Subtract the padding and flip (y, x) to (x, y)\n contour = np.fliplr(contour) - 1\n # 轮廓数据构添加到集合中。\n contourData = getContourData(contour)\n data['contours'].append(contourData)\n # printImg(contour, contour)\n\n return data\n\n\n\nimport cv2\nimport time\nimport demoForData\n\nimport numpy as np\n\n###################### 视频载入 #############################\ncap = cv2.VideoCapture(\"../app/src/main/res/raw/video.mp4\")\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\n# out = cv2.VideoWriter('E:\\\\Data_Set\\\\AODnet\\\\测试视频\\\\生成视频\\\\output11.avi', fourcc, 20, (1920, 1080))\n\n##################### 模型载入 #############################\n\n##################### 视频处理 #############################\nnum = 0\ncurrentTime = time.time()\n# 总共215帧\nfile = open(\"video.txt\", \"a+\",encoding=\"utf-8\")\n\ncurrentFrame = 0\nwhile cap.isOpened():\n preFrameTime = time.time()\n currentFrame += 1\n # get a frame\n # 小于几就是从几开始\n if currentFrame<1:\n continue\n rval, frame = cap.read()\n # save a frame\n if rval == True:\n # print(type(rval))\n # print(type(frame))\n # 获取到当前帧的数据\n try:\n print(\"====开始处理第\" + str(currentFrame) + \"帧\")\n try:\n frameData = demoForData.handleImage(frame)\n print(\"====处理完毕第\" + str(currentFrame) + \"帧\" + str(time.time() - preFrameTime))\n file.write(\"\\n\"+str(currentFrame)+str(frameData))\n except:\n file.write(\"\\nerror\")\n\n print(\"====识别错误第\" + str(currentFrame) + \"帧\" + str(time.time() - preFrameTime))\n\n except Exception as e:\n print(\"====错误处理第\" + str(currentFrame) + \"帧\")\n print(e)\n\n print(\"====用时\" + str(time.time() - currentTime) + \",总共215帧,当前处理了\" + str(currentFrame) + \"帧\")\n print(\"===============================================\")\n\n else:\n print(\"====error处理第\" + str(currentFrame) + \"帧\")\n break\nprint(\"====close\")\n\nfile.close()\n","sub_path":"Py3_6/video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":6666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"544195178","text":"from marshmallow import fields\nfrom .....messaging.agent_message import AgentMessage, AgentMessageSchema\nfrom ..message_types import PRESENT_PROOF, PROTOCOL_PACKAGE\n\nHANDLER_CLASS = f\"{PROTOCOL_PACKAGE}.handlers.present_proof.PresentProofHandler\"\n\n\nclass PresentProof(AgentMessage):\n class Meta:\n handler_class = HANDLER_CLASS\n schema_class = \"PresentProofSchema\"\n message_type = PRESENT_PROOF\n\n def __init__(\n self,\n _id: str = None,\n *,\n credential_presentation=None,\n prover_public_did=None,\n decision: bool = True,\n **kwargs,\n ):\n \"\"\"Initialize credential issue object.\"\"\"\n super().__init__(_id=_id, **kwargs)\n self.credential_presentation = credential_presentation\n self.prover_public_did = prover_public_did\n self.decision = decision\n\n\nclass PresentProofSchema(AgentMessageSchema):\n \"\"\"Credential schema.\"\"\"\n\n class Meta:\n \"\"\"Credential schema metadata.\"\"\"\n\n model_class = PresentProof\n\n credential_presentation = fields.Str(required=False)\n prover_public_did = fields.Str(required=False)\n decision = 
fields.Bool(required=True)\n","sub_path":"aries_cloudagent/protocols/present_proof/v1_1/messages/present_proof.py","file_name":"present_proof.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"484063358","text":"peliculas=[{\r\n \"titulo\" : \"La Patagonia rebelde\",\r\n \"director\" : \"Héctor Olivera\",\r\n \"year\" : 1974,\r\n \"protagonistas\"\t: [\"Héctor Alterio\",\"Luis Brandoni\",\"Federico Luppi\",\"Pepe Soriano\",\"Osvaldo Terranova\",\"José María Gutiérrez\",\"Pedro Aleandro Tacholas\"],\r\n \"portada\" : \"/static/La-patagonia-rebelde.jpg\",\r\n \"codigo\" : 1001},\r\n {\"titulo\" : \"La tregua\",\r\n \"director\" : \"Sergio Renán\",\r\n \"year\" : 1974,\r\n \"protagonistas\" : [\"Héctor Alterio\", \"Ana María Picchio\",\"Oscar Martinez\",\"Norma Aleandro\"],\r\n \"portada\" : \"/static/La-tregua.jpg\",\r\n \"codigo\" : 1002},\r\n {\"titulo\" : \"Camila\",\r\n \"director\" : \"María Luisa Bemberg\",\r\n \"year\" : 1984,\r\n \"protagonistas\" : [\"Héctor Alterio\", \"Susú Pecoraro\",\"Imanol Arias\"],\r\n \"portada\" : \"/static/Camila.jpg\",\r\n \"codigo\" : 1003},\r\n {\"titulo\" : \"Esperando la carroza\",\r\n \"director\" : \"Alejandro Doria\",\r\n \"year\" : 1985,\r\n \"protagonistas\" : [\"Luis Brandoni\",\"China Zorrilla\",\"Antonio Gasalla\",\"Betiana Blum\",\"Juan Manuel Tenuta\",\r\n \"Andrea Tenuta\",\"Darío Grandinetti\",\"Cecilia Rossetto\",\"Enrique Pinti\",\"Angelita Pardo\",\"Clotilde Borella\",\"Pina Criscuolo\",\r\n \"Juan Acosta\",\"Mónica Alessandría\",\"José Andrada\",\"Gofredo Colombo\",\"Julio de Grazia\",\"Mónica Villa\",\r\n \"Lidia Catalano\",\"Cristina Fridman\",\"Miguel Ángel Porro\"],\r\n \"portada\" : \"/static/Carroza.jpg\",\r\n \"codigo\" : 1004},\r\n {\"titulo\" : \"Nueve Reinas\",\r\n \"director\" : \"Fabián Bielinsky\",\r\n \"year\" : 2000,\r\n \"protagonistas\" : [\"Ricardo Darín\",\"Gastón Pauls\",\"Leticia Brédice\",\"Tomás Fonzi\",\"Graciela Tenenbaum\",\r\n \"Oscar Núñez\", \"Alejandro Awada\",\"Elsa Berenguer\",\"Roly Serrano\"],\r\n \"portada\" : \"/static/Nueve.jpg\",\r\n \"codigo\" :1005},\r\n {\"titulo\" : \"El hijo de la novia\",\r\n \"director\" : \"Juan José Campanella\",\r\n \"year\" : 2001,\r\n \"protagonistas\"\t: [\"Ricardo Darín\",\"Héctor Alterio\",\"Norma Aleandro\",\"Natalia Verbeke\",\"Eduardo Blanco\"],\r\n \"portada\" : \"/static/Novia.jpg\",\r\n \"codigo\" : 1006},\r\n {\"titulo\" : \"Historias mínimas\",\r\n \"director\" : \"Carlos Sorín\",\r\n \"year\" : 2002,\r\n \"protagonistas\"\t: [\"Javier Lombardo\",\"Antonio Benedictti\",\"Javiera Bravo\"],\r\n \"portada\" : \"/static/Historia.jpg\",\r\n \"codigo\" : 1007},\r\n {\"titulo\" : \"Tiempo de valientes\",\r\n \"director\" : \"Damián Szifron\",\r\n \"year\" : 2005,\r\n \"protagonistas\"\t: [\"Luis Luque\",\"Diego Peretti\"],\r\n \"portada\" : \"/static/valientes.jpg\",\r\n \"codigo\" : 1008},\r\n {\"titulo\" : \"El secreto de sus ojos\",\r\n \"director\" : \"Juan José Campanella\",\r\n \"year\" : 2009,\r\n \"protagonistas\"\t: [\"Ricardo Darín\",\"Soledad Villamil\",\"Pablo Rago\",\"Javier Godino\",\"Guillermo Francella\"],\r\n \"portada\" : \"/static/secreto.jpg\",\r\n \"codigo\" : 1009},\r\n {\"titulo\" : \"Relatos Salvajes\",\r\n \"director\" : \"Damián Szifron\",\r\n \"year\" : 2014,\r\n \"protagonistas\"\t: [\"Ricardo Darín\",\"Oscar Martínez\",\"Leonardo Sbaraglia\",\"Érica Rivas\",\"Rita Cortese\",\"Julieta Zylberberg\",\"Darío Grandinetti\"],\r\n \"portada\" : 
\"/static/relato.jpg\",\r\n \"codigo\" : 1010}]","sub_path":"peliculas.py","file_name":"peliculas.py","file_ext":"py","file_size_in_byte":3991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"314401682","text":"'''\nfunction_pipe.py\n\nCopyright 2012-2017 Research Affiliates\n\nAuthors: Christopher Ariza, Max Moroz\n\nCommon usage:\nimport function_pipe as fpn\n'''\n\nimport functools\nimport inspect\nimport collections\nimport types\nimport sys\n\n\n#-------------------------------------------------------------------------------\n# FunctionNode utilities\n\ndef compose(*funcs):\n '''\n Given a list of functions, execute them from right to left, passing\n the returned value of the right f to the left f. Store the reduced function in a FunctionNode\n '''\n # call right first, then left of each pair; each reduction retruns a function\n reducer = functools.reduce(lambda f, g:\n lambda *args, **kaargs: f(g(*args, **kaargs)), funcs)\n # args are reversed to show execution from right to left\n return FunctionNode(reducer, doc_function=compose, doc_args=reversed(funcs))\n\ndef _wrap_unary(func):\n '''Decorator for operator overloads. Given a higher order function that takes one args, wrap it in a FunctionNode function and provide documentation labels.\n '''\n def unary(lhs):\n # wrapped function will prepare correct class, even if a constant\n cls = PipeNode if isinstance(lhs, PipeNode) else FunctionNode\n return cls(func(lhs),\n doc_function=func,\n doc_args=(lhs,)\n )\n return unary\n\ndef _wrap_binary(func):\n '''Decorator for operators. Given a higher order function that takes two args, wrap it in a FunctionNode function and provide documentation labels.\n '''\n def binary(lhs, rhs):\n # wrapped function will prepare correct class, even if a constant\n cls = PipeNode if isinstance(lhs, PipeNode) else FunctionNode\n return cls(func(lhs, rhs),\n doc_function=func,\n doc_args=(lhs, rhs)\n )\n return binary\n\n\ndef _repr(f, doc_args=True):\n '''Provide a string representation of the FN, recursively representing defined arguments.\n '''\n def get_function_name(f):\n '''Get a string representation of the callable, or its code if it is a lambda. 
In some cases, `f` may not be function, so just return a string.\n '''\n f_type = type(f)\n if f_type is not types.FunctionType or not hasattr(f, '__name__'):\n # functool partial types do not have __name__ attrs, and are not FunctionTypes\n return str(f)\n if f.__name__ == '':\n # split on all white space, and rejoin with single space\n return ' '.join(inspect.getsource(f).split())\n return f.__name__\n\n # find FunctionNode; using hasattr because of testing context issues\n if hasattr(f, '_doc_function'):\n if f._doc_function:\n doc_f = get_function_name(f._doc_function)\n if doc_args:\n args = kwargs = ''\n if f._doc_args:\n args = (str(_repr(v)) for v in f._doc_args)\n if f._doc_kwargs:\n kwargs = (k + '=' + str(_repr(f)) for k, v\n in f._doc_kwargs.items())\n if not args and not kwargs:\n return doc_f\n return doc_f + '(' + ','.join(args) + ','.join(kwargs) + ')'\n return doc_f\n else: # we don't know its structure, use _function\n return get_function_name(f._function)\n return get_function_name(f)\n\n\n\nclass FunctionNode:\n '''A wrapper for a callable that can reside in an expression of numerous FunctionNodes, or be modified with unary or binary operators.\n '''\n __slots__ = (\n '_function',\n '_doc_function',\n '_doc_args',\n '_doc_kwargs',\n )\n\n #---------------------------------------------------------------------------\n def __init__(self,\n function,\n *,\n doc_function=None,\n doc_args=None,\n doc_kwargs=None\n ):\n '''\n Args:\n function: a callable\n doc_function: the function to display; will be set to `function` if nor provided\n '''\n # if a function node, re-wrap\n if isinstance(function, FunctionNode):\n for attr in self.__slots__:\n setattr(self, attr, getattr(function, attr))\n else:\n if callable(function):\n self._function = function\n else:\n # if not a callable, we upgrade a constant, non function value to be a function that returns that value\n self._function = lambda *args, **kwargs: function\n\n # if not supplied, doc_function is set to function\n self._doc_function = doc_function if doc_function else self._function\n self._doc_args = doc_args\n self._doc_kwargs = doc_kwargs\n\n @property\n def unwrap(self):\n '''The doc_function should be set to the core function being wrapped, no matter the level of wrapping.\n '''\n # if the stored function is using pipe_kwarg_bind, need to go lower\n doc_func = self\n while hasattr(doc_func, '_doc_function'):\n doc_func = getattr(doc_func, '_doc_function')\n return doc_func\n\n def __call__(self, *args, **kwargs):\n '''Call the wrapped function.\n '''\n return self._function(*args, **kwargs)\n\n def __str__(self):\n return ''.format(_repr(self))\n\n __repr__ = __str__\n\n #__name__ = ''.format(_repr(self, doc_args=False))\n #__name__ = __str__\n\n def partial(self, *args, **kwargs):\n '''Return a new FunctionNode with a partialed function with args and kwargs'\n '''\n fn = FunctionNode(functools.partial(self._function, *args, **kwargs))\n for attr in self.__slots__:\n if not getattr(fn, attr):\n setattr(fn, attr, getattr(self, attr))\n return fn\n\n #---------------------------------------------------------------------------\n # all unary operators return a function; the _wrap_unary decorator then wraps this function in a FunctionNode\n\n @_wrap_unary\n def __neg__(self):\n return lambda *args, **kwargs: self(*args, **kwargs) * -1\n\n @_wrap_unary\n def __invert__(self):\n '''This is generally expected to be a Boolean inversion, such as ~ (not) applied to a numpy array or pd.Series.\n '''\n return lambda *args, 
**kwargs: self(*args, **kwargs).__invert__()\n\n @_wrap_unary\n def __abs__(self):\n '''Absolute value; most common usage us on Numpy or Pandas objects, and thus here we np.abs.\n '''\n import numpy as np\n return lambda *args, **kwargs: np.abs(self(*args, **kwargs))\n\n #---------------------------------------------------------------------------\n # all binary operators return a function; the _wrap_binary decorator then wraps this function in a FunctionNode definition and supplies appropriate doc args. Note both left and righ sides are wrapped in FNs to permit operations on constants\n\n @_wrap_binary\n def __add__(lhs, rhs):\n return (lambda *args, **kwargs:\n lhs.__class__(lhs)(*args, **kwargs) +\n lhs.__class__(rhs)(*args, **kwargs))\n\n @_wrap_binary\n def __sub__(lhs, rhs):\n return (lambda *args, **kwargs:\n lhs.__class__(lhs)(*args, **kwargs) -\n lhs.__class__(rhs)(*args, **kwargs))\n\n @_wrap_binary\n def __mul__(lhs, rhs):\n return (lambda *args, **kwargs:\n lhs.__class__(lhs)(*args, **kwargs) *\n lhs.__class__(rhs)(*args, **kwargs))\n\n @_wrap_binary\n def __truediv__(lhs, rhs):\n return (lambda *args, **kwargs:\n lhs.__class__(lhs)(*args, **kwargs) /\n lhs.__class__(rhs)(*args, **kwargs))\n\n @_wrap_binary\n def __pow__(lhs, rhs):\n return (lambda *args, **kwargs:\n lhs.__class__(lhs)(*args, **kwargs) **\n lhs.__class__(rhs)(*args, **kwargs))\n\n @_wrap_binary\n def __radd__(rhs, lhs):\n return (lambda *args, **kwargs:\n rhs.__class__(lhs)(*args, **kwargs) +\n rhs.__class__(rhs)(*args, **kwargs))\n\n @_wrap_binary\n def __rsub__(rhs, lhs):\n return (lambda *args, **kwargs:\n rhs.__class__(lhs)(*args, **kwargs) -\n rhs.__class__(rhs)(*args, **kwargs))\n\n @_wrap_binary\n def __rmul__(rhs, lhs):\n return (lambda *args, **kwargs:\n rhs.__class__(lhs)(*args, **kwargs) *\n rhs.__class__(rhs)(*args, **kwargs))\n\n @_wrap_binary\n def __rtruediv__(rhs, lhs):\n return (lambda *args, **kwargs:\n rhs.__class__(lhs)(*args, **kwargs) /\n rhs.__class__(rhs)(*args, **kwargs))\n\n # comparison operators, expected to return booleans\n @_wrap_binary\n def __eq__(lhs, rhs):\n return (lambda *args, **kwargs:\n lhs.__class__(lhs)(*args, **kwargs) ==\n lhs.__class__(rhs)(*args, **kwargs))\n\n @_wrap_binary\n def __lt__(lhs, rhs):\n return (lambda *args, **kwargs:\n lhs.__class__(lhs)(*args, **kwargs) <\n lhs.__class__(rhs)(*args, **kwargs))\n @_wrap_binary\n def __le__(lhs, rhs):\n return (lambda *args, **kwargs:\n lhs.__class__(lhs)(*args, **kwargs) <=\n lhs.__class__(rhs)(*args, **kwargs))\n @_wrap_binary\n def __gt__(lhs, rhs):\n return (lambda *args, **kwargs:\n lhs.__class__(lhs)(*args, **kwargs) >\n lhs.__class__(rhs)(*args, **kwargs))\n @_wrap_binary\n def __ge__(lhs, rhs):\n return (lambda *args, **kwargs:\n lhs.__class__(lhs)(*args, **kwargs) >=\n lhs.__class__(rhs)(*args, **kwargs))\n\n @_wrap_binary\n def __ne__(lhs, rhs):\n return (lambda *args, **kwargs:\n lhs.__class__(lhs)(*args, **kwargs) !=\n lhs.__class__(rhs)(*args, **kwargs))\n\n #---------------------------------------------------------------------------\n # composition operators\n\n def __rshift__(lhs, rhs):\n '''Composition; return a function that will call LHS first, then RHS\n '''\n return compose(rhs, lhs)\n\n def __rrshift__(rhs, lhs):\n '''Composition; return a function that will call LHS first, then RHS\n '''\n return compose(rhs, lhs)\n\n def __lshift__(lhs, rhs):\n '''Composition; return a function that will call RHS first, then LHS\n '''\n return compose(lhs, rhs)\n\n def __llshift__(rhs, lhs):\n '''Composition; 
return a function that will call RHS first, then LHS\n '''\n return compose(lhs, rhs)\n\n def __or__(lhs, rhs):\n '''Only implemented for PipeNode.\n '''\n raise NotImplementedError()\n\n def __ror__(rhs, lhs):\n '''Only implemented for PipeNode.\n '''\n raise NotImplementedError()\n\n\n#-------------------------------------------------------------------------------\n# PipeNode and utiltiies\n\n# PipeNode kwargs\nPREDECESSOR_RETURN = 'predecessor_return'\nPREDECESSOR_PN = 'predecessor_pn'\nPN_INPUT = 'pn_input'\nPN_INPUT_SET = {PN_INPUT}\nPIPE_NODE_KWARGS = {PREDECESSOR_RETURN, PREDECESSOR_PN, PN_INPUT}\n\n\nclass PipeNode(FunctionNode):\n '''The multi-call structure of PipeNodes moves a FunctionNode between three states.\n '''\n\n # states\n FACTORY = 'FACTORY'\n EXPRESSION = 'EXPRESSION'\n PROCESS = 'PROCESS'\n\n __slots__ = FunctionNode.__slots__ + (\n '_call_state',\n '_predecessor'\n )\n\n #---------------------------------------------------------------------------\n def __init__(self,\n function,\n *,\n doc_function=None,\n doc_args=None,\n doc_kwargs=None,\n call_state=None,\n predecessor=None\n ):\n super().__init__(function=function,\n doc_function=doc_function,\n doc_args=doc_args,\n doc_kwargs=doc_kwargs\n )\n self._call_state = call_state\n self._predecessor = predecessor\n\n def __str__(self):\n return ''.format(_repr(self))\n\n def partial(*args, **kwargs):\n '''PipeNode calling is dictated by the PipeNode protocol; partial-like behavior in expressions shold be achived with functions decorated with the pipe_node_factory decorator.\n '''\n raise NotImplementedError()\n\n #---------------------------------------------------------------------------\n # pipe node properties\n\n @property\n def call_state(self):\n return self._call_state\n\n @property\n def predecessor(self):\n return self._predecessor\n\n #---------------------------------------------------------------------------\n # composition operators\n\n def __rshift__(lhs, rhs):\n '''Only implemented for FunctionNode.\n '''\n raise NotImplementedError()\n\n def __rrshift__(rhs, lhs):\n '''Only implemented for FunctionNode.\n '''\n raise NotImplementedError()\n\n def __lshift__(lhs, rhs):\n '''Only implemented for FunctionNode.\n '''\n raise NotImplementedError()\n\n def __llshift__(rhs, lhs):\n '''Only implemented for FunctionNode.\n '''\n raise NotImplementedError()\n\n def __or__(lhs, rhs):\n '''Call RHS with LHS as an argument; left is passed as kwarg PREDECESSOR_PN. This calls the RHS immediately and does not return an FN unless prepared as a PipeNode\n '''\n return rhs(**{PREDECESSOR_PN:lhs})\n\n def __ror__(rhs, lhs):\n return rhs(**{PREDECESSOR_PN:lhs})\n\n\n #---------------------------------------------------------------------------\n def __getitem__(self, pn_input):\n '''Call self with the passed PipeNodeInput.\n '''\n pni = pn_input if pn_input else PipeNodeInput()\n return self(**{PN_INPUT:pni})\n\n\n#-------------------------------------------------------------------------------\n# decorator utilities\n\ndef _broadcast(factory_args,\n factory_kwargs,\n processing_args,\n processing_kwargs):\n '''Factor args/kwargs are those given to pipe_node_factory at the expression level. Processing args/kwargs are those given as the initial input, and used to call all processing functions. 
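Any argument that is itself a PipeNode is resolved here by calling it with the processing args and kwargs, while plain values are passed through unchanged. 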
After calling factor args with processing args, the result is used as core_callable args\n '''\n core_callable_args = [arg(*processing_args, **processing_kwargs)\n if isinstance(arg, PipeNode) else arg\n for arg in factory_args]\n\n core_callable_kwargs = {kw: arg(*processing_args, **processing_kwargs)\n if isinstance(arg, PipeNode) else arg\n for kw, arg in factory_kwargs.items()}\n\n return core_callable_args, core_callable_kwargs\n\n\ndef core_logger(core_callable):\n '''A decorator to provide output on the execution of each core callable call. Alternative decorators can be used to partial pipe_node_factory and pipe_node.\n '''\n def wrapped(*args, **kwargs):\n prefix = '|'\n print('|', str(core_callable), file=sys.stderr)\n post = core_callable(*args, **kwargs)\n return post\n return wrapped\n\n#-------------------------------------------------------------------------------\n# decorators\n\n\ndef pipe_kwarg_bind(*key_positions):\n '''Using FN labels as arguments, define the what positional arguments of the wrapped function will receive from the common FN kwargs.\n '''\n def decorator(f):\n def wrapped(*args, **kwargs):\n # extract args from kwargs based on order of key_positions\n target_args = []\n for pos, k in enumerate(key_positions):\n target_args.append(kwargs.pop(k))\n target_kwargs = {k:v for k, v in kwargs.items()\n if k not in PIPE_NODE_KWARGS}\n return f(*target_args, *args, **target_kwargs)\n return PipeNode(wrapped, doc_function=f)\n return decorator\n\n\ndef pipe_node_factory(core_callable,\n core_decorator=core_logger):\n '''This is a decorator.\n\n Upgrade keyword only arguments from a function that needs expression level args.\n '''\n decorated_core_callable = core_decorator(core_callable)\n\n def factory_f(*f_args, **f_kwargs):\n '''This is the function returned by the decorator, used to create the FunctionNode that resides in expressions after being called with arguments.\n\n f_args and f_kwargs are passed to the core_callable; if f_args or f_kwargs are FunctionNode instances, they will be called with the processing args and kwargs (including PN_INPUT), either from process_f or (if innermost) from expression args.\n '''\n def expression_f(*e_args, **e_kwargs):\n '''This is the FunctionNode that resides in expressions prior to `|` operator evalation. When called with `|`, the predecessor is passed is in e_kwargs as PREDECESSOR_PN. 
In this usage the e_args will always be empty.\n\n When in the innermost position, expression_f is never called with `|` but with the PN_INPUT; this sitation is identified and the core_callable is called immediately.\n\n e_args will only be used as an innermost call.\n '''\n # identify innermost condition as when the expression level kwargs consists only of PN_INPUT\n if set(e_kwargs.keys()) == PN_INPUT_SET:\n # as this is innermost, processing args (i.e., PipeNodeInput) are given here at the expression level (as no Pipe operator has been used to call the innermost)\n core_callable_args, core_callable_kwargs = _broadcast(\n factory_args=f_args,\n factory_kwargs=f_kwargs,\n processing_args=e_args, # not p_args\n processing_kwargs=e_kwargs) # not p_kwargs\n\n # pack PipeNode protocol kwargs; when used as innermost, a core_callable can only expect to have a PN_INPUT\n core_callable_kwargs[PN_INPUT] = e_kwargs[PN_INPUT]\n\n return decorated_core_callable(*core_callable_args,\n **core_callable_kwargs)\n\n predecessor_pn = e_kwargs.get(PREDECESSOR_PN)\n\n def process_f(*p_args, **p_kwargs):\n # call the predecssor PipeNode (here a process_f) with these processing args; these are always the args given as the initial input to the innermost function, generally a PipeNodeInput\n predecessor_return = predecessor_pn(*p_args, **p_kwargs)\n\n core_callable_args, core_callable_kwargs = _broadcast(\n factory_args=f_args,\n factory_kwargs=f_kwargs,\n processing_args=p_args,\n processing_kwargs=p_kwargs)\n\n # pack PipeNode protocol kwargs\n core_callable_kwargs[PN_INPUT] = p_kwargs[PN_INPUT]\n core_callable_kwargs[PREDECESSOR_PN] = predecessor_pn\n core_callable_kwargs[PREDECESSOR_RETURN] = predecessor_return\n\n return decorated_core_callable(*core_callable_args,\n **core_callable_kwargs)\n\n # we must return a PipeNode here, as this is the final thing returned and might be passed on to another series func\n return PipeNode(process_f,\n doc_function=core_callable,\n #doc_args=e_args,\n #doc_kwargs=e_kwargs, # TODO: does not work\n call_state=PipeNode.PROCESS,\n predecessor=predecessor_pn)\n return PipeNode(expression_f,\n doc_function=core_callable,\n doc_args=f_args,\n doc_kwargs=f_kwargs,\n call_state=PipeNode.EXPRESSION)\n # return a function node so as to make doc_function available in test\n return PipeNode(factory_f,\n doc_function=core_callable,\n call_state=PipeNode.FACTORY)\n\n\ndef pipe_node(core_callable, core_decorator=core_logger):\n '''Decorate a function that takes no expression-level args.\n '''\n # create a factory and call it once with no args to get an expresion-level function\n return pipe_node_factory(core_callable,\n core_decorator=core_decorator)()\n\n\n#-------------------------------------------------------------------------------\nclass PipeNodeInput:\n '''PipeNode input to support store and recall; subclassable to expose other attributes and parameters.\n '''\n\n def __init__(self):\n self._store = collections.OrderedDict()\n\n def store(self, key, value):\n if key in self._store:\n raise KeyError('cannot store the same key', key)\n self._store[key] = value\n\n def recall(self, key):\n return self._store[key]\n\n def store_items(self):\n return self._store.items()\n\n\n#-------------------------------------------------------------------------------\n# utility PipeNodes\n\n@pipe_node_factory\ndef store(label, **kwargs):\n kwargs[PN_INPUT].store(label, kwargs[PREDECESSOR_RETURN])\n return kwargs[PREDECESSOR_RETURN]\n\n@pipe_node_factory\ndef recall(label, **kwargs):\n return 
kwargs[PN_INPUT].recall(label)\n\n@pipe_node_factory\ndef call(*args, **kwargs):\n    '''Call the PipeNode arguments with the PipeNodeInput as necessary (which happens in the broadcast routine in handling *args)\n    '''\n    return args[-1] # the last result is returned\n\n","sub_path":"function_pipe.py","file_name":"function_pipe.py","file_ext":"py","file_size_in_byte":21523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"232905880","text":"import logging # import the logging module\nimport os.path\nimport time\n# step 1: create a logger\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO) # master switch for the log level\n# step 2: create a handler that writes to the log file\n#rq = time.strftime('%Y%m%d%H%M', time.localtime(time.time()))\nrq = time.strftime('%Y%m%d', time.localtime(time.time()))\nprint (rq)\n#log_path = os.path.dirname(os.getcwd()) + '/Logs/'\nlog_path = ''\nlog_name = log_path + rq + '.log'\nlogfile = log_name\n# mode 'a' appends, 'w' overwrites\nfh = logging.FileHandler(logfile, mode='a')\nfh.setLevel(logging.DEBUG) # log level for the file handler\n\nch = logging.StreamHandler()\nch.setLevel(logging.WARNING) # log level for console output\n\n# step 3: define the handlers' output format\nformatter = logging.Formatter(\"%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s\")\nfh.setFormatter(formatter)\n\n# also send log output to the console:\nch.setFormatter(formatter)\nlogger.addHandler(ch)\n\n# step 4: attach the file handler to the logger\nlogger.addHandler(fh)\n# log messages\nlogger.debug('this is a logger debug message')\nlogger.info('this is a logger info message')\nlogger.warning('this is a logger warning message')\nlogger.error('this is a logger error message')\nlogger.critical('this is a logger critical message')\n\n\n","sub_path":"基本语法/logging_out_logtxt.py","file_name":"logging_out_logtxt.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"637875144","text":"from django import forms\nfrom django.forms import ModelForm\nfrom .models import Topic, ChatMessage, Subscription\nimport bleach\nimport markdown\nfrom django.conf import settings\nfrom django.utils.html import escape\nimport re\nfrom django.core.validators import RegexValidator\n\nclass CreateRoomForm(forms.Form):\n    name = forms.CharField(max_length=20, validators=[RegexValidator(r'^[a-z]+$', \"Only lower case letters without spaces are allowed\")])\n\nclass AdminTopicForm(forms.ModelForm):\n    class Meta:\n        model = Topic\n        fields = ['name', 'title']\n\nclass AdminChatMessageForm(forms.ModelForm):\n    class Meta:\n        model = ChatMessage\n        fields = ['topic', 'user', 'message', 'message_html']\n\n    def clean(self):\n        message = self.cleaned_data['message']\n\n        message_html = escape(message)\n        urlRegex = re.compile(\n            u'(?isu)(\\\\b(?:https?://|www\\\\d{0,3}[.]|[a-z0-9.\\\\-]+[.][a-z]{2,4}/)[^\\\\s()<'\n            u'>\\\\[\\\\]]+[^\\\\s`!()\\\\[\\\\]{};:\\'\\\".,<>?\\xab\\xbb\\u201c\\u201d\\u2018\\u2019])'\n        )\n        \n        processed_urls = list()\n        for obj in urlRegex.finditer(message_html):\n            old_url = obj.group(0)\n            if old_url in processed_urls:\n                continue\n            processed_urls.append(old_url)\n            new_url = old_url\n            if not old_url.startswith(('http://', 'https://')):\n                new_url = 'http://' + new_url\n            new_url = '<a href=\"' + new_url + '\">' + old_url + '</a>'  # anchor markup restored; the tag was stripped from this record during extraction\n            message_html = message_html.replace(old_url, new_url)\n\n        self.cleaned_data['message_html'] = message_html\n\n        return self.cleaned_data\n\nclass AdminSubscriptionForm(forms.ModelForm):\n    class Meta:\n        model = Subscription\n        fields = ['topic', 'user', 
'deleted']","sub_path":"mainapp/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"15585092","text":"from postgreslib.database_connection import DBConnection\nfrom helpers.kafka import KafkaWriter, get_topic\nfrom config.config import JSON_RECORDS\nimport json\nimport time\nfrom json.decoder import JSONDecodeError\nimport os\n\nclass IngestionProducer(KafkaWriter):\n    def __init__(self,bootstrap_servers,datasource,outfile = JSON_RECORDS):\n        super().__init__(bootstrap_servers)\n        self.datasource = datasource\n\n    def get_records(self,table,number):\n        self.db = DBConnection(self.datasource)\n        print(\"running get records {}\".format(number))\n        generator,header = self.db.stream_table(table)\n        def format_record(record):\n            return {str(h.name):str(v) for h,v in zip(header,record)}\n        reason = \"exhausted\"  # default so the finally block never sees an unbound name\n        try:\n            out = []\n            for i , x in enumerate(generator):\n                out.append(format_record(x))\n                if i == number:\n                    reason = \"break\"\n                    break\n        except Exception as e:\n            reason = e\n        finally:\n            resp = input(\"stopped on {} write to file? (y/n) : \".format(reason))\n            write = resp == \"y\"\n            print(\"\\n chosen write {} \".format(write))\n            if write:\n                path = JSON_RECORDS\n                print(\"out len {}\".format(len(out)))\n                with open(path,\"w+\") as f:\n                    f.write(json.dumps(out))\n                print(\"wrote {} records to \\n{}\".format(len(out),path))\n            else:\n                print(\"not writing\")\n\n    def get_records_json(self):\n        path = JSON_RECORDS\n        with open(path,\"r\") as f:\n            data = json.loads(f.read())\n        return data\n\n    def ingest_data(self,table,number = False):\n        print(\"in ingest data method max {}\".format(number))\n        records = self.get_records_json()\n\n        print(\" got {} records to stream\".format(len(records)))\n        topic = get_topic(self.datasource,table)\n\n        print(\"streaming data from table {} to topic {}\".format(table,topic))\n        input(\"press enter to start producing\")\n        print(\"producing...\")\n        for i,record in enumerate(records):\n            self.produce(record, topic)\n            if number:\n                if i == number:\n                    break\n        self.produce_debug(\"completed producing {}\".format(table))\n\ndef cache_records(bootstrap_servers,db,table,number):\n    print(\"main table {}\".format(table))\n    producer = IngestionProducer(bootstrap_servers,db)\n    producer.get_records(table,number)  # was hard-coded to \"sales_orders\", ignoring the table argument\n","sub_path":"producer/producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":2515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"654451386","text":"'''Calculating the digital root of a number '''\n\ntry:\n    Number = int(input('Please enter an integer '))\n\nexcept ValueError:\n    raise SystemExit('Please enter a valid integer ')\n\ndef digital_root(number):\n    sum = 0\n\n    while (number > 9 ):\n\n        while ( number != 0 ) :\n            Remainder = number % 10\n            number = int(number / 10)\n            sum = sum + Remainder\n            print('rem = ',Remainder)\n            print('Quotient = ', number)\n\n        number = sum\n        sum = 0\n        print('The intermediate number is ', number)\n\n    return(number)\n\nprint('The digital root of ', Number, ' is : ', digital_root(Number))\n\n","sub_path":"Basic Programs/Digital Root.py","file_name":"Digital Root.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"571035607","text":"import json\nimport logging\n\nlog = logging.getLogger(__name__)\nsh = logging.StreamHandler()\nlog.addHandler(sh)\n\n\ndef test_users(api_as_admin):\n    new_user_id = 'new@user.com'\n\n    
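# Exercise the /users endpoints end to end: list, fetch self, reject a\n    # payload missing a required field, then create, read, update and delete.\n    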
# List users\n r = api_as_admin.get('/users')\n assert r.ok\n\n # Get self\n r = api_as_admin.get('/users/self')\n assert r.ok\n\n # Try adding new user missing required attr\n payload = json.dumps({\n '_id': 'jane.doe@gmail.com',\n 'lastname': 'Doe',\n 'email': 'jane.doe@gmail.com',\n })\n r = api_as_admin.post('/users', data=payload)\n assert r.status_code == 400\n assert \"'firstname' is a required property\" in r.text\n\n # Add new user\n r = api_as_admin.get('/users/' + new_user_id)\n assert r.status_code == 404\n payload = json.dumps({\n '_id': new_user_id,\n 'firstname': 'New',\n 'lastname': 'User',\n })\n r = api_as_admin.post('/users', data=payload)\n assert r.ok\n r = api_as_admin.get('/users/' + new_user_id)\n assert r.ok\n\n # Modify existing user\n payload = json.dumps({\n 'firstname': 'Realname'\n })\n r = api_as_admin.put('/users/' + new_user_id, data=payload)\n assert r.ok\n\n # Cleanup\n r = api_as_admin.delete('/users/' + new_user_id)\n assert r.ok\n","sub_path":"test/integration_tests/python/test_users.py","file_name":"test_users.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"530461162","text":"def best_handles(handle, handles, k = 2):\n \n handles_score = []\n handle = handle.lower()\n\n for value in handles:\n #print(value)\n value = value.lower()\n score = 0\n used_letters = set()\n for letter in handle:\n\n if not letter in used_letters:\n #print(letter)\n if letter in value:\n score += 1\n else:\n score -= 1\n used_letters.add(letter)\n for letter in value:\n if not letter in used_letters:\n #print(letter)\n if letter in handle:\n score += 1\n else:\n score -= 1\n\n used_letters.add(letter)\n\n handles_score.append((score,value))\n\n handles_score.sort()\n #print(handles_score)\n return handles_score[-k:]\n\nhandle = \"ilovedogs\"\nhandles = ['DogeCoin', 'YangGang2020', 'HodlForLife', 'fakeDonaldDrumpf', 'GodIsLove', 'BernieOrBust']\n\nprint(best_handles(handle, handles))","sub_path":"assignment_2/twitter.py","file_name":"twitter.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"577423901","text":"\n__copyright__ = \"Copyright 2016, http://radical.rutgers.edu\"\n__license__ = \"MIT\"\n\n\nimport os\n\nimport radical.utils as ru\n\nfrom .base import LaunchMethod\n\n\n# ------------------------------------------------------------------------------\n#\nclass SSH(LaunchMethod):\n\n # --------------------------------------------------------------------------\n #\n def __init__(self, name, cfg, session):\n\n LaunchMethod.__init__(self, name, cfg, session)\n\n # Instruct the ExecWorkers to unset this environment variable.\n # Otherwise this will break nested SSH with SHELL spawner, i.e. when\n # both the sub-agent and CUs are started using SSH.\n self.env_removables.extend([\"RP_SPAWNER_HOP\"])\n\n\n # --------------------------------------------------------------------------\n #\n def _configure(self):\n\n command = ru.which('ssh')\n\n if not command:\n raise RuntimeError(\"ssh not found!\")\n\n # Some MPI environments (e.g. SGE) put a link to rsh as \"ssh\" into\n # the path. 
We try to detect that and then use different arguments.\n if os.path.islink(command):\n\n target = os.path.realpath(command)\n\n if os.path.basename(target) == 'rsh':\n self._log.info('Detected that \"ssh\" is a link to \"rsh\".')\n return target\n\n command = '%s -o StrictHostKeyChecking=no -o ControlMaster=auto' % command\n\n self.launch_command = command\n\n\n # --------------------------------------------------------------------------\n #\n def construct_command(self, cu, launch_script_hop):\n\n slots = cu['slots']\n cud = cu['description']\n task_exec = cud['executable']\n task_env = cud.get('environment') or dict()\n task_args = cud.get('arguments') or list()\n task_argstr = self._create_arg_string(task_args)\n\n if task_argstr: task_command = \"%s %s\" % (task_exec, task_argstr)\n else : task_command = task_exec\n\n if not launch_script_hop :\n raise ValueError (\"LMSSH.construct_command needs launch_script_hop!\")\n\n if 'nodes' not in slots:\n raise RuntimeError('insufficient information to launch via %s: %s'\n % (self.name, slots))\n\n if len(slots['nodes']) > 1:\n raise RuntimeError('ssh cannot run multinode units')\n\n host = slots['nodes'][0]['name']\n\n # Pass configured and available environment variables.\n # This is a crude version of env transplanting where we prep the\n # shell command line. We likely won't survive any complicated vars\n # (multiline, quotes, etc)\n env_string = ' '.join(['%s=%s' % (var, os.environ[var])\n for var in self.EXPORT_ENV_VARIABLES\n if var in os.environ])\n env_string += ' '.join(['%s=%s' % (var, task_env[var])\n for var in task_env])\n\n ssh_hop_cmd = \"%s %s %s %s\" % (self.launch_command, host, env_string,\n launch_script_hop)\n\n return task_command, ssh_hop_cmd\n\n\n# ------------------------------------------------------------------------------\n\n","sub_path":"src/radical/pilot/agent/launch_method/ssh.py","file_name":"ssh.py","file_ext":"py","file_size_in_byte":3260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"229290340","text":"from flask import jsonify\n\ndef fibGenerator():\n a, b = 0, 1\n yield 0\n while True:\n a, b = b, a + b\n yield a\n\n\ndef fibJSON(sequence):\n fiblist = []\n fib = fibGenerator()\n for n in range(int(sequence)):\n fiblist.append(str(next(fib)))\n return jsonify( {'sequence': ' '.join(fiblist)} )\n","sub_path":"fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"391596588","text":"from django.test import TestCase\nfrom dojo.models import Product\n\n\nclass TaggitTests(TestCase):\n fixtures = ['dojo_testdata.json']\n\n def setUp(self, *args, **kwargs):\n pass\n\n def test_tags_prefetching(self):\n print('\\nadding tags')\n for product in Product.objects.all():\n product.tags = self.add_tags(product.tags, ['product_' + str(product.id)])\n product.save()\n for eng in product.engagement_set.all():\n eng.tags = self.add_tags(eng.tags, ['eng_' + str(eng.id), 'product_' + str(product.id)])\n eng.save()\n for test in eng.test_set.all():\n test.tags = self.add_tags(test.tags, ['test_' + str(test.id), 'eng_' + str(eng.id), 'product_' + str(product.id)])\n test.save()\n\n # print('testing tags for correctness without prefetching')\n self.check_tags(Product.objects.all())\n\n # print('testing tags for correctness with prefetching')\n self.check_tags(Product.objects.all().prefetch_related('tagged_items__tag'))\n\n # print('testing 
tags for correctness with nested prefetching')\n self.check_tags(Product.objects.all().prefetch_related('tagged_items__tag', 'engagement_set__tagged_items__tag'))\n\n def add_tags(self, curr_tags, extra_tags):\n for tag in extra_tags:\n curr_tags.append(tag)\n return \", \".join(curr_tags)\n\n def check_tags(self, queryset):\n for product in queryset:\n # print(product.name + \": \" + str(product.tags))\n self.assertEqual(len(product.tags), 1)\n self.assertEqual(product.tags[0].name, 'product_' + str(product.id))\n for eng in product.engagement_set.all():\n # print(\" :\" + eng.name + \": \" + str(eng.tags))\n self.assertEqual(len(eng.tags), 2)\n self.assertEqual('product_' + str(product.id) in [tag.name for tag in product.tags], True)\n self.assertEqual('eng_' + str(eng.id) in [tag.name for tag in eng.tags], True)\n self.assertEqual('eng_' + str(eng.id + 1) in [tag.name for tag in eng.tags], False)\n for test in eng.test_set.all():\n # print(\" :\" + eng.name + \": \" + test.test_type.name + \": \" + str(test.tags))\n self.assertEqual(len(test.tags), 3)\n self.assertEqual('product_' + str(product.id) in [tag.name for tag in product.tags], True)\n self.assertEqual('eng_' + str(eng.id) in [tag.name for tag in eng.tags], True)\n self.assertEqual('eng_' + str(eng.id + 1) in [tag.name for tag in eng.tags], False)\n self.assertEqual('test_' + str(test.id) in [tag.name for tag in test.tags], True)\n self.assertEqual('test_' + str(test.id + 1) in [tag.name for tag in test.tags], False)\n","sub_path":"dojo/unittests/test_taggit_tags.py","file_name":"test_taggit_tags.py","file_ext":"py","file_size_in_byte":2900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"379608097","text":"from __future__ import absolute_import, division, print_function\nimport lightgbm as lgb\nimport numpy as np\nimport os\nimport pandas as pd\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.metrics import f1_score\nfrom time import gmtime, strftime\n\nfrom config import logger, config\nfrom feature import get_train_test_features, get_train_test_features2, get_train_test_features3, get_train_test_features4, get_train_test_features0\n\ndef eval_f(y_pred, train_data):\n y_true = train_data.label\n y_pred = y_pred.reshape((config.n_class, -1)).T\n y_pred = np.argmax(y_pred, axis=1)\n score = f1_score(y_true, y_pred, average='weighted')\n return 'weighted-f1-score', score, True\n\n\ndef submit_result(submit, result):\n submit['recommend_mode'] = result\n submit.to_csv(config.trn_bag_submission_file, index=False)\n \ndef train_lgb(trn, y, tst):\n params = {'objective': 'multiclass', \n 'num_class': 12, \n 'seed': 2019, \n 'learning_rate': 0.05, \n 'num_threads': 8, \n 'num_leaves': 44, \n 'max_depth': 11, \n 'lambda_l1': 4.717461111446621, \n 'lambda_l2': 10.550885244591129, \n 'feature_fraction': 0.8235898660709667, \n 'bagging_fraction': 0.9018152298305773, \n 'bagging_freq': 3,\n 'verbose': -1}\n \n cat_cols = ['max_dist_mode', 'min_dist_mode', 'max_price_mode',\n 'min_price_mode', 'max_eta_mode', 'min_eta_mode',\n 'first_mode', 'weekday', 'hour']\n \n cat_cols = ['pid', 'max_dist_mode', 'min_dist_mode', 'max_price_mode',\n 'min_price_mode', 'max_eta_mode', 'min_eta_mode', 'first_mode', 'day_of_week', 'req_hour', 'weather']\n\n\n p = np.zeros_like(y)\n best_iteration = 250\n \n lgb_trn = lgb.Dataset(trn, y, categorical_feature=cat_cols, free_raw_data=False)\n prob_trn_tst = 0\n for seed in [0, 17, 23, 29]:\n params['seed'] = 2019 + seed\n print(params)\n 
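        # Seed bagging (descriptive note): each pass of this loop fits a fresh
        # booster on the same lgb.Dataset with a different 'seed', and the
        # class-probability matrix returned by clf.predict(tst) is summed into
        # prob_trn_tst, then averaged over the 4 seeds after the loop. Note that
        # lgb_trn is also the only valid_set, so the eval_f weighted-F1 printed
        # every 50 rounds is measured on the training data itself, not on a
        # held-out split.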
clf = lgb.train(params, lgb_trn,\n valid_sets=[lgb_trn],\n num_boost_round=best_iteration,\n verbose_eval=50,\n feval=eval_f)\n \n prob_trn_tst += clf.predict(tst)\n \n prob_trn_tst /= 4.0\n\n np.savetxt(config.predict_trn_tst_bag_file, prob_trn_tst, delimiter=',')\n \n trn_tst = np.argmax(prob_trn_tst, axis=1)\n\n return trn_tst\n\nif __name__ == '__main__':\n\n trn, y, tst, sub = get_train_test_features0()\n\n config.set_algo_name('lgb5')\n config.set_feature_name('f0')\n p_tst = train_lgb(trn, y, tst)\n\n submit_result(sub, p_tst)","sub_path":"src/train_predict_bag_v5.py","file_name":"train_predict_bag_v5.py","file_ext":"py","file_size_in_byte":2682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"553624870","text":"from urlparse import urljoin\n\nimport logging\nimport requests\n\n\nlog = logging.getLogger('query')\n\nclass GDCIndexClient(object):\n\n def __init__(self, uri):\n self.uri = uri\n self.metadata = dict()\n\n def get_related_files(self, uuid):\n # type: str -> List[str]\n if uuid in self.metadata.keys():\n return self.metadata[uuid]['related_files']\n return []\n\n def get_annotations(self, uuid):\n # type: str -> List[str]\n if uuid in self.metadata.keys():\n return self.metadata[uuid]['annotations']\n return []\n\n def get_md5sum(self, uuid):\n # type: str -> str\n if uuid in self.metadata.keys():\n return self.metadata[uuid]['md5sum']\n\n def get_filesize(self, uuid):\n # type: str -> long\n if uuid in self.metadata.keys():\n return long(self.metadata[uuid]['file_size'])\n\n def get_access(self, uuid):\n # type: str -> long\n if uuid in self.metadata.keys():\n return self.metadata[uuid]['access']\n\n def _get_metadata(self, uuids):\n # type: List[str] -> Dict[str]str\n \"\"\" Capture the metadata of all the UUIDs while making\n as little open connections as possible.\n\n self.metadata = {\n str file_id: {\n str access\n str file_size\n str md5sum\n List[str] annotations\n List[str] related files\n }\n }\n \"\"\"\n\n metadata_query = {\n 'fields': 'file_id,file_size,md5sum,annotations.annotation_id,' \\\n 'metadata_files.file_id,index_files.file_id,access',\n 'filters': '{\"op\":\"and\",\"content\":['\n '{\"op\":\"in\",\"content\":{'\n '\"field\":\"files.file_id\",\"value\":'\n '[\"' + '\",\"'.join(uuids) + '\"]}}]}',\n 'from': '0',\n 'size': str(len(uuids)), # one big request\n }\n\n active_meta_url = urljoin(self.uri, 'v0/files')\n legacy_meta_url = urljoin(self.uri, 'v0/legacy/files')\n\n active_json_resp = dict()\n legacy_json_resp = dict()\n\n # using a POST request lets us avoid the MAX URL character length limit\n r_active = requests.post(active_meta_url, json=metadata_query, verify=False)\n r_legacy = requests.post(legacy_meta_url, json=metadata_query, verify=False)\n\n if r_active.status_code == requests.codes.ok:\n active_json_resp = r_active.json()\n\n if r_legacy.status_code == requests.codes.ok:\n legacy_json_resp = r_legacy.json()\n\n r_active.close()\n r_legacy.close()\n\n if not active_json_resp.get('data') and not legacy_json_resp.get('data'):\n log.debug('Unable to retrieve file metadata information. 
'\n 'continuing downloading as if they were large files')\n return self.metadata\n\n active_hits = active_json_resp['data']['hits']\n legacy_hits = legacy_json_resp['data']['hits']\n\n for h in active_hits + legacy_hits:\n related_returns = h.get('index_files', []) + h.get('metadata_files', [])\n related_files = [ r['file_id'] for r in related_returns ]\n\n annotations = [ a['annotation_id'] for a in h.get('annotations', []) ]\n\n # set the metadata as a class data member so that it can be\n # references as much as needed without needing to calculate\n # everything over again\n if h['id'] not in self.metadata.keys():\n # don't want to overwrite\n self.metadata[h['id']] = {\n 'access': h['access'],\n 'file_size': h['file_size'],\n 'md5sum': h['md5sum'],\n 'annotations': annotations,\n 'related_files': related_files,\n }\n\n return self.metadata\n\n\n def separate_small_files(self,\n ids, # type: Set[str]\n chunk_size, # type: int\n related_files=False, # type: bool\n annotations=False, # type: bool\n ):\n # type: (...) -> (List[str], List[List[str]])\n \"\"\" Separate big and small files\n\n Separate the small files from the larger files in\n order to combine them into single grouped downloads. This will reduce\n the number of open connections needed to be made for many small files.\n\n On top of that, separate the small files by open and controlled access\n so that if a controlled grouping failed, you can handle it as the same\n edge case.\n \"\"\"\n\n bigs = []\n smalls_open = []\n smalls_control = []\n potential_smalls = set()\n\n # go through all the UUIDs and pick out the ones with\n # relate and annotation files so they can be handled by parcel\n log.debug('Grouping ids by size')\n\n self._get_metadata(ids)\n for uuid in ids:\n if uuid not in self.metadata.keys():\n bigs.append(uuid)\n continue\n\n rf = self.get_related_files(uuid)\n af = self.get_annotations(uuid)\n\n # check for related files\n if related_files and rf and uuid not in bigs:\n bigs.append(uuid)\n\n # check for annotation files\n if annotations and af and uuid not in bigs:\n bigs.append(uuid)\n\n # if uuid has no related or annotation files\n # then proceed to the small file sorting with them\n if not af and not rf:\n potential_smalls |= set([uuid])\n\n # the following line is to trigger the first if statement\n # to start the process off properly\n bundle_open_size = chunk_size + 1\n bundle_control_size = chunk_size + 1\n\n i_open = -1\n i_control = -1\n\n for uuid in potential_smalls:\n # grouping of file exceeds chunk_size, create a new grouping\n if bundle_open_size > chunk_size:\n smalls_open.append([])\n i_open += 1\n bundle_open_size = 0\n\n if bundle_control_size > chunk_size:\n smalls_control.append([])\n i_control += 1\n bundle_control_size = 0\n\n # individual file is more than chunk_size, big file download\n if self.get_filesize(uuid) > chunk_size:\n bigs.append(uuid)\n\n # file size is less than chunk_size then group and tarfile it\n else:\n if self.get_access(uuid) == 'open':\n smalls_open[i_open].append(uuid)\n bundle_open_size += self.get_filesize(uuid)\n\n elif self.get_access(uuid) == 'controlled':\n smalls_control[i_control].append(uuid)\n bundle_control_size += self.get_filesize(uuid)\n\n # they are still small files to be downloaded in a group\n smalls = smalls_open + smalls_control\n\n # for logging/reporting purposes\n total_count = len(bigs) + sum([ len(s) for s in smalls ])\n if len(potential_smalls) > total_count:\n log.warning('There are less files to download than originally given')\n 
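            # Descriptive note: this branch fires when a uuid from
            # potential_smalls ended up in neither bigs nor a smalls bucket
            # (e.g. its 'access' field is neither 'open' nor 'controlled'),
            # so it silently dropped out of the download plan.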
log.warning('Number of files originally given: {0}'\\\n .format(len(potential_smalls)))\n\n log.debug('{0} total number of files to download'.format(total_count))\n log.debug('{0} groupings of files'.format(len(smalls)))\n\n smalls = [ s for s in smalls if s != [] ]\n\n return bigs, smalls\n","sub_path":"gdc_client/query/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":7730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"133833783","text":"# python 3.7.9\n\nfrom datetime import datetime\n\n\n\n\n# \ndef prime(number):\n\n number += 1\n\n PrimeNumbers = []\n\n for prime in range(2, number):\n \n count = 2\n\n while True:\n\n if prime % count == 0:\n break\n\n count += 1\n\n if count == 1:\n PrimeNumbers.append(prime)\n break\n\n\n return PrimeNumbers\n\n\nCountNumber = 9\n\n\nfor result in range(CountNumber):\n\n start = datetime.now()\n prime(32768) # 32.768\n\n print(\n \"Test Count #{} | Took = {} second\".format\n (result + 1, (datetime.now()-start).total_seconds())\n )\n","sub_path":"python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"436673928","text":"\"\"\"UCMS URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom . 
import views\napp_name='assistant'\nurlpatterns = [\n path('home/', views.home, name='home'),\n path('tbl/', views.tbl, name='tbl'),\n path('depart_tbl/', views.depart_tbl, name='depart_tbl'),\n path('subjects/', views.subjects, name='subjects'),\n path('subjects//', views.students, name='students'),\n path('subjects/details//', views.details, name='details'),\n path('results//', views.dgree, name='dgree'),\n path('addclm//', views.AddClm, name='addclm'),\n path('absence//', views.Absences, name='absence'),\n path('addabsence//', views.addAbsences, name='addabsence'),\n]\n","sub_path":"assiatant/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"34155975","text":"from __future__ import with_statement\nimport sys\ntry:\n import queue\nexcept ImportError:\n import Queue as queue\nimport random\nimport Pyro4.core\nfrom workitem import Workitem\nimport os\nimport socket\nfrom time import sleep, time\nfrom functools import partial\n\nPyro4.config.SERIALIZER = 'pickle'\nPyro4.config.SERIALIZERS_ACCEPTED.add('pickle')\n\nCLIENT_NAME = 'C%d@%s:%s' % (os.getpid(), socket.gethostname(), str(time()))\nWAIT_DISPATCHER_TIMEOUT = 4\n\ndef repeater(function, args=None, kwargs=None, exceptions=Exception,\n timeout=0):\n args = args or ()\n kwargs = kwargs or {}\n\n while True:\n try:\n result = function(*args, **kwargs)\n return result\n except exceptions as e:\n print(\"Error:\", e)\n\n if timeout > 0:\n sleep(timeout)\n\ndispatcherRepeater = partial(\n repeater,\n exceptions=(Pyro4.errors.CommunicationError),\n timeout=WAIT_DISPATCHER_TIMEOUT)\n\ndef readNumbers(path):\n print('\\nReading numbers')\n with open(path) as f:\n lines = f.read().splitlines()\n numbers = [int(e) for e in lines]\n return numbers\n\n\ndef placeWork(dispatcher, numbers):\n print('\\nPlacing work items into dispatcher queue')\n for i in range(len(numbers)):\n item = Workitem(i + 1, CLIENT_NAME, numbers[i])\n dispatcherRepeater(dispatcher.putWork, [item])\n\n\ndef collectResults(dispatcher, item_count):\n print('\\nGetting results from dispatcher queue')\n results = {}\n while len(results) < item_count:\n try:\n item = dispatcherRepeater(dispatcher.getResult, [CLIENT_NAME])\n print('Got result: %s (from %s)' % (item, item.processedBy))\n results[item.data] = item.result\n except queue.Empty:\n result_queue_size = dispatcherRepeater(\n dispatcher.resultQueueSize, [CLIENT_NAME])\n print('Not all results available yet (got %d out of %d). 
Work queue size: %d' %\n (len(results), item_count, item_count - result_queue_size))\n\n dispatcherRepeater(dispatcher.clientExit, [CLIENT_NAME])\n return results\n\n\ndef writeResults(results, path):\n print('\\nWriting results')\n with open(path, 'w') as f:\n for (number, factorials) in results.items():\n f.write(str(number) + ': ' + ', '.join(map(str, factorials)) + '\\n')\n\n\ndef main():\n disp_address = str(sys.argv[1])\n numbers_path = str(sys.argv[2])\n results_path = str(sys.argv[3])\n\n numbers = readNumbers(numbers_path)\n\n with Pyro4.core.Proxy('PYRO:dispatcher@' + disp_address) as dispatcher:\n dispatcherRepeater(dispatcher.clientRegister, [CLIENT_NAME])\n placeWork(dispatcher, numbers)\n results = collectResults(dispatcher, len(numbers))\n\n writeResults(results, results_path)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"05/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"405671442","text":"import base64\nfrom django.http import JsonResponse\nimport requests\nimport json\nfrom bs4 import BeautifulSoup\nfrom time import sleep\n\n\ndef status(status,payload):\n return JsonResponse({\"status\":status,\"payload\":payload})\n\n\ndef process_img(img):\n img = base64.b64decode(img)\n file = {'upload': img}\n r = requests.post('https://api.platerecognizer.com/v1/plate-reader/',\n files=file,\n headers={\"authorization\":\"Token fa43527529a25e1c6b2cd1670c6ccb74d6e1104e\"})\n\n return_dict = r.json()\n print(return_dict)\n if not return_dict['results']:\n return False\n license = return_dict['results'][0]['plate']\n state = return_dict['results'][0]['region']['code']\n if not state or not license:\n return False\n try:\n state = state[3:6]\n state = state.upper()\n except ValueError:\n # Not in US\n # All US State plates are of the form:\n # us-XX where XX is the lowercase state abbreviation\n sucess = False\n return False\n\n return {\"state\":state,\"license\":license}\n\n\ndef process(license, state):\n lister = []\n soup = None\n counter = 0\n url = f\"https://api.carsxe.com/platedecoder?key=zec39wzfq_yagecmcew_twtfw6cmx&plate={license}&state={state}&format=json\"\n # headers = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0\",\n # \"Accept-Encoding\": \"gzip, deflate\",\n # \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,/;q=0.8\", \"DNT\": \"1\",\n # \"Connection\": \"close\", \"Upgrade-Insecure-Requests\": \"1\"}\n r = requests.get(url)\n if r.status_code == 200:\n # if r.status_code == 200:\n # soup = BeautifulSoup(r.text,'html.parser')\n # print(soup.select(\"body tr b\"))\n # if soup.select(\"body tr b\"):\n # break\n # sleep(20)\n # counter += 20\n # for item in soup.select(\"body tr b\"):\n # item = str(item)\n # item = item.strip(\"\")\n # item = item.strip(\"\")\n # lister.append(item)\n json = r.json()\n if json['success']:\n # print({\"make\":json['CarMake'],\"model\":json[\"CarModel\"].split(\" \")[0],\"year\":int(json[\"RegistrationYear\"])})\n make = json['CarMake']\n model = json[\"CarModel\"].split(\" \")[0]\n if make.lower() == \"mazda\":\n model = model[len(model)-1]\n return {\"make\":make,\"model\":model,\"year\":int(json[\"RegistrationYear\"])}\n else:\n return False\n\n\ndef upload(img):\n url = f\"http://api.carsxe.com/whatcaristhat?key=0hsbdq9rl_o6thqm9v5_bv25wj6aa\"\n r = requests.post(url,headers = {'Content-type': 
'text/plain'},data=\"https://upload.wikimedia.org/wikipedia/commons/thumb/6/6d/2006-2009_Honda_Civic_VTi_sedan_%282018-10-19%29_01.jpg/800px-2006-2009_Honda_Civic_VTi_sedan_%282018-10-19%29_01.jpg\")\n if r.status_code == 200:\n print(r.json())\n else:\n print(r.status_code)\n","sub_path":"carz/boiler/api_helper.py","file_name":"api_helper.py","file_ext":"py","file_size_in_byte":3050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"160247440","text":"import gym\nimport time\nimport random\nimport numpy as np\nenv = gym.make('FrozenLake-v0')\nenv.reset()\n\nnumberOfEpisodes = 1000\n#think of it as the levels of a game (progressively harder)\nnumberOfStepsPerEpisode = 10\n# Think of '10' as the progress it is allowed to reach. Number of steps per episode are the actions it can take.\n# it can't exceed 10 steps. If it doesnt reach the goal, the environment resets.\n\nfor episode in range(numberOfEpisodes):\n print(\"resetting..\")\n state = env.reset()\n time.sleep(3)\n # slows the environment down for 3 seconds; think of it as a level reset\n print(\"resetted(not sure if thats an english word lol)\")\n print(\"starting environment\")\n for steps in range(numberOfStepsPerEpisode):\n env.render()\n # render prints the environment (when you play)\n time.sleep(1)\n # slows down the action the agent is taking for 1 second.\n action = env.action_space.sample() #raise hand\n # a code to make a action into an environment - for now it is random and not implemented into anything\n new_state, reward, done, info = env.step(action)\n # the 4 variables are stated above. New state - new action. Reward - based on what the new state is; if it is\n # point worthy, this will be documented in code. Done - true/false. Info: generic of what occurs.\n if done==True:\n break\n","sub_path":"FrozenLake.py","file_name":"FrozenLake.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"372369657","text":"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\n\nimport av\nimport torch as th\nfrom torch.utils.data import Dataset\nimport pandas as pd\nimport os\nimport numpy as np\nimport random\nimport ffmpeg\nimport time\nimport re\nimport csv\nimport pickle\nfrom joblib import Parallel, delayed\nimport subprocess\n\ndef valid_video(root_dir, vid_idx, video_id):\n vid_path = os.path.join(root_dir, video_id + '.mp4')\n try:\n probe = ffmpeg.probe(vid_path)\n video_stream = next((\n stream for stream in probe['streams'] if stream['codec_type'] == 'video'), \n None\n )\n if video_stream and float(video_stream['duration']) > 4.1:\n print(f\"{vid_idx}: True\", flush=True)\n return True\n else:\n print(f\"{vid_idx}: False (duration short)\", flush=True)\n return False\n except:\n print(f\"{vid_idx}: False\", flush=True)\n return False\n\n\ndef filter_videos(root_dir, vid_paths):\n all_indices = Parallel(n_jobs=30)(delayed(valid_video)(root_dir, vid_idx, vid_paths[vid_idx][0]) for vid_idx in range(0, len(vid_paths)))\n valid_indices = [i for i, val in enumerate(all_indices) if val]\n return valid_indices\n\n\nclass HT100M_Dataset(Dataset):\n \"\"\"HowTo100M Video-Text loader.\"\"\"\n\n def __init__(\n self,\n csv_file='data/howto.csv',\n video_root='/datasets01/HowTo100M/022520/videos',\n caption_root='/private/home/mandelapatrick/data/howto100m_csv',\n token_to_word_path='data/dict.npy',\n min_time=4.0,\n fps=16,\n num_frames=16,\n size=224,\n crop_only=False,\n center_crop=True,\n benchmark=False,\n max_words=20,\n num_candidates=1,\n random_left_right_flip=False,\n num_clips=2\n ):\n \"\"\"\n Args:\n \"\"\"\n print(\"Loading HT100M dataset\")\n assert isinstance(size, int)\n\n # Get csv file\n csv_file = os.path.join(os.path.dirname(__file__), csv_file)\n if not os.path.exists(csv_file):\n i = 0\n file_list = []\n for file_name in os.listdir(video_root):\n if i % 1000 == 0:\n print(i, file_name)\n file_list.append(file_name)\n i += 1\n \n with open(csv_file, 'w', newline='') as outcsv:\n fieldnames = ['video_id']\n writer = csv.DictWriter(outcsv, fieldnames=fieldnames)\n writer.writeheader()\n for id, vid_id in enumerate(file_list):\n if i % 1000 == 0:\n print(i, flush=True)\n writer.writerow({'video_id': vid_id.split('.')[0]})\n \n # Get video paths\n with open(csv_file, newline='') as f:\n reader = csv.reader(f)\n self._path_to_videos = list(reader)\n\n # Get valid indices\n vid_valid_file = os.path.join(os.path.dirname(__file__), 'data/howto_valid_filtered_audio.pkl')\n if not os.path.exists(vid_valid_file):\n self.valid_indices = filter_videos(video_root, self._path_to_videos)\n with open(vid_valid_file, 'wb') as handle:\n pickle.dump(\n self.valid_indices, \n handle, \n protocol=pickle.HIGHEST_PROTOCOL\n )\n else:\n with open(vid_valid_file, 'rb') as handle:\n self.valid_indices = pickle.load(handle)\n\n self.video_root = video_root\n self.caption_root = caption_root\n self.min_time = min_time\n self.size = size\n self.num_frames = num_frames\n self.fps = fps\n self.num_sec = self.num_frames / float(self.fps)\n self.crop_only = crop_only\n self.center_crop = center_crop\n self.benchmark = benchmark\n self.max_words = max_words\n token_to_word = np.load(os.path.join(os.path.dirname(__file__), token_to_word_path))\n self.word_to_token = {}\n for i, t in enumerate(token_to_word):\n self.word_to_token[t] = i + 1\n self.num_candidates = num_candidates\n self.random_flip = random_left_right_flip\n self.num_clips = num_clips\n self._num_retries = 10\n self.num_reverse_clips = 2\n\n def __len__(self):\n return 
len(self.valid_indices)\n\n def _get_video_ffmpeg(self, video_path, start, end):\n start_seek = random.randint(start, int(max(start, end - self.num_sec)))\n cmd = (\n ffmpeg\n .input(video_path, ss=start_seek, t=self.num_sec + 0.1)\n .filter('fps', fps=self.fps)\n )\n if self.center_crop:\n aw, ah = 0.5, 0.5\n else:\n aw, ah = random.uniform(0, 1), random.uniform(0, 1)\n if self.crop_only:\n cmd = (\n cmd.crop('(iw - {})*{}'.format(self.size, aw),\n '(ih - {})*{}'.format(self.size, ah),\n str(self.size), str(self.size))\n )\n else:\n cmd = (\n cmd.crop('(iw - min(iw,ih))*{}'.format(aw),\n '(ih - min(iw,ih))*{}'.format(ah),\n 'min(iw,ih)',\n 'min(iw,ih)')\n .filter('scale', self.size, self.size)\n )\n if self.random_flip and random.uniform(0, 1) > 0.5:\n cmd = cmd.hflip()\n out, _ = (\n cmd.output('pipe:', format='rawvideo', pix_fmt='rgb24')\n .run(capture_stdout=True, quiet=True)\n )\n video = np.frombuffer(out, np.uint8).reshape([-1, self.size, self.size, 3])\n video = th.from_numpy(video)\n video = video.permute(3, 0, 1, 2)\n if video.shape[1] < self.num_frames:\n zeros = th.zeros((3, self.num_frames - video.shape[1], self.size, self.size), dtype=th.uint8)\n video = th.cat((video, zeros), axis=1)\n # return video[:, :self.num_frames]\n video = video.float()\n video = video / 255.0\n return video[:, :self.num_frames], start_seek\n\n def _split_text(self, sentence):\n w = re.findall(r\"[\\w']+\", str(sentence))\n return w\n\n def _words_to_token(self, words):\n words = [self.word_to_token[word] for word in words if word in self.word_to_token]\n if words:\n we = self._zero_pad_tensor_token(th.LongTensor(words), self.max_words)\n return we\n else:\n return th.zeros(self.max_words, dtype=th.long)\n\n def _zero_pad_tensor_token(self, tensor, size):\n if len(tensor) >= size:\n return tensor[:size]\n else:\n zero = th.zeros(size - len(tensor)).long()\n return th.cat((tensor, zero), dim=0)\n\n def words_to_ids(self, x):\n return self._words_to_token(self._split_text(x))\n\n def _find_nearest_candidates(self, caption, ind):\n start, end = ind, ind\n diff = caption['end'][end] - caption['start'][start]\n n_candidate = 1\n while n_candidate < self.num_candidates:\n if start == 0:\n return 0\n elif end == len(caption) - 1:\n return start - (self.num_candidates - n_candidate)\n elif caption['end'][end] - caption['start'][start - 1] < caption['end'][end + 1] - caption['start'][start]:\n start -= 1\n else:\n end += 1\n n_candidate += 1\n return start\n\n def _get_text(self, caption):\n cap = pd.read_csv(caption)\n ind = random.randint(0, len(cap) - 1)\n if self.num_candidates == 1:\n words = self.words_to_ids(cap['text'].values[ind])\n else:\n words = th.zeros(self.num_candidates, self.max_words, dtype=th.long)\n cap_start = self._find_nearest_candidates(cap, ind)\n for i in range(self.num_candidates):\n words[i] = self.words_to_ids(cap['text'].values[max(0, min(len(cap['text']) - 1, cap_start + i))])\n start, end = cap['start'].values[ind], cap['end'].values[ind]\n #TODO: May need to be improved for edge cases. 
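        # Descriptive note on the padding below: when the caption span is
        # shorter than min_time seconds, it is widened symmetrically around its
        # midpoint (clamped at t=0) so the decoded clip always covers at least
        # min_time of video.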
\n if end - start < self.min_time:\n diff = self.min_time - end + start\n start = max(0, start - diff / 2)\n end = start + self.min_time \n return words, int(start), int(end) \n\n def __getitem__(self, idx):\n \n for i_try in range(self._num_retries):\n \n # Get video id and path\n index_capped = self.valid_indices[idx]\n video_id = self._path_to_videos[index_capped][0]\n video_path = os.path.join(self.video_root, video_id + '.mp4')\n video_list = []\n text_list = []\n audio_list = []\n \n while len(video_list) < self.num_clips:\n # Get caption\n text, start, end = self._get_text(os.path.join(self.caption_root, video_id + '.csv'))\n\n # Decode video\n video = None\n try:\n video, start_sec = self._get_video_ffmpeg(video_path, start, end)\n except Exception as e:\n print(f\"Failed to load video from {video_path} with error {e}\")\n if video is None:\n # let's try another video\n if i_try > self._num_retries // 2:\n idx = random.randint(0, len(self.valid_indices) - 1)\n break\n \n video_list.append(video)\n text_list.append(text)\n\n if len(video_list) == self.num_clips:\n break\n\n if i_try == self._num_retries - 1:\n raise RuntimeError(\n \"Failed to fetch video after {} retries.\".format(\n self._num_retries\n )\n )\n\n # Add reversal option\n for i in range(self.num_clips):\n # Clone frames and spec\n frames = video_list[i].clone()\n text = text_list[i].clone()\n\n for r_ix in range(self.num_reverse_clips):\n # Reverse audio and video\n if r_ix % 2 == 1:\n frames = frames.flip(1) # C T H W \n text = text.flip(0) # T\n \n video_list.append(frames)\n text_list.append(text)\n \n if self.num_reverse_clips == 2:\n video_list = [video_list[i] for i in [0, 2, 1, 3]]\n text_list = [text_list[i] for i in [0, 2, 1, 3]]\n\n if self.num_clips > 1:\n video = th.cat(video_list, dim=0)\n text = th.cat(text_list, dim=0)\n else:\n video = video_list[0]\n text = text_list[0]\n\n label = 0\n vid_idx = index_capped\t\n\n return video, text, label, vid_idx, index_capped","sub_path":"datasets/HT100MDataset.py","file_name":"HT100MDataset.py","file_ext":"py","file_size_in_byte":10905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"352303692","text":"# 챕터5 - if, for, while, continue와 break, 한 줄 for\r\n\r\n# if\r\nweather = input(\"오늘 날씨는 어때요? \") # 사용자 입력 받은 후 str 으로 반환\r\nif weather == \"비\" or weather == \"눈\":\r\n print(\"우산 챙기라우\")\r\nelif weather == \"미세먼지\":\r\n print(\"마스크 챙기라우\")\r\nelse:\r\n print(\"걍 ㄱㄱ\")\r\n\r\n\r\n# for\r\nfor waiting_no in [0, 1, 2, 3, 4]: # range(5) : 0~4, range(1, 6) : 1~5\r\n print(\"대기번호 : {0}\" .format(waiting_no))\r\n\r\nabsent = [2, 5]\r\nno_book = [7]\r\nfor student in range(1, 11):\r\n if student in absent:\r\n continue\r\n elif student in no_book:\r\n print(\"오늘 수업 여기까지. 
{0}는 교무실로 따라와\" .format(no_book))\r\n break\r\n print(\"{0}, 책 읽어보소\" .format(student))\r\n\r\n# 1, 2, 3, 4, 5 --> 101, 102, 103, 104, 105\r\nstudents = [1, 2, 3, 4, 5]\r\nstudents = [i+100 for i in students]\r\nprint(students)\r\n\r\n\r\n# 퀴즈 5\r\n# 50명의 승객과 매칭 기회가 있을 때, 총 탑승 승객 수를 구하는 프로그램\r\n\r\n# 조건1 : 승객별 운행 소요 시간은 5분 ~ 50분 사이의 난수\r\n# 조건2 : 소요 시간 5분 ~ 15분 사이의 승객만 매칭해야 함\r\n\r\nfrom random import *\r\n\r\ncnt = 0\r\nfor i in range(1, 51):\r\n time = randint(5, 50)\r\n if( 5 <= time <= 15):\r\n match = \"O\"\r\n cnt += 1\r\n else:\r\n match = \" \"\r\n print(\"[{0}] {1}번째 손님 (소요시간 : {2}분)\" .format(match, i, time))\r\n\r\nprint(\"총 탑승 승객 : {0} 분\" .format(cnt))\r\n","sub_path":"chapter5.py","file_name":"chapter5.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"612916476","text":"#!/usr/bin/python3\n\"\"\"List 10 commits (newest to oldest) of the repo \"rails\" by the user \"rails\"\n\"\"\"\n\nif __name__ == \"__main__\":\n import requests\n import sys\n\n repo = sys.argv[1]\n owner = sys.argv[2]\n\n url = 'https://api.github.com/repos/{}/{}/commits'.format(owner, repo)\n req = requests.get(url)\n data = req.json()\n i = 0\n while len(data) > i and i < 10:\n print(data[i].get('sha'), end=': ')\n print(data[i].get('commit').get('author').get('name'))\n i += 1\n","sub_path":"0x11-python-network_1/100-github_commits.py","file_name":"100-github_commits.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"10475513","text":"# -*- coding: utf-8 -*-\nfrom django.test import TestCase\nfrom django.core.urlresolvers import reverse\nfrom django.contrib.auth.models import User\nfrom django.contrib.sites.models import Site\nfrom django.contrib.redirects.models import Redirect\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core import management\nfrom django.conf import settings\nfrom livesettings import config_value \nfrom cc.pages.models import models\nfrom cc.shop.models.products import Product\nfrom cc.core.models import VisibleContentType\nfrom cc.conf import coresettings\nfrom cc.pages.models import Page\n#\n# the core tests\n#\nclass CoreTest(TestCase):\n fixtures = ['core.json', 'pages.json']\n \n def test_default_users(self):\n \"Test the default users are being installed correctly\"\n users = User.objects.all()\n self.assertEquals(users.count(), 3)\n \n \n \n def test_redirects(self):\n \"ensure that redirects are working\"\n if 'cc.pages' not in settings.INSTALLED_APPS:\n return True\n page = Page.objects.get(pk=1)\n old_url = page.get_absolute_url()\n response = self.client.get(old_url)\n self.failUnlessEqual(response.status_code, 200)\n # move the page with a new slug\n page.slug=\"and-form-my-next-trick-i-shall-move-somewhere\"\n page.save()\n new_url = page.get_absolute_url()\n response = self.client.get(old_url)\n self.failUnlessEqual(response.status_code, 301)\n response = self.client.get(new_url)\n self.failUnlessEqual(response.status_code, 200)\n # make sure thre was a redirect planted into the database\n self.failUnlessEqual(Redirect.objects.count(), 1)\n \n \n def test_visbility(self):\n \"test the visibility manager on the models\"\n if 'cc.pages' not in settings.INSTALLED_APPS:\n return True\n all_pages = Page.objects.all()\n visible_pages = Page.objects.visible()\n # all should be visible at this stage\n self.failUnlessEqual(all_pages.count(), 
visible_pages.count())\n # now make one invisible\n invisible_page = all_pages[1]\n invisible_page.visible = False\n invisible_page.save()\n self.failIfEqual(all_pages.count(), visible_pages.count())\n # make the all invisible\n for obj in visible_pages:\n obj.visible = False\n obj.save()\n # get them again\n visible_pages = Page.objects.visible()\n self.failUnlessEqual(visible_pages.count(),0)\n \n\n ","sub_path":"cc/core/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"119167486","text":"\"\"\"\ncomplex-valued convolutional filter W = A+iB\ncomplex vector h = x + iy\n\nW ∗h = (A ∗ x − B ∗ y) + i(B ∗ x+ A ∗ y)\n\"\"\"\nimport matplotlib.pyplot as plt\nimport librosa\nimport scipy.signal\n\nimport torch\nimport torch.nn as nn\n\nfrom model.complex_nn import CConv2d, CConvTranspose2d, CBatchNorm2d\nfrom model.ISTFT import ISTFT\nfrom data.STFT import STFT\n# from data.conv_stft import *\n\nfrom utils.utils import display_feature\n\n\nclass EncoderBlock(nn.Module):\n\n def __init__(self, in_channels=1, out_channels=45, kernel_size=(7, 5), stride=(2, 2),\n padding=(0, 0), bias=False):\n super(EncoderBlock, self).__init__()\n\n self.cConv = CConv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride,\n padding=padding, bias=bias)\n self.cBN = CBatchNorm2d(out_channels)\n self.leaky_relu = nn.LeakyReLU(inplace=True, negative_slope=0.1)\n\n def forward(self, x):\n cConv = self.cConv(x)\n cBN = self.cBN(cConv)\n output = self.leaky_relu(cBN)\n\n return output\n\n\nclass DecoderBlock(nn.Module):\n\n def __init__(self, in_channels, out_channels, kernel_size, stride, padding=(0, 0), output_padding=(0, 0),\n last=False, bias=False):\n\n super(DecoderBlock, self).__init__()\n self.last = last\n\n self.Trans_cConv = CConvTranspose2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride,\n padding=padding, output_padding=output_padding,bias=bias)\n self.cBN = CBatchNorm2d(num_features=out_channels)\n self.leaky_relu = nn.LeakyReLU(inplace=True, negative_slope=0.1)\n\n def forward(self, x):\n\n Trans_cConv = self.Trans_cConv(x)\n # Paper) last_decoder_layer 에서는 BN과 Activation 을 사용하지 않음\n if self.last:\n output = Trans_cConv\n # mask_real = Trans_cConv[..., 0]\n # mask_imag = Trans_cConv[..., 1]\n #\n # mask_mag = (mask_real ** 2 + mask_imag ** 2)**0.5\n # real_phase = mask_real / (mask_mag + 1e-8)\n # imag_phase = mask_imag / (mask_mag + 1e-8)\n #\n # mask_phase = torch.atan2(imag_phase, real_phase)\n # mask_mag = torch.tanh(mask_mag)\n #\n # return mask_mag, mask_phase\n\n # display_feature(Trans_cConv[..., 0], \"Decoder_8_real\")\n # display_feature(Trans_cConv[..., 1], \"Decoder_8_imag\")\n # mask_phase = Trans_cConv / (torch.abs(Trans_cConv) + 1e-8)\n # print(\"mask_ph: \", mask_phase[0])\n # mask_mag = torch.tanh(torch.abs(Trans_cConv))\n # print(\"mask_mag: \", mask_mag[0])\n # output = mask_phase * mask_mag # [batch, channel, 1539, 214, 2 ]\n # real = output[..., 0]\n # imag = output[..., 1]\n # mag = torch.abs(torch.sqrt(real ** 2 + imag ** 2))\n # phase = torch.atan2(imag, real)\n\n # real_db = librosa.amplitude_to_db(real.cpu().detach().numpy())\n # imag_db = librosa.amplitude_to_db(imag.cpu().detach().numpy())\n # phase_db = librosa.amplitude_to_db(phase.cpu().detach().numpy())\n # mag_db = librosa.amplitude_to_db(mag.cpu().detach().numpy())\n\n #display_spectrogram(real_db, \"mask_Real\")\n #display_spectrogram(imag_db, \"mask_Imag\")\n 
#display_spectrogram(mag_db, \"mask_mag\")\n #display_spectrogram(phase_db, \"mask_phase\")\n\n else:\n normed = self.cBN(Trans_cConv)\n output = self.leaky_relu(normed)\n\n return output\n\nclass DCUNet16(nn.Module):\n\n def __init__(self, args, n_fft=64, hop_length=16):\n super(DCUNet16, self).__init__()\n\n # ISTFT hyperparam\n self.args = args\n self.n_fft = n_fft\n self.hop_length = hop_length\n # self.stft = STFT(fft_length=n_fft, hop_length=hop_length, normalized=True)\n self.istft = ISTFT(n_fft=n_fft, hop_length=hop_length)\n # self.stft = ConvSTFT(400, 100, 512, 'hanning', 'complex', fix=True).cuda(args.gpu)\n # self.istft = ConviSTFT(400, 100, 512, 'hanning', 'complex', fix=True).cuda(args.gpu)\n\n # Encoder(downsampling)\n self.downsample0 = EncoderBlock(kernel_size=(7, 5), stride=(2, 2), padding=(3, 2), in_channels=1, out_channels=32)\n self.downsample1 = EncoderBlock(kernel_size=(7, 5), stride=(2, 1), padding=(3, 2), in_channels=32, out_channels=32)\n self.downsample2 = EncoderBlock(kernel_size=(7, 5), stride=(2, 2), padding=(3, 2), in_channels=32, out_channels=64)\n self.downsample3 = EncoderBlock(kernel_size=(5, 3), stride=(2, 1), padding=(2, 1), in_channels=64, out_channels=64)\n self.downsample4 = EncoderBlock(kernel_size=(5, 3), stride=(2, 2), padding=(2, 1), in_channels=64, out_channels=64)\n self.downsample5 = EncoderBlock(kernel_size=(5, 3), stride=(2, 1), padding=(2, 1), in_channels=64, out_channels=64)\n self.downsample6 = EncoderBlock(kernel_size=(5, 3), stride=(2, 2), padding=(2, 1), in_channels=64, out_channels=64)\n self.downsample7 = EncoderBlock(kernel_size=(5, 3), stride=(2, 1), padding=(2, 1), in_channels=64, out_channels=64)\n\n # Decoder(Upsampling)\n self.upsample0 = DecoderBlock(kernel_size=(5, 3), stride=(2, 1), padding=(2, 1), in_channels=64,\n out_channels=64)\n self.upsample1 = DecoderBlock(kernel_size=(5, 3), stride=(2, 2), padding=(2, 1), in_channels=128,\n out_channels=64)\n self.upsample2 = DecoderBlock(kernel_size=(5, 3), stride=(2, 1), padding=(2, 1), in_channels=128,\n out_channels=64)\n self.upsample3 = DecoderBlock(kernel_size=(5, 3), stride=(2, 2), padding=(2, 1), in_channels=128,\n out_channels=64, output_padding=(0, 1))\n self.upsample4 = DecoderBlock(kernel_size=(5, 3), stride=(2, 1), padding=(2, 1), in_channels=128,\n out_channels=64)\n self.upsample5 = DecoderBlock(kernel_size=(7, 5), stride=(2, 2), padding=(3, 2), in_channels=128,\n out_channels=32)\n self.upsample6 = DecoderBlock(kernel_size=(7, 5), stride=(2, 1), padding=(3, 2), in_channels=64,\n out_channels=32, output_padding=(1, 0))\n self.upsample7 = DecoderBlock(kernel_size=(7, 5), stride=(2, 2), padding=(3, 2), in_channels=64, out_channels=1,\n bias=True, last=True, output_padding=(0, 1))\n\n def forward(self, input, is_istft=True):\n # print(\"input:\", x.size())\n # print(noisy_stft.size())\n real = input[..., 0]\n imag = input[..., 1]\n # inputs = torch.stack([real, imag], dim=-1).unsqueeze(1)\n #\n spec_mag = torch.sqrt(real ** 2 + imag ** 2 + 1e-8)\n spec_phase = torch.atan2(imag, real)\n # print(\"spec\", spec_mag.size())\n # inp = inputs\n # downsampling/encoding\n # print(\" --[Encoder]-- \")\n # print(\" Input(spec): \", x.size())\n # display_feature(x[..., 0], \"input_real\")\n # display_feature(x[..., 1], \"input_imag\")\n d0 = self.downsample0(input)\n # display_feature(d0[..., 0], \"Encoder_1_real\")\n # display_feature(d0[..., 1], \"Encoder_1_imag\")\n # print(\" d0: \", d0.size())\n d1 = self.downsample1(d0)\n # display_feature(d1[..., 0], \"Encoder_2_real\")\n # 
display_feature(d1[..., 1], \"Encoder_2_imag\")\n # print(\" d1: \", d1.size())\n d2 = self.downsample2(d1)\n # display_feature(d2[..., 0], \"Encoder_3_real\")\n # display_feature(d2[..., 1], \"Encoder_3_imag\")\n # print(\" d2: \", d2.size())\n d3 = self.downsample3(d2)\n # display_feature(d3[..., 0], \"Encoder_4_real\")\n # display_feature(d3[..., 1], \"Encoder_4_imag\")\n # print(\" d3: \", d3.size())\n d4 = self.downsample4(d3)\n # display_feature(d4[..., 0], \"Encoder_5_real\")\n # display_feature(d4[..., 1], \"Encoder_5_imag\")\n # print(\" d4: \", d4.size())\n d5 = self.downsample5(d4)\n # display_feature(d5[..., 0], \"Encoder_6_real\")\n # display_feature(d5[..., 1], \"Encoder_6_imag\")\n # print(\" d5: \", d5.size())\n d6 = self.downsample6(d5)\n # display_feature(d6[..., 0], \"Encoder_7_real\")\n # display_feature(d6[..., 1], \"Encoder_7_imag\")\n # print(\" d6: \", d6.size())\n d7 = self.downsample7(d6)\n # display_feature(d7[..., 0], \"Encoder_8_real\")\n # display_feature(d7[..., 1], \"Encoder_8_imag\")\n # print(\" d7: \", d7.size())\n\n # print(\" --[Decoder]-- \")\n # bridge 첫번째 Decoder에 skip connection X\n u0 = self.upsample0(d7)\n # display_feature(u0[..., 0], \"Decoder_1_real\")\n # display_feature(u0[..., 1], \"Decoder_1_imag\")\n\n # skip-connection\n c0 = torch.cat((u0, d6), dim=1)\n # print(\" u0: \", u0.size())\n # print(d6.size())\n # print(\" concat(u0,d6): \", d6.size())\n\n u1 = self.upsample1(c0)\n # display_feature(u1[..., 0], \"Decoder_2_real\")\n # display_feature(u1[..., 1], \"Decoder_2_imag\")\n c1 = torch.cat((u1, d5), dim=1)\n # print(\" u1: \", u1.size())\n # print(\" concat(u1,d5): \", c1.size())\n\n u2 = self.upsample2(c1)\n # display_feature(u2[..., 0], \"Decoder_3_real\")\n # display_feature(u2[..., 1], \"Decoder_3_imag\")\n c2 = torch.cat((u2, d4), dim=1)\n # print(\" u2: \", u2.size())\n # print(\" concat(u2,d4): \", c2.size())\n\n u3 = self.upsample3(c2)\n # display_feature(u3[..., 0], \"Decoder_4_real\")\n # display_feature(u3[..., 1], \"Decoder_4_imag\")\n c3 = torch.cat((u3, d3), dim=1)\n # print(\" u3: \", u3.size())\n # print(\" concat(u3,d3): \", c3.size())\n\n u4 = self.upsample4(c3)\n # display_feature(u4[..., 0], \"Decoder_5_real\")\n # display_feature(u4[..., 1], \"Decoder_5_imag\")\n c4 = torch.cat((u4, d2), dim=1)\n # print(\" u4: \", u4.size())\n # print(\" concat(u4,d2): \", c4.size())\n\n u5 = self.upsample5(c4)\n # display_feature(u5[..., 0], \"Decoder_6_real\")\n # display_feature(u5[..., 1], \"Decoder_6_imag\")\n # print(\" u5: \", u5.size())\n # print(d1.size())\n c5 = torch.cat((u5, d1), dim=1)\n # print(\" concat(u5,d1): \", c5.size())\n\n u6 = self.upsample6(c5)\n # display_feature(u6[..., 0], \"Decoder_7_real\")\n # display_feature(u6[..., 1], \"Decoder_7_imag\")\n # print(\" d0 \", d0.size())\n # print(\" u6: \", u6.size())\n c6 = torch.cat((u6, d0), dim=1)\n\n # print(\" concat(u6,d0): \", c6.size())\n\n # u7 = self.upsample7(c6)\n\n # mask_mag, mask_phase = self.upsample7(c6)\n mask = self.upsample7(c6)\n # print(mask.size())\n\n mask_real = mask[..., 0]\n mask_imag = mask[..., 1]\n mask_mag = (mask_real ** 2 + mask_imag ** 2) ** 0.5\n real_phase = mask_real / (mask_mag + 1e-8)\n imag_phase = mask_imag / (mask_mag + 1e-8)\n\n mask_phase = torch.atan2(imag_phase, real_phase)\n mask_mag = torch.tanh(mask_mag)\n est_mag = mask_mag * spec_mag # magnitude mask 입히고\n est_phase = spec_phase + mask_phase # todo phase 더하나?\n\n real = est_mag * torch.cos(est_phase) # todo 이 공식 좀더 자세히 본 기억있음\n imag = est_mag * 
torch.sin(est_phase)\n spec = torch.stack([real, imag], dim=-1)\n if is_istft:\n # print(est.size())\n output = self.istft(spec)\n output = torch.clamp_(output, -1, 1)\n\n return output\n\n\ndef display_spectrogram(x, title):\n plt.figure(figsize=(15, 10))\n plt.pcolormesh(x[0][0], cmap='hot') # 여기서는 Batch shape가 추가\n plt.colorbar(format=\"%+2.f dB\")\n plt.title(title)\n plt.show()\n\n\nif __name__ == \"__main__\":\n a = torch.randn(2, 1, 1539, 214 ,2)\n\n b = DCUNet16(args=\"aa\", n_fft=3076, hop_length=772)\n print(b(a).size())","sub_path":"model/DCUNet.py","file_name":"DCUNet.py","file_ext":"py","file_size_in_byte":12212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"399071677","text":"import logging as logger\nimport os\n\nimport requests\n\nfrom django.conf import settings\nfrom kolibri.content.utils.annotation import update_channel_metadata_cache\nfrom kolibri.tasks.management.commands.base import AsyncCommand\n\n\nlogging = logger.getLogger(__name__)\n\n\nclass Command(AsyncCommand):\n\n def add_arguments(self, parser):\n parser.add_argument(\"channel_id\", type=str)\n\n def handle_async(self, *args, **options):\n channel_id = options[\"channel_id\"]\n logging.info(\"Downloading data for channel id {}\".format(channel_id))\n\n url = os.path.join(\n settings.CENTRAL_CONTENT_DOWNLOAD_DOMAIN,\n \"content\",\n \"databases\",\n \"{}.sqlite3\".format(channel_id),\n )\n\n dest = os.path.join(\n settings.CONTENT_DATABASE_DIR,\n \"{}.sqlite3\".format(channel_id),\n )\n\n logging.debug(\"URL to fetch: {}\".format(url))\n logging.debug(\"Destination: {}\".format(dest))\n\n r = requests.get(url, stream=True)\n r.raise_for_status()\n\n dbsize = int(r.headers['content-length'])\n\n with self.start_progress(total=dbsize) as progress_update:\n with open(dest, \"wb\") as f:\n for content in r.iter_content(1024):\n f.write(content)\n contentlength = len(content)\n progress_update(contentlength)\n\n update_channel_metadata_cache()\n","sub_path":"kolibri/content/management/commands/importchannel.py","file_name":"importchannel.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"245549128","text":"class Solution:\n def lengthOfLIS(self, nums: List[int]) -> int:\n seq = []\n for i, num in enumerate(nums):\n if len(seq) == 0 or num > seq[-1]:\n seq.append(num)\n else:\n for j in range(len(seq)):\n if seq[j] >= num:\n seq[j] = num\n break\n return len(seq)\n","sub_path":"leetcode/300. Longest Increasing Subsequence.py","file_name":"300. 
Longest Increasing Subsequence.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"144488854","text":"# -*- coding: utf-8 -*- \n# @Time : 2019/10/17 20:02 \n# @Author : hzz \n# @File : test_trainmodel.py \n# @Software: PyCharm\nimport time\nimport unittest\nfrom common.loggers import Logger\nfrom common.myconfig import Myconfig\nfrom aicenter.modelplant import Modelpage\nfrom aicenter.ai_loginpage import Login\nfrom aicenter_testcase import test_predictionmodel\n\nconfig = Myconfig()\nlog = Logger('trainmodel').getlog()\n\n\nclass TestTrainModel(unittest.TestCase):\n def setUp(self):\n \"\"\"\n\n \"\"\"\n self.driver = Login().ai_login()\n time.sleep(1)\n\n def tearDown(self):\n \"\"\"\n\n \"\"\"\n self.driver.quit()\n\n def test_1(self):\n turn_trainmodel(self.driver)\n log.info('训练模型-新建工程')\n log.info('创建工程')\n project_name = '自动化创建的工程'+time.strftime('%y%m%d%H%M%S', time.localtime())\n project_result = trainmodel_creatproject(self.driver, project_name)\n time.sleep(1)\n self.assertEqual(project_name, project_result)\n log.info('创建模型')\n model_name = '自动化创建的模型'+time.strftime('%y%m%d%H%M%S', time.localtime())\n model_result = trainmodel_creatmodel(self.driver, model_name)\n time.sleep(1)\n self.assertEqual('新增模型成功', model_result)\n log.info('训练模型')\n model_status = trainmodel_train(self.driver, 'test_data_demo_6', 'KEY', 'LBALE')\n self.assertEqual('成功', model_status)\n\n def test_2(self):\n log.info('训练模型-使用原有工程')\n time.sleep(1)\n select_project(self.driver, '自动化创建的工程')\n log.info('创建模型')\n model_result = trainmodel_creatmodel(self.driver,\n '自动化创建的模型'+time.strftime('%y%m%d%H%M%S', time.localtime()))\n self.assertEqual('新增模型成功', model_result)\n log.info('训练模型')\n model_status = trainmodel_train(self.driver, 'sampsampleYBCSB20191101160512')\n self.assertEqual('成功', model_status)\n\n def test_delete_model(self):\n log.info('删除模型')\n time.sleep(1)\n\n def test_3(self):\n log.info('训练模型')\n time.sleep(1)\n select_project(self.driver, '自动化创建的工程')\n log.info('创建模型')\n model_result = trainmodel_creatmodel(self.driver,\n '自动化创建的模型'+time.strftime('%y%m%d%H%M%S', time.localtime()))\n self.assertEqual('新增模型成功', model_result)\n log.info('训练模型')\n model_status = trainmodel_train(self.driver, 'sampsampleYBCSB20191101160512', 1)\n self.assertEqual('成功', model_status)\n test_predictionmodel.predictionmodel(self.driver)\n\n\ndef turn_trainmodel(driver):\n model = Modelpage(driver)\n model.click_modelplant()\n model.click_model_tool()\n time.sleep(1)\n\n\ndef trainmodel_creatproject(driver, project_name):\n \"\"\"\n\n :return:\n \"\"\"\n model = Modelpage(driver)\n model.click_creat_project()\n model.input_project_name(project_name)\n model.input_project_desc('---自动化创建的工程---')\n model.click_project_primary()\n # model.click_first_project()\n time.sleep(3)\n return model.get_first_project_name()\n\n\ndef trainmodel_creatmodel(driver, model_name=None):\n \"\"\"\n\n :param model_name:\n :param driver:\n :return:\n \"\"\"\n if model_name:\n model_name = '自动化创建的模型' + time.strftime('%y%m%d%H%M%S', time.localtime())\n model = Modelpage(driver)\n model.click_creat_model()\n model.input_model_name(model_name)\n model.select_model_type('二分类')\n model.input_model_desc('---自动化创建的模型---')\n model.click_model_primary()\n return model.get_addmoel_succeed()\n\n\ndef trainmodel_train(driver, tablename, id_col, tab_col, forecast=None):\n \"\"\"\n :param tab_col:\n :param id_col:\n :param forecast:\n :param tablename:\n :param 
driver:\n :return:\n \"\"\"\n model = Modelpage(driver)\n try:\n if not model.judgepage():\n trainmodel_creatmodel(driver)\n model.click_sanmpledb()\n time.sleep(1)\n model.input_tablename(tablename)\n model.click_next_button()\n model.select_id(id_col)\n model.select_tag(tab_col)\n time.sleep(1)\n model.click_next_button()\n time.sleep(1)\n model.click_next_button()\n time.sleep(1)\n model.select_feature_cross('否')\n model.select_feature_filtrate('否')\n model.click_trainmodel()\n model.wait_train()\n time.sleep(1)\n model_status = model.get_model_status()\n if forecast:\n model.click_forecast()\n return model_status\n except Exception as e:\n log.error('训练模型失败%s', e)\n\n\ndef delete_model(driver):\n model = Modelpage(driver)\n try:\n # model.click_first_model()\n model.click_first_more()\n model.click_delete_project()\n time.sleep(1)\n except Exception as e:\n log.error('删除模型失败%s', e)\n\n\ndef select_model(driver, model_name):\n model = Modelpage(driver)\n try:\n model.by_name(model_name)\n except Exception as e:\n log.error('选择模型是失败%s', e)\n\n\ndef select_project(driver, project_name):\n model = Modelpage(driver)\n try:\n model.by_name(project_name)\n except Exception as e:\n log.error('选择项目失败%s', e)\n\n\n\n\n\n","sub_path":"src/python/aicenter_testcase/test_trainmodel.py","file_name":"test_trainmodel.py","file_ext":"py","file_size_in_byte":5521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"316785615","text":"import pandas as pd\nfrom kronall import *\nfrom pauli_matrix import *\nfrom mapping import *\nfrom plot import *\nimport time\n\n\n# In[91]:\n\n\n\n# In[150]:\nstart=time.time()\nnumber=12\neig1=[]\neig2=[]\nmatrix1=[]\nmatrix2=[]\nmatrix3=[]\ncount1=[]\nsign=[1]*number\nfor index1 in range(int(number/2)+1):\n\tif index1>0:\n\t\tsign[index1-1]=-1\n\tmatrice=diag_pauli(cr_ham([i1,z,z],number,sign))\n\tmatrix=0\n\tfor matrixa in matrice:\n\t\tmatrix=matrixa[0][0]*kronall(matrixa[1:])+matrix\n\t\tprint(1)\n\tmatrix1.append(matrix)\n\tmatrice=diag_pauli(cr_ham([i1,x,i],number,sign))\n\tmatrix=0\n\tfor matrixa in matrice:\n\t\tmatrix=matrixa[0][0]*kronall(matrixa[1:])+matrix\n\t\tprint(2)\n\tmatrix2.append(matrix)\n\tmatrice=diag_pauli(cr_ham([i1,i,x],number,sign))\n\tmatrix=0\n\tfor matrixa in matrice:\n\t\tmatrix=matrixa[0][0]*kronall(matrixa[1:])+matrix\n\t\tprint(3)\n\tmatrix3.append(matrix)\n\tcount1.append(count(2,number,sign))\n\nt=1\n\nu1=np.linspace(0,2,60)\n\nfor index in np.arange(0,60):\n\tu=u1[index]\n\teig3=[]\n\tfor index1 in range(int(number/2)+1):\n\t\tmatrix=0.5*u*matrix1[index1]-t*matrix2[index1]-t*matrix3[index1]\n\t\tcount2=count1[index1]\n\t\teig=1000\n\t\ti1=1\n\t\twhile i1==1:\n\t\t\ti_eig=np.argmin(matrix-eig*count2)\n\t\t\tco=count2[i_eig] \n\t\t\teig=matrix[i_eig]/co\n\t\t\tif min(matrix-eig*count2)>=-0.01:\n\t\t\t\tbreak\n\t\teig3.append(eig)\n\teig1.append(min(eig3)+0.5*u)\n\tprint(eig+0.5*u)\t\n\teigvalue,eigvector=np.linalg.eig(0.5*u*np.kron(z,z)-t*np.kron(x,i)-t*np.kron(i,x))\n\teig2.append(min(eigvalue)+0.5*u)\n\nfrom matplotlib import pyplot as plt\nplt.plot(np.linspace(0.0,2.0,60),eig1,'ro',label='simulated')\nplt.plot(np.linspace(0.0,2.0,60),eig2,label='exact')\nplt.xlabel('U')\nplt.ylabel('Energy 
(a.u.)')\nplt.legend(loc='best',prop={'size':10})\nplt.savefig('plot.png')\nend=time.time()\nprint(float(end-start))\n","sub_path":"mapping_new/hei.py","file_name":"hei.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"368814559","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 2 15:44:59 2018\n\n@author: alekriley\n\"\"\"\n\nimport numpy as np\nimport seaborn as sne\nimport matplotlib.pyplot as plt\nimport scipy.stats as sts\nimport tensorflow as tf\n\ndef mini_batch(batch_size,array_size):\n indices = np.arange(array_size)\n np.random.shuffle(indices)\n i = 0\n while i+batch_size < array_size:\n yield indices[i:i+batch_size]\n i += batch_size\n if not i == array_size: yield indices[i:array_size]\n\nclass VAE:\n def __init__(self,sess,n_features,n_hidden,latent_size,lr=0.005):\n self.sess = sess\n self.batch = tf.placeholder(tf.float32,[None,n_features])\n self.latent_size = latent_size\n \n self.eweights = tf.Variable(tf.truncated_normal([n_features,n_hidden],stddev=tf.sqrt(1./n_hidden)))\n self.ebias = tf.Variable(tf.ones([1,n_hidden])*0.01)\n self.encode = tf.nn.relu(tf.matmul(self.batch,self.eweights) + self.ebias)\n \n self.e1weights = tf.Variable(tf.truncated_normal([n_hidden,n_hidden],stddev=tf.sqrt(1./n_hidden)))\n self.e1bias = tf.Variable(tf.ones([1,n_hidden])*0.01)\n self.encode1 = tf.nn.relu(tf.matmul(self.encode,self.e1weights) + self.e1bias)\n \n self.lweights = tf.Variable(tf.truncated_normal([n_hidden,self.latent_size*2],stddev=tf.sqrt(0.5/latent_size)))\n self.lbias = tf.Variable(tf.ones([1,self.latent_size*2])*0.01)\n self.latent = tf.matmul(self.encode1,self.lweights) + self.lbias\n \n self.z = tf.random_normal([tf.shape(self.batch)[0],self.latent_size])*tf.exp(self.latent[:,self.latent_size:])+self.latent[:,:self.latent_size]\n \n self.dweights = tf.Variable(tf.truncated_normal([self.latent_size,n_hidden],stddev=tf.sqrt(1./n_hidden)))\n self.dbias = tf.Variable(tf.ones([1,n_hidden])*0.01)\n self.decode = tf.nn.relu(tf.matmul(self.z,self.dweights) + self.dbias)\n \n self.d1bias = tf.Variable(tf.ones([1,n_hidden])*0.01)\n self.decode1 = tf.nn.relu(tf.matmul(self.decode,self.e1weights,False,True) + self.d1bias)\n \n self.rbias = tf.Variable(tf.ones([1,n_features])*0.01)\n self.reconstruct = tf.matmul(self.decode1,self.eweights,False,True) + self.rbias\n \n self.rloss = tf.reduce_sum(tf.squared_difference(self.reconstruct,self.batch),1)\n self.kloss = tf.reduce_sum(0.5*(tf.square(self.latent[:,:self.latent_size])+tf.exp(self.latent[:,self.latent_size:])-\\\n self.latent[:,self.latent_size:]-1),1)\n self.loss = tf.reduce_mean(self.rloss+self.kloss)\n self.train = tf.train.AdamOptimizer(lr).minimize(self.loss)\n \n def learn(self,batch):\n return self.sess.run([self.loss,self.train],{self.batch : batch})\n def reconstruction(self,batch):\n return self.sess.run([self.reconstruct,self.latent[:,:self.latent_size],self.latent[:,self.latent_size:]],{self.batch : batch})\n def generate(self,sample_size):\n return self.sess.run(self.reconstruct,{self.z : np.random.randn(sample_size,self.latent_size)})\n\nnp.random.seed(0)\n\nn_samples = 5000\nmeans = np.random.randn(8).reshape(2,4)*7\ndata = np.vstack([sts.multivariate_normal(means[:,j]).rvs(n_samples//4) for j in range(means.shape[1])])\n\nnp.random.seed(None)\n\nsne.set_style('darkgrid')\n\ntf.reset_default_graph()\nsess = tf.Session()\nvae = 
VAE(sess,data.shape[1],5,2,0.005)\nsess.run(tf.global_variables_initializer())\n\nloss = []\n\nepochs = 500\nbatch_size = 64\nfor epoch in range(epochs):\n print('Epoch {}'.format(epoch))\n for batch in mini_batch(batch_size,n_samples//2):\n loss.append(vae.learn(data[::2,:][batch])[0])\n \nreconstruction,mean,var = vae.reconstruction(data); var=np.exp(var)\nsample = vae.generate(1000)\n \nfig = plt.figure(figsize=(13,7))\ngs = plt.GridSpec(3,2,fig)\nax1 = plt.subplot(gs[0,:])\nsne.scatterplot(data[:,0],data[:,1],color=np.repeat(['red','green','yellow','blue'],n_samples//4),ax=ax1)\nax2 = plt.subplot(gs[1,0])\nsne.scatterplot(reconstruction[:,0],reconstruction[:,1],color=np.repeat(['red','green','yellow','blue'],n_samples//4),ax=ax2)\nax3 = plt.subplot(gs[1,1])\nsne.scatterplot(sample[:,0],sample[:,1],ax=ax3)\nax4 = plt.subplot(gs[2,:])\nax4.plot(range(len(loss)),loss,lw=0.5)\nplt.show()\n","sub_path":"vae.py","file_name":"vae.py","file_ext":"py","file_size_in_byte":4304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"514001531","text":"from rest_framework.views import exception_handler\n\nfrom .constants import CUSTOM_MESSAGES\n\n\ndef custom_exception_handler(exc, context):\n # Call REST framework's default exception handler first,\n # to get the standard error response.\n response = exception_handler(exc, context)\n\n # set the custom message if found one\n if response is not None:\n keys_data = response.data.keys()\n for key in keys_data:\n if key in CUSTOM_MESSAGES.keys():\n response.data.update({key: CUSTOM_MESSAGES[key]})\n return response\n","sub_path":"AlbumApi/utils/custom_exception_handler.py","file_name":"custom_exception_handler.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"53452252","text":"# Train and Evaluate the Predictor\nfrom preprocessing import *\nfrom sklearn import linear_model\nimport xgboost as xgb\nimport numpy as np\nfrom sklearn.metrics import precision_score, recall_score, accuracy_score\nfrom sklearn.metrics import roc_curve, auc\nimport matplotlib.pyplot as plt\nimport shap\n\ndataset = data_preprocessing(threshold=2)\ndataset[\"avg_agi\"].replace('', np.nan, inplace=True)\ndataset.dropna(subset=[\"avg_agi\"], inplace=True)\ntrain, test, valid = train_test_validation(dataset)\n\n# Baseline Model\nmodel = linear_model.LogisticRegression(C = 0.005, class_weight='balanced')\ntrain_data, train_label = data_label(train)\nvalid_data, valid_label = data_label(valid)\ntest_data, test_label = data_label(test)\nmodel.fit(train_data, train_label)\n# Baseline accuracy\nprint(\"Baseline accuracy: \", model.score(test_data, test_label))\n\n# XGBoost Model\nD_train = xgb.DMatrix(train_data, label=train_label)\nD_valid = xgb.DMatrix(valid_data, label=valid_label)\nparam = {\n # 'eta': 0.3, \n # 'max_depth': 3, \n # 'objective': 'multi:softprob', \n # 'num_class': 2,\n 'objective': 'binary:logistic',\n 'tree_method': 'hist',\n 'eval_metrix': 'auc',\n 'eta': 0.3,\n 'gamma': 0,\n 'min_child_weight': 0.01,\n 'max_depth': 6,\n 'max_delta_step': 1,\n 'subsample': 0.85,\n 'colsample_bytree': 0.45,\n 'colsample_bylevel': 0.7,\n 'colsample_bynode': 1.0,\n 'lambda': 5,\n 'alpha': 0.2\n } \nsteps = 100 # The number of training iterations\nmodel = xgb.train(param, D_train, steps)\npreds = model.predict(D_valid)\nbest_preds = np.asarray([1 if p >= 0.5 else 0 for p in preds])\n# XGBoost Metrics\nprint(\"Precision = 
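In vae.py above, the sampling step scales the noise by `exp(logvar)` while the KL term treats the same network output as a log-variance, which appears inconsistent: under the log-variance convention, the standard deviation is `exp(0.5 * logvar)`. A plain-NumPy sketch of the consistent pair follows; shapes and values are synthetic, for illustration only.

```python
# Reparameterization trick under the log-variance convention
# sigma^2 = exp(logvar); a sketch, not the original TF graph.
import numpy as np

rng = np.random.default_rng(0)

def reparameterize(mu, logvar):
    eps = rng.standard_normal(mu.shape)
    return mu + np.exp(0.5 * logvar) * eps  # std = exp(logvar / 2)

def kl_diag_gaussian(mu, logvar):
    # Closed-form KL(N(mu, diag(exp(logvar))) || N(0, I)), summed over latent dims
    return 0.5 * np.sum(np.square(mu) + np.exp(logvar) - logvar - 1.0, axis=-1)

mu, logvar = np.zeros((4, 2)), np.zeros((4, 2))
print(reparameterize(mu, logvar).shape)   # (4, 2)
print(kl_diag_gaussian(mu, logvar))       # zeros: N(0, I) against itself
```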
{}\".format(precision_score(valid_label, best_preds, average='macro')))\nprint(\"Recall = {}\".format(recall_score(valid_label, best_preds, average='macro')))\nprint(\"Accuracy = {}\".format(accuracy_score(valid_label, best_preds)))\n# ROC Curve\n# Roc Curve\nfpr, tpr, _ = roc_curve(valid_label, best_preds)\nroc_auc = auc(fpr, tpr)\nplt.figure()\nplt.plot(fpr, tpr, color='darkorange',\\\nlw=2, label='ROC curve (area = %0.2f)' % roc_auc)\nplt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')\nplt.xlim([-0.02, 1.0])\nplt.ylim([0.0, 1.05])\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\nplt.title('ROC curve')\nplt.legend(loc=\"lower right\")\nplt.show()\n\n# Shap Feature Importance analysis\nshap.initjs()\nexplainer = shap.TreeExplainer(model)\nshap_values = explainer.shap_values(train_data)\nshap.summary_plot(shap_values, train_data)","sub_path":"Publish_Code/train_evaluate_predictor.py","file_name":"train_evaluate_predictor.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"76491528","text":"import pytreebank\n\n\ndef main():\n sst_folder = 'trainDevTestTrees_PTB'\n dataset = pytreebank.load_sst(sst_folder)\n for d in dataset['train']:\n label, sentence = d.to_labeled_lines()[0]\n print(\"%s has sentiment label %s\" % (sentence, label))\n # example = dataset[\"train\"][0]\n\n # for label, sentence in example.to_labeled_lines():\n # print(\"%s has sentiment label %s\" % (sentence, label))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"test_pytreebank.py","file_name":"test_pytreebank.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"630535830","text":"\nfrom tkinter import *\nimport subprocess\n\n#Enables the button commands\ndef open1():\n subprocess.call(\" REIDv0.5.py 1\", shell=True)\ndef open2():\n subprocess.call(\" REIDv0.5-multiple.py 1\", shell=True)\ndef open3():\n subprocess.call(\" MoveFiles.py 1\", shell=True)\ndef open4():\n subprocess.call(\"Binning.py\", shell=True)\ndef open5():\n subprocess.call(\"Restriction Digest.xls\", shell=True)\nreadme = open(\"Readme.txt\", \"r\")\ntext = readme.read()\n\n \nroot = Tk()\nroot.title(\"REID\")\nroot.geometry(\"700x700\")\n\n#Closes the window with the Exit button\ndef close_program():\n root.destroy()\n\nmb1 = Menubutton(root, text=\"Programs\", relief=RAISED)\nmb1.grid()\nmb1.menu = Menu(mb1, tearoff=0)\nmb1[\"menu\"] = mb1.menu\n\nmb2 = Menubutton(root, text=\"Options\", relief=RAISED)\nmb2.grid(row=0, column=2)\nmb2.menu = Menu(mb2, tearoff=0)\nmb2[\"menu\"] = mb2.menu\n\nmb3 = Menubutton(root, text=\"Analyze\", relief=RAISED)\nmb3.grid(row=0, column=1)\nmb3.menu = Menu(mb3, tearoff=0)\nmb3[\"menu\"] = mb3.menu\n\nlabelframe = LabelFrame(root, text=\"Instructions\")\nlabelframe.grid(row=2, column=3, columnspan=3)\nleft = Label(labelframe, text=text)\nleft.grid(row=2, column=3, columnspan=3)\n\nmb1.menu.add_command(label=\" Single fasta file\", command=open1)\nmb1.menu.add_command(label=\" Multiple fasta files\", command=open2)\nmb2.menu.add_command(label=\" Move files\", command=open3)\nmb2.menu.add_command(label=\" View Restriction Digest \", command=open5)\nmb2.menu.add_command(label=\" Exit \", command=close_program)\nmb3.menu.add_command(label=\" Simulate\", 
command=open4)\nroot.mainloop()\n","sub_path":"REID_v0.5.1/REID.py","file_name":"REID.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"34398542","text":"from sklearn.metrics import confusion_matrix, cohen_kappa_score\n\ndef quadratic_weighted_kappa(c_matrix):\n num = 0.\n denom = 0.\n\n for i in range(c_matrix.shape[0]):\n for j in range(c_matrix.shape[1]):\n n = c_matrix.shape[0]\n wij = ((i-j) ** 2.)\n oji = c_matrix[i,j]\n eij = c_matrix[i, :].sum() * c_matrix[:,j].sum() / c_matrix.sum()\n\n num += wij * oji\n denom += wij * eij\n\n return 1. - num / denom\n\n\ny_true = [1,2,3,4,3]\ny_pred = [2,2,4,4,5]\n\nc_matrix = confusion_matrix(y_true, y_pred, labels=[1,2,3,4,5])\n\nkappa = quadratic_weighted_kappa(c_matrix)\n#print(kappa)\n#0.6153846153846154\n\nkappa = cohen_kappa_score(y_true, y_pred, weights='quadratic')\n#print(kappa)\n#0.6153846153846154","sub_path":"evalMetrics/quadraticWeightedKappa.py","file_name":"quadraticWeightedKappa.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"58302382","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.core.validators\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('blog', '0004_auto_20160129_1946'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='tag',\n name='priority',\n field=models.PositiveSmallIntegerField(default=0, validators=[django.core.validators.MaxValueValidator(10)], verbose_name='priority'),\n ),\n ]\n","sub_path":"apps/blog/migrations/0005_tag_priority.py","file_name":"0005_tag_priority.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"185500466","text":"from Apriori import *\nimport os, os.path\nimport cherrypy\nimport rpy2.robjects as ro\nimport rpy2.robjects.packages as rpackages\nfrom rpy2.robjects.packages import importr\nimport mysql.connector\nfrom Banco import *\n\n\nclass AprioriApp(object):\n\t@cherrypy.expose\n\tdef index(self):\n\t\treturn open('view.html')\n\t\n\tdef compras(self):\n\t\treturn open('verifica_vencendor.html')\n\n@cherrypy.expose\nclass AprioriCNPJ(object):\n\t@cherrypy.tools.json_out()\n\tdef GET(self, cnpj):\n\t\tcnpjs = Banco().searchCNPJS(cnpj)\n\t\tregras = Apriori().extractRules(cnpjs)\n\t\tcnpjs = Banco().extractCNPJs(regras)\n\t\t\n\t\treturn Banco().formatCNPJS(regras, cnpj)\n\n@cherrypy.expose\nclass CompraPorCNPJ(object):\n\t@cherrypy.tools.json_out()\n\tdef GET(self, idcompra):\n\t\tcompras_cnpj = Banco().searchCompras(idcompra)\n\t\treturn idcompra\n\n\nif __name__ == '__main__':\n\tconf = {\n\t\t'/': {\n\t\t\t'tools.sessions.on': True,\n\t\t\t'tools.staticdir.debug': True,\n\t\t\t'tools.staticdir.root': os.path.dirname(os.path.abspath(__file__))\n\t\t},\n\t\t'/cnpj': {\n\t\t\t'request.dispatch': cherrypy.dispatch.MethodDispatcher(),\n\t\t\t'tools.response_headers.on': True,\n\t\t\t'tools.response_headers.headers': [('Content-Type', 'application/json')],\n\t\t},\n\t\t'/compras': {\n\t\t\t'request.dispatch': cherrypy.dispatch.MethodDispatcher(),\n\t\t\t'tools.response_headers.on': True,\n\t\t\t'tools.response_headers.headers': [('Content-Type', 'application/json')],\n\t\t},\n\t\t'/css': {\n 'tools.staticdir.on': True,\n 'tools.staticdir.dir': 
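quadraticWeightedKappa.py above checks its hand-rolled metric against `cohen_kappa_score`; the same identity can be verified with a vectorized form of the confusion-matrix formula, which evaluates to 0.6153846... on the record's own example:

```python
# Vectorized quadratic weighted kappa, cross-checked against sklearn.
import numpy as np
from sklearn.metrics import confusion_matrix, cohen_kappa_score

def qwk(c):
    n = c.shape[0]
    idx = np.arange(n)
    w = (idx[:, None] - idx[None, :]) ** 2            # quadratic weights (i - j)^2
    e = np.outer(c.sum(axis=1), c.sum(axis=0)) / c.sum()  # expected counts
    return 1.0 - (w * c).sum() / (w * e).sum()

y_true = [1, 2, 3, 4, 3]
y_pred = [2, 2, 4, 4, 5]
c = confusion_matrix(y_true, y_pred, labels=[1, 2, 3, 4, 5])
assert np.isclose(qwk(c), cohen_kappa_score(y_true, y_pred, weights='quadratic'))
print(qwk(c))  # 0.6153846...
```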
os.path.join(os.path.dirname(os.path.abspath(__file__)), 'css/')\n }\n\t}\n\twebapp = AprioriApp()\n\twebapp.cnpj = AprioriCNPJ()\n\twebapp.compras = CompraPorCNPJ()\n\tcherrypy.quickstart(webapp, '/', conf)\n","sub_path":"fornecedores_rass/final.py","file_name":"final.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"601568287","text":"import sys\nimport socket\nimport json\nfrom time import sleep\n\nimport machine\nfrom machine import Timer\nfrom network import WLAN\nimport _thread\n\nimport LIS2HH12\nimport LTR329ALS01\nimport MPL3115A2\nimport SI7006A20\nfrom pycom import rgbled, heartbeat\n\ncolor = 0x000000\n\nclass PySense:\n def __init__(self):\n self._accelerometer = LIS2HH12.LIS2HH12()\n self._lightSensor = LTR329ALS01.LTR329ALS01()\n self._tempPressureAlt = MPL3115A2.MPL3115A2()\n self._tempHumidity = SI7006A20.SI7006A20()\n\n def getRoll(self):\n return self._accelerometer.roll()\n\n def getPitch(self):\n return self._accelerometer.pitch()\n\n def getAcceleration(self):\n return self._accelerometer.acceleration()\n\n def getLuminosity(self):\n return self._lightSensor.light()\n\n def getTemperature1(self):\n return self._tempPressureAlt.temperature()\n\n def getTemperature2(self):\n return self._tempHumidity.temperature()\n\n def getHumidity(self):\n return self._tempHumidity.humidity()\n\n def getPressure(self):\n return self._tempPressureAlt.pressure()\n\n def getAltitude(self):\n return self._tempPressureAlt.altitude()\n\n def getData(self):\n acceleration = self.getAcceleration()\n\n data = {\n \"gyro\": {\n \"pitch\": self.getPitch(),\n \"roll\": self.getRoll(),\n \"x\": acceleration[0],\n \"y\": acceleration[1],\n \"z\": acceleration[2]\n },\n \"light\": self.getLuminosity(),\n \"temperature1\": self.getTemperature1(),\n \"temperature2\": self.getTemperature2(),\n \"humidity\": self.getHumidity()\n }\n\n if self.inPressureMode():\n data[\"pressure\"] = self.getPressure()\n elif self.inAltitudeMode():\n data[\"altitude\"] = self.getAltitude()\n\n return data\n\n def inPressureMode(self):\n return self._tempPressureAlt.mode == MPL3115A2.PRESSURE\n\n def inAltitudeMode(self):\n return self._tempPressureAlt.mode == MPL3115A2.ALTITUDE\n\nclass PyServer:\n def __init__(self, port):\n self._port = port\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._socket.bind(('', port))\n self._sensor = PySense()\n\n def listen(self, wlan: WlanThreaded):\n global color\n print('Listening for sockets.')\n self._socket.settimeout(5)\n self._socket.listen(1)\n while True:\n try:\n color = 0x000030\n print('Waiting for client connection')\n accepting = True\n while accepting:\n try:\n (clientSocket, address) = self._socket.accept()\n color = 0x003000\n print('Client connected from <%s>' % (str(address)))\n accepting = False\n except OSError as e:\n if e.errno != 11:\n raise e\n elif not wlan.is_connected():\n print('Disconnected from WLAN network')\n raise e\n while True:\n data = json.dumps(self._sensor.getData()) + '\\n'\n print(data)\n clientSocket.send(data)\n sleep(0.5)\n except KeyboardInterrupt:\n self._socket.close()\n break\n except OSError as e:\n self._socket.close()\n break\n except socket.error as e:\n self._socket.close()\n break\n\nclass WlanThreaded:\n TIMEOUT = 5000\n def __init__(self, ssid:str='', key:str=''):\n self._ssid = ssid\n self._key = key\n self._wlan = WLAN(mode=WLAN.STA)\n self._running = False\n _thread.start_new_thread(self._connect, 
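final.py above wires JSON resources through CherryPy's `MethodDispatcher`. Here is that pattern in isolation, with a hypothetical resource name and mount point rather than the original CNPJ endpoints:

```python
# Minimal MethodDispatcher sketch: an exposed resource class whose GET
# method returns JSON. Resource name and path are illustrative.
import cherrypy

class EchoResource(object):
    exposed = True  # attribute form of the @cherrypy.expose decorator

    @cherrypy.tools.json_out()
    def GET(self, value):
        return {'value': value}

conf = {'/': {'request.dispatch': cherrypy.dispatch.MethodDispatcher()}}

if __name__ == '__main__':
    cherrypy.quickstart(EchoResource(), '/echo', conf)
```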
())\n\n def is_connected(self):\n return self._wlan.isconnected()\n\n def print_wlan(self):\n print(self._wlan.ifconfig())\n\n def stop(self):\n self._running = False\n\n def _connect(self):\n global color\n self._running = True\n try:\n while self._running:\n color = 0x300000\n print('Connecting to <%s>' % (self._ssid))\n self._wlan.scan()\n timer = Timer.Chrono()\n timer.start()\n self._wlan.connect(ssid=self._ssid, auth=(WLAN.WPA2, self._key), timeout=WlanThreaded.TIMEOUT)\n while not self._wlan.isconnected():\n if not self._running:\n break\n duration = timer.read_ms()\n if duration > WlanThreaded.TIMEOUT:\n break\n sleep(0.1)\n if self._running and self._wlan.isconnected():\n self.print_wlan()\n self._start_server()\n except KeyboardInterrupt:\n pass\n self._running = False\n color = 0xff0000\n print('Disconnecting from <%s>' % (self._ssid))\n while True:\n try:\n self._wlan.disconnect()\n break\n except KeyboardInterrupt:\n pass\n color = 0x000000\n print('Shutting down')\n\n def _start_server(self):\n global color\n color = 0x300030\n print('Starting server')\n pyServer = PyServer(10000)\n pyServer.listen(self)\n color = 0x300030\n print('Stopped server')\n\nheartbeat(False)\nwlan = WlanThreaded('brw-pi', 'brentreinaertwout')\ntry:\n while True:\n rgbled(0x000000)\n sleep(0.5)\n rgbled(color)\n sleep(0.1)\nexcept KeyboardInterrupt:\n wlan.stop()\n sys.exit(0)\n","sub_path":"master/networking-and-interfacing-iot-platforms/practica/3/1.2-wifi-rpi/pycom/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"652947096","text":"#encoding: utf-8\n\nimport os, sys\nsys.path.append(os.path.join(os.path.dirname(__file__), \"../const\"))\n\nfrom .token_class import Token\nfrom token_type import *\n\ndef add_token(token, tokens):\n if token not in IGNORE:\n token_type = check_token_type(token)\n value = eval_token(token, token_type)\n value = token\n t = Token(token_type, value, None)\n\n tokens.append(t)\n\ndef check_token_type(token):\n if token in SIGN_NAME:\n return TYPE_SIGN\n\n if token in KEYWORD:\n return TYPE_KEYWORD\n\n if token in KEYWORD_TYPE:\n return TYPE_TYPE_KEYWORD\n \n if token in BOOL_KEYWORD:\n return TYPE_BOOL\n \n if token.isdigit():\n return TYPE_INTEGER\n\n if token.replace(\".\", \"\", 1).isdigit():\n return TYPE_FLOAT\n\n if token[0] + token[-1] == \"\\\"\\\"\":\n return TYPE_STRING\n\n return TYPE_IDENTIFIER\n\ndef eval_token(token, token_type):\n if token_type == TYPE_SIGN:\n return token\n\n if token_type == TYPE_KEYWORD:\n return token\n\n if token_type == TYPE_BOOL:\n return BOOL_KEYWORD[token]\n\n if token_type == TYPE_INTEGER:\n return int(token)\n\n if token_type == TYPE_FLOAT:\n return float(token)\n\n if token_type == TYPE_STRING:\n return token[1:-1]\n\n return token\n\ndef lexer(code):\n tokens = []\n \n token = \"\"\n in_str = False\n in_comment = False\n n = 0\n code += \" \"\n\n while n < len(code):\n c = code[n]\n\n if in_comment:\n n += 1\n\n if c == \"\\n\":\n in_comment = False\n\n continue\n\n if in_str:\n token += c\n n += 1\n\n if c in STR_SIGN:\n in_str = False\n\n else:\n for i in range(3, 0, -1):\n if code[n: n+i] in SIGN_NAME:\n add_token(token, tokens)\n token = \"\"\n add_token(code[n: n+i], tokens)\n n += len(code[n: n+i])\n\n break\n else:\n if c in STR_SIGN:\n add_token(token, tokens)\n token = \"\"\n \n in_str = True\n\n if c in COMMENT_SIGN:\n add_token(token, tokens)\n token = \"\"\n \n in_comment = True\n n += 1 
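The PyCom main.py above streams newline-delimited JSON sensor readings over TCP. A host-side companion sketch for consuming that stream is below; the board's address is an assumption, since its IP is not shown in the record.

```python
# Hedged sketch: read the newline-delimited JSON stream PyServer emits.
# Host and port are assumptions (port 10000 matches the record).
import json
import socket

def read_sensor_stream(host='192.168.4.1', port=10000):
    with socket.create_connection((host, port), timeout=10) as sock:
        buf = b''
        while True:
            chunk = sock.recv(4096)
            if not chunk:
                break  # server closed the connection
            buf += chunk
            while b'\n' in buf:
                line, buf = buf.split(b'\n', 1)
                yield json.loads(line)

# usage:
# for sample in read_sensor_stream():
#     print(sample['temperature1'], sample['humidity'])
```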
\n continue\n\n token += c\n n += 1\n\n eop = Token(EOP, EOP, None)\n tokens.append(eop)\n\n tokens = [Token()] + tokens\n\n head_token = tokens[0]\n cur_token = head_token\n\n for t in tokens[1:]:\n cur_token.next_token = t\n cur_token = cur_token.next_token\n\n return head_token\n","sub_path":"src/python/lexer/lexer.py","file_name":"lexer.py","file_ext":"py","file_size_in_byte":2707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"392466035","text":"\n\nfrom xai.brain.wordbase.nouns._confession import _CONFESSION\n\n#calss header\nclass _CONFESSIONS(_CONFESSION, ):\n\tdef __init__(self,): \n\t\t_CONFESSION.__init__(self)\n\t\tself.name = \"CONFESSIONS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"confession\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_confessions.py","file_name":"_confessions.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"414273881","text":"from bisect import bisect_left as bl\r\r\nimport sys;input=sys.stdin.readline;print=sys.stdout.write\r\r\n\r\r\nn = int(input())\r\r\nL = list(map(int,input().split()))\r\r\nseq = []; seqlen=0\r\r\nseqind = []\r\r\nprev = []\r\r\n\r\r\nfor i in range(n):\r\r\n x = L[i]\r\r\n pos = bl(seq, x)\r\r\n if seqlen <= pos:\r\r\n seq.append(x)\r\r\n seqlen+=1\r\r\n seqind.append(i)\r\r\n else:\r\r\n seq[pos] = x\r\r\n seqind[pos] = i\r\r\n if pos == 0:\r\r\n prev.append(None)\r\r\n else:\r\r\n prev.append(seqind[pos-1])\r\r\n\r\r\nres = []\r\r\nk = seqind[-1]\r\r\nwhile k != None:\r\r\n res.append(L[k])\r\r\n k = prev[k]\r\r\nprint(str(seqlen)+'\\n')\r\r\nprint(' '.join(map(str,reversed(res))))","sub_path":"BOJ/milkclouds/14003/14003.py3.py","file_name":"14003.py3.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"628468131","text":"import numpy as np\nimport pickle\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom tqdm import tqdm\nimport time\n\nclass Config(object):\n \"\"\"配置参数\"\"\"\n\n def __init__(self, dataset, embedding):\n self.model_name = 'SMPCNN'\n self.train_path = dataset + '../../python/data/SMP2019/data/txt/train_x0.txt' # 训练集\n self.dev_path = dataset + '../../python/data/SMP2019/data/txt/dev_x0.txt' # 验证集\n self.test_path = dataset + '../../python/data/SMP2019/data/txt/test_x.txt' # 测试集\n self.class_list = [x.strip() for x in open(\n dataset + '/data/class.txt', encoding='utf-8').readlines()] # 类别名单\n self.vocab_path = dataset + '/data/vocab.pkl' # 词表\n self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt' # 模型训练结果\n self.log_path = dataset + '/log/' + self.model_name\n self.embedding_pretrained = torch.tensor(\n np.load(dataset + '/data/' + embedding)[\"embeddings\"].astype('float32')) \\\n if embedding != 'random' else None # 预训练词向量\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # 设备\n\n self.dropout = 0.5 # 随机失活\n self.require_improvement = 1000 # 若超过1000batch效果还没提升,则提前结束训练\n self.num_classes = len(self.class_list) # 类别数\n self.n_vocab = 0 # 词表大小,在运行时赋值\n self.num_epochs = 20 # epoch数\n self.batch_size = 128 # mini-batch大小\n self.pad_size = 32 # 每句话处理成的长度(短填长切)\n self.learning_rate = 1e-3 # 学习率\n self.embed = self.embedding_pretrained.size(1) \\\n if self.embedding_pretrained is not None else 300 # 字向量维度\n self.hidden_size = 256 # 隐藏层大小\n self.n_gram_vocab = 
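The BOJ 14003 solution above is the O(n log n) longest-increasing-subsequence algorithm with predecessor links for reconstruction. The same idea as a self-contained function, checked on a classic example:

```python
# LIS with reconstruction: tails array maintained via bisect, plus
# predecessor links walked backwards to recover one optimal sequence.
from bisect import bisect_left

def longest_increasing_subsequence(a):
    if not a:
        return []
    tails, tails_idx, prev = [], [], [None] * len(a)
    for i, x in enumerate(a):
        pos = bisect_left(tails, x)
        if pos == len(tails):
            tails.append(x)
            tails_idx.append(i)
        else:
            tails[pos] = x
            tails_idx[pos] = i
        prev[i] = tails_idx[pos - 1] if pos > 0 else None
    out, k = [], tails_idx[-1]
    while k is not None:
        out.append(a[k])
        k = prev[k]
    return out[::-1]

print(longest_increasing_subsequence([10, 20, 10, 30, 20, 50]))  # [10, 20, 30, 50]
```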
250499\n\nfilepath = \"../THUCNews/data/vocab.pkl\"\ninf = pickle.load(open(filepath,'rb'),encoding='iso-8859-1')\nprint(inf)\nprint(inf.get(''))\n# embedding_SougouNews = '../THUCNews/data/embedding_SougouNews.npz'\n# data_embedding_SougouNews = np.load(embedding_SougouNews)\n# print(data_embedding_SougouNews['embeddings'][0])","sub_path":"models/SMP.py","file_name":"SMP.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"439990639","text":"from Tkinter import *\nfrom scipy import stats\nimport numpy as np\nimport random, time\n# import matplotlib.pyplot as plt\n\n# Particle filter algorithm - finding position of robot in a 2 dimensional space using noisy sensors\n# ===================================================== THEORY =========================================================================================\n# First we need to model the problem as Hidden Markov Model (HMM)\n# Notation: S - state space which can be discrete, continuous, defined on a range (smin, smax), t - time,\n# X - probability distribution of state, Y - probability distribution of observations, y(t) - observation at time t\n# B(X) - sampling distribution of X\n# 1) State space, X which produces a sequence of hidden (unobserved) state x(t) i.e. true location of the robot\n# 2) Transition model - P(x(t) | x(t-1)) i.e. what is the probability of robot being 1 step to the right at the next time step?\n# 3) Sequence of observations - readings Y(t) from noisy sensor and P(Y | X) - what is my sensor error?\n# What are we solving here, inference problem? P(X(t) | Y(t=0,1,2,3....t-1))\n#\n# SOLUTION\n# Elapse time step - compute P(X(t) | y(1:t-1)) i.e. what is my probability distribution of X given history of observations?\n# For every possible value of x in state space, P(x(t) | y(1:t-1)) = Summation over x(t-1) of P(x(t-1) | y(1:t-1)) * P(x(t) | x(t-1))\n# Note the recurrence relation here, answer for current time step is dependent on answer for previous time step, so we can use dynamic programming here.\n# \n# Observe step - Compute P(X(t) | y(1:t))\n# P(x(t) | y(1:t)) = P(x(t) | y(1:t-1)) * P(y(t) | x(t))\n#\n#\n# ============================================ MOTIVATION FOR PARTICLE FILTER ==========================================================================\n# Time complexity of elapse time step is |S|^2 because we have to perform the summation for every state to arrive at a distribution. \n# Thus the motivation for particle filters -> approximate solution to the above\n# We use N particles (samples) to represent P(X)\n# P(x) approximated by fraction of particles with value x, if N << |S|, we have many states with P(x) = 0 by pigeonhole principle\n# Start with a prior distribution of where the robot is at time t = 0, if no clue at all, just use a uniform distribution\n# 1) Elapse time step - each particle is moved by sampling its next position from the transition model\n# x' = sample(P(X' | x))\n# We approximate the new distribution using samples (particles) and thus the reduction in complexity\n# 2) Observe step - downweight samples based on the evidence.\n# w(x) = P(y|x)\n# B(X) = P(y|X) * B'(X)\n# Normalize all the particles so sum of B(X) = 1\n# If we iterate through these 2 steps, over time some of these particles are going to vanish to 0,\n# which means we are getting coarser approximation of the true distribution. 
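SMP.py above loads a pretrained embedding matrix from an .npz file into its model config. A minimal sketch of consuming such a matrix in PyTorch follows; the random array stands in for the real `embeddings` array, whose file is not available here.

```python
# Wiring a pretrained embedding matrix into nn.Embedding; the weights
# below are a stand-in for np.load(...)["embeddings"].
import numpy as np
import torch
import torch.nn as nn

weights = np.random.rand(100, 300).astype('float32')  # (vocab, embed_dim) stand-in
embedding = nn.Embedding.from_pretrained(torch.tensor(weights), freeze=False)

tokens = torch.tensor([[1, 5, 7], [2, 0, 9]])  # (batch, seq_len) of vocab ids
vectors = embedding(tokens)                    # (batch, seq_len, 300)
print(vectors.shape)
```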
Thus step 3.\n# 3) Resampling - Rather than tracking weighted samples, we resample.\n# N times, we choose from weighted sample distribution. Draw with replacement.\n# Notice that we are sampling from the sampling distrubution, which is a reduced state space, thus reduction in complexity.\n# weighted particles -> distribution -> unweighted particles\n# Iterate till convergence.\n# \n# So what is being filtered out and when? \n# 1) elapse time step, when we sample under the transition dynamics of the world, as N << |S| most states will end up with 0 or low probability\n# 2) Resampling - we are drawing from a sample distribution which has reduced state space\n\n# Constants\n\n# State space is CANVAS_WIDTH * CANVAS_HEIGHT\nCANVAS_WIDTH = 400\nCANVAS_HEIGHT = 400\nPARTICLE_RADIUS = 3\nNUM_OF_PARTICLES = 1000\nROBOT_RADIUS = 5\nROBOT_POS = (CANVAS_HEIGHT/2, CANVAS_WIDTH/2)\nSAMPLE_SIZE = 50\nSENSOR_SIGMA = 10\nSENSOR_COVARIANCE = np.array([[SENSOR_SIGMA, 0], [0, SENSOR_SIGMA]])\nROBOT_SIGMA = 50\nSENSOR_MEAN = np.array([20,20])\n\n# Global data structures\nINIT_STATE_TABLE = None\nTRANSITION_TABLE = None\nOBS_ERROR_TABLE = None\nPARTICLE_LOCATION = {}\nPARTICLE_WEIGHT = {}\n\nERROR_TOLERANCE = 0.00001\nUPDATE_INTERVAL = 400\nPARTICLE_DELAY = 400\n\n# Initialize prior state distribution, distribution = [\"uniform\", \"gaussian\"]\ndef init_state(distribution, **vargs):\n\tglobal INIT_STATE_TABLE\n\tif distribution == \"uniform\":\n\t\tprob = 1.0 / (CANVAS_HEIGHT * CANVAS_WIDTH)\n\t\tINIT_STATE_TABLE = np.array([CANVAS_HEIGHT, CANVAS_WIDTH]).fill(prob)\n\t\tinit_particles(distribution, **vargs)\n\telif distribution == \"gaussian\":\n\t\traise Exception(\"Not implemented\")\n\telse:\n\t\traise Exception(\"Invalid distribution - use one of uniform, gaussian\")\n\n# Construct the transition model probabilities, model = [\"random\", \"gaussian\", \"gaussian-with-drift\", \"stationary\"]\ndef init_transition_model(model, **vargs):\n\tglobal TRANSITION_TABLE\n\tif model == \"random\":\n\t\tTRANSITION_TABLE = np.random.rand(CANVAS_HEIGHT, CANVAS_WIDTH, CANVAS_HEIGHT, CANVAS_WIDTH)\n\t\tfor x in range(TRANSITION_TABLE.shape[0]):\n\t\t\tfor y in range(TRANSITION_TABLE.shape[1]):\n\t\t\t\tnorm = np.sum(TRANSITION_TABLE[x,y])\n\t\t\t\tTRANSITION_TABLE[x,y,:,:] /= norm\n\telif model == \"gaussian\":\n\t\ttry:\n\t\t\tcov = vargs[\"covariance\"]\n\t\texcept KeyError:\n\t\t\traise Exception(\"Please specify covariance matrix (standard deviation)\")\n\t\tTRANSITION_TABLE = np.zeros((2 * CANVAS_HEIGHT, 2 * CANVAS_WIDTH))\n\t\tif cov is None:\n\t\t\t# Default\n\t\t\tcov = np.array([[ROBOT_SIGMA,0],[0,ROBOT_SIGMA]])\n\t\tcoords_x, coords_y = np.mgrid[0:2 * CANVAS_HEIGHT, 0:2 * CANVAS_WIDTH]\n\t\tcoords = np.dstack((coords_x, coords_y))\n\t\t# Generate a multivariate truncated gaussian with mean (x,y) and bounded by (0,CANVAS_HEIGHT) \n\t\t# in the x direction, bounded by (0, CANVAS_WIDTH) in the y direction, with covariance matrix cov\n\t\t# rescale by a, b\n\t\t# Note this is a hack, this is not a truncated multivariate norm distribution. 
\n\t\tmean = np.array([CANVAS_HEIGHT, CANVAS_WIDTH])\n\t\trv = stats.multivariate_normal(mean, cov)\n\t\tTRANSITION_TABLE[:,:] = rv.pdf(coords)\n\t\tnorm = np.sum(TRANSITION_TABLE)\n\t\tTRANSITION_TABLE /= norm\n\t\tTRANSITION_TABLE = np.cumsum(TRANSITION_TABLE).reshape((2 * CANVAS_HEIGHT, 2 * CANVAS_WIDTH))\n\t\t# plt.contourf(coords_x, coords_y, rv.pdf(coords))\n\t\t# plt.show()\n\t\t# assert(abs(np.sum(TRANSITION_TABLE) - 1.0) < ERROR_TOLERANCE)\n\telif model == \"gaussian-with-drift\":\n\t\traise Exception(\"Not implemented\")\n\telif model == \"stationary\":\n\t\traise Exception(\"Not implemented\")\n\telse:\n\t\traise Exception(\"Invalid model - use one of random, gaussian, gaussian-with-drift, stationary\")\n\n# Construct the table of P(y|x), distribution = [\"random\", \"gaussian\"]\ndef init_obs_given_state(distribution, **vargs):\n\tglobal OBS_ERROR_TABLE\n\tif distribution == \"random\":\n\t\traise Exception(\"not implemented\")\n\telif distribution == \"gaussian\":\n\t\t# Typical scenario, sensor gives a reading +- some degree of accuracy. So Y = X + error, error ~ N(SENSOR_MEAN, SENSOR_COVARIANCE)\n\t\ttry:\n\t\t\tcov = vargs[\"covariance\"]\n\t\texcept KeyError:\n\t\t\traise Exception(\"Please specify covariance matrix (standard deviation)\")\n\t\tOBS_ERROR_TABLE = np.zeros((2 * CANVAS_HEIGHT, 2 * CANVAS_WIDTH))\n\t\tif cov is None:\n\t\t\t# Default\n\t\t\tcov = np.array([[SENSOR_SIGMA,0],[0,SENSOR_SIGMA]])\n\t\tcoords_x, coords_y = np.mgrid[0:2 * CANVAS_HEIGHT, 0:2 * CANVAS_WIDTH]\n\t\tcoords = np.dstack((coords_x, coords_y))\n\t\t# Generate a multivariate truncated gaussian with mean (x,y) and bounded by (0,2*CANVAS_HEIGHT) \n\t\t# in the x direction, bounded by (0, 2*CANVAS_WIDTH) in the y direction, with covariance matrix cov\n\t\t# rescale by a, b\n\t\t# Note this is a hack, this is not a truncated multivariate norm distribution. 
\n\t\ttry:\n\t\t\tmean = vargs[\"sensor_mean\"]\n\t\texcept KeyError:\n\t\t\t# print(\"sensor_mean is not given, using default\")\n\t\t\tmean = np.array([CANVAS_WIDTH + SENSOR_MEAN[0], CANVAS_HEIGHT + SENSOR_MEAN[1]])\n\t\trv = stats.multivariate_normal(mean, cov)\n\t\tOBS_ERROR_TABLE[:,:] = rv.pdf(coords)\n\t\tnorm = np.sum(OBS_ERROR_TABLE)\n\t\tOBS_ERROR_TABLE /= norm\n\t\t# plt.contourf(coords_x, coords_y, rv.pdf(coords))\n\t\t# plt.show()\n\t\tassert(abs(np.sum(OBS_ERROR_TABLE) - 1.0) < ERROR_TOLERANCE)\n\telse:\n\t\traise Exception(\"Invalid model - use one of random, gaussian\")\n\tpass\n\ndef init_particles(distribution, **vargs):\n\tif distribution == \"uniform\":\n\t\tx_samples = np.random.randint(low=0, high=CANVAS_HEIGHT, size=NUM_OF_PARTICLES)\n\t\ty_samples = np.random.randint(low=0, high=CANVAS_WIDTH, size=NUM_OF_PARTICLES)\n\t\tsamples = np.dstack((x_samples, y_samples))\n\t\tfor i in range(0, samples.shape[1]):\n\t\t\tPARTICLE_LOCATION[i] = samples[0,i]\n\t\t\tPARTICLE_WEIGHT[i] = 1.0\n\telse:\n\t\traise Exception(\"Invalid distribution - must be one of gaussian, uniform\")\n\ndef elapse_time_step(distribution):\n\tglobal PARTICLE_LOCATION\n\tif distribution == \"gaussian\":\n\t\tfor (idx, coords) in PARTICLE_LOCATION.iteritems():\n\t\t\ttransition_model_given_x = get_transition_table_slice(TRANSITION_TABLE, coords).flatten()\n\t\t\trand = random.random()\n\t\t\t# Another method is compute sample from closed form, TODO\n\t\t\traw_idx = bin_search(transition_model_given_x, 0, transition_model_given_x.size-1, rand, 0)\n\t\t\tx = raw_idx // CANVAS_HEIGHT\n\t\t\ty = raw_idx % CANVAS_WIDTH\n\t\t\t# start = time.time()\n\t\t\tPARTICLE_LOCATION[idx] = [x,y]\n\t\t\t# end = time.time()\n\t\t\t# print(str((end - start) * 1000) + \" ms\")\n\n# Observation is np.array([x,y])\ndef weight_particles(observation):\n\tglobal PARTICLE_WEIGHT\n\ttotal = 0\n\tfor (idx, location) in PARTICLE_LOCATION.iteritems():\n\t\tactual_x = location[0]\n\t\tactual_y = location[1]\n\t\tobs_error_table_slice = get_transition_table_slice(OBS_ERROR_TABLE, location)\n\t\tprob = obs_error_table_slice[observation[0], observation[1]]\n\t\ttotal += prob\n\t\tPARTICLE_WEIGHT[idx] *= prob\n\t# Normalize so sum of weights = 1\n\tfor idx, weight in PARTICLE_WEIGHT.iteritems():\n\t\tPARTICLE_WEIGHT[idx] /= total\n\n# Sample particles with probability according to their weights\ndef resample():\n\t# Stochastic Universal Sampling - O(N) \n\t# Build the wheel\n\ttotal = sum(v for v in PARTICLE_WEIGHT.values())\n\twheel = [0]\n\tfor (idx, weight) in PARTICLE_WEIGHT.iteritems():\n\t\twheel.append(wheel[-1] + weight / total)\n\t\t# Take this chance to reset the weight\n\t\tPARTICLE_WEIGHT[idx] = 1.0\n\t\n\trand = random.random()\n\tstep_size = 1.0 / NUM_OF_PARTICLES\n\tnew_particles = [] # Stores index of particle selected\n\tnew_particles.append(bin_search(wheel, 0, len(wheel)-1, rand, 0))\n\twhile len(new_particles) < NUM_OF_PARTICLES: # Sampling with replacement\n\t\trand += step_size\n\t\tif rand > 1:\n\t\t\trand %= 1\n\t\tnew_particles.append(bin_search(wheel, 0, len(wheel)-1, rand, 0))\n\n\tglobal PARTICLE_LOCATION\n\ttemp_particle_location = {}\n\tcount = 0\n\tfor i in new_particles:\n\t\tprev_location = PARTICLE_LOCATION[i-1] # Because we added in interval 0 in wheel which increases all subsequent particle index by 1\n\t\ttemp_particle_location[count] = prev_location\n\t\tcount += 1\n\tPARTICLE_LOCATION = temp_particle_location\n\n# Binary search for floats\ndef bin_search(wheel, start, end, num, 
num_discarded_start_of_list):\n\tif (end - start <= 0): return 1\n\tmid = (end - start)//2 + start\n\ttry:\n\t\tif wheel[mid] < num and num <= wheel[mid+1]:\n\t\t\treturn mid - start + num_discarded_start_of_list + 1\n\t\telif (end - start + 1) == 2:\n\t\t\tif num <= wheel[mid]:\n\t\t\t\treturn 1\n\t\t\telif wheel[end] > num:\n\t\t\t\treturn end\n\t\t\telse:\n\t\t\t\treturn end # Don't know what else to return\n\t\telif wheel[mid] >= num:\n\t\t\treturn bin_search(wheel, start, mid, num, num_discarded_start_of_list)\n\t\telse:\n\t\t\treturn bin_search(wheel, mid+1, end, num, num_discarded_start_of_list + (mid - start + 1))\n\texcept Exception as e:\n\t\t# pass\n\t\tprint(wheel, num)\n\n# Moves robot according to predefined motion dynamics\ndef move_robot():\n\tglobal ROBOT_POS\n\ttransition_model_given_x = get_transition_table_slice(TRANSITION_TABLE, ROBOT_POS)\n\trand = random.random()\n\tfor x in range(0, transition_model_given_x.shape[0]):\n\t\tfor y in range(0, transition_model_given_x.shape[1]):\n\t\t\tif rand < transition_model_given_x[x,y]:\n\t\t\t\t# update location of robot\n\t\t\t\tROBOT_POS = (x,y)\n\t\t\t\tbreak\n\t\telse:\n\t\t\tcontinue\n\t\tbreak\n\ndef get_transition_table_slice(tbl, position):\n\tstart_x = CANVAS_WIDTH - position[0]\n\tend_x = CANVAS_WIDTH + (CANVAS_WIDTH - position[0])\n\tstart_y = CANVAS_HEIGHT - position[1]\n\tend_y = CANVAS_HEIGHT + (CANVAS_HEIGHT - position[1])\n\treturn tbl[start_x:end_x, start_y:end_y]\n\n# Given current location of robot, what is my observation\ndef get_observation():\n\t# Y = X + some gaussian noise\n\t# This is not a truncated normal bounded by the canvas, thus there is non-zero\n\t# probability extending to -inf, +inf in both dimensions, we need to account for that.\n\tnoises = np.random.multivariate_normal(SENSOR_MEAN, SENSOR_COVARIANCE, 1)\n\tnew_x = ROBOT_POS[0] + int(round(noises[0,0]))\n\tnew_y = ROBOT_POS[1] + int(round(noises[0,1]))\n\tnew_x = min(new_x, CANVAS_WIDTH-1)\n\tnew_x = max(new_x, 0)\n\tnew_y = min(new_y, CANVAS_HEIGHT-1)\n\tnew_y = max(new_y, 0)\n\treturn [new_x, new_y]\n\ndef particle_filter():\n\tstart = time.time()\n\telapse_time_step(\"gaussian\")\n\tobs = get_observation()\n\tweight_particles(obs)\n\tresample()\n\tend = time.time()\n\tprint(str((end - start) * 1000) + \" ms\")\n\nclass Application(Canvas):\n\tdef update_clock(self):\n\t\tnow = time.strftime(\"%H:%M:%S\")\n\t\tself.label.configure(text=now)\n\t\tself.root.after(UPDATE_INTERVAL, self.update_clock)\n\t\tmove_robot()\n\t\tself.update_robot()\n\t\tparticle_filter()\n\n\tdef update_robot(self):\n\t\tx1, y1 = (ROBOT_POS[0] - PARTICLE_RADIUS), (ROBOT_POS[1] - PARTICLE_RADIUS)\n\t\tx2, y2 = (ROBOT_POS[0] + PARTICLE_RADIUS), (ROBOT_POS[1] + PARTICLE_RADIUS)\n\t\tself.c.delete('robot')\n\t\tself.c.create_oval(x1, y1, x2, y2, fill=\"red\", tag='robot')\n\n\tdef update_particles(self, event=None):\n\t\tself.c.delete('particles')\n\t\tfor (idx, coord) in PARTICLE_LOCATION.iteritems():\n\t\t\tx1, y1 = (coord[0] - PARTICLE_RADIUS), (coord[1] - PARTICLE_RADIUS)\n\t\t\tx2, y2 = (coord[0] + PARTICLE_RADIUS), (coord[1] + PARTICLE_RADIUS)\n\t\t\tself.c.create_oval(x1, y1, x2, y2, fill=\"green\", tag='particles')\n\t\tself.root.after(UPDATE_INTERVAL, self.update_particles)\n\n\tdef create_grid(self):\n\t\twidth = self.c.winfo_width()\n\t\theight = self.c.winfo_height()\n\t\tself.c.delete('grid_line')\n\n\t\tfor i in range(0, width, 10):\n\t\t\tself.c.create_line([(i,0), (i,height)], tag='grid_line')\n\t\t\tself.c.create_line([(0,i), (width,i)], 
tag='grid_line')\n\n\tdef __init__(self, master=None):\n\t\tself.update_count = 0\n\t\tself.c = Canvas(master, height=CANVAS_HEIGHT, width=CANVAS_WIDTH, bg='white')\n\t\tself.c.pack()\n\t\tself.root = master\n\t\tself.label = Label(text=\"\")\n\t\tself.label.pack()\n\t\tself.update_particles()\n\t\tself.update_robot()\n\t\tself.create_grid()\n\t\tself.update_clock()\n\t\ttime.sleep(PARTICLE_DELAY / 1000.0)\n\t\tself.update_particles()\n\t\t# self.c.bind('', self.update_particles)\n\ndef main():\n\tinit_state(\"uniform\")\n\tinit_transition_model(\"gaussian\", covariance=None)\n\tinit_obs_given_state(\"gaussian\", covariance=None)\n\tprint(\"Initialization complete\")\n\troot = Tk()\n\tapp = Application(master=root)\n\troot.mainloop()\n\t# root.destroy()\n\nif __name__ == \"__main__\":\n\tmain()","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":15074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"42292640","text":"from matplotlib import pyplot as plt\nimport random\nimport matplotlib\n\nfont = {'family': \"Microsoft Yahei\", 'size': '10'}\nmatplotlib.rc('font', **font)\n\nplt.figure(figsize=(20, 8))\nx = range(120)\ny = [random.uniform(20, 35) for i in range(120)]\n\nplt.plot(x, y, color='b', linestyle='-', linewidth='3')\n_x_ticks = [\"10点{}分\".format(i) for i in range(60)]\n_x_ticks += [\"11点{}分\".format(i - 60) for i in range(60, 120)]\nplt.xticks(list(x)[::5], _x_ticks[::5], rotation=45)\nplt.xlabel(\"时间\")\nplt.ylabel(\"温度 单位(C)\")\nplt.title(\"10点到12点温度变化情况\")\n# plt.savefig(\"./折线图1.svg\")\nplt.show()\n","sub_path":"matplotlib折线图2.py","file_name":"matplotlib折线图2.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"141526055","text":"#!/usr/bin/env python3\n\"\"\"\nGiven a sorted dictionary (array of words) of an alien language, find order of characters in the language.\n\nEXAMPLES:\n\n Input: words = [\"baa\", \"abcd\", \"abca\", \"cab\", \"cad\"]\n Output: \"bdac\"\"\n\n Input: words = [\"z\", \"x\"]\n Output: \"zx\"\n\n Input: words = [\"z\"]\n Output: \"z\"\n\n Input: words = [\"z\", \"x\", \"z\"]\n Output: \"\"\n Reason: Circula relationship.\n\n Input: words = [\"abc\", \"ab\"]\n Output: \"\"\n Reason: The 2nd word 'ab' is a prefix of the 1st. This is not valid.\n\n\nNOTE:\n - You may assume all letters are in lowercase.\n - If the order is invalid, return an empty string.\n - There may be multiple valid order of letters, return any one of them is fine.\n\nAPPROACHES:\n 1. Extract dependency rules from the input.\n 2. Putting dependency rules into a graph with letters as nodes and\n dependencies as edges. \n 3. Also track the number of inputs for each node.\n 4. Topologically sorting the graph nodes, starting with those without\n any inputs.\n \nTECHNIQUES:\n - Topological sortting.\n\nREFERENCE\n - https://www.geeksforgeeks.org/given-sorted-dictionary-find-precedence-characters/\n - https://www.geeksforgeeks.org/topological-sorting/\n - https://en.wikipedia.org/wiki/Topological_sorting#Kahn's_algorithm\n\n\"\"\"\n\nfrom typing import List\nfrom collections import defaultdict, Counter, deque\n\n\nclass Node:\n def __init__(self, val):\n self.val = val\n self.edges = set()\n self.in_degree = 0\n\n\nclass Solution:\n\n def alienOrder_v1(self, words: List[str]) -> str:\n \"\"\"Use a Node structure to store the graph data.\"\"\"\n # Step 0. 
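The `resample()` in demo.py above implements stochastic universal sampling with a hand-rolled binary search over a cumulative wheel. The same procedure vectorizes cleanly with `np.cumsum` and `np.searchsorted`:

```python
# Vectorized stochastic universal (systematic) resampling: one random
# offset, n evenly spaced pointers, searchsorted over the cumulative wheel.
import numpy as np

def systematic_resample(weights, rng=None):
    """Return indices of the resampled particles."""
    rng = rng or np.random.default_rng(0)
    n = len(weights)
    positions = (rng.random() + np.arange(n)) / n       # n evenly spaced pointers
    wheel = np.cumsum(np.asarray(weights) / np.sum(weights))
    wheel[-1] = 1.0                                     # guard against float drift
    return np.searchsorted(wheel, positions)

print(systematic_resample([0.1, 0.1, 0.6, 0.2]))  # e.g. [1 2 2 3]: the heavy particle survives twice
```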
Create nodes\n nodes = dict()\n for c in set([c for w in words for c in w]):\n nodes[c] = Node(c)\n\n # Step 1. Build the edges (dependency)\n for w1, w2 in zip(words, words[1:]):\n for c1, c2 in zip(w1, w2):\n if c1 != c2:\n n1 = nodes[c1]\n if c2 not in n1.edges:\n n1.edges.add(c2)\n nodes[c2].in_degree += 1\n break\n else:\n if len(w2) < len(w1):\n return ''\n\n # Step 2. Topology Sorting\n results = list()\n queue = [n for n in nodes.values() if n.in_degree == 0]\n while queue:\n n = queue.pop(0)\n results.append(n.val)\n for c in n.edges:\n n2 = nodes[c]\n n2.in_degree -= 1\n if n2.in_degree == 0:\n queue.append(n2)\n\n # If not all letters are in output, that means there was a cycle and so\n # no valid ordering. Return \"\" as per the problem description.\n if len(results) < len(nodes):\n return \"\"\n\n return ''.join(results)\n\n def alienOrder_v2(self, words: List[str]) -> str:\n \"\"\"Use two structures (dict and Counter) to store the information.\"\"\"\n # Step 0: create data structures + the in_degree of each unique letter to 0.\n adj_list = defaultdict(set)\n in_degree = Counter({c: 0 for word in words for c in word})\n\n # Step 1: We need to populate adj_list and in_degree.\n # For each pair of adjacent words...\n for w1, w2 in zip(words, words[1:]):\n for c, d in zip(w1, w2):\n if c != d:\n if d not in adj_list[c]:\n adj_list[c].add(d)\n in_degree[d] += 1\n break\n\n # Check that second word isn't a prefix of first word. E.g. 'holly' ad 'ho'd\n # This is an invalid case and returns ''\n else:\n if len(w2) < len(w1):\n return \"\"\n\n # Step 2: We need to repeatedly pick off nodes with an indegree of 0.\n output = []\n queue = deque([c for c in in_degree if in_degree[c] == 0])\n while queue:\n c = queue.popleft()\n output.append(c)\n for d in adj_list[c]:\n in_degree[d] -= 1\n if in_degree[d] == 0:\n queue.append(d)\n\n # If not all letters are in output, that means there was a cycle and so\n # no valid ordering. Return \"\" as per the problem description.\n if len(output) < len(in_degree):\n return \"\"\n\n # Otherwise, convert the ordering we found into a string and return it.\n return \"\".join(output)\n\n\n# ---------------------------\n# Main & Helper Functions\n# ---------------------------\ndef main():\n \"\"\"Main function\"\"\"\n\n # Test data\n test_data = [\n [[\"baa\", \"abcd\", \"abca\", \"cab\", \"cad\"], \"bdac\"],\n [[\"wrt\", \"wrf\", \"er\", \"ett\", \"rftt\"], \"wertf\"],\n [[\"z\", \"x\"], \"zx\"],\n [[\"z\", \"x\", \"z\"], \"\"], # circular\n [[\"za\", \"zb\", \"ca\", \"cb\"], \"abzc\"],\n [[\"abc\", \"ab\"], \"\"], # 2nd word is a prefix of 1st. 
Thus, invalid\n [[\"z\", \"z\"], \"z\"],\n [[\"zy\", \"zx\"], \"zyx\"],\n [[\"ab\", \"adc\"], \"abcd\"],\n [[\"ri\", \"xz\", \"qxf\", \"jhsguaw\", \"dztqrbwbm\",\n \"dhdqfb\", \"jdv\", \"fcgfsilnb\", \"ooby\"], \"\"],\n ]\n\n sol = Solution()\n for words, expected in test_data:\n print(\"# Input = {}\".format(words))\n out1 = sol.alienOrder_v1(words)\n out2 = sol.alienOrder_v2(words)\n print(\" v1 = '{}' : {}\".format(\n out1, 'ok' if len(out1) == len(expected) else 'ERROR'))\n print(\" v2 = '{}' : {}\".format(\n out2, 'ok' if len(out2) == len(expected) else 'ERROR'))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"python3/trees_and_graphs/alien_dictionary.py","file_name":"alien_dictionary.py","file_ext":"py","file_size_in_byte":5541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"369111161","text":"# A brief description of the project\n# 10/28/18\n# CTI-110 P4HW1 - Budget Analysis\n# Josh Gainey\n#\n\ndef getBudget():\n return float(input(\"How much is your budget amount for this month?\"))\ndef balanceBudget(monthlyBudget):\n expensesTotal=0\n anotherTransaction = True\n \n while(anotherTransaction):\n\n expense = float(input(\"Enter expense total:\"))\n expensesTotal += expense\n\n print(\"Expenses total MTD: $\", expensesTotal)\n \n answer = input(\"Would you like to add a another transaction? enter 'Y' or 'N'\")\n if(answer == \"N\" or answer == \"n\"):\n anotherTransaction = False\n else:\n anotherTransaction = True\n\n finalize(monthlyBudget, expensesTotal)\n \ndef finalize(monthlyBudgetIn, expensesTotalIn):\n if (monthlyBudgetIn > expensesTotalIn):\n remaining = monthlyBudgetIn - expensesTotalIn\n print('You are under Budget! You spent $', expensesTotalIn, ' out of $', monthlyBudgetIn, '. This leaves you with $', remaining, '.')\n else:\n remaining = monthlyBudgetIn - expensesTotalIn\n print('You are over Budget! You spent $', expensesTotalIn, ' out of $', monthlyBudgetIn, '. 
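Both solutions in alien_dictionary.py above are instances of Kahn's algorithm; stripped of the letter-extraction step, the core reads as below and reproduces the record's "bdac" ordering:

```python
# Kahn's algorithm in isolation; edges are (u, v) pairs meaning u precedes v.
from collections import deque

def topological_order(nodes, edges):
    adj = {n: set() for n in nodes}
    in_degree = {n: 0 for n in nodes}
    for u, v in edges:
        if v not in adj[u]:
            adj[u].add(v)
            in_degree[v] += 1
    queue = deque(n for n in nodes if in_degree[n] == 0)
    order = []
    while queue:
        u = queue.popleft()
        order.append(u)
        for v in adj[u]:
            in_degree[v] -= 1
            if in_degree[v] == 0:
                queue.append(v)
    return order if len(order) == len(in_degree) else []  # [] signals a cycle

print(''.join(topological_order('abcd', [('b', 'd'), ('d', 'a'), ('a', 'c')])))  # bdac
```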
This leaves you with over $', remaining, '.')\ndef main():\n balanceBudget(getBudget())\n \nmain()\n","sub_path":"m5/P4HW1_BudgetAnalysis_JoshGainey.py","file_name":"P4HW1_BudgetAnalysis_JoshGainey.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"629284964","text":"import numpy as np\nimport random\nimport copy\nfrom agent import Agent\nimport json\n\ndef getBest(pool):\n\tbest_fitness = 0\n\tbest_ind = None\n\n\tfor individual in pool:\n\t\tfitness = individual.fitness\n\t\tif fitness >= best_fitness:\n\t\t\tbest_fitness = fitness\n\t\t\tbest_ind = individual\n\n\tif not best_ind:\n\t\treturn random.choice(pool)\n\treturn best_ind\n\ndef tournament(pool, k, tourn_size):\n\tchosen = []\n\tfor i in range(k):\n\t\tpool = random.sample(pool, tourn_size)\n\t\tchosen.append(getBest(pool))\n\treturn chosen\n\n#Single Point Crossover\ndef crossover(weights1, weights2):\n\tweights1_new = copy.deepcopy(weights1)\n\tweights2_new = copy.deepcopy(weights2)\n\t\n\tweights1_new[0] = weights2[0]\n\tweights2_new[0] = weights1[0]\n\treturn weights1_new, weights2_new\n\ndef mutate(weights):\n\tfor i in range(len(weights)):\n\t\tfor j in range(len(weights[i])):\n\t\t\tif random.random() < 0.15:\n\t\t\t\tweights[i][j] += random.uniform(-0.5,0.5)\n\treturn weights\n","sub_path":"cartpole/genetic.py","file_name":"genetic.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"358854166","text":"# -*- coding: utf-8 -*-\n__author__ = 'Brice Olivier'\n\nimport matplotlib.pyplot as plt\nimport mne\nimport numpy as np\nimport os\nimport pandas as pd\nimport seaborn as sns\nfrom sea.config import OUTPUT_PATH\nimport uuid\n\n\nPHASE_NAMES = ['Fast Forward', 'Normal Reading', 'Information Search', 'Slow Confirmation']\nPHASE_NAMES_SHORT = ['FF', 'NR', 'IS', 'SC']\n\n\nclass MeltedMODWTDataFrame(pd.DataFrame):\n \"\"\"\n TODO:\n * subsample df to select channels (with missing channel interpolation), subjects\n * topomaps per stg\n * corr per stg\n \"\"\"\n\n _metadata = ['channel_info']\n\n def __init__(self, *args, **kwargs):\n channel_info = kwargs.pop('channel_info', None)\n super(MeltedMODWTDataFrame, self).__init__(*args, **kwargs)\n self.channel_info = channel_info\n\n @property\n def _constructor(self):\n return MeltedMODWTDataFrame\n\n @staticmethod\n def concat(melted_modwt_dataframes):\n assert all([type(melted_modwt_dataframe) == MeltedMODWTDataFrame\n for melted_modwt_dataframe in melted_modwt_dataframes])\n melted_modwt_dataframe = pd.concat(melted_modwt_dataframes)\n melted_modwt_dataframe.channel_info = melted_modwt_dataframe[0].channel_info\n return melted_modwt_dataframe\n\n def plot_var_heatmap(self, last_x_scales=None, robust=False, normalize_power_spectrum=False):\n assert all([col in self.columns for col in ['PHASE', 'CHANNEL', 'SCALE']])\n # nb_phases = self['PHASE'].astype(int).max()\n scale_names = ['sc1', 'sc2', r'$\\gamma$ +', r'$\\gamma$ -', r'$\\beta$', r'$\\alpha$', r'$\\theta$']\n nb_scales = len(self['SCALE'].unique())\n last_x_scales = nb_scales if (last_x_scales is None) or (last_x_scales > nb_scales) else last_x_scales\n df = self.groupby(['PHASE', 'CHANNEL', 'SCALE']).var().reset_index()\n df = df[df['SCALE'].isin(range(nb_scales - last_x_scales, nb_scales))]\n values = df.VALUE\n if normalize_power_spectrum:\n #values /= (df.SCALE.astype(float) + 1)\n values /= 2**(df.SCALE.astype(float))\n if 
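In genetic.py above, `tournament()` reassigns `pool` inside its loop, so after the first round every tournament draws from the previous tournament's members rather than the full population; `getBest` also starts from `best_fitness = 0`, which mishandles all-negative fitness. A corrected sketch (the `fitness` attribute mirrors the record's `Agent`):

```python
# Tournament selection that keeps the population intact between rounds.
import random

def tournament_select(population, k, tournament_size, key=lambda ind: ind.fitness):
    """Pick k winners; each tournament draws fresh from the full population."""
    winners = []
    for _ in range(k):
        contenders = random.sample(population, tournament_size)
        winners.append(max(contenders, key=key))  # max() handles negative fitness too
    return winners
```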
robust:\n vmin = values.quantile(q=0.10)\n vmax = values.quantile(q=0.90)\n else:\n vmin = values.min()\n vmax = values.max()\n fig, axes = plt.subplots(nrows=2, ncols=2, sharex=True, sharey=True)\n cbar_ax = fig.add_axes([.91, .3, .03, .4])\n for i, ax in enumerate(axes.flat):\n if i in df['PHASE'].unique():\n v = df[df['PHASE'] == i].pivot(index='SCALE', columns='CHANNEL', values='VALUE')\n sns.heatmap(v, ax=ax, vmin=vmin, vmax=vmax, cbar=i == 0, cbar_ax=None if i else cbar_ax,\n yticklabels=scale_names[-last_x_scales:])\n ax.set_title(PHASE_NAMES[i])\n ax.set_xlabel('')\n ax.set_ylabel('')\n # fig.tight_layout() # seaborn.heatmap ax is tight_layout() incompatible\n plt.show()\n\n def plot_corr_heatmap(self, last_x_scales=None):\n assert all([col in self.columns for col in ['PHASE', 'CHANNEL', 'SCALE']])\n nb_scales = len(self['SCALE'].unique())\n scale_names = ['sc1', 'sc2', r'$\\gamma$ +', r'$\\gamma$ -', r'$\\beta$', r'$\\alpha$', r'$\\theta$']\n last_x_scales = nb_scales if (last_x_scales is None) or (last_x_scales > nb_scales) else last_x_scales\n channel_names = self['CHANNEL'].unique().tolist()\n df = self.groupby(['SCALE', 'CHANNEL', 'PHASE'])['VALUE'].apply(lambda x: [elem for elem in x]).reset_index()\n for scale in range(nb_scales - last_x_scales, nb_scales):\n fig, axes = plt.subplots(nrows=2, ncols=2, sharex=True, sharey=True)\n cbar_ax = fig.add_axes([.91, .3, .03, .4])\n for i, ax in enumerate(axes.flat):\n if i in df.loc[df['SCALE'] == i, 'PHASE'].unique():\n sub_gb = df.loc[(df.SCALE == scale) & (df.PHASE == i)]\n corr_mat = np.corrcoef([sub_gb.loc[j, 'VALUE'] for j in sub_gb.index])\n sns.heatmap(corr_mat, ax=ax, xticklabels=channel_names, yticklabels=channel_names,\n vmin=0, vmax=1, cbar=i == 0, cbar_ax=None if i else cbar_ax)\n ax.set_title(PHASE_NAMES[i])\n ax.set_xlabel('')\n ax.set_ylabel('')\n fig.suptitle(scale_names[scale])\n plt.show()\n\n def plot_topomap(self, groupby=None, robust=False, last_x_scales=None,\n is_file_output=False, normalize_power_spectrum=False):\n self['TEXT_TYPE'] = self['TEXT'].apply(lambda x: x.split('-')[1][0])\n if groupby is not None:\n assert all([col in self.columns for col in groupby])\n nb_scales = len(self['SCALE'].unique()) if 'SCALE' in groupby else 1\n scale_names = ['sc1', 'sc2', r'$\\gamma$ +', r'$\\gamma$ -', r'$\\beta$', r'$\\alpha$', r'$\\theta$']\n last_x_scales = nb_scales if (last_x_scales is None) or (last_x_scales > nb_scales) else last_x_scales\n nb_phases = self['PHASE'].astype(int).max() + 1 if 'PHASE' in groupby else 1\n subject_names = self['SUBJECT'].unique()\n nb_subjects = len(subject_names) if 'SUBJECT' in groupby else 1\n text_types = self['TEXT_TYPE'].unique()\n nb_text_types = len(text_types) if 'TEXT_TYPE' in groupby else 1\n self['SCALE'] = self['SCALE'].astype(float)\n gb = self[self['SCALE'].isin(range(nb_scales - last_x_scales, nb_scales))].groupby(\n groupby).var().reset_index()\n values = gb.VALUE\n if normalize_power_spectrum:\n #values /= (gb.SCALE.astype(float) + 1)\n values /= 2**(gb.SCALE.astype(float))\n if robust:\n vmin = values.quantile(q=0.10)\n vmax = values.quantile(q=0.90)\n else:\n vmin = values.min()\n vmax = values.max()\n\n for text_type_id in range(nb_text_types):\n if nb_text_types == 1:\n text_type = text_types\n else:\n text_type = text_types[text_type_id]\n for subject_id in range(nb_subjects):\n if nb_subjects == 1:\n subject_name = subject_names\n else:\n subject_name = subject_names[subject_id]\n fig, axes = plt.subplots(nrows=nb_phases, ncols=last_x_scales, sharex=True, 
sharey=True)\n for i, ax in enumerate(axes.flat):\n scale_id = nb_scales - 1 - i % (last_x_scales)\n phase_id = int(i / (last_x_scales))\n if phase_id in self['PHASE'].unique():\n gb_values = np.array(self[(self['SCALE'] == scale_id) &\n (self['PHASE'] == phase_id) &\n (self['TEXT_TYPE'].isin(text_type)) &\n (self['SUBJECT'].isin(subject_name))\n ].groupby(['CHANNEL']).var().VALUE)\n if normalize_power_spectrum:\n #gb_values = gb_values / (scale_id + 1)\n gb_values = gb_values / (2**scale_id)\n mne.viz.plot_topomap(gb_values, self.channel_info, axes=ax,\n vmin=vmin, vmax=vmax, show=False)\n if phase_id == self['PHASE'].astype(int).max():\n ax.set_xlabel(scale_names[scale_id])\n if scale_id == nb_scales - 1 % (last_x_scales):\n ax.set_ylabel(PHASE_NAMES_SHORT[phase_id])\n plot_title = ''\n if nb_text_types == 1:\n plot_title += 'all text types'\n else:\n plot_title += 'text type %s' % text_type\n if nb_subjects == 1:\n plot_title += ', all subjects'\n else:\n plot_title += ', subject %s' % subject_name\n #fig.text(0.5, 0.98, plot_title, ha='center')\n #fig.text(0.5, 0.01, 'scale', ha='center')\n #fig.text(0.01, 0.5, 'phase', va='center', rotation='vertical')\n fig.tight_layout(rect=[0, 0, .9, 1])\n if is_file_output:\n file_path = uuid.uuid4().hex + '.png'\n if not os.path.exists(OUTPUT_PATH):\n os.makedirs(OUTPUT_PATH)\n file_path = os.path.join(OUTPUT_PATH, file_path)\n plt.savefig(file_path)\n print('topomap - %s, saved to %s' % (plot_title, file_path))\n else:\n mne.viz.utils.plt_show()\n\n","sub_path":"sea/melted_modwt_dataframe.py","file_name":"melted_modwt_dataframe.py","file_ext":"py","file_size_in_byte":8782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"519119153","text":"filename = raw_input(\"Name of file: \")\r\nf = open(filename, \"r\")\r\no = open(filename + \".out\", \"w\")\r\n\r\n\r\nT = int(f.readline()[:-1])\r\n## Code starts here\r\n\r\nmany_possible = \"Bad magician!\"\r\nzero_possible = \"Volunteer cheated!\"\r\n\r\nfor t in range(1, T + 1):\r\n first = int(f.readline()[:-1]) - 1\r\n grid1 = []\r\n for i in range(4):\r\n grid1 += [f.readline()[:-1].split(\" \")]\r\n \r\n second = int(f.readline()[:-1]) - 1\r\n grid2 = []\r\n for i in range(4):\r\n grid2 += [f.readline()[:-1].split(\" \")]\r\n\r\n possible = []\r\n for num in grid1[first]:\r\n if num in grid2[second]:\r\n possible += [num]\r\n\r\n if len(possible) == 1:\r\n o.write(\"Case #%d: %s\\n\" %(t, possible[0]))\r\n elif len(possible) == 0:\r\n o.write(\"Case #%d: %s\\n\" %(t, zero_possible))\r\n else:\r\n o.write(\"Case #%d: %s\\n\" %(t, many_possible))\r\n \r\n \r\n## code ends here\r\n\r\no.close()\r\nf.close()\r\n","sub_path":"solutions_5756407898963968_0/Python/radkokotev/problem_a.py","file_name":"problem_a.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"575997199","text":"import argparse\nimport subprocess\nimport sys\n\nfrom . 
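`plot_var_heatmap` in melted_modwt_dataframe.py above follows a groupby-variance, pivot, heatmap pipeline. The pattern on a tiny synthetic melted frame, assuming seaborn and matplotlib as in the original:

```python
# Groupby-variance heatmap on a synthetic melted frame; channel names
# and sizes are illustrative, not taken from the EEG data.
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
df = pd.DataFrame({
    'CHANNEL': np.repeat(['Fz', 'Cz', 'Pz'], 40),
    'SCALE': np.tile(np.repeat([0, 1], 20), 3),
    'VALUE': rng.normal(size=120),
})
var = df.groupby(['SCALE', 'CHANNEL']).var().reset_index()
grid = var.pivot(index='SCALE', columns='CHANNEL', values='VALUE')
sns.heatmap(grid)
plt.show()
```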
file_io import load_env, save_env, env_file\n\n\ndef install():\n parser = argparse.ArgumentParser(\n description='Install packages in the active environment')\n parser.add_argument('--pip', action='store_true',\n help=\"Install packages from PyPI with pip\")\n parser.add_argument('package_spec', nargs='+')\n parser.add_argument('-c', nargs='?', dest='channel',\n help=\"Conda channel for packages\")\n \n args = parser.parse_args(sys.argv[2:])\n\n try:\n env = load_env()\n except FileNotFoundError:\n print(f\"This environment isn't managed by Abode, use conda instead.\")\n return None\n\n if args.pip:\n for each in env['dependencies']:\n try:\n pip_packages = each['pip']\n except TypeError:\n continue\n # Found the pip dependencies\n pip_packages.extend(args.package_spec)\n else:\n env['dependencies'].extend(args.package_spec)\n\n if args.channel:\n env['channels'].insert(0, args.channel)\n\n save_env(env)\n\n subprocess.run(['conda', 'env', 'update', '-f', env_file(env['name'])])","sub_path":"abode/install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"347378298","text":"import turtle\r\n\r\nwn = turtle.Screen()\r\nwn.title(\"Pong by Joseph Abero\")\r\nwn.bgcolor(\"black\")\r\nwn.setup(width=800, height=600)\r\nwn.tracer(0)\r\n\r\n# Paddle A\r\npaddle_a = turtle.Turtle()\r\npaddle_a.speed(0)\r\npaddle_a.shape(\"square\")\r\npaddle_a.color(\"white\")\r\npaddle_a.shapesize(stretch_wid=5, stretch_len=1)\r\npaddle_a.penup()\r\npaddle_a.goto(-350, 0)\r\n\r\n# Paddle B\r\npaddle_b = turtle.Turtle()\r\npaddle_b.speed(0)\r\npaddle_b.shape(\"square\")\r\npaddle_b.color(\"white\")\r\npaddle_b.shapesize(stretch_wid=5, stretch_len=1)\r\npaddle_b.penup()\r\npaddle_b.goto(350, 0)\r\n\r\n# Ball\r\nball = turtle.Turtle()\r\nball.speed(0)\r\nball.shape(\"square\")\r\nball.color(\"white\")\r\nball.penup()\r\nball.goto(0, 0)\r\nball.dx = .3\r\nball.dy = .3\r\n\r\n\r\n# Functions\r\ndef paddle_a_up():\r\n\ty = paddle_a.ycor()\r\n\tif (y + 20) > 300:\r\n\t\tpaddle_a.sety(300)\r\n\telse: \r\n\t\tpaddle_a.sety(y + 20)\r\n\r\n\r\ndef paddle_a_down():\r\n\ty = paddle_a.ycor()\r\n\tif (y - 20) < -300:\r\n\t\tpaddle_a.sety(-300)\r\n\telse: \r\n\t\tpaddle_a.sety(y - 20)\r\n\r\ndef paddle_b_up():\r\n\ty = paddle_b.ycor()\r\n\tif (y + 20) > 300:\r\n\t\tpaddle_b.sety(300)\r\n\telse: \r\n\t\tpaddle_b.sety(y + 20)\r\n\r\n\r\ndef paddle_b_down():\r\n\ty = paddle_b.ycor()\r\n\tif (y - 20) < -300:\r\n\t\tpaddle_b.sety(-300)\r\n\telse: \r\n\t\tpaddle_b.sety(y - 20)\r\n\r\n# Keyboard Bindings\r\nwn.listen()\r\nwn.onkeypress(paddle_a_up, \"w\")\r\nwn.onkeypress(paddle_a_down, \"s\")\r\nwn.onkeypress(paddle_b_up, \"e\")\r\nwn.onkeypress(paddle_b_down, \"d\")\r\n\r\n# Main Game Loop\r\nwhile True:\r\n\twn.update()\r\n\r\n\t# Move the ball\r\n\tball.setx(ball.xcor() + ball.dx)\r\n\tball.sety(ball.ycor() + ball.dy)\r\n\r\n\t# Border checking\r\n\tif ball.ycor() > 290:\r\n\t\tball.sety(290)\r\n\t\tball.dy *= -1\r\n\telif ball.ycor() < -290:\r\n\t\tball.sety(-290)\r\n\t\tball.dy *= -1\r\n\telif ball.xcor() > 390:\r\n\t\tball.setx(390)\r\n\t\tball.dx *= -1\r\n\telif ball.xcor() < -390:\r\n\t\tball.setx(-390)\r\n\t\tball.dx *= -1\r\n\r\n\t# Paddle and ball collisions\r\n\tif (ball.xcor() > 340 and ball.xcor() < 350 and \r\n\t\tball.ycor() < paddle_b.ycor() + 40 and \r\n\t\tball.ycor() > paddle_b.ycor() - 40):\r\n\t\t\tball.setx(340)\r\n\t\t\tball.dx *= -1\r\n\tif (ball.xcor() < -340 and ball.xcor() > -350 
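The pip branch of install.py above only extends an existing `{'pip': [...]}` entry inside `dependencies`; if the environment file has no pip section yet, the loop falls through and nothing is recorded. A defensive helper that creates the section on demand:

```python
# Append pip packages to a conda env dict, creating the pip entry if absent.
def add_pip_packages(env, packages):
    for entry in env['dependencies']:
        if isinstance(entry, dict) and 'pip' in entry:
            entry['pip'].extend(packages)
            return env
    env['dependencies'].append({'pip': list(packages)})  # no pip section yet
    return env

env = {'name': 'demo', 'channels': ['defaults'], 'dependencies': ['python=3.10']}
print(add_pip_packages(env, ['requests'])['dependencies'])
# ['python=3.10', {'pip': ['requests']}]
```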
and\r\n\t\tball.ycor() < paddle_a.ycor() + 40 and \r\n\t\tball.ycor() > paddle_a.ycor() - 40):\r\n\t\t\tball.setx(-340)\r\n\t\t\tball.dx *= -1\r\n","sub_path":"Pong/pong.py","file_name":"pong.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"592298300","text":"\"\"\"Rename label to text in options table\n\nRevision ID: d1f37ec5225a\nRevises: 8804ebf2ff21\nCreate Date: 2020-05-19 23:00:20.009535\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'd1f37ec5225a'\ndown_revision = '8804ebf2ff21'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('options', sa.Column('text', sa.String(), nullable=True))\n op.drop_column('options', 'label')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('options', sa.Column('label', sa.VARCHAR(), autoincrement=False, nullable=True))\n op.drop_column('options', 'text')\n # ### end Alembic commands ###\n","sub_path":"backend/app/alembic/versions/d1f37ec5225a_rename_label_to_text_in_options_table.py","file_name":"d1f37ec5225a_rename_label_to_text_in_options_table.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"222211571","text":"from cv2 import CascadeClassifier, cvtColor, COLOR_BGR2GRAY\nfrom facenet.src.align.detect_face import create_mtcnn, detect_face\n\n\nclass HaarcascadeDetector:\n def __init__(self, model_path, min_size):\n self._detector = CascadeClassifier(model_path)\n self._min_size = min_size\n\n def detect(self, img):\n img = cvtColor(img, COLOR_BGR2GRAY)\n faces = self._detector.detectMultiScale(\n image=img,\n scaleFactor=1.4,\n minNeighbors=5,\n minSize=(self._min_size, self._min_size)\n )\n\n updated_faces = [\n (x, y, x + w, y + h) for x, y, w, h in faces\n ]\n\n return updated_faces\n\n\nclass MTCNNDetector:\n _THRESHOLDS = [0.6, 0.7, 0.7]\n _FACTOR = 0.709\n\n def __init__(self, sess, model_path, min_size):\n self._pnet, self._rnet, self._onet = create_mtcnn(sess, model_path)\n self._min_size = min_size\n\n def detect(self, img):\n faces, _ = detect_face(\n img,\n self._min_size,\n self._pnet, self._rnet, self._onet,\n self._THRESHOLDS,\n self._FACTOR\n )\n\n return faces\n","sub_path":"Recogniton-AI/Face-Recognition/src/myfacenet/detector.py","file_name":"detector.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"44840144","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\ndef imshow(image, ax=None, title=None):\n \"\"\"Imshow for Tensor. 
Provided by Udacity as part of this project.\"\"\"\n    if ax is None:\n        fig, ax = plt.subplots()\n    \n    # PyTorch tensors assume the color channel is the first dimension\n    # but matplotlib assumes it is the third dimension\n    image = image.numpy().transpose((1, 2, 0))\n    \n    # Undo preprocessing\n    mean = np.array([0.485, 0.456, 0.406])\n    std = np.array([0.229, 0.224, 0.225])\n    image = std * image + mean\n    \n    # Image needs to be clipped between 0 and 1 or it looks like noise when displayed\n    image = np.clip(image, 0, 1)\n    \n    ax.imshow(image)\n    \n    return ax","sub_path":"imshow.py","file_name":"imshow.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"542491286","text":"#! /usr/bin/env python\nfrom __future__ import print_function\nimport sys\nfrom collections import Counter\n\nc = Counter()\nlen_dict = {}\n\nline_counter = 0\nfor line in sys.stdin:\n    line_counter += 1\n    if line_counter % 10000 == 0:\n        print(\"%d lines processed\" % (line_counter), file=sys.stderr)\n    tokens = line.split()\n    l = len(tokens)\n    if l not in len_dict:\n        len_dict[l] = 1\n    else:\n        len_dict[l] += 1\n\nmax_len = sorted(len_dict.keys())[-1]\n\nless_or_equal_to_i = 0\nfor i in xrange(max_len + 1):\n    if i in len_dict:\n        less_or_equal_to_i += len_dict[i]\n    ratio = 100.0 * less_or_equal_to_i / line_counter\n    print(\"less or equal to %d : %d, %2.2f%%\" % (i, less_or_equal_to_i, ratio), file=sys.stderr)\n\nfor key,f in sorted(c.items(), key=lambda x: x[1], reverse=True):\n    print(key+\" \"+ str(f))\n","sub_path":"data_stats.py","file_name":"data_stats.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"608125546","text":"'''\nCreated on Apr 25, 2016\n\n:author: iitow\n'''\nfrom modules.environment import EnvManager\nfrom modules.log import message\nimport types\nimport sys\n\n\nclass DecoMeta(type):\n    '''\n    This is a meta class for decorating all classes\n    '''\n    def __new__(cls, name, bases, attrs):\n        '''\n        Allows for grabbing class info for parsing\n        '''\n        for attr_name, attr_value in attrs.iteritems():\n            if isinstance(attr_value, types.FunctionType):\n                attrs[attr_name] = cls.deco(attr_value)\n        return super(DecoMeta, cls).__new__(cls, name, bases, attrs)\n\n    @classmethod\n    def deco(cls, func):\n        '''\n        We use this to append defaults actions here\n        '''\n        def wrapper(*args, **kwargs):\n            '''\n            This is a decorator for adding global key,value pairs\n            '''\n            # filter updates\n            filter_args = []\n            filter_kwargs = {}\n            keywords = {}\n            # filter parameters\n            for arg in args:\n                if isinstance(arg, str):\n                    filter_args.append(EnvManager()._sanitize(arg))\n                else:\n                    filter_args.append(arg)\n            # filter defaults\n            for key, value in kwargs.iteritems():\n                if isinstance(value, str):\n                    # create environment variable\n                    if 'set_env' == key:\n                        keywords[key] = EnvManager()._sanitize(value)\n                    else:\n                        if '$' in key:\n                            key = EnvManager()._sanitize(key)\n                            filter_kwargs[key] = EnvManager()._sanitize(value)\n                        else:\n                            filter_kwargs[key] = value\n            result = func(*filter_args, **filter_kwargs)\n            if keywords.get('set_env'):\n                EnvManager().set(keywords.get('set_env'),\n                                 result,\n                                 reset=True)\n            sys.stdout.flush()\n            return result\n        return wrapper\n\n\nclass Plugin(object):\n    ''' This is the base class for a plugin\n    '''\n    __metaclass__ = DecoMeta\n\n    def __init__(self, action_manager):\n        '''\n        Plugin constructor\n        '''\n        self.action_manager = action_manager\n        self.verbose = 
self.action_manager.verbose\n self.debug = self.action_manager.debug\n self.EnvManager = self.action_manager.EnvManager\n","sub_path":"src/goephor/core/plugins/pluginable.py","file_name":"pluginable.py","file_ext":"py","file_size_in_byte":2471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"423824642","text":"from . import BaseHelper\nimport json\n\n\nclass DjangoHelper(BaseHelper):\n def __init__(self, request):\n self.request = request\n\n def get_current_path(self):\n return self.request.get_full_path()\n\n def get_params(self):\n return self.request.GET.dict()\n\n def get_body(self):\n return self.request.body\n\n def redirect(self, url):\n from django.shortcuts import redirect\n return redirect(url)\n\n @staticmethod\n def cache_get(key):\n try:\n with open('wework_cache.txt', 'r') as f:\n data = f.read()\n data = {} if len(data) == 0 else json.loads(data)\n try:\n return data[key]\n except KeyError:\n return None\n except FileNotFoundError:\n return None\n\n @staticmethod\n def cache_set(key, value, **kwargs):\n filename = 'wework_cache.txt'\n try:\n with open(filename, 'r') as f:\n data = f.read()\n data = {} if len(data) == 0 else json.loads(data)\n except FileNotFoundError:\n data = {}\n\n with open(filename, 'w+') as f:\n data[key] = value\n f.write(json.dumps(data))\n","sub_path":"wework/helpers/official.py","file_name":"official.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"262908337","text":"#!/usr/bin/python3\n\"\"\"\nUnittest for base module\n\"\"\"\nimport io\nimport unittest\nimport unittest.mock\nimport json\nfrom models.base import Base\nfrom models.rectangle import Rectangle\nfrom models.square import Square\n\n\nclass Test_Base(unittest.TestCase):\n \"\"\" Tests for Base Class \"\"\"\n\n def setUp(self):\n Base._Base__nb_objects = 0\n\n def test_base_id(self):\n b1 = Base()\n self.assertEqual(b1.id, 1)\n b2 = Base()\n self.assertEqual(b2.id, 2)\n b3 = Base()\n self.assertEqual(b3.id, 3)\n b4 = Base(12)\n self.assertEqual(b4.id, 12)\n b5 = Base()\n self.assertEqual(b5.id, 4)\n\n def test_base_type(self):\n b1 = Base()\n self.assertTrue(type(b1) is Base)\n\n @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)\n def test_base_json_string(self, mock_stdout):\n r1 = Rectangle(10, 7, 2, 8)\n dictionary = r1.to_dictionary()\n self.assertDictEqual(dictionary, {'x': 2, 'y': 8, 'id': 1, 'height': 7,\n 'width': 10})\n json_dictionary = Base.to_json_string([dictionary])\n self.assertEqual(str([dictionary]).replace(\"'\", '\"'), json_dictionary)\n json_empty = Base.to_json_string([])\n self.assertEqual(str([]), json_empty)\n json_none = Base.to_json_string(None)\n self.assertEqual(str([]), json_none)\n print(type(dictionary))\n print(type(json_dictionary))\n self.assertEqual(mock_stdout.getvalue(),\n \"\"\"\n\n\"\"\")\n\n def test_base_save_json(self):\n r1 = Rectangle(10, 7, 2, 8)\n r2 = Rectangle(2, 4)\n Rectangle.save_to_file([r1, r2])\n with open(\"Rectangle.json\", \"r\") as f:\n data = json.load(f)\n a = [r1.to_dictionary(), r2.to_dictionary()]\n self.assertEqual(a, data)\n\n def test_base_string_json(self):\n list_input = [\n {'id': 89, 'width': 10, 'height': 4},\n {'id': 7, 'width': 1, 'height': 7}\n ]\n json_list_input = Rectangle.to_json_string(list_input)\n list_output = Rectangle.from_json_string(json_list_input)\n self.assertEqual(list_input, list_output)\n\n @unittest.mock.patch('sys.stdout', 
new_callable=io.StringIO)\n def test_base_create(self, mock_stdout):\n r1 = Rectangle(3, 5, 1)\n r1_dictionary = r1.to_dictionary()\n r2 = Rectangle.create(**r1_dictionary)\n print(r1)\n print(r2)\n print(r1 is r2)\n print(r1 == r2)\n s1 = Square(3)\n s1_dictionary = s1.to_dictionary()\n s2 = Square.create(**s1_dictionary)\n print(s1)\n print(s2)\n print(s1 is s2)\n print(s1 == s2)\n self.assertEqual(mock_stdout.getvalue(),\n \"\"\"[Rectangle] (1) 1/0 - 3/5\n[Rectangle] (1) 1/0 - 3/5\nFalse\nFalse\n[Square] (3) 0/0 - 3\n[Square] (3) 0/0 - 3\nFalse\nFalse\n\"\"\")\n\n def test_base_file(self):\n r1 = Rectangle(10, 7, 2, 8)\n r2 = Rectangle(2, 4)\n list_rectangles_input = [r1, r2]\n Rectangle.save_to_file(list_rectangles_input)\n list_rectangles_output = Rectangle.load_from_file()\n self.assertNotEqual(list_rectangles_input, list_rectangles_output)\n d1 = [i.to_dictionary() for i in list_rectangles_input]\n d2 = [i.to_dictionary() for i in list_rectangles_output]\n self.assertEqual(d1, d2)\n\n def tearDown(self):\n pass\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"0x0C-python-almost_a_circle/tests/test_models/test_base.py","file_name":"test_base.py","file_ext":"py","file_size_in_byte":3510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"327776292","text":"import tempfile\r\nimport pyutilib.th as unittest\r\nimport sys\r\nimport os.path\r\nimport os\r\nimport glob\r\nimport pandas as pd\r\nimport shutil\r\nimport datetime\r\nfrom datetime import datetime\r\nimport shutil\r\nimport mape_maker\r\ndir_sep = '/'\r\nfrom mape_maker import __main__ as mapemain\r\ndir_sep = \"/\"\r\np = str(mape_maker.__path__)\r\nl = p.find(\"'\")\r\nr = p.find(\"'\", l+1)\r\nmape_maker_path = p[l+1:r]\r\nfile_path = mape_maker_path + dir_sep + \"samples\"\r\n# whether to skip the last two tests\r\nquick_test = False\r\n# whether to run only one example\r\nskip_all_but_one = False\r\n\r\nclass TestUM(unittest.TestCase):\r\n\r\n def _basic_dict(self):\r\n basedict = {\"input_file\": \"\",\r\n \"target_mape\": None,\r\n \"simulated_timeseries\": \"forecasts\",\r\n \"base-process\": \"ARMA\",\r\n \"a\": 4,\r\n \"output_dir\": None,\r\n \"number_simulations\": 1,\r\n \"input_start_dt\": None,\r\n \"input_end_dt\": None,\r\n \"simulation_start_dt\": None,\r\n \"simulation_end_dt\": None,\r\n \"title\": None,\r\n \"seed\": None,\r\n \"load_pickle\": False,\r\n \"curvature\": None,\r\n \"time_limit\": 3600,\r\n \"curvature_target\": None,\r\n \"mip_gap\": 0.3,\r\n \"solver\": \"gurobi\",\r\n \"latex_output\": False,\r\n \"show\": True,\r\n \"verbosity\": 2,\r\n \"verbosity_output\": None\r\n }\r\n return basedict\r\n\r\n @classmethod\r\n def setUpClass(self):\r\n # make a temp dir\r\n self.temp_dir = tempfile.mkdtemp()\r\n sys.path.insert(1, self.temp_dir)\r\n # change to the temp directory\r\n os.chdir(self.temp_dir)\r\n self.cwd = os.getcwd()\r\n print(\"temporary directory:\", self.cwd)\r\n # path to the RTS wind data\r\n\r\n self.wind_data = file_path + dir_sep + \"based_rts_gmlc\" + \\\r\n dir_sep + \"Wind_rts_gmlc_based\" + dir_sep + \\\r\n \"processed_file.csv\"\r\n\r\n def test_commmand(self):\r\n \"\"\"\r\n here is the command :\r\n python -m mape_maker \"mape_maker/samples/based_rts_gmlc/Wind_rts_gmlc_based/processed_file.csv\" -st \"actuals\" -s 1234 -n 5 -bp \"ARMA\" -o \"wind_forecasts_actuals\" -is \"2020-2-1 00:00:00\" -ie \"2020-5-1 00:00:00\" -sd \"2020-2-2 00:00:00\" -ed \"2020-3-2 00:00:00\"\r\n :return:\r\n \"\"\"\r\n 
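# the parm_dict entries below appear to mirror the CLI flags from the docstring above (-st, -n, -bp, -o, -s, -is/-ie, -sd/-ed)\r\n        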
print(\"Running \", str(self.id()).split('.')[2])\r\n parm_dict = self._basic_dict()\r\n parm_dict[\"input_file\"] = self.wind_data\r\n parm_dict[\"simulated_timeseries\"] = \"actuals\"\r\n parm_dict[\"number_simulations\"] = 5\r\n parm_dict[\"base-process\"] = \"ARMA\"\r\n parm_dict[\"output_dir\"] = \"wind_forecasts_actuals\"\r\n parm_dict[\"seed\"] = 1234\r\n parm_dict[\"simulation_start_dt\"] = datetime(year=2020, month=2, day=2, hour=0, minute=0, second=0)\r\n parm_dict[\"simulation_end_dt\"] = datetime(year=2020, month=3, day=2, hour=0, minute=0, second=0)\r\n parm_dict[\"input_start_dt\"] = datetime(year=2020, month=2, day=1, hour=0, minute=0, second=0)\r\n parm_dict[\"input_end_dt\"] = datetime(year=2020, month=5, day=1, hour=0, minute=0, second=0)\r\n parm_list = list(parm_dict.values())\r\n mapemain.main_func(*parm_list)\r\n\r\nif __name__ == \"__main__\":\r\n unittest.main()","sub_path":"test/x_rts_wind_test.py","file_name":"x_rts_wind_test.py","file_ext":"py","file_size_in_byte":3497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"522632601","text":"# import config\n\n# config.a = 10\n# config.b = \"alphabet\"\n\n# Python program to test\n# internet speed\n\n# import speedtest\n\n\n# st = speedtest.Speedtest()\n\n# option = int(input('''What speed do you want to test:\n\n# 1) Download Speed\n\n# 2) Upload Speed\n\n# 3) Ping\n\n# Your Choice: '''))\n\n\n# if option == 1:\n\n# \tprint(st.download())\n\n# elif option == 2:\n\n# \tprint(st.upload())\n\n# elif option == 3:\n\n# \tservernames =[]\n\n# \tst.get_servers(servernames)\n\n# \tprint(st.results.ping)\n\n# else:\n\n# \tprint(\"Please enter the correct choice !\")\n\n\n# import datetime\n# import time\n# d = datetime.datetime.now()\n\n# print(d)\n\n\n# now = datetime.datetime.now()\n\n# t = now.strftime(\"%Y/%m/%d\")\n\n# print(t)\n\n# date = datetime.datetime(2003,8,1,12,4,5)\n# for i in range(5): \n# date += datetime.timedelta(days=1)\n# print(date) \n\nfrom datetime import datetime,timedelta\nimport time\n\ndef last_day(d, day_name):\n days_of_week = ['sunday','monday','tuesday','wednesday',\n 'thursday','friday','saturday']\n target_day = days_of_week.index(day_name.lower())\n delta_day = target_day - d.isoweekday()\n if delta_day >= 0: delta_day -= 7 # go back 7 days\n return d + timedelta(days=delta_day)\n\n\nprint(last_day(1,'sunday'))","sub_path":"update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"433443025","text":"from interference.clusters.processor import Processor\nfrom typing import Any, Dict, List, Optional, Sequence, Tuple\n\nimport numpy\n\nfrom scipy.spatial.distance import cdist\n\nimport numpy as np\n\nfrom enum import Enum\n\n\nclass Cluster:\n def __init__(self, tag: str, center: numpy.ndarray, index: int) -> None:\n self.center = center\n self.radius = 0\n self.tags = [tag]\n self.index = index\n\n def add_radius(self, tag: str, embedding: numpy.ndarray) -> None:\n self.tags.append(tag)\n\n def _adapt(self, distance: float, embedding: numpy.ndarray):\n direction = embedding - self.center\n self.radius = distance / 2\n self.center : numpy.ndarray = embedding - (direction / np.linalg.norm(direction)) * self.radius\n\n def add_threshold(self, distance: float, tag: str, embedding: numpy.ndarray) -> None:\n self.add_radius(tag, embedding)\n self._adapt(distance, embedding)\n\n def update_radius(self, tag: str, embedding: 
numpy.ndarray) -> None:\n pass\n\n def update_threshold(self, distance: float, tag: str, embedding: numpy.ndarray) -> None:\n self.update_radius(tag, embedding)\n self._adapt(distance, embedding)\n\n def remove(self, tag: str) -> None:\n self.tags.remove(tag)\n\n\nclass SearchResultType(Enum):\n RADIUS = 1\n THRESHOLD = 2\n OUTSIDE = 3\n\n\nclass ECM(Processor):\n\n def __init__(self, distance_threshold: float) -> None:\n self.clusters: Dict[int, Cluster] = {}\n self.distance_threshold = distance_threshold\n self.tag_to_cluster: Dict[str, int] = {}\n self.cluster_index = 0\n\n self.cached_cluster_keys: List[int] = []\n self.cached_cluster_centers: List[numpy.ndarray] = []\n self.cached_cluster_radiuses: List[float] = []\n\n def update(self, tag: str, embedding: numpy.ndarray) -> None:\n result, (searched_index, searched_distance) = self._search_index_and_distance(embedding)\n old_index = self.get_cluster_by_tag(tag)\n old_cluster = self.clusters[old_index]\n\n if result == SearchResultType.OUTSIDE:\n self._remove_from_cluster(old_cluster, tag)\n\n cluster = self._create_cluster(tag, embedding)\n index = cluster.index\n\n elif result == SearchResultType.RADIUS:\n if searched_index == old_index:\n old_cluster.update_radius(tag, embedding)\n\n index = searched_index\n else:\n self._remove_from_cluster(old_cluster, tag)\n\n new_cluster = self.clusters[searched_index]\n new_cluster.add_radius(tag, embedding)\n\n index = searched_index\n\n # elif result == SearchResultType.THRESHOLD:\n else:\n if searched_index == old_index:\n old_cluster.update_threshold(searched_distance, tag, embedding)\n\n index = searched_index\n else:\n self._remove_from_cluster(old_cluster, tag)\n\n new_cluster = self.clusters[searched_index]\n new_cluster.add_threshold(searched_distance, tag, embedding)\n\n index = searched_index\n\n self.tag_to_cluster[tag] = index\n self._invalidate_cached()\n\n def _remove_from_cluster(self, cluster: Cluster, tag: str) -> None:\n cluster.remove(tag)\n if len(cluster.tags) == 0:\n del self.clusters[cluster.index]\n\n def _create_cluster(self, tag: str, embedding: numpy.ndarray) -> Cluster:\n cluster = Cluster(tag, embedding, self.cluster_index)\n self.clusters[self.cluster_index] = cluster\n self.cluster_index += 1\n return cluster\n\n def remove(self, tag: str) -> None:\n index = self.get_cluster_by_tag(tag)\n cluster = self.clusters[index]\n\n del self.tag_to_cluster[tag]\n\n self._remove_from_cluster(cluster, tag)\n self._invalidate_cached()\n\n def get_cluster_by_tag(self, tag: str) -> int:\n return self.tag_to_cluster[tag]\n\n def get_tags_in_cluster(self, cluster_id: int) -> Sequence[str]:\n return self.clusters[cluster_id].tags\n\n def get_cluster_ids(self) -> Sequence[int]:\n return list(self.clusters.keys())\n\n def process(self, tag: str, embedding: numpy.ndarray) -> None:\n if len(self.clusters) == 0:\n cluster = self._create_cluster(tag, embedding)\n\n else:\n search_result, (index, distance) = self._search_index_and_distance(embedding)\n\n if search_result == SearchResultType.RADIUS:\n cluster = self.clusters[index]\n cluster.add_radius(tag, embedding)\n\n elif search_result == SearchResultType.THRESHOLD:\n cluster = self.clusters[index]\n cluster.add_threshold(distance, tag, embedding)\n\n # search_result == SearchResultType.OUTSIDE\n else:\n cluster = self._create_cluster(tag, embedding)\n\n self.tag_to_cluster[tag] = cluster.index\n self._invalidate_cached()\n\n def _invalidate_cached(self):\n self.cached_cluster_keys = []\n self.cached_cluster_centers = []\n 
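# the radius cache below is cleared too; _ensure_cached() lazily rebuilds all three lists on the next search\n        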
self.cached_cluster_radiuses = []\n\n    def _ensure_cached(self):\n        if not self.cached_cluster_keys and len(self.clusters) > 0:\n            self.cached_cluster_keys = []\n            self.cached_cluster_centers = []\n            self.cached_cluster_radiuses = []\n            for index, cluster in self.clusters.items():\n                self.cached_cluster_keys.append(index)\n                self.cached_cluster_centers.append(cluster.center)\n                self.cached_cluster_radiuses.append(cluster.radius)\n\n\n    def _search_index_and_distance(self, embedding: numpy.ndarray) -> \\\n        Tuple[SearchResultType, Tuple[int, float]]:\n\n        self._ensure_cached()\n\n        distances = cdist(\n            np.array([embedding]),\n            np.array(self.cached_cluster_centers),\n            'euclidean'\n        )[0]\n\n        diffs = distances - self.cached_cluster_radiuses\n\n        possible_indexes = np.where(diffs <= 0)[0]\n\n        possible = distances[possible_indexes]\n\n        min_index: Optional[int] = None if possible.size == 0 else possible_indexes[possible.argmin()]\n\n        if min_index is not None:\n            return SearchResultType.RADIUS, (self.cached_cluster_keys[min_index], distances[min_index])\n\n        distances_plus_radiuses = distances + self.cached_cluster_radiuses\n        lowest_distance_and_radius_index = np.argmin(distances_plus_radiuses)\n        lowest_distance_and_radius: float = distances_plus_radiuses[lowest_distance_and_radius_index]\n\n        actual_index = self.cached_cluster_keys[lowest_distance_and_radius_index]\n\n        if lowest_distance_and_radius > 2 * self.distance_threshold:\n            return SearchResultType.OUTSIDE, (actual_index, lowest_distance_and_radius)\n\n        else:\n            return SearchResultType.THRESHOLD, (actual_index, lowest_distance_and_radius)\n\n    def describe(self) -> Dict[str, Any]:\n        \"\"\"\n        This describes this clustering algorithm's parameters\n        \"\"\"\n\n        return {\n            \"name\": \"ECM\",\n            \"parameters\": {\n                \"distance threshold\": self.distance_threshold\n            }\n        }\n\n    def safe_file_name(self) -> str:\n        return f\"ECM = distance_threshold={self.distance_threshold}\"\n\n    def predict(self, embedding: numpy.ndarray) -> int:\n        search_result, (index, _) = self._search_index_and_distance(embedding)\n\n        # FIXME: What should predict do in this case?\n        if search_result == SearchResultType.OUTSIDE:\n            return index\n\n        elif search_result == SearchResultType.THRESHOLD:\n            return index\n\n        #elif search_result == SearchResultType.RADIUS:\n        else:\n            return index","sub_path":"interference/clusters/ecm.py","file_name":"ecm.py","file_ext":"py","file_size_in_byte":7820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"94122977","text":"#!/usr/bin/env python\nfrom os.path import *\n# me\nfrom isstring import *\nfrom public import *\n\n\n@public\ndef name(path):\n    \"\"\"return file name without extension\"\"\"\n    if not path:\n        return\n    if path and not isstring(path):\n        path = str(path)\n    if path[-1] in [\"/\", \"\\\\\"]:\n        path = path[0:-1]\n    return splitext(basename(path))[0]\n\nif __name__ == \"__main__\":\n    print(name(__file__)) # name\n    print(name(\"name\")) # name\n    print(name(\"name.ext.ext2\")) # name.ext\n","sub_path":"py_modules/name.py","file_name":"name.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"242172306","text":"\"\"\"\nThe program assumes an existing, well-ordered database with data for a given sales segment. It supports fetching data from the database and returns an Excel file with a structured listing of the sales program.\n\n\"\"\"\n
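# Note: the functions below assume a SQLite database file named SQL_<segment>.db (here SQL_chiller.db) containing the GENERAL, COOLING_EUROVENT, COOLING_GROSS and SEASONAL_EFF_COOLING tables.\n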
from pathlib import Path\nfrom re import sub, split\nfrom itertools import count\nfrom csv import writer, reader, QUOTE_MINIMAL\nfrom collections import defaultdict\nimport sqlite3\nfrom openpyxl import Workbook, styles\n\nimport pomozne_funcV2 as pfun\nimport pomozne_func_sqlite as pfuns\n\n# region VpisExcel\n\nsegment = 'chiller'\nskupine = None\nizvedbe = None\nvelikosti = None\nvelikosti_cevni = None\nprevodi = dict()\n\ndef poisci_skupine():\n    global skupine\n    b = sqlite3.connect(\"SQL_\"+segment+\".db\")\n    baza = b.cursor()\n    baza.execute('''SELECT DISTINCT \"{}\" \n                    FROM \"GENERAL\" \n                    ORDER BY NumID'''.format('Group'))\n    skupine = [i[0] for i in baza.fetchall()]\n    b.close()\n    return skupine\n\ndef poisci_izvedbe(skupina):\n    global izvedbe\n    b = sqlite3.connect(\"SQL_\"+segment+\".db\")\n    baza = b.cursor()\n    baza.execute('''SELECT DISTINCT \"{}\" \n                    FROM \"GENERAL\"\n                    WHERE \"Group\"=\"{}\"\n                    ORDER BY NumID'''.format('Version', skupina))\n    izvedbe = [i[0] for i in baza.fetchall()]\n    b.close()\n    return izvedbe\n\ndef poisci_velikosti(skupina, izvedba):\n    global velikosti\n    b = sqlite3.connect(\"SQL_\"+segment+\".db\")\n    baza = b.cursor()\n    baza.execute('''SELECT DISTINCT \"{}\" \n                    FROM \"GENERAL\"\n                    WHERE \"Group\"=\"{}\" AND Version=\"{}\"\n                    ORDER BY NumID'''.format('Size', skupina, izvedba))\n    velikosti = [i[0] for i in baza.fetchall() if i[-1] != 'T']\n    b.close()\n    return velikosti\n\ndef poisci_velikosti_cevni(skupina, izvedba):\n    global velikosti_cevni\n    b = sqlite3.connect(\"SQL_\"+segment+\".db\")\n    baza = b.cursor()\n    baza.execute('''SELECT DISTINCT \"{}\" \n                    FROM \"GENERAL\"\n                    WHERE \"Group\"=\"{}\" AND Version={}\n                    ORDER BY NumID'''.format('Size', skupina, izvedba))\n    velikosti_cevni = [i[0] for i in baza.fetchall() if i[-1] == 'T']\n    b.close()\n    return velikosti_cevni\n\nclass ExDatoteka:\n    st_opis = 0\n    st_list = 0\n    st_zvezek = 0\n    zvezek = Workbook()\n    ex_skupina = 'Neznano'\n    ex_izvedbe = set()\n\n    def __init__(self, ex_skupina):\n        ExDatoteka.ex_skupina = ex_skupina\n        ExDatoteka.st_opis = 0\n        ExDatoteka.st_zvezek += 1\n        ExDatoteka.zvezek = Workbook()\n\n    @classmethod\n    def ex_shrani(self):\n        ExDatoteka.zvezek.save(self.ex_skupina + '.xlsx')\n    \n    @classmethod\n    def ex_zbrisi_sheet(self):\n        std = ExDatoteka.zvezek.get_sheet_by_name('Sheet')\n        ExDatoteka.zvezek.remove_sheet(std)\n\nclass ExStran(ExDatoteka):\n\n    ex_opis = '***Splošni opis enote***'\n    # postavke = [('Hladilna moč', 'kW'), ('EER (EN14511 metoda)', ''), \n    # ('ESEER (EN14511 metoda)', ''), ('SEER (Reg. EU 2016/2281)', ''), ('Električna moč', 'kW'), ('El. priključek', ''), \n    # ('Zvočni tlak (SPL)', 'dB(A)'), ('Zvočna moč (PWL)', 'dB(A)'),\n    # ('Število hladilnih krogov', ''), ('Število kompresorjev', ''), ('Dolžina', 'mm'), ('Širina', 'mm'),('Višina', 'mm'), ('Teža', 'kg')]\n    \n    postavke = [('Hladilna moč', 'kW'), ('EER (EN14511 metoda)', ''), \n        ('ESEER (EN14511 metoda)', ''), ('SEER (Reg. EU 2016/2281)', ''),\n        ('El. 
priključek', ''), \n ('Zvočni tlak (SPL)', 'dB(A)'), ('Zvočna moč (PWL)', 'dB(A)'),\n ('Število hladilnih krogov', ''), ('Število kompresorjev', ''), ('Dolžina', 'mm'), ('Širina', 'mm'),('Višina', 'mm'), ('Teža', 'kg')]\n\n def __init__(self, ex_izvedba):\n self.ex_izvedba = ex_izvedba\n \n if not ex_izvedba in ExDatoteka.zvezek.sheetnames:\n temp_ime_lista = '_'.join(\n [ExDatoteka.ex_skupina, ex_izvedba])\n ExDatoteka.zvezek.create_sheet(temp_ime_lista)\n self.ex_stran = ExDatoteka.zvezek[temp_ime_lista]\n\n ExDatoteka.ex_izvedbe.add(ex_izvedba)\n ExDatoteka.st_list += 1\n ExDatoteka.st_opis = 0\n \n def temp_naslovna_vr(self):\n naslovna = ['Zap. št.', 'Prodajni program', 'Količina',\n 'Cena/kos', 'Prodajna cena']\n for stolpec, vrednost in enumerate(naslovna, 1):\n _ = self.ex_stran.cell(column=stolpec, row=1, value=vrednost)\n\n def temp_dimenzioniraj(self):\n self.ex_stran.column_dimensions['A'].width = 5\n self.ex_stran.column_dimensions['B'].width = 60\n self.ex_stran.column_dimensions['C'].width = 10\n self.ex_stran.column_dimensions['D'].width = 15\n self.ex_stran.column_dimensions['E'].width = 15\n double = styles.Side(border_style=\"double\", color=\"111111\")\n for c in self.ex_stran[1]:\n c.fill = styles.PatternFill(\"solid\", fgColor='ffff99')\n c.alignment = styles.Alignment(wrap_text=True)\n c.border = styles.Border(bottom=double)\n for v in self.ex_stran.iter_rows():\n v[1].alignment = styles.Alignment(wrap_text=True)\n if v[0].value:\n for c in v:\n c.font = styles.Font(bold=True)\n elif v[2].value:\n for c in v[3:]:\n c.number_format = '0.00'\n\n def ex_zapisi_podatke(self, objekt):\n i = objekt.t_dol * (ExDatoteka.st_opis-1) + 3\n self.ex_stran.cell(column=1, row=i, \n value=str(ExDatoteka.st_opis) + '.')\n self.ex_stran.cell(column=2, row=i, \n value=' '.join(['Hladilni agregat Climaveneta', ExDatoteka.ex_skupina + '/', self.ex_izvedba, objekt.velikost]))\n self.ex_stran.cell(column=2, row=i+1, value=objekt.ex_opis)\n self.ex_stran.cell(column=2, row=i+2, \n value=' '.join(\n ['PROIZVAJALEC:', \n 'Mitsubishi Electric Hydronics & IT Cooling Systems S.p.A, Italija']))\n self.ex_stran.cell(column=2, row=i+3, \n value='UVOZNIK: REAM d.o.o., Trzin')\n self.ex_stran.cell(column=2, row=i+5, value='TEHNIČNI OPIS:')\n for j in range(len(objekt.tehnicni_podatki)):\n if objekt.tehnicni_podatki[j]:\n _ = '{}: {} {}'.format(ExStran.postavke[j][0], \n objekt.tehnicni_podatki[j], ExStran.postavke[j][1])\n self.ex_stran.cell(column=2, row=i+j+6, value=_)\n vr = i+len(objekt.tehnicni_podatki)+5\n self.ex_stran.cell(column=3, row=vr, value=1)\n self.ex_stran.cell(column=4, row=vr, value=0.00)\n self.ex_stran.cell(column=5, row=vr, \n value=r'=$C{}*$D{}'.format(vr, vr))\n return None\n\nclass ExOpis(ExStran):\n\n def __init__(self, ob_stran, velikost):\n ExDatoteka.st_opis += 1\n self.velikost = velikost\n self.ex_productID = ' '.join(\n filter(None, \n [ExDatoteka.ex_skupina, ob_stran.ex_izvedba, self.velikost]))\n self.tehnicni_podatki = []\n self.t_dol = 0\n\n def ex_sestavi_naziv(self):\n return ' '.join(['Hladilni agragat Climaveneta', self.ex_productID])\n \n def ex_dimenzije(self):\n b = sqlite3.connect(\"SQL_\"+segment+\".db\")\n baza = b.cursor()\n baza.execute('''SELECT \"A\", \"B\", \"H\" \n FROM GENERAL\n WHERE productID=?''', (self.ex_productID,))\n _ = [i for i in baza.fetchall()[0]]\n dimenzije = '{} x {} x {}'.format(*_)\n b.close()\n return dimenzije\n\n def ex_teh_opis(self):\n '''\n TEHNIČNI PODATKI:\n Hladilna moč:\n EER (EN14511 metoda):\n ESEER (EN14511 metoda):\n 
SEER (Reg. EU 2016/2281):\n        El.moč:\n        El. priključek:\n        Zvočni tlak SPL:\n        Zvočna moč PWL:\n        Število hladilnih krogov:\n        Število kompresorjev:\n        Dolžina:\n        Širina:\n        Višina:\n        Teža:\n        '''\n\n        b = sqlite3.connect(\"SQL_\"+segment+\".db\")\n        baza = b.cursor()\n        baza.execute('''SELECT \"Cooling capacity\" \n                        FROM COOLING_EUROVENT\n                        WHERE productID=?''', (self.ex_productID,))\n        self.tehnicni_podatki.append(baza.fetchall()[0][0])\n        baza.execute('''SELECT \"EER\", \"ESEER\" \n                        FROM COOLING_EUROVENT\n                        WHERE productID=?''', (self.ex_productID,))\n        for i in baza.fetchall()[0]:\n            try:\n                ba, de = i.split(',')\n                self.tehnicni_podatki.append(ba+','+de[:2])\n            except:\n                if i:\n                    self.tehnicni_podatki.append(i)\n                else:\n                    self.tehnicni_podatki.append('-')\n        baza.execute('''SELECT \"SEER\"\n                        FROM SEASONAL_EFF_COOLING\n                        WHERE productID=?''', (self.ex_productID,))\n        i = baza.fetchall()[0][0].split(',')\n        try:\n            ba, de = i\n            self.tehnicni_podatki.append(ba+','+de[:2])\n        except:\n            self.tehnicni_podatki.append('-')\n        baza.execute('''SELECT \"Total power input\" \n                        FROM COOLING_GROSS\n                        WHERE productID=?''', (self.ex_productID,))\n        # self.tehnicni_podatki.append(baza.fetchall()[0][0])\n        baza.execute('''SELECT \"Power supply\"\n                        FROM GENERAL\n                        WHERE productID=?''', (self.ex_productID,))\n        if '400' in baza.fetchall()[0][0]:\n            self.tehnicni_podatki.append('400V/ 3F/ 50Hz')\n        else:\n            self.tehnicni_podatki.append('230V/ 1F/ 50Hz')\n        baza.execute('''SELECT \"Sound Pressure\", \n            \"Sound power level in cooling\", \"No. Circuits\", \"Compressors nr.\", \"A\", \"B\", \"H\", \"Operating weight\"\n                        FROM GENERAL\n                        WHERE productID=?''', (self.ex_productID,))\n        self.tehnicni_podatki.extend([i for i in baza.fetchall()[0]])\n        b.close()\n        self.t_dol = len(list(filter(None,self.tehnicni_podatki))) + 8\n        print(self.tehnicni_podatki)\n        return self.tehnicni_podatki\n\n\ndef main2():\n    poisci_skupine()\n    for sk in skupine:\n        dat = ExDatoteka(sk)\n        # the workbook file corresponds to one group of units\n        for iz in poisci_izvedbe(sk):\n            stran = ExStran(iz)\n            # dat.zamenjaj_list()\n            # add the page style\n            for vel in poisci_velikosti(sk, iz):\n                opis = ExOpis(stran, vel)\n                opis.ex_teh_opis()\n                stran.ex_zapisi_podatke(opis)\n                # print(opis.tehnicni_podatki)\n            stran.temp_naslovna_vr()\n            stran.temp_dimenzioniraj()\n        dat.ex_zbrisi_sheet()\n        dat.ex_shrani()\n# main2()\n    \n# endregion","sub_path":"CLIMAVENETA/V2/02_beri_bazo_chiller.py","file_name":"02_beri_bazo_chiller.py","file_ext":"py","file_size_in_byte":10825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"391016137","text":"#!/usr/bin/env python3\n\nimport argparse\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pylab\n\n# Read filename from command line or use default\n#(https://levelup.gitconnected.com/the-easy-guide-to-python-command-line-arguments-96b4607baea1)\nparser = argparse.ArgumentParser(description='Optional filename input')\nparser.add_argument(\"-fname1\", default=\"sim.txt\", help=\"filename\")\nparser.add_argument(\"-fname2\", default=\"rup.txt\", help=\"filename\")\nargs = parser.parse_args()\nfname1 = args.fname1\nfname2 = args.fname2\n\n# Import data and print overview\ndat=np.loadtxt(fname = fname1)\nhmin=np.loadtxt(fname = fname2)\n\n# Plot time-evolution of film\nplt.rc('text', usetex=True)\nplt.rc('font', family='serif')\n#\"\"\"\n#plt.plot(dat[:,0],dat[:,1],label=r'$\\tilde{t} = 0$', linewidth=3)\n#plt.plot(dat[:,0],dat[:,2],label=r'$\\tilde{t} \\simeq 0.50 \\tilde{t}_r$', 
linewidth=3)\n#plt.plot(dat[:,0],dat[:,3],label=r'$\\tilde{t} \\simeq 0.95 \\tilde{t}_r$', linewidth=3)\n#plt.plot(dat[:,0],dat[:,4],label=r'$\\tilde{t} \\simeq \\tilde{t}_r$', linewidth=3)\nplt.plot(dat[:,0],dat[:,1],label=r'$\\tilde{t} = 0$', linewidth=3)\nplt.plot(dat[:,0],dat[:,2],label=r'$\\tilde{t} = T/100$', linewidth=3)\nplt.plot(dat[:,0],dat[:,3],label=r'$\\tilde{t} = T/10$', linewidth=3)\nplt.plot(dat[:,0],dat[:,4],label=r'$\\tilde{t} = T$', linewidth=3)\nplt.xlim(-4.44,4.44)\n#plt.ylim(0,1.2)\nplt.ylim(0,2)\nplt.ylabel(r'Dimensionless Height $\\tilde{h}$',fontsize=16)\nplt.xlabel(r'Periodic Width $\\tilde{x}$',fontsize=16)\nxtics = [r'$\\frac{-\\pi}{\\tilde{k}_m}$',r'0',r'$\\frac{\\pi}{\\tilde{k}_m}$']\nplt.xticks(np.arange(-4.44,8.88,4.44),xtics,fontsize=14)\nplt.yticks(fontsize=14)\n#plt.legend(loc='lower right', fontsize=16)\nplt.legend(loc='upper center', fontsize=16)\nplt.show()\n#\"\"\"\n\n# Plot time-evolution of minimum height\n\"\"\"\nx = np.linspace(0, 18, 1000)\n#x = np.linspace(0, 2.035, 1000)\n#plt.plot(hmin[:,1],hmin[:,2], \\\n# linewidth=3,label=r'$-\\ln[(\\tilde{t}_r-\\tilde{t})^{1/5}]$')\n#plt.plot(hmin[:,0],hmin[:,2], \\\n# linewidth=3,label=r'$-\\ln(\\tilde{t}_r-\\tilde{t})$')\nplt.plot(hmin[:,1],hmin[:,2], 'r2', markersize = 10, \\\n linewidth=3,label=r'$-\\ln[(\\tilde{t}_r-\\tilde{t})^{1/5}]$')\nplt.plot(hmin[:,0],hmin[:,2], 'b1', markersize = 10, \\\n linewidth=3,label=r'$-\\ln(\\tilde{t}_r-\\tilde{t})$')\nz = np.polyfit(hmin[:,1],hmin[:,2], 1)\np = np.poly1d(z)\npylab.plot(hmin[:,1],p(hmin[:,1]),\"--\")\nz = np.polyfit(hmin[:,0],hmin[:,2], 1)\np = np.poly1d(z)\npylab.plot(hmin[:,0],p(hmin[:,0]),\"--\")\n#plt.plot(x,5.6*(x-1.8),linewidth=3,label=r'Slope=5.6')\n#plt.plot(x,1*(x-7.9),linewidth=3,label=r'Slope=1')\n\n#plt.plot(x,-np.log(x**(1/5)),linewidth=3,label=r'Slope=1')\nplt.xlim(2,18)\nplt.ylim(5,12)\nplt.ylabel(r'$-\\ln(\\tilde{h}_{min})$',fontsize=16)\n#plt.xlabel(r'$-\\ln[(\\tilde{t}_r-\\tilde{t})^{1/5}]$',fontsize=16)\nplt.xticks(fontsize=14)\nplt.yticks(fontsize=14)\nplt.legend(loc='upper center', fontsize=16)\nplt.show()\n#\"\"\"\n","sub_path":"fluid/visualise.py","file_name":"visualise.py","file_ext":"py","file_size_in_byte":2866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"415111472","text":"#!/usr/bin/python\n#Let's assume you have the current time series for a given stock\nstock_price=[12.32, 12.10, 12.12, '12.21', 12.20, 12.20, 12.20, 'number', 12.42, 1.2, 12.65, 12.43, 12.54]\n\n# You need to store this stock into a float variable\na = 0.0\n\n# For that, you are going to use an exception to detect when you receive a string instead of a float\nfor price in stock_price:\n try:\n a = float(price)\n except TypeError:\n print(\"TypeError: %s\"%price)\n except ValueError:\n print(\"ValueError: %s\"%price)\n\n#create a new list converting string to float except when not possible\n#you will assign this new list to clean_stock_price\nclean_stock_price = []\nfor price in stock_price:\n try:\n a = float(price)\n clean_stock_price.append(a)\n except (TypeError,ValueError):\n pass\n\n\n# you will also create a function raising an exception when this function find an outlier\n# use raise Exception('outlier')\n\nimport numpy as np\ndef raise_outlier_exception(price_list):\n variance = np.std(price_list)\n mean = np.mean(price_list)\n print(\"mean: %f, variance: %f\"%(mean,variance))\n # If the price is 2*variance away from mean, raise outlier exception\n for price in price_list:\n if 
abs(price-mean) > 2*variance:\n raise Exception(\"outlier: %s\"%price)\n#\ndef main():\n \"\"\"\n You can test your code here\n \"\"\"\n # you will now use clean_strock_price as an argument of the function raise_outlier_exception\n try:\n raise_outlier_exception(clean_stock_price)\n except Exception as inst:\n print(inst)\n\nif __name__ == '__main__':\n main()\n","sub_path":"assignment3/part7.py","file_name":"part7.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"511217643","text":"#Sonar Treasure Hunt\n\nimport random\nimport math\nimport sys\n\ndef getNewBoard():\n \n board=[]\n \n for y in range (15):\n \n board.append([])\n \n for x in range (60):\n board [y].append(random.choice(['~','`']))\n \n return board\n \ndef drawBoard(board):\n \n tensDigitsLine = ' '\n \n for i in range (1,6):\n tensDigitsLine += (' '*9) + str(i)\n \n print(tensDigitsLine)\n print(' ' + ('0123456789'*6))\n print()\n \n for row in range(15):\n \n if row<10:\n extraspace=' '\n else:\n extraspace=' '\n \n boardRow=''\n \n for column in range (60):\n boardRow += board[row][column]\n \n print('%s%s %s %s' %(extraspace, row, boardRow, row))\n \n print()\n print(' ' + ('0123456789'*6))\n print(tensDigitsLine)\n \ndef getRandomChests(numChest):\n chests=[]\n while len(chests) 0:\n \n print('You have %s sonar device(s) left. %s treasure chest(s) remaining.' % (sonarDevices, len(theChests)))\n \n x, y = enterPlayerMove(previousMoves)\n previousMoves.append([x, y])\n moveResult = makeMove(theBoard, theChests, x, y)\n if moveResult == False:\n continue\n else:\n if moveResult == 'You have found a sunken treasure chest!':\n for x, y in previousMoves:\n makeMove(theBoard, theChests, x, y)\n drawBoard(theBoard)\n print(moveResult)\n if len(theChests) == 0:\n print('You have found all the sunken treasure chests! Congratulations and good game!')\n break\n sonarDevices -= 1\n if sonarDevices == 0:\n print('We\\'ve run out of sonar devices! Now we have to turn the ship around and head')\n print('for home with treasure chests still out there! Game over.')\n print(' The remaining chests were here:')\n for x, y in theChests:\n print(' %s, %s' % (x, y))\n \n print('Do you want to play again? (yes or no)')\n if not input().lower().startswith('y'):\n sys.exit()","sub_path":"Sonar Treasure Hunt.py","file_name":"Sonar Treasure Hunt.py","file_ext":"py","file_size_in_byte":6354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"212024200","text":"#!/usr/bin/env python3\n\n#Authors: M269 Module Team\n#Date: 09/12/13\n\n# Helper function for hashFold(). The function\n# splits its integer argument into pairs of\n# digits which are returned in a list.\ndef chopIntoPairs(aNumber):\n numString = str(aNumber)\n pairs = []\n for i in range( 0,len(numString), 2):\n pairs.append(int(numString[i: i + 2]))\n print(\"chopping, \", aNumber, \" into pairs: \", pairs)\n \n return pairs\n\n\n# Helper function for hashMidSquare(). 
The function\n# returns the middle two digits of its argument if the\n# argument has an even number of digits, otherwise it\n# returns the single middle digit.\ndef getMiddle(aNumber):\n    numString = str(aNumber)\n    midPoint = len(numString) // 2\n    if (len(numString) % 2) == 0:\n        middle = int(numString[midPoint - 1:midPoint + 1])\n    else:\n        middle = int(numString[midPoint])\n    return middle\n\n\ndef hashFold(aNumber, tableSize):\n    numList = chopIntoPairs(aNumber)\n    total = 0\n    for item in numList:\n        total = total + item\n        print('adding pairs in hashfold function... New total = ', total)\n    return total % tableSize\n\n\ndef hashMidSquare(aNumber, tableSize):\n    numberSquared = aNumber * aNumber\n    midSequence = getMiddle(numberSquared)\n    hashNumber = midSequence % tableSize\n    return hashNumber\n\n\n# Code to test the functions\nprint(hashFold(1459862903, 23))\nprint(hashMidSquare(1459862903, 23))\n","sub_path":"Python_M269/Python_activity_4.5/Python_activity_4.5_sol.py","file_name":"Python_activity_4.5_sol.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"141407506","text":"import string\nimport pandas as pd\n\nclass KrakenResult():\n    def __init__(self, split_line):\n        self.sample_name = split_line[0]\n        self.percent_reads_assigned = float(split_line[1])\n        self.number_reads_rooted_here = int(split_line[2])\n        self.number_reads_assigned_here = int(split_line[3])\n        self.taxonomic = split_line[4]\n        self.rank = self.taxonomic.rstrip(string.digits)\n        self.ncbi_id = int(split_line[5])\n        self.name = split_line[6].strip()\n\ndef read_in_parsed_kraken_result(kraken_inhandle):\n    kraken_results = pd.read_csv(kraken_inhandle, sep = '\\t', header = None)\n    return kraken_results\n\ndef make_output_dataframe(kraken_results):\n    kraken_interpretation = pd.DataFrame()\n    kraken_interpretation['sample_name'] = kraken_results[0].unique().tolist()\n    return kraken_interpretation\n\ndef get_count_column(contam):\n    output_list = []\n    counting_dict = {}\n    for name in contam[0]:\n        if name in counting_dict:\n            counting_dict[name] += 1\n            output_list.append(counting_dict[name])\n        else:\n            counting_dict[name] = 1\n            output_list.append(counting_dict[name])\n    return output_list\n\ndef identify_contaminated_samples(kraken_results):\n    contam = kraken_results.loc[kraken_results[6] != 'Salmonella enterica']\n    contam = contam.assign(contam_string = [f'{x}; {y}' for x, y in zip(contam[6], contam[1])])\n    contam = contam[[0, 'contam_string']]\n    ## need to add a column index, in addition to the \"row\" index which will be the sample name\n    count_column = get_count_column(contam)\n    contam = contam.assign(count_column = count_column)\n    contam = contam.pivot(index = 0, columns = 'count_column', values = 'contam_string')\n    return contam\n\ndef main(kraken_inhandle):\n    kraken_results = read_in_parsed_kraken_result(kraken_inhandle)\n    kraken_interpretation = make_output_dataframe(kraken_results)\n    contam = identify_contaminated_samples(kraken_results)\n    # with pd.option_context('display.max_rows', None, 'display.max_columns', None):\n    #     print(contam)\n\nroot_dir = '/Users/flashton/Dropbox/GordonGroup/ben_kumwenda_genomes'\nkraken_inhandle = f'{root_dir}/kraken2/results/2020.10.08/2020.10.08.parsed_results_Feasy_Ent.txt'\n\nif __name__ == '__main__':\n    
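# builds the per-sample dataframe and the contamination pivot table from the parsed Kraken2 report\n    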
main(kraken_inhandle)\n\n","sub_path":"interpret_kraken2_output.py","file_name":"interpret_kraken2_output.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"166898509","text":"#!/usr/bin/python\n\n\"\"\"\nfile_read_lines: read lines from file. one line string, one list item;\n \\r or \\r\\n of the line string is stripped off;\nfile_write_lines: write the line string (list) to file, append \\n at the end of line string;\n\nf_path = './test.txt'\nlines = file_read_lines(f_path) # lines is look like, lines = [\"one\", \"two\", \"three\"]\n\nlines = [\"one\", \"two\", \"three\", \"four\"]\nfile_write_lines(f_path, lines)\n\n\"\"\"\n\nimport re\n\n__all__ = [\"file_read_lines\", \"file_write_lines\"]\n\n\n## read lines from file, strip the end char '\\n' or '\\r\\n' (one item, one line)\n## @path: file path string\n## return: string lines with type of list()\ndef file_read_lines(path):\n lines = list()\n f_obj = open(path, 'r')\n try :\n f_lines = f_obj.readlines()\n # remove char '\\n' or '\\r\\n' of the string end\n rgx = '[\\r]?\\n'\n regexp = re.compile(rgx)\n for line in f_lines :\n line = regexp.sub('', line)\n lines.append(line)\n finally:\n f_obj.close()\n return lines\n\n\n## write list strings to file (one item, one line)\n## @path: file path string\n## @lines: string lines with type of list()\ndef file_write_lines(path, lines):\n f_obj = open(path, 'w+')\n try :\n f_lines = list()\n for line in lines :\n f_lines.append(line + '\\n')\n f_obj.writelines(f_lines)\n f_obj.flush()\n finally:\n f_obj.close()\n\n\n","sub_path":"libcom/lib_pub/file_process.py","file_name":"file_process.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"132081311","text":"from sys import argv as args\nimport re\nimport string\n\n\ndef parse_coords(coords):\n coords = coords.split(':')[1].split(',')\n parsed = sum([x.strip().split('-') for x in coords], [])\n parsed = '_'.join(sorted(parsed))\n return parsed\n\nargs = args[1:]\n\nfilename = args[0]\nstrand = open(filename, 'r')\n\ntranslate = string.maketrans('-,', '__')\nloops = []\npatterns = ['***internal', '***hairpin']\nline = strand.readline()\ncount = 0\nwhile line:\n match = False\n for pattern in patterns:\n if line.find(pattern) >= 0:\n match = True\n\n if match:\n # print(line)\n line = strand.readline()\n parsed = parse_coords(line)\n count += 1\n print(parsed)\n else:\n line = strand.readline()\n","sub_path":"py/parse_strand.py","file_name":"parse_strand.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"341919760","text":"import numpy as np\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch.nn import init\nimport torch.nn.init as weight_init\nfrom torch.autograd import Variable\n\nchannels = 10\n\nclass Encoder(nn.Module):\n\tdef __init__(self, latent_size=2048, time_latent_size=64, hidden_latent_size=1024):\n\t\tsuper(Encoder, self).__init__()\n\t\tself.latent_size = latent_size\n\t\tself.leaky = nn.LeakyReLU(0.2, inplace=True)\n\n\t\tself.conv0 = nn.Conv2d(channels, 32, 5, stride=1, padding=(2, 2))\n\t\tself.batch0 = nn.BatchNorm2d(32)\n\t\tself.maxpool0 = nn.MaxPool2d(2, return_indices=True)\n\t\t# 32 x 64 x 64\n\t\t# # 32 x 32 x 32\n\n\t\tself.conv1 = nn.Conv2d(32, 64, 5, stride=1, padding=(2, 2))\n\t\tself.batch1 = 
nn.BatchNorm2d(64)\n\t\tself.maxpool1 = nn.MaxPool2d(2, return_indices=True)\n\t\t# 64 x 32 x 32\n\t\t## 64 x 16 x 16\n\n\t\tself.conv2 = nn.Conv2d(64, 128, 1, stride=1)\n\t\tself.batch2 = nn.BatchNorm2d(128)\n\t\tself.maxpool2 = nn.MaxPool2d(2, return_indices=True)\n\t\t# 128 x 16 x 16\n\t\t# #128 x 8 x 8\n\n\t\t#self.hidden_units = 128*16*16\n\t\tself.hidden_units = 128*8*8\n\n\t\tself.linear0 = nn.Linear(self.hidden_units, hidden_latent_size)\n\t\tself.linear1 = nn.Linear(hidden_latent_size, latent_size)\n\n\t\tself.tlinear0 = nn.Linear(1, time_latent_size)\n\t\tself.tlinear1 = nn.Linear(time_latent_size, time_latent_size)\n\t\tself.tlinear2 = nn.Linear(time_latent_size, time_latent_size)\n\n\n\tdef forward(self, x, t):\n\t\tx = self.leaky(self.conv0(x))\n\t\tx = self.batch0(x)\n\t\tx, mpi0 = self.maxpool0(x)\n\t\t#print(x.shape)\n\n\t\tx = self.leaky(self.conv1(x))\n\t\tx = self.batch1(x)\n\t\tx, mpi1 = self.maxpool1(x)\n\t\t#print(x.shape)\n\n\t\tx = self.leaky(self.conv2(x))\n\t\tx = self.batch2(x)\n\t\tx, mpi2 = self.maxpool2(x)\n\t\t#print(x.shape)\n\n\t\tx = x.view(-1, self.hidden_units)\n\t\tx = self.leaky(self.linear0(x))\n\t\tx = self.linear1(x)\n\t\t#print(x.shape)\n\n\t\tt = t.view(-1, 1)\n\t\tt = self.leaky(self.tlinear0(t))\n\t\tt = self.leaky(self.tlinear1(t))\n\t\tt = self.tlinear2(t)\n\n\t\t#print(x.shape, t.shape)\n\t\tout = torch.cat((x, t), 1)\n\t\treturn out, [mpi0, mpi1, mpi2]\n\n\nclass Decoder(nn.Module):\n\tdef __init__(self, latent_size=2048, time_latent_size=64, hidden_latent_size=1024):\n\t\tsuper(Decoder, self).__init__()\n\t\tself.fc_size = latent_size + time_latent_size\n\n\t\tself.linear3 = nn.Linear(self.fc_size, hidden_latent_size)\n\t\t#self.linear4 = nn.Linear(hidden_latent_size, 128*16*16)\n\t\tself.linear4 = nn.Linear(hidden_latent_size, 128*8*8)\n\n\t\tself.unpool0 = nn.MaxUnpool2d(2)\n\t\tself.deconv0 = nn.ConvTranspose2d(128, 64, 1, stride=1)\n\t\tself.batch0 = nn.BatchNorm2d(64)\n\n\t\tself.unpool1 = nn.MaxUnpool2d(2)\n\t\tself.deconv1 = nn.ConvTranspose2d(64, 32, 5, stride=1, padding=(2, 2))\n\t\tself.batch1 = nn.BatchNorm2d(32)\n\n\t\tself.unpool2 = nn.MaxUnpool2d(2)\n\t\tself.deconv2 = nn.ConvTranspose2d(32, 1, 5, stride=1, padding=(2, 2))\n\n\n\tdef forward(self, x, mpis):\n\t\tx = F.relu(self.linear3(x))\n\t\tx = F.relu(self.linear4(x))\n\n\t\t#x = x.view(-1, 128, 16, 16)\n\t\tx = x.view(-1, 128, 8, 8)\n\t\tx = self.unpool0(x, mpis[2])\n\t\tx = F.relu(self.deconv0(x))\n\t\tx = self.batch0(x)\n\n\t\tx = self.unpool1(x, mpis[1])\n\t\tx = F.relu(self.deconv1(x))\n\t\tx = self.batch1(x)\n\n\t\tx = self.unpool2(x, mpis[0])\n\t\tx = self.deconv2(x)\n\n\t\treturn x\n\n\n\nclass EncoderDecoder(nn.Module):\n\tdef __init__(self, args):\n\t\tsuper(EncoderDecoder, self).__init__()\n\t\tlatent_size = args.latent_size\n\t\ttime_latent_size = args.time_latent\n\t\thidden_latent = args.hidden_latent\n\t\tself.encoder = Encoder(latent_size, time_latent_size, hidden_latent)\n\t\tself.decoder = Decoder(latent_size, time_latent_size, hidden_latent)\n\n\tdef forward(self, x, t):\n\t\tx, mpis = self.encoder(x, t)\n\t\tx = self.decoder(x, mpis)\n\n\t\treturn x\n","sub_path":"model_stacked.py","file_name":"model_stacked.py","file_ext":"py","file_size_in_byte":3582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"202865348","text":"# # Three Kingdoms test\n#\n# # read the character names\n# f = open('./resource/txt/name.txt')\n# data = f.read()\n# print(data.split('|'))\n#\n# # read the weapon names\n# # strings: split() separates, strip() deletes\n# i = 1\n# f2 = 
open('./resource/txt/weapon.txt')\n# for line in f2.readlines() :\n#     if i % 2 == 1:\n#         print(line.strip('\\n'))\n#     i += 1\n#\n# # encoding format: encoding=\n# # string replacement with replace()\n# f3 = open('./resource/txt/sanguo.txt',encoding='GB18030')\n# print(f3.read().replace('\\n',''))\n\n# function definition with def\n# def func(filename):\n#     print(open(filename).read())\n# func('./resource/txt/name.txt')\n\n\n\n# re is one of Python's standard libraries\nimport re\n# with .... as ... : context manager; if the function raises an exception, finally runs automatically and closes the file\nwith open('./resource/txt/sanguo.txt',encoding='GB18030') as f :\n    data = f.read().replace('\\n','');\n    # search data for the name; the return value is a list of the matches\n    name_count = len(re.findall('諸葛亮',data))\n    print(name_count)\n\n","sub_path":"22函数.py","file_name":"22函数.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"414705902","text":"\"\"\"Problem:\n    Given a set of n tuples find the k closest \n    points to the origin (0,0).\n    Solution:\n    First write a function that determines the \n    Euclidean distance (use Pythagoras' theorem)\n    then implement a method to find the smallest k. \n\"\"\"\n# Note: A couple methods for finding smallest k are:\n# 1.Using a sorting algorithm and returning the first k -- O(nlogn)\n# 2.Building a maxheap of the first k without sorting and\n#   swapping the top element if smaller (faster than sorting) -- O(n+(n-k)logk)\n\nfrom quickSort import sort #sort takes a list as argument\nimport math\nimport heapq\n\ndef calc_distance(N):\n    distance = []\n    i = 0\n    for a, b in N:\n        distance.append(tuple((math.sqrt(a**2 + b**2), i)))\n        #will use index to hash a map when returning points\n        i += 1\n    return distance\n\ndef negate_list(N):\n    negative = [0]*len(N)\n    for i in range(len(N)):\n        negative[i] = -N[i]\n    return negative\n\ndef maxheap_kSort(N, k):\n    heap = []\n    # print(N)\n    negative = negate_list(N)\n    #print(negative)\n    for i in range(k):\n        heapq.heappush(heap, negative[i])\n    for i in range(k, len(N), 1):\n        if heap[0] < negative[i]:\n            heapq.heapreplace(heap, negative[i])\n    maxHeap = negate_list(heap)\n    maxHeap.reverse()\n    return maxHeap\n\n#Honestly the max heap thing in python is really really confusing\n#I should've probably just built my own code, but this is a really\n#simple hack.\n\nN_points = [(-2,-4),(0,-2),(-1,0),(3,-5),(-2,-2),(3,2)]\nk = 3\nprint(\"The n points are: \",N_points)\nN_dist = calc_distance(N_points)\n#print(\"(Distance, index): \",N_dist)\nhash_map = dict(N_dist) #dict is (key,value); D[key] returns value\n#print(\"Our hash map: \",hash_map)\nunzipped = list(zip(*N_dist)) #list1_2=zip(*list3) is the reverse of list3=zip(list1,list2)\n#print(\"(Distance),(indicies)\",unzipped)\nshortest_dist = sort(unzipped[0]) #This is solution 1 quicksort()\nshortest_dist2 = maxheap_kSort(unzipped[0],k)\n#print(\"Just distance: \",shortest_dist)\n#print(\"Just distance: \",shortest_dist2)\nprint(k,\"closest points (using quicksort):\")\nfor i in range(k):\n    print(i+1,\": \",N_points[hash_map[shortest_dist[i]]])\nprint(k,\"closest points (using a maxheap):\")\nfor i in range(k):\n    print(i+1,\": \",N_points[hash_map[shortest_dist2[i]]])\n","sub_path":"Python/k_closestPoints.py","file_name":"k_closestPoints.py","file_ext":"py","file_size_in_byte":2262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"111661675","text":"import sys\nsys.stdin = open('input.txt', 'r')\n\n\ndx, dy = [1, -1 ,0 ,0], [0, 0, 1, -1]\ndef bfs(ni, nj):\n    cnt = 1\n    go = True\n    while go:\n        go = False\n        i, j = ni, nj\n        
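# mark the value of the current cell as visited, then look for an adjacent cell holding the next consecutive number\n        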
visit[A[i][j]] = True\n        for d in range(4):\n            ni, nj = i+dx[d], j+dy[d]\n            if -1 < ni < N and -1 < nj < N and A[ni][nj] == A[i][j]+1:\n                if dp[A[ni][nj]]:\n                    cnt += dp[A[ni][nj]]\n                    return cnt\n                go = True\n                cnt += 1\n                break\n    return cnt\n    \nfor tc in range(1, int(input())+1):\n    N = int(input())\n    A = [list(map(int, input().split())) for _ in range(N)]\n    NN = N*N\n    visit = [False]*(NN+1)\n    dp = [0]*(NN+1)\n    ans = 0\n    for i in range(N):\n        for j in range(N):\n            if not visit[A[i][j]] and ans <= NN-A[i][j]:\n                cnt = bfs(i, j)\n                dp[A[i][j]] = cnt\n                if cnt == ans:\n                    if A[i][j] < room:\n                        room = A[i][j]\n                elif cnt > ans:\n                    ans = cnt\n                    room = A[i][j]\n    print('#{} {} {}'.format(tc, room, ans))\n\n\n\n# dx, dy = [1, -1 ,0 ,0], [0, 0, 1, -1]\n# def dfs(i, j, S):\n#     global cnt\n#     for d in range(4):\n#         ni, nj = i+dx[d], j+dy[d]\n#         if -1 < ni < N and -1 < nj < N and A[ni][nj] == S+1:\n#             if dp[A[ni][nj]]:\n#                 cnt += dp[A[ni][nj]]\n#                 return\n#             visit[A[ni][nj]] = True\n#             cnt += 1\n#             dfs(ni, nj, A[ni][nj])\n#     return\n    \n# for tc in range(1, int(input())+1):\n#     N = int(input())\n#     A = [list(map(int, input().split())) for _ in range(N)]\n#     NN = N*N\n#     visit = [False]*(NN+1)\n#     dp = [0]*(NN+1)\n#     ans = 0\n#     room = float('inf')\n#     for i in range(N):\n#         for j in range(N):\n#             S = A[i][j]\n#             if not visit[S] and ans <= NN-S:\n#                 visit[S] = True\n#                 cnt = 1\n#                 dfs(i, j, S)\n#                 dp[S] = cnt\n#                 if cnt == ans:\n#                     if A[i][j] < room:\n#                         room = S\n#                 elif cnt > ans:\n#                     ans = cnt\n#                     room = S\n#     print('#{} {} {}'.format(tc, room, ans))","sub_path":"swea/D4/1861.py","file_name":"1861.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"630524470","text":"#coding: utf-8\n\n# For all the permit numbers between 1930 and 1999\nannee1 = [format(annee1) for annee1 in range(30000, 100000)]\n\n# For all the permit numbers between 2000 and 2017\nannee2 = [\"{:05d}\".format(annee2) for annee2 in range(0, 18000)]\n\n# We print the two strings one after the other\nprint(annee1, annee2)\n\n
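# annee1 covers '30000' through '99999' (years 1930-1999); annee2 covers '00000' through '17999' (years 2000-2017), zero-padded to five digits\n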
### Great! The error from the first script is avoided.\n### To produce the desired output, or to use this permit number in a script that would harvest the Collège des médecins website, for example, you would need (besides removing the printing of the lists above) to add the following lines:\n\npermis = annee1 + annee2\nfor numPermis in permis:\n\tprint(numPermis)","sub_path":"devoir1_v2JHR.py","file_name":"devoir1_v2JHR.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"171425510","text":"# For any questions, contact Tiago Braga Camargo Dias\n# Class for the script sections\n\n# Imports\n\nfrom arquivoteste import *\n\nclass Sections:\n\n    def __init__(self, file):\n\n        self.file = file\n\n    def getSections(self):\n\n        ListasDados = dict(\n            configSystemGlobal = [],\n            configSystemInterface = [],\n            configSystemDns = [],\n            configSystemDdns = [],\n            configSystemSnmpCommunity = [],\n            configSystemCentralManagement = [],\n            configLogFortianalyzerSetting = [],\n            configFirewallAddress = [],\n            configVpnIpsecPhaseInterface = [],\n            configRouterStatic = [],\n            configRouterBgp = [],\n        )\n\n        AcessListaDados = {\n            0: ListasDados['configSystemGlobal'],\n            1: ListasDados['configSystemInterface'],\n            2: ListasDados['configSystemDns'],\n            3: ListasDados['configSystemDdns'],\n            4: ListasDados['configSystemSnmpCommunity'],\n            5: ListasDados['configSystemCentralManagement'],\n            6: ListasDados['configLogFortianalyzerSetting'],\n            7: ListasDados['configFirewallAddress'],\n            8: ListasDados['configVpnIpsecPhaseInterface'],\n            9: ListasDados['configRouterStatic'],\n            10: ListasDados['configRouterBgp'],\n        }\n\n        Cases = {\n            0: \"config system global\",\n            1: \"config system interface\",\n            2: \"config system dns\",\n            3: \"config system ddns\",\n            4: \"config system snmp community\" ,\n            5: \"config system central-management\",\n            6: \"config log fortianalyzer setting\",\n            7: \"config firewall address\",\n            8: \"config vpn ipsec phase1-interface\",\n            9: \"config router static\",\n            10: \"config router bgp\",\n        }\n\n\n        fim = 'end'\n\n        flag = False\n\n        contador = 0\n        while contador < 11:\n            for linha in getArquivo(self.file):\n                \n                if flag and linha == fim:\n\n                    flag = False\n\n                    break\n\n                if flag:\n\n                    AcessListaDados[contador].append(linha)\n\n                if Cases[contador] == linha:\n\n                    flag = True\n\n            contador = contador + 1\n\n        return ListasDados\n","sub_path":"TestePython/teste.py","file_name":"teste.py","file_ext":"py","file_size_in_byte":2351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"323102051","text":"\ndef parse_event_data(response):\n\n    event_data = []\n\n    for event in response.get('items', []):\n        event_item = {'summary': event.get('summary', 'No title'),\n                      'status': event.get('status', 'confirmed'),\n                      'start_datetime': event.get('start', ''),\n                      'end_datetime': event.get('end', ''),\n                      'description': event.get('description', 'No description')}\n\n        event_data.append(event_item)\n\n    return event_data","sub_path":"phreaknic/calendar/calendar_parser.py","file_name":"calendar_parser.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"225467715","text":"import multiprocessing as mp\nfrom datetime import datetime\n\nfrom astar import manhattan_distance, hamming_distance\nfrom statistics import start_run, write_file, initialize\n\n\ndef compute(data):\n    # Put the call to the A* in here.\n    
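# data is a (start_state, weights) pair produced by initialize(); the goal below is presumably the solved 8-puzzle board\n    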
goal_state = (0, 1, 2, 3, 4, 5, 6, 7, 8)\n return start_run(start=data[0], weights=data[1], heuristics=[manhattan_distance, hamming_distance], goal=goal_state)\n\n\ndef process_array(array_with_weights, runs):\n p = mp.Pool()\n res = p.map(compute, array_with_weights)\n write_file(res, runs)\n\n\nif __name__ == \"__main__\":\n number_of_examples = 1\n\n start_time = datetime.now()\n print(f\"Start: {start_time}\")\n\n work_to_do = initialize(number_of_examples)\n process_array(work_to_do, number_of_examples)\n\n end_time = datetime.now()\n print(f\"End: {end_time}\")\n print(f\"Duration: {end_time - start_time}\")\n","sub_path":"Exercise1/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"602615229","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Apr 14 16:33:29 2016\r\n\r\n@author: takaiguchi\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom sklearn.manifold import TSNE\r\nfrom sklearn.cluster import KMeans\r\nimport os\r\nfrom PIL import Image\r\nfrom matplotlib import pyplot as plt\r\nfrom sklearn import decomposition\r\n\r\n#X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])\r\n#model = TSNE(n_components=2, random_state=0)\r\n#np.set_printoptions(suppress=True)\r\n#Y = model.fit_transform(X)\r\n#y_pred = KMeans(n_clusters=2).fit_predict(Y)\r\npath = 'C:/Users/takaiguchi/Documents/Fun with Data Science/Kaggle/State Farm Distracted Driver/train/'\r\nfolders = os.listdir(path)\r\n\r\nfrms = 100\r\ndata = np.empty((len(folders)*frms,640*480))\r\nfor i in range(len(folders)):\r\n folder = folders[i]\r\n files = os.listdir(path+folder)\r\n for j in range(frms):\r\n file = files[i]\r\n img = Image.open(path+folder+'/'+file)\r\n data[(i-1)*frms+j,:] = np.asarray(img.getdata())[:,1]\r\nmodel = TSNE(n_components = 2, random_state=0, perplexity=15.0)\r\nnp.set_printoptions(suppress=True)\r\n\r\nX_pca = decomposition.TruncatedSVD(n_components=100).fit_transform(data)\r\n\r\nTSNEres = model.fit_transform(X_pca)\r\n\r\nfig = plt.figure()\r\nax1 = fig.add_subplot(111);\r\n\r\ncolors = [\"#000000\",\"#199999\",\"#333332\",\"#4CCCCB\",\"#666664\",\"#7FFFFD\",\"#999996\",\"#B3332F\",\"#CCCCC8\",\"#E66661\"]\r\nfor i in range(10):\r\n ax1.scatter(TSNEres[i*frms:(i+1)*frms,0],TSNEres[i*frms:(i+1)*frms,1],c=colors[i])\r\n \r\nplt.show()\r\n","sub_path":"tsne+kmeans.py","file_name":"tsne+kmeans.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"117448128","text":"from channels.generic import BaseConsumer\nimport re\nfrom channels import Group\nfrom uuid import uuid4\nfrom pprint import pprint\nimport time\nfrom bot.models import Log\nimport socket\n\nclass ChatClient(BaseConsumer):\n\n method_mapping = {\n 'create': 'create',\n }\n\n sockets = {}\n\n # Creates a new stream client\n def create(self, message, **kwargs) :\n msg = message.content\n network = msg.get('network')\n port = int(msg.get('port'))\n user = msg.get('user')\n usersocket = self.sockets.get(user)\n if not usersocket :\n self.sockets.update({user:socket.socket()})\n usersocket = self.sockets.get(user)\n usersocket.connect((network, port))\n while True :\n try :\n data = [self.parse(line.strip(), message, usersocket) for line in usersocket.recv(1024).decode('utf-8').split(\"\\n\") if line]\n if not data :\n usersocket.close()\n del(self.sockets[user])\n break\n except KeyboardInterrupt :\n 
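                # On Ctrl-C, tell the server we are leaving with an IRC QUIT before the loop ends.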
self.send(\"QUIT :BAD BYE!\", usersocket)\n\n def parse(self, line, message, usersocket) :\n print(\"< %s\" % line)\n user = message.get('user')\n if 'NOTICE' in line :\n self.send(\"NICK %s\" % (user), usersocket)\n self.send(\"USER %s %s %s :%s\" % (user, user, user, user), usersocket)\n if 'PING' in line :\n self.send(\"PONG %s\" % line.split(\":\")[1], usersocket)\n self.send(\"JOIN #topsecret\" , usersocket)\n if 'PRIVMSG' in line :\n nick = line.split('!')[0][1:]\n msg = ':'.join(line.split(':')[2:])\n channel = line.split()[2]\n log = Log(\n protocol = 'IRC',\n user = user,\n nickname = nick,\n message = msg,\n channel = channel,\n network = message.get('network')\n )\n log.save()\n\n\n # Sends a message over given stream client\n def send(self, line, usersocket):\n print(\"> %s\" % line)\n line += \"\\r\\n\"\n usersocket.send(line.encode('utf-8'))\n","sub_path":"bot/oldclient.py","file_name":"oldclient.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"393589910","text":"#-*-coding:utf-8-*-\nimport os\n\nfrom flask import Flask, render_template, request\nfrom flask_socketio import SocketIO, emit\n\napp = Flask(__name__)\napp.config[\"SECRET_KEY\"] = os.getenv(\"SECRET_KEY\")\nsocketio = SocketIO(app)\n\nchannels = list()\n# Channels view\n#{\"name\": \"channel_name\", \"messages\": [], \"id\":0})\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n@app.route(\"/channels\", methods=[\"POST\", \"GET\"])\ndef channel_list():\n\n # if GET request\n if request.method == \"GET\":\n return render_template(\"channels.html\", channels=channels)\n \n # if submit create channel form\n else:\n name = request.form.get(\"channel\")\n\n # unique name for each channel\n for channel in channels:\n if name == channel[\"name\"]:\n return render_template(\"error.html\", message=\"Channel name already taken\")\n\n id = len(channels)\n channels.append({\"name\": name, \"messages\": [], \"id\":id})\n return render_template(\"channels.html\", channels=channels)\n\n\n@app.route(\"/channels/\")\ndef channel(id):\n return render_template(\"channel.html\", channel=channels[id])\n\n@socketio.on(\"send message\")\ndef message(data):\n\n message = data[\"message\"]\n user = data[\"user\"]\n date = data[\"date\"]\n id = int(data[\"id\"])\n channels[id][\"messages\"].append([message, user, date])\n\n # no messages more than 100\n if len(channels[id][\"messages\"]) > 100:\n channels[id][\"messages\"].pop(0)\n \n emit(\"show message\", {\"message\": message, \"user\": user, \"date\": date}, broadcast=True)\n\nif __name__ == \"__main__\":\n socketio.run(app)","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"617159418","text":"def insertion_sort(arr):\r\n \"\"\"Implementation of the Insertion sort algorithm with O(n^2) time complexity.\r\n Iterates through each index in the list placing each value at the left in the \r\n proper location among previously sorted values.\r\n \"\"\"\r\n for index in range(len(arr)):\r\n\r\n current_value = arr[index]\r\n position = index\r\n \r\n while position > 0 and arr[position - 1] > current_value: \r\n\r\n arr[position] = arr[position - 1]\r\n position = position - 1\r\n\r\n arr[position] = 
current_value\r\n\r\n\r\n","sub_path":"Sorting-Algorithms/insertion_sort.py","file_name":"insertion_sort.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"53703681","text":"import random as r\nimport re\nfrom math import exp\nimport time\n\n\n\n\"read the data from the file\"\ndef read():\n    with open(\"neh.data.txt\") as file:\n        lines = file.readlines()\n    start = re.compile(r'data\\.\\d{3}:')\n    end = re.compile(r'\\n')\n    flag = False\n    for line in lines:\n        if re.match(start, line):\n            data = []\n            flag = True\n            continue\n        if re.fullmatch(end, line):\n            flag = False\n        if flag:\n            tmp = list(map(int, line.split()))\n            data.append(tmp)\n            if data[0][0]+1 == len(data):\n                yield [[data[i][y] for i in range(1, len(data))] for y in range(data[0][1])]\n    return StopIteration\n\n\"Set the correct values in the array\"\ndef appendtab(pattern, *args):\n    tab = []\n    for arg in args:\n        tab.append([arg[i-1] for i in pattern])\n    return tab\n\n\"Generate the initial solution\"\ndef init(*args):\n    border = [[] for i in range(len(args))]\n    border[0] = [sum(args[0][:i]) for i in range(1, len(args[0]) + 1)]\n    for n in range(len(args[0])):\n        for i in range(1, len(args)):\n            if n == 0:\n                border[i].append(border[i - 1][n] + args[i][n])\n            else:\n                if border[i - 1][n] > border[i][n - 1]:\n                    border[i].append(border[i - 1][n] + args[i][n])\n                else:\n                    border[i].append(border[i][n - 1] + args[i][n])\n    return border[len(args) - 1][len(args[0]) - 1]\n\n\n\"Generate a neighbour\"\ndef generate_n(answer):\n    n_solve = answer[::]\n    index = r.sample(list(range(len(answer))), 2)\n    n_solve[index[0]], n_solve[index[1]] = n_solve[index[1]], n_solve[index[0]]\n    return n_solve\n\n\"Transition probability\"\ndef prob_trans(cmax, cmax_bis, t):\n    if t == 0:\n        return -1\n    elif cmax_bis >= cmax:\n        return exp((cmax-cmax_bis)/t)\n    else:\n        return 1\n\n\"Make a move\"\ndef move(*args):\n    task = [args[i][0] for i in range(len(args))]\n    task = [[task[z][y] for z in range(len(task))] for y in range(len(task[0]))]\n    return task\n\n\n\"Cooling\"\ndef chill(t, param=None, k=None, k_max=None):\n    if param:\n        return param * t\n    return t*(k/k_max)\n\n\"Final ordering\"\ndef solve(*args):\n    tasks = list(zip(*args))\n    task = [[v, i] for i, v in enumerate(tasks)]\n    answer = task[::]\n    r.shuffle(answer)\n    t = 200\n    iteration = 10000\n    for i in range(iteration):\n        b_answer = generate_n(answer)\n        p = prob_trans(init(*move(*answer)), init(*move(*b_answer)), t)\n        if p >= r.uniform(0, 1):\n            answer = b_answer\n        t = chill(t, param=0.99)\n    answer = [answer[i][1] for i in range(len(answer))]\n    return list(map(lambda x: x+1, answer))\n\n\nif __name__ == '__main__':\n    examples = read()\n    for exa, end in enumerate(examples):\n        if exa == 38:\n            sek = solve(*end)\n            print(\"Initial CMax\", init(*end))\n            print(\"Final CMax\", init(*appendtab(sek, *end)))\n            print(sek)\n\n","sub_path":"Zad4.py","file_name":"Zad4.py","file_ext":"py","file_size_in_byte":3098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"559915680","text":"#!/usr/bin/python\n#coding=utf-8\n#__author__:TaQini\n\nfrom pwn import *\n\nlocal_file = './chall'\nlocal_libc = '/lib/x86_64-linux-gnu/libc.so.6'\nremote_libc = './libc.so.6'\n\nis_local = False\nis_remote = False\n\nif len(sys.argv) == 1:\n    is_local = True\n    p = process(local_file)\n    libc = ELF(local_libc)\nelif len(sys.argv) > 1:\n    is_remote = True\n    if len(sys.argv) == 3:\n        
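        # Three argv entries: the user passed host and port as separate arguments.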
host = sys.argv[1]\n port = sys.argv[2]\n else:\n host, port = sys.argv[1].split(':')\n p = remote(host, port)\n libc = ELF(remote_libc)\n\nelf = ELF(local_file)\n\ncontext.log_level = 'debug'\ncontext.arch = elf.arch\n\nse = lambda data :p.send(data) \nsa = lambda delim,data :p.sendafter(delim, data)\nsl = lambda data :p.sendline(data)\nsla = lambda delim,data :p.sendlineafter(delim, data)\nsea = lambda delim,data :p.sendafter(delim, data)\nrc = lambda numb=4096 :p.recv(numb)\nru = lambda delims, drop=True :p.recvuntil(delims, drop)\nuu32 = lambda data :u32(data.ljust(4, '\\0'))\nuu64 = lambda data :u64(data.ljust(8, '\\0'))\ninfo_addr = lambda tag, addr :p.info(tag + ': {:#x}'.format(addr))\n\ndef debug(cmd=''):\n if is_local: gdb.attach(p,cmd)\n\n# info\n# gadget\nprdi = 0x0000000000000e03 # pop rdi ; ret\n\n# elf, libc\n\n# rop1\n\nlibc_got = 0x5f4038\nog_off = 0xe569f # r12==NULL | r14==NULL\n\nru('I placed the target near: ')\nputs = eval(rc(14))\ninfo_addr('puts',puts)\nlibcbase = puts-libc.sym['puts']\ninfo_addr('libcbase',libcbase)\ngot = libcbase+libc_got\ninfo_addr('got',got)\nog = libcbase+og_off\ninfo_addr('og',og)\nru('shoot!shoot!\\n')\nsl(str(got))\nru('biang!\\n')\nsl(p8(og&0xff))\nru('biang!\\n')\nsl(p8((og>>8)&0xff))\nru('biang!\\n')\ndebug('b *$rebase(0xd63)')\nsl(p8((og>>16)&0xff))\n\n# sl(payload)\n\n\np.interactive()\n","sub_path":"icq-HFCTF2020/pwn/chall/chall.py","file_name":"chall.py","file_ext":"py","file_size_in_byte":1847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"67984257","text":"import sys\nimport re\nimport time\nimport copy\nimport numpy as np\nfrom models import utils\nfrom models.cross_validation import CrossValidation\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nimport xgboost as xgb\nfrom xgboost import XGBClassifier\nimport lightgbm as lgb\nfrom lightgbm import LGBMClassifier\nfrom catboost import CatBoostClassifier\nfrom config import cfg\n\n\nclass ModelBase(object):\n \"\"\"\n Base Model Class of Models in scikit-learn Module\n \"\"\"\n def __init__(self, x_tr, y_tr, w_tr, e_tr, x_te, id_te,\n x_va=None, y_va=None, w_va=None, e_va=None, use_multi_group=False):\n\n self.x_train = x_tr\n self.y_train = y_tr\n self.w_train = w_tr\n self.e_train = e_tr\n self.x_test = x_te\n self.id_test = id_te\n\n self.x_global_valid = x_va\n self.y_global_valid = y_va\n self.w_global_valid = w_va\n self.e_global_valid = e_va\n\n self.importance = np.array([])\n self.indices = np.array([])\n self.std = np.array([])\n self.model_name = ''\n self.num_boost_round = 0\n self.use_multi_group = use_multi_group\n self.use_global_valid = False\n self.use_custom_obj = False\n self.postscale = False\n self.postscale_rate = None\n\n if cfg.group_list is None:\n if use_multi_group:\n raise ValueError(\"Groups not found! 
'use_multi_group' should be False!\")\n\n @staticmethod\n def get_reg(parameters):\n\n print('This Is Base Model!')\n reg = DecisionTreeClassifier()\n\n return reg\n\n def print_start_info(self):\n\n print('------------------------------------------------------')\n print('This Is Base Model!')\n\n self.model_name = 'base'\n\n @staticmethod\n def select_category_variable(x_train, x_g_train, x_valid, x_g_valid, x_test, x_g_test):\n\n return x_train, x_valid, x_test\n\n def fit(self, x_train, y_train, w_train, x_valid, y_valid, w_valid, parameters=None):\n\n # Get Classifier\n reg = self.get_reg(parameters)\n\n # Training Model\n reg.fit(x_train, y_train, sample_weight=w_train)\n\n return reg\n\n def get_pattern(self):\n return None\n\n def fit_with_round_log(self, boost_round_log_path, cv_count, x_train, y_train,\n w_train, x_valid, y_valid, w_valid, parameters,\n param_name_list, param_value_list, append_info=''):\n\n boost_round_log_path, _ = utils.get_boost_round_log_path(boost_round_log_path, self.model_name,\n param_name_list, param_value_list, append_info)\n boost_round_log_path += 'cv_cache/'\n utils.check_dir([boost_round_log_path])\n boost_round_log_path += self.model_name + '_cv_{}_log.txt'.format(cv_count)\n\n print('Saving Outputs to:', boost_round_log_path)\n print('------------------------------------------------------')\n\n open(boost_round_log_path, 'w+').close()\n\n with open(boost_round_log_path, 'a') as f:\n __console__ = sys.stdout\n sys.stdout = f\n reg = self.fit(x_train, y_train, w_train, x_valid, y_valid, w_valid, parameters)\n sys.stdout = __console__\n\n with open(boost_round_log_path) as f:\n lines = f.readlines()\n idx_round_cv = []\n train_loss_round_cv = []\n valid_loss_round_cv = []\n global_valid_loss_round_cv = []\n pattern = self.get_pattern()\n for line in lines:\n if pattern.match(line) is not None:\n idx_round_cv.append(int(pattern.match(line).group(1)))\n train_loss_round_cv.append(float(pattern.match(line).group(2)))\n valid_loss_round_cv.append(float(pattern.match(line).group(3)))\n if self.use_global_valid:\n global_valid_loss_round_cv.append(float(pattern.match(line).group(4)))\n\n if self.use_global_valid:\n return reg, idx_round_cv, train_loss_round_cv, valid_loss_round_cv, global_valid_loss_round_cv\n else:\n return reg, idx_round_cv, train_loss_round_cv, valid_loss_round_cv\n\n def save_boost_round_log(self, boost_round_log_path, idx_round, train_loss_round_mean,\n valid_loss_round_mean, train_seed, cv_seed, csv_idx, parameters,\n param_name_list, param_value_list, append_info='',\n global_valid_loss_round_mean=None, profit=None):\n\n boost_round_log_upper_path = \\\n utils.get_boost_round_log_upper_path(\n boost_round_log_path, self.model_name, param_name_list, append_info)\n boost_round_log_path, param_name = \\\n utils.get_boost_round_log_path(\n boost_round_log_path, self.model_name,\n param_name_list, param_value_list, append_info)\n utils.save_boost_round_log_to_csv(\n self.model_name, boost_round_log_path, boost_round_log_upper_path, csv_idx,\n idx_round, valid_loss_round_mean, train_loss_round_mean, train_seed, cv_seed,\n parameters, param_name_list, param_value_list, param_name, profit=profit)\n if self.use_global_valid:\n utils.save_boost_round_log_gl_to_csv(\n self.model_name, boost_round_log_path, boost_round_log_upper_path,\n csv_idx, idx_round, valid_loss_round_mean, train_loss_round_mean,\n global_valid_loss_round_mean, train_seed, cv_seed, parameters,\n param_name_list, param_value_list, param_name, profit=profit)\n\n 
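        # Assemble a per-run file name (model, csv index, train seed, cv seed) for the final round log written below.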
boost_round_log_path += 'final_logs/'\n utils.check_dir([boost_round_log_path])\n boost_round_log_path += self.model_name + '_' + str(csv_idx) + '_t-' \\\n + str(train_seed) + '_c-' + str(cv_seed) + '_log.csv'\n\n if self.use_global_valid:\n utils.save_final_boost_round_gl_log(\n boost_round_log_path, idx_round, train_loss_round_mean,\n valid_loss_round_mean, global_valid_loss_round_mean, profit=profit)\n else:\n utils.save_final_boost_round_log(\n boost_round_log_path, idx_round, train_loss_round_mean,\n valid_loss_round_mean, profit=profit)\n\n def get_importance(self, reg):\n\n print('------------------------------------------------------')\n print('Feature Importance')\n\n self.importance = reg.feature_importances_\n self.indices = np.argsort(self.importance)[::-1]\n\n feature_num = len(self.importance)\n\n for f in range(feature_num):\n print(\"%d | feature %d | %d\" % (\n f + 1, self.indices[f], self.importance[self.indices[f]]))\n\n def predict(self, reg, x_test, pred_path=None):\n\n print('------------------------------------------------------')\n print('Predicting Test Result...')\n\n pred_test = np.array(reg.predict(x_test))\n\n if pred_path is not None:\n utils.save_pred_to_csv(pred_path, self.id_test, pred_test)\n\n return pred_test\n\n def get_pred_train(self, reg, x_train, pred_path=None):\n\n print('------------------------------------------------------')\n print('Predicting Train Probability...')\n\n pred_train = np.array(reg.predict(x_train))[:, 1]\n\n if pred_path is not None:\n utils.save_pred_train_to_csv(pred_path, pred_train, self.y_train)\n\n return pred_train\n\n def save_csv_log(self, mode, csv_log_path, param_name_list, param_value_list, csv_idx,\n loss_train_w_mean, loss_valid_w_mean, acc_train, train_seed, cv_seed,\n n_valid, n_cv, parameters, boost_round_log_path=None,\n file_name_params=None, append_info='', loss_global_valid=None,\n acc_global_valid=None, profit=None):\n\n if mode == 'auto_grid_search':\n\n csv_log_path, param_name, param_info = \\\n utils.get_grid_search_log_path(csv_log_path, self.model_name,\n param_name_list, param_value_list, append_info)\n if self.use_global_valid:\n utils.save_grid_search_log_with_glv_to_csv(\n csv_idx, csv_log_path + param_name + '_',\n loss_train_w_mean, loss_valid_w_mean, acc_train, train_seed,\n loss_global_valid, acc_global_valid, cv_seed, n_valid, n_cv,\n parameters, param_name_list, param_value_list, profit=profit)\n csv_log_path += str(param_info) + '_'\n utils.save_grid_search_log_with_glv_to_csv(\n csv_idx, csv_log_path, loss_train_w_mean, loss_valid_w_mean,\n acc_train, train_seed, loss_global_valid, acc_global_valid,\n cv_seed, n_valid, n_cv, parameters, param_name_list,\n param_value_list, profit=profit)\n else:\n utils.save_grid_search_log_to_csv(\n csv_idx, csv_log_path + param_name + '_', loss_train_w_mean,\n loss_valid_w_mean, acc_train, train_seed, cv_seed, n_valid,\n n_cv, parameters, param_name_list, param_value_list, profit=profit)\n csv_log_path += str(param_info) + '_'\n utils.save_grid_search_log_to_csv(\n csv_idx, csv_log_path, loss_train_w_mean, loss_valid_w_mean,\n acc_train, train_seed, cv_seed, n_valid, n_cv, parameters,\n param_name_list, param_value_list, profit=profit)\n\n elif mode == 'auto_train_boost_round':\n\n boost_round_log_path, _ = \\\n utils.get_boost_round_log_path(\n boost_round_log_path, self.model_name, param_name_list, param_value_list, append_info)\n boost_round_log_path += self.model_name + '_' + append_info + '_'\n if self.use_global_valid:\n 
utils.save_grid_search_log_to_csv(\n csv_idx, boost_round_log_path, loss_train_w_mean, loss_valid_w_mean,\n acc_train, train_seed, cv_seed, n_valid, n_cv, parameters,\n param_name_list, param_value_list, profit=profit)\n else:\n utils.save_final_loss_log_to_csv(\n csv_idx, boost_round_log_path, loss_train_w_mean,\n loss_valid_w_mean, acc_train, train_seed, cv_seed,\n n_valid, n_cv, parameters, profit=profit)\n\n elif mode == 'auto_train':\n\n csv_log_path += self.model_name + '/'\n utils.check_dir([csv_log_path])\n csv_log_path += self.model_name + '_' + append_info + '/'\n utils.check_dir([csv_log_path])\n csv_log_path += self.model_name + '_'\n if file_name_params is not None:\n for p_name in file_name_params:\n csv_log_path += str(parameters[p_name]) + '_'\n else:\n for p_name, p_value in parameters.items():\n csv_log_path += str(p_value) + '_'\n\n if self.use_global_valid:\n utils.save_log_with_glv_to_csv(\n csv_idx, csv_log_path, loss_train_w_mean,\n loss_valid_w_mean, acc_train, train_seed,\n loss_global_valid, acc_global_valid, cv_seed,\n n_valid, n_cv, parameters, profit=profit)\n else:\n utils.save_final_loss_log_to_csv(\n csv_idx, csv_log_path, loss_train_w_mean,\n loss_valid_w_mean, acc_train, train_seed,\n cv_seed, n_valid, n_cv, parameters, profit=profit)\n\n else:\n\n csv_log_path += self.model_name + '_' + append_info + '_'\n if self.use_global_valid:\n utils.save_log_with_glv_to_csv(csv_idx, csv_log_path, loss_train_w_mean,\n loss_valid_w_mean, acc_train, train_seed,\n loss_global_valid, acc_global_valid, cv_seed,\n n_valid, n_cv, parameters, profit=profit)\n else:\n utils.save_final_loss_log_to_csv(csv_idx, csv_log_path, loss_train_w_mean,\n loss_valid_w_mean, acc_train, train_seed,\n cv_seed, n_valid, n_cv, parameters, profit=profit)\n\n def save_final_pred(self, mode, pred_test_mean, pred_path, parameters, csv_idx,\n train_seed, cv_seed, boost_round_log_path=None, param_name_list=None,\n param_value_list=None, file_name_params=None, append_info=''):\n\n params = '_'\n if file_name_params is not None:\n for p_name in file_name_params:\n params += utils.get_simple_param_name(p_name) + \\\n '-' + str(parameters[p_name]) + '_'\n else:\n for p_name, p_value in parameters.items():\n params += utils.get_simple_param_name(p_name) + '-' + str(p_value) + '_'\n\n if mode == 'auto_train':\n\n pred_path += self.model_name + '/'\n utils.check_dir([pred_path])\n pred_path += self.model_name + '_' + append_info + '/'\n utils.check_dir([pred_path])\n pred_path += self.model_name + params + 'results/'\n utils.check_dir([pred_path])\n pred_path += self.model_name + '_' + str(csv_idx) + \\\n '_t-' + str(train_seed) + '_c-' + str(cv_seed) + '_'\n utils.save_pred_to_csv(pred_path, self.id_test, pred_test_mean)\n\n elif mode == 'auto_train_boost_round':\n\n boost_round_log_path, _ = \\\n utils.get_boost_round_log_path(\n boost_round_log_path, self.model_name, param_name_list, param_value_list, append_info)\n pred_path = boost_round_log_path + 'final_results/'\n utils.check_dir([pred_path])\n pred_path += self.model_name + '_' + str(csv_idx) + \\\n '_t-' + str(train_seed) + '_c-' + str(cv_seed) + '_'\n utils.save_pred_to_csv(pred_path, self.id_test, pred_test_mean)\n\n else:\n pred_path += 'final_results/'\n utils.check_dir([pred_path])\n pred_path += self.model_name + '_' + append_info + '/'\n utils.check_dir([pred_path])\n pred_path += self.model_name + '_t-' + str(train_seed) + '_c-' + str(cv_seed) + params\n utils.save_pred_to_csv(pred_path, self.id_test, pred_test_mean)\n\n @staticmethod\n 
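    # Helper below: positive-label share and the n/(2*positives) factor used when post-scaling predictions.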
def get_postscale_rate(y):\n\n positive = 0\n for y_ in y:\n if y_ == 1:\n positive += 1\n\n positive_rate = positive / len(y)\n postscale_rate = len(y) / (2*positive)\n\n return positive_rate, postscale_rate\n\n @staticmethod\n def prescale(x_train, y_train, w_train, e_train):\n\n print('[W] PreScaling Train Set...')\n\n positive_idx = []\n negative_idx = []\n for i, y in enumerate(y_train):\n if y == 1:\n positive_idx.append(i)\n else:\n negative_idx.append(i)\n n_positive = len(positive_idx)\n n_negative = len(negative_idx)\n print('Number of Positive Labels: {}'.format(n_positive))\n print('Number of Negative Labels: {}'.format(n_negative))\n\n if n_positive > n_negative:\n positive_idx = list(np.random.choice(positive_idx, len(negative_idx), replace=False))\n elif n_negative > n_positive:\n negative_idx = list(np.random.choice(negative_idx, n_positive, replace=False))\n\n # Checking\n if len(positive_idx) != len(negative_idx):\n raise ValueError('PreScaling Failed! len(positive_idx) != len(negative_idx)!')\n else:\n print('Number of PreScaled Labels: {}'.format(len(positive_idx)))\n\n prescale_idx = list(np.sort(positive_idx + negative_idx))\n x_train = x_train[prescale_idx]\n y_train = y_train[prescale_idx]\n w_train = w_train[prescale_idx]\n e_train = e_train[prescale_idx]\n print('------------------------------------------------------')\n\n return x_train, y_train, w_train, e_train\n\n def lgb_postscale_feval(self, preds, train_data):\n\n pred = copy.deepcopy(preds)\n labels = train_data.get_label()\n weights = train_data.get_weight()\n pred *= self.postscale_rate\n loss = utils.log_loss_with_weight(pred, labels, weights)\n\n return 'binary_logloss', loss, False\n\n def xgb_postscale_feval(self, preds, train_data):\n\n pred = copy.deepcopy(preds)\n labels = train_data.get_label()\n weights = train_data.get_weight()\n pred *= self.postscale_rate\n loss = utils.log_loss_with_weight(pred, labels, weights)\n\n return 'logloss', loss\n\n def train(self, pred_path=None, loss_log_path=None, csv_log_path=None, boost_round_log_path=None,\n train_seed=None, cv_args=None, parameters=None, show_importance=False, show_accuracy=False,\n save_cv_pred=True, save_cv_pred_train=False, save_final_pred=True, save_final_pred_train=False,\n save_csv_log=True, csv_idx=None, prescale=False, postscale=False, use_global_valid=False,\n return_pred_test=False, mode=None, param_name_list=None, param_value_list=None,\n use_custom_obj=False, use_scale_pos_weight=False, file_name_params=None, append_info=None):\n\n # Check if directories exit or not\n utils.check_dir_model(pred_path, loss_log_path)\n utils.check_dir([pred_path, loss_log_path, csv_log_path, boost_round_log_path])\n\n # Global Validation\n self.use_global_valid = use_global_valid\n\n # Use Custom Objective Function\n self.use_custom_obj = use_custom_obj\n\n # Cross Validation Arguments\n cv_args_copy, n_valid, n_cv, n_era, cv_seed = utils.get_cv_args(cv_args, append_info)\n\n if csv_idx is None:\n csv_idx = self.model_name\n\n # Print Start Information and Get Model Name\n self.print_start_info()\n\n if use_global_valid:\n print('------------------------------------------------------')\n print('[W] Using Global Validation...')\n\n cv_count = 0\n pred_test_total = []\n pred_train_total = []\n loss_train_total = []\n loss_valid_total = []\n loss_train_w_total = []\n loss_valid_w_total = []\n idx_round = []\n train_loss_round_total = []\n valid_loss_round_total = []\n global_valid_loss_round_total = []\n pred_global_valid_total = []\n 
loss_global_valid_total = []\n loss_global_valid_w_total = []\n\n # Get Cross Validation Generator\n if 'cv_generator' in cv_args_copy:\n cv_generator = cv_args_copy['cv_generator']\n if cv_generator is None:\n cv_generator = CrossValidation.era_k_fold\n cv_args_copy.pop('cv_generator')\n else:\n cv_generator = CrossValidation.era_k_fold\n print('------------------------------------------------------')\n print('[W] Using CV Generator: {}'.format(getattr(cv_generator, '__name__')))\n\n if 'era_list' in cv_args_copy:\n print('Era List: ', cv_args_copy['era_list'])\n if 'window_size' in cv_args_copy:\n print('Window Size: ', cv_args_copy['window_size'])\n if 'cv_weights' in cv_args_copy:\n cv_weights = cv_args_copy['cv_weights']\n cv_args_copy.pop('cv_weights')\n if cv_weights is not None:\n if len(cv_weights) != n_cv:\n raise ValueError(\"The length of 'cv_weights'({}) should be equal to 'n_cv'({})!\"\n .format(len(cv_weights), n_cv))\n else:\n cv_weights = None\n\n # Training on Cross Validation Sets\n for x_train, y_train, w_train, e_train, x_valid, y_valid, w_valid, e_valid, valid_era \\\n in cv_generator(x=self.x_train, y=self.y_train,\n w=self.w_train, e=self.e_train, **cv_args_copy):\n\n # CV Start Time\n cv_start_time = time.time()\n\n cv_count += 1\n\n # Get Positive Rate of Train Set and postscale Rate\n positive_rate_train, postscale_rate = self.get_postscale_rate(y_train)\n positive_rate_valid, _ = self.get_postscale_rate(y_valid)\n\n # Remove Metric of Post Scale\n if postscale:\n self.postscale = True\n self.postscale_rate = postscale_rate\n if 'metric' in parameters.keys():\n parameters.pop('metric')\n if 'eval_metric' in parameters.keys():\n parameters.pop('eval_metric')\n\n if use_scale_pos_weight:\n if self.model_name == 'xgb':\n parameters['scale_pos_weight'] = postscale_rate\n\n print('------------------------------------------------------')\n print('Validation Set Era: ', valid_era)\n print('Number of Features: ', x_train.shape[1])\n print('------------------------------------------------------')\n print('Positive Rate of Train Set: {:.6f}'.format(positive_rate_train))\n print('Positive Rate of Valid Set: {:.6f}'.format(positive_rate_valid))\n print('------------------------------------------------------')\n\n # prescale\n if prescale:\n x_train, y_train, w_train, e_train = self.prescale(x_train, y_train, w_train, e_train)\n\n # Fitting and Training Model\n if mode == 'auto_train_boost_round':\n if use_global_valid:\n reg, idx_round_cv, train_loss_round_cv, \\\n valid_loss_round_cv, global_valid_loss_round_cv = \\\n self.fit_with_round_log(\n boost_round_log_path, cv_count, x_train, y_train, w_train, x_valid, y_valid,\n w_valid, parameters, param_name_list, param_value_list, append_info=append_info)\n global_valid_loss_round_total.append(global_valid_loss_round_cv)\n else:\n reg, idx_round_cv, train_loss_round_cv, valid_loss_round_cv = \\\n self.fit_with_round_log(\n boost_round_log_path, cv_count, x_train, y_train, w_train, x_valid, y_valid,\n w_valid, parameters, param_name_list, param_value_list, append_info=append_info)\n\n idx_round = idx_round_cv\n train_loss_round_total.append(train_loss_round_cv)\n valid_loss_round_total.append(valid_loss_round_cv)\n else:\n reg = self.fit(x_train, y_train, w_train, x_valid, y_valid, w_valid, parameters)\n\n # Feature Importance\n if show_importance:\n self.get_importance(reg)\n\n # Prediction\n if save_cv_pred:\n cv_pred_path = \\\n pred_path + 'cv_results/' + self.model_name + '_cv_{}_'.format(cv_count)\n else:\n cv_pred_path = 
None\n pred_test = self.predict(reg, self.x_test, pred_path=cv_pred_path)\n\n # Save Train Probabilities to CSV File\n if save_cv_pred_train:\n cv_pred_train_path = \\\n pred_path + 'cv_pred_train/' + self.model_name + '_cv_{}_'.format(cv_count)\n else:\n cv_pred_train_path = None\n pred_train = self.get_pred_train(reg, x_train, pred_path=cv_pred_train_path)\n pred_train_all = self.get_pred_train(reg, self.x_train, pred_path=cv_pred_train_path)\n\n # Predict Global Validation Set\n if use_global_valid:\n pred_global_valid = self.predict(reg, self.x_global_valid)\n else:\n pred_global_valid = np.array([])\n\n # Get Probabilities of Validation Set\n pred_valid = self.predict(reg, x_valid)\n\n # postscale\n if postscale:\n print('------------------------------------------------------')\n print('[W] PostScaling Results...')\n print('PostScale Rate: {:.6f}'.format(postscale_rate))\n pred_test *= postscale_rate\n pred_train *= postscale_rate\n pred_valid *= postscale_rate\n if use_global_valid:\n pred_global_valid *= postscale_rate\n\n # Print LogLoss\n print('------------------------------------------------------')\n print('Validation Set Era: ', valid_era)\n loss_train, loss_valid, loss_train_w, loss_valid_w = \\\n utils.print_loss(pred_train, y_train, w_train, pred_valid, y_valid, w_valid)\n\n # Print and Get Accuracies of CV\n acc_train_cv, acc_valid_cv, acc_train_cv_era, acc_valid_cv_era = \\\n utils.print_and_get_accuracy(pred_train, y_train, e_train,\n pred_valid, y_valid, e_valid, show_accuracy)\n\n # Print Loss and Accuracy of Global Validation Set\n if use_global_valid:\n loss_global_valid, loss_global_valid_w, acc_global_valid = \\\n utils.print_global_valid_loss_and_acc(\n pred_global_valid, self.y_global_valid, self.w_global_valid)\n pred_global_valid_total.append(pred_global_valid)\n loss_global_valid_total.append(loss_global_valid)\n loss_global_valid_w_total.append(loss_global_valid_w)\n\n # Save Losses to File\n utils.save_loss_log(\n loss_log_path + self.model_name + '_', cv_count, parameters, n_valid, n_cv,\n valid_era, loss_train, loss_valid, loss_train_w, loss_valid_w, train_seed,\n cv_seed, acc_train_cv, acc_valid_cv, acc_train_cv_era, acc_valid_cv_era)\n\n pred_test_total.append(pred_test)\n pred_train_total.append(pred_train_all)\n loss_train_total.append(loss_train)\n loss_valid_total.append(loss_valid)\n loss_train_w_total.append(loss_train_w)\n loss_valid_w_total.append(loss_valid_w)\n\n # CV End Time\n print('------------------------------------------------------')\n print('CV Done! 
Using Time: {}s'.format(time.time() - cv_start_time))\n\n print('======================================================')\n print('Calculating Final Result...')\n\n # Calculate Means of pred and losses\n pred_test_mean, pred_train_mean, loss_train_mean, \\\n loss_valid_mean, loss_train_w_mean, loss_valid_w_mean = \\\n utils.calculate_means(pred_test_total, pred_train_total, loss_train_total, loss_valid_total,\n loss_train_w_total, loss_valid_w_total, weights=cv_weights)\n\n # Save 'num_boost_round'\n if self.model_name in ['xgb', 'lgb']:\n parameters['num_boost_round'] = self.num_boost_round\n\n # Calculate Profit\n profit = 0\n\n # Save Logs of num_boost_round\n if mode == 'auto_train_boost_round':\n if use_global_valid:\n train_loss_round_mean, valid_loss_round_mean, global_valid_loss_round_mean = \\\n utils.calculate_boost_round_means(\n train_loss_round_total, valid_loss_round_total, weights=cv_weights,\n global_valid_loss_round_total=global_valid_loss_round_total)\n self.save_boost_round_log(\n boost_round_log_path, idx_round, train_loss_round_mean,\n valid_loss_round_mean, train_seed, cv_seed, csv_idx,\n parameters, param_name_list, param_value_list, append_info=append_info,\n global_valid_loss_round_mean=global_valid_loss_round_mean, profit=profit)\n else:\n train_loss_round_mean, valid_loss_round_mean = \\\n utils.calculate_boost_round_means(\n train_loss_round_total, valid_loss_round_total, weights=cv_weights)\n self.save_boost_round_log(\n boost_round_log_path, idx_round, train_loss_round_mean,\n valid_loss_round_mean, train_seed, cv_seed, csv_idx, parameters,\n param_name_list, param_value_list, append_info=append_info, profit=profit)\n\n # Save Final Result\n if save_final_pred:\n self.save_final_pred(\n mode, pred_test_mean, pred_path, parameters, csv_idx, train_seed,\n cv_seed, boost_round_log_path, param_name_list, param_value_list,\n file_name_params=file_name_params, append_info=append_info)\n\n # Save Final pred_train\n if save_final_pred_train:\n utils.save_pred_train_to_csv(pred_path + 'final_pred_train/' + self.model_name + '_',\n pred_train_mean, self.y_train)\n\n # Print Total Losses\n utils.print_total_loss(loss_train_mean, loss_valid_mean, loss_train_w_mean,\n loss_valid_w_mean, profit=profit)\n\n # Print and Get Accuracies of CV of All Train Set\n acc_train, acc_train_era = \\\n utils.print_and_get_train_accuracy(pred_train_mean, self.y_train, self.e_train, show_accuracy)\n\n # Save Final Losses to File\n utils.save_final_loss_log(\n loss_log_path + self.model_name + '_', parameters, n_valid, n_cv,\n loss_train_mean, loss_valid_mean, loss_train_w_mean, loss_valid_w_mean,\n train_seed, cv_seed, acc_train, acc_train_era)\n\n # Print Global Validation Information and Save\n if use_global_valid:\n # Calculate Means of Probabilities and Losses\n pred_global_valid_mean, loss_global_valid_mean, loss_global_valid_w_mean = \\\n utils.calculate_global_valid_means(pred_global_valid_total, loss_global_valid_total,\n loss_global_valid_w_total, weights=cv_weights)\n # Print Loss and Accuracy\n acc_total_global_valid = \\\n utils.print_total_global_valid_loss_and_acc(\n pred_global_valid_mean, self.y_global_valid,\n loss_global_valid_mean, loss_global_valid_w_mean)\n # Save csv log\n if save_csv_log:\n self.save_csv_log(\n mode, csv_log_path, param_name_list, param_value_list, csv_idx, loss_train_w_mean,\n loss_valid_w_mean, acc_train, train_seed, cv_seed, n_valid, n_cv, parameters,\n boost_round_log_path=boost_round_log_path, file_name_params=file_name_params,\n 
append_info=append_info, loss_global_valid=loss_global_valid_w_mean,\n acc_global_valid=acc_total_global_valid, profit=profit)\n\n # Save Loss Log to csv File\n if save_csv_log:\n if not use_global_valid:\n self.save_csv_log(\n mode, csv_log_path, param_name_list, param_value_list, csv_idx, loss_train_w_mean,\n loss_valid_w_mean, acc_train, train_seed, cv_seed, n_valid, n_cv, parameters,\n boost_round_log_path=boost_round_log_path, file_name_params=file_name_params,\n append_info=append_info, profit=profit)\n\n # Remove 'num_boost_round' of parameters\n if 'num_boost_round' in parameters:\n parameters.pop('num_boost_round')\n\n # Return Final Result\n if return_pred_test:\n return pred_test_mean\n\n def stack_train(self, x_train, y_train, w_train, x_g_train, x_valid, y_valid,\n w_valid, x_g_valid, x_test, x_g_test, parameters, show_importance=False):\n\n # Select Group Variable\n x_train, x_valid, x_test = self.select_category_variable(x_train, x_g_train, x_valid,\n x_g_valid, x_test, x_g_test)\n\n # Print Start Information and Get Model Name\n self.print_start_info()\n print('Number of Features: ', x_train.shape[1])\n print('------------------------------------------------------')\n\n # Fitting and Training Model\n reg = self.fit(x_train, y_train, w_train, x_valid, y_valid, w_valid, parameters)\n\n # Feature Importance\n if show_importance:\n self.get_importance(reg)\n\n # Prediction\n pred_train = self.predict(reg, x_train)\n pred_valid = self.predict(reg, x_valid)\n pred_test = self.predict(reg, x_test)\n\n # Print LogLoss\n loss_train, loss_valid, loss_train_w, loss_valid_w = \\\n utils.print_loss(pred_train, y_train, w_train, pred_valid, y_valid, w_valid)\n\n losses = [loss_train, loss_valid, loss_train_w, loss_valid_w]\n\n return pred_valid, pred_test, losses\n\n\nclass LRegression(ModelBase):\n \"\"\"\n Logistic Regression\n \"\"\"\n @staticmethod\n def get_reg(parameters):\n\n print('Initialize Model...')\n reg = LogisticRegression(**parameters)\n\n return reg\n\n def print_start_info(self):\n\n print('======================================================')\n print('Training Logistic Regression...')\n\n self.model_name = 'lr'\n\n def get_importance(self, reg):\n\n print('------------------------------------------------------')\n print('Feature Importance')\n self.importance = np.abs(reg.coef_)[0]\n indices = np.argsort(self.importance)[::-1]\n\n feature_num = self.x_train.shape[1]\n\n for f in range(feature_num):\n print(\"%d | feature %d | %f\" % (f + 1, indices[f], self.importance[indices[f]]))\n\n\nclass KNearestNeighbor(ModelBase):\n \"\"\"\n k-Nearest Neighbor Classifier\n \"\"\"\n @staticmethod\n def get_reg(parameters):\n\n print('Initialize Model...')\n reg = KNeighborsClassifier(**parameters)\n\n return reg\n\n def print_start_info(self):\n\n print('======================================================')\n print('Training k-Nearest Neighbor Classifier...')\n\n self.model_name = 'knn'\n\n\nclass SupportVectorClustering(ModelBase):\n \"\"\"\n SVM - Support Vector Clustering\n \"\"\"\n @staticmethod\n def get_reg(parameters):\n\n print('Initialize Model...')\n reg = SVC(**parameters)\n\n return reg\n\n def print_start_info(self):\n\n print('======================================================')\n print('Training Support Vector Clustering...')\n\n self.model_name = 'svc'\n\n\nclass Gaussian(ModelBase):\n \"\"\"\n Gaussian NB\n \"\"\"\n @staticmethod\n def get_reg(parameters):\n\n print('Initialize Model...')\n reg = GaussianNB(**parameters)\n\n return reg\n\n def 
print_start_info(self):\n\n        print('======================================================')\n        print('Training Gaussian...')\n\n        self.model_name = 'gs'\n\n\nclass DecisionTree(ModelBase):\n    \"\"\"\n    Decision Tree\n    \"\"\"\n    @staticmethod\n    def get_reg(parameters):\n\n        print('Initialize Model...')\n        reg = DecisionTreeClassifier(**parameters)\n\n        return reg\n\n    def print_start_info(self):\n\n        print('======================================================')\n        print('Training Decision Tree...')\n\n        self.model_name = 'dt'\n\n\nclass RandomForest(ModelBase):\n    \"\"\"\n    Random Forest\n    \"\"\"\n    @staticmethod\n    def get_reg(parameters):\n\n        print('Initialize Model...')\n        reg = RandomForestRegressor(**parameters)\n\n        return reg\n\n    def print_start_info(self):\n\n        print('======================================================')\n        print('Training Random Forest...')\n\n        self.model_name = 'rf'\n\n\nclass ExtraTrees(ModelBase):\n    \"\"\"\n    Extra Trees\n    \"\"\"\n    @staticmethod\n    def get_reg(parameters):\n\n        print('Initialize Model...')\n        reg = ExtraTreesClassifier(**parameters)\n\n        return reg\n\n    def print_start_info(self):\n\n        print('======================================================')\n        print('Training Extra Trees...')\n\n        self.model_name = 'et'\n\n\nclass AdaBoost(ModelBase):\n    \"\"\"\n    AdaBoost\n    \"\"\"\n    @staticmethod\n    def get_reg(parameters):\n\n        print('Initialize Model...')\n        reg = AdaBoostClassifier(**parameters)\n\n        return reg\n\n    def print_start_info(self):\n\n        print('======================================================')\n        print('Training AdaBoost...')\n\n        self.model_name = 'ab'\n\n\nclass GradientBoosting(ModelBase):\n    \"\"\"\n    Gradient Boosting\n    \"\"\"\n    @staticmethod\n    def get_reg(parameters):\n\n        print('Initialize Model...')\n        reg = GradientBoostingClassifier(**parameters)\n\n        return reg\n\n    def print_start_info(self):\n\n        print('======================================================')\n        print('Training Gradient Boosting...')\n\n        self.model_name = 'gb'\n\n\nclass XGBoost(ModelBase):\n    \"\"\"\n    XGBoost\n    \"\"\"\n    def __init__(self, x_tr, y_tr, w_tr, e_tr, x_te, id_te,\n                 x_va=None, y_va=None, w_va=None, e_va=None,\n                 num_boost_round=None, use_multi_group=False):\n\n        super(XGBoost, self).__init__(x_tr, y_tr, w_tr, e_tr, x_te, id_te,\n                                      x_va, y_va, w_va, e_va, use_multi_group)\n\n        self.num_boost_round = num_boost_round\n\n    def print_start_info(self):\n\n        print('======================================================')\n        print('Training XGBoost...')\n\n        self.model_name = 'xgb'\n\n    @staticmethod\n    def logloss_obj(pred, d_train):\n\n        y = d_train.get_label()\n\n        grad = (pred - y) / ((1.0 - pred) * pred)\n        hess = (pred * pred - 2.0 * pred * y + y) / ((1.0 - pred) * (1.0 - pred) * pred * pred)\n\n        return grad, hess\n\n    def fit(self, x_train, y_train, w_train, x_valid, y_valid, w_valid, parameters=None):\n\n        d_train = xgb.DMatrix(x_train, label=y_train, weight=w_train)\n        d_valid = xgb.DMatrix(x_valid, label=y_valid, weight=w_valid)\n\n        # Booster\n        if self.use_global_valid:\n            d_gl_valid = xgb.DMatrix(self.x_global_valid, label=self.y_global_valid, weight=self.w_global_valid)\n            eval_list = [(d_train, 'Train'), (d_valid, 'Valid'), (d_gl_valid, 'Global_Valid')]\n        else:\n            eval_list = [(d_train, 'Train'), (d_valid, 'Valid')]\n\n        if self.postscale:\n            if self.use_custom_obj:\n                bst = xgb.train(parameters, d_train, num_boost_round=self.num_boost_round,\n                                evals=eval_list, obj=self.logloss_obj, feval=self.xgb_postscale_feval)\n            else:\n                bst = xgb.train(parameters, d_train, 
num_boost_round=self.num_boost_round,\n evals=eval_list, feval=self.xgb_postscale_feval)\n else:\n if self.use_custom_obj:\n bst = xgb.train(parameters, d_train, num_boost_round=self.num_boost_round,\n obj=self.logloss_obj, evals=eval_list)\n else:\n bst = xgb.train(parameters, d_train, num_boost_round=self.num_boost_round, evals=eval_list)\n\n return bst\n\n def get_pattern(self):\n\n if self.use_global_valid:\n if self.postscale:\n return re.compile(r'\\[(\\d*)\\].*\\tTrain-logloss:(.*)\\tValid-logloss:(.*)\\tGlobal_Valid-logloss:(.*)')\n else:\n return re.compile(r'\\[(\\d*)\\]\\tTrain-logloss:(.*)\\tValid-logloss:(.*)\\tGlobal_Valid-logloss:(.*)')\n else:\n if self.postscale:\n return re.compile(r'\\[(\\d*)\\].*\\tTrain-logloss:(.*)\\tValid-logloss:(.*)')\n else:\n return re.compile(r'\\[(\\d*)\\]\\tTrain-logloss:(.*)\\tValid-logloss:(.*)')\n\n def get_importance(self, model):\n\n print('------------------------------------------------------')\n print('Feature Importance')\n\n self.importance = model.get_fscore()\n sorted_importance = sorted(self.importance.items(), key=lambda d: d[1], reverse=True)\n\n feature_num = len(self.importance)\n\n for i in range(feature_num):\n print('{} | feature {} | {}'.format(i + 1, sorted_importance[i][0], sorted_importance[i][1]))\n\n def predict(self, model, x_test, pred_path=None):\n\n print('------------------------------------------------------')\n print('Predicting Test Probability...')\n\n pred_test = model.predict(xgb.DMatrix(x_test))\n\n if pred_path is not None:\n utils.save_pred_to_csv(pred_path, self.id_test, pred_test)\n\n return pred_test\n\n def get_pred_train(self, model, x_train, pred_path=None):\n\n print('------------------------------------------------------')\n print('Predicting Train Probability...')\n\n pred_train = model.predict(xgb.DMatrix(x_train))\n\n if pred_path is not None:\n utils.save_pred_train_to_csv(pred_path, pred_train, self.y_train)\n\n return pred_train\n\n\nclass SKLearnXGBoost(ModelBase):\n \"\"\"\n XGBoost using sklearn module\n \"\"\"\n @staticmethod\n def get_reg(parameters=None):\n\n print('Initialize Model...')\n reg = XGBClassifier(**parameters)\n\n return reg\n\n def print_start_info(self):\n\n print('======================================================')\n print('Training XGBoost(sklearn)...')\n\n self.model_name = 'xgb_sk'\n\n def fit(self, x_train, y_train, w_train, x_valid, y_valid, w_valid, parameters=None):\n\n # Get Classifier\n reg = self.get_reg(parameters)\n\n # Training Model\n reg.fit(x_train, y_train, sample_weight=w_train,\n eval_set=[(x_train, y_train), (x_valid, y_valid)],\n early_stopping_rounds=100, eval_metric='logloss', verbose=True)\n\n return reg\n\n def get_importance(self, reg):\n\n print('------------------------------------------------------')\n print('Feature Importance')\n\n self.importance = reg.feature_importances_\n self.indices = np.argsort(self.importance)[::-1]\n\n feature_num = len(self.importance)\n\n for f in range(feature_num):\n print(\"%d | feature %d | %f\" % (f + 1, self.indices[f], self.importance[self.indices[f]]))\n\n\nclass LightGBM(ModelBase):\n \"\"\"\n LightGBM\n \"\"\"\n def __init__(self, x_tr, y_tr, w_tr, e_tr, x_te, id_te,\n x_va=None, y_va=None, w_va=None, e_va=None,\n num_boost_round=None, use_multi_group=False):\n\n super(LightGBM, self).__init__(x_tr, y_tr, w_tr, e_tr, x_te, id_te,\n x_va, y_va, w_va, e_va, use_multi_group)\n\n self.num_boost_round = num_boost_round\n\n def print_start_info(self):\n\n 
print('======================================================')\n print('Training LightGBM...')\n\n self.model_name = 'lgb'\n\n @staticmethod\n def select_category_variable(x_train, x_g_train, x_valid, x_g_valid, x_test, x_g_test):\n\n return x_g_train, x_g_valid, x_g_test\n\n def fit(self, x_train, y_train, w_train, x_valid, y_valid, w_valid, parameters=None):\n\n # Get Category Feature's Index\n idx_category = utils.get_idx_category(x_train, self.use_multi_group)\n\n d_train = lgb.Dataset(x_train, label=y_train, weight=w_train, categorical_feature=idx_category)\n d_valid = lgb.Dataset(x_valid, label=y_valid, weight=w_valid, categorical_feature=idx_category)\n\n # Booster\n if self.use_global_valid:\n d_gl_valid = lgb.Dataset(self.x_global_valid, label=self.y_global_valid,\n weight=self.w_global_valid, categorical_feature=idx_category)\n if self.postscale:\n bst = lgb.train(parameters, d_train, num_boost_round=self.num_boost_round,\n valid_sets=[d_valid, d_gl_valid, d_train],\n valid_names=['Valid', 'Global_Valid', 'Train'], feval=self.lgb_postscale_feval)\n else:\n bst = lgb.train(parameters, d_train, num_boost_round=self.num_boost_round,\n valid_sets=[d_valid, d_gl_valid, d_train],\n valid_names=['Valid', 'Global_Valid', 'Train'])\n else:\n if self.postscale:\n bst = lgb.train(parameters, d_train, num_boost_round=self.num_boost_round,\n valid_sets=[d_valid, d_train], valid_names=['Valid', 'Train'],\n feval=self.lgb_postscale_feval)\n else:\n bst = lgb.train(parameters, d_train, num_boost_round=self.num_boost_round,\n valid_sets=[d_valid, d_train], valid_names=['Valid', 'Train'])\n\n return bst\n\n def get_pattern(self):\n\n if self.use_global_valid:\n return re.compile(r\"\\[(\\d*)\\]\\tTrain\\'s binary_logloss: (.*)\\tValid\\'s binary_logloss:(.*)\\tGlobal_Valid\\'s binary_logloss:(.*)\")\n else:\n return re.compile(r\"\\[(\\d*)\\]\\tTrain\\'s binary_logloss: (.*)\\tValid\\'s binary_logloss:(.*)\")\n\n @staticmethod\n def logloss_obj(y, pred):\n\n grad = (pred - y) / ((1 - pred) * pred)\n hess = (pred * pred - 2 * pred * y + y) / ((1 - pred) * (1 - pred) * pred * pred)\n\n return grad, hess\n\n def get_importance(self, bst):\n\n print('------------------------------------------------------')\n print('Feature Importance')\n\n self.importance = bst.feature_importance()\n self.indices = np.argsort(self.importance)[::-1]\n\n feature_num = len(self.importance)\n\n for f in range(feature_num):\n print(\"%d | feature %d | %d\" % (f + 1, self.indices[f], self.importance[self.indices[f]]))\n\n print('\\n')\n\n def predict(self, bst, x_test, pred_path=None):\n\n print('------------------------------------------------------')\n print('Predicting Test Probability...')\n\n pred_test = bst.predict(x_test)\n\n if pred_path is not None:\n utils.save_pred_to_csv(pred_path, self.id_test, pred_test)\n\n return pred_test\n\n def get_pred_train(self, bst, x_train, pred_path=None):\n\n print('------------------------------------------------------')\n print('Predicting Train Probability...')\n\n pred_train = bst.predict(x_train)\n\n if pred_path is not None:\n utils.save_pred_train_to_csv(pred_path, pred_train, self.y_train)\n\n return pred_train\n\n\nclass SKLearnLightGBM(ModelBase):\n \"\"\"\n LightGBM using sklearn module\n \"\"\"\n @staticmethod\n def get_reg(parameters=None):\n\n print('Initialize Model...')\n reg = LGBMClassifier(**parameters)\n\n return reg\n\n def print_start_info(self):\n\n print('======================================================')\n print('Training LightGBM(sklearn)...')\n\n 
self.model_name = 'lgb_sk'\n\n def fit(self, x_train, y_train, w_train, x_valid, y_valid, w_valid, parameters=None):\n\n # Get Classifier\n reg = self.get_reg(parameters)\n\n # Get Category Feature's Index\n idx_category = utils.get_idx_category(x_train, self.use_multi_group)\n\n # Fitting and Training Model\n reg.fit(x_train, y_train, sample_weight=w_train, categorical_feature=idx_category,\n eval_set=[(x_train, y_train), (x_valid, y_valid)], eval_names=['train', 'eval'],\n early_stopping_rounds=100, eval_sample_weight=[w_train, w_valid],\n eval_metric='logloss', verbose=True)\n\n return reg\n\n\nclass CatBoost(ModelBase):\n \"\"\"\n CatBoost\n \"\"\"\n @staticmethod\n def get_reg(parameters=None):\n\n reg = CatBoostClassifier(**parameters)\n\n return reg\n\n def print_start_info(self):\n\n print('======================================================')\n print('Training CatBoost...')\n\n self.model_name = 'cb'\n\n @staticmethod\n def select_category_variable(x_train, x_g_train, x_valid, x_g_valid, x_test, x_g_test):\n\n return x_g_train, x_g_valid, x_g_test\n\n def fit(self, x_train, y_train, w_train, x_valid, y_valid, w_valid, parameters=None):\n\n # Get Classifier\n reg = self.get_reg(parameters)\n\n # Get Category Feature's Index\n idx_category = utils.get_idx_category(x_train, self.use_multi_group)\n\n # Convert Zeros in Weights to Small Positive Numbers\n w_train = [0.001 if w == 0 else w for w in w_train]\n\n # Fitting and Training Model\n reg.fit(X=x_train, y=y_train, cat_features=idx_category, sample_weight=w_train,\n baseline=None, use_best_model=None, eval_set=(x_valid, y_valid), verbose=True, plot=False)\n\n return reg\n\n def get_pattern(self):\n\n return re.compile(r'(\\d*):\\tlearn (.*)\\ttest (.*)\\tbestTest')\n\n def get_importance(self, reg):\n\n print('------------------------------------------------------')\n print('Feature Importance')\n\n self.importance = reg.feature_importances_\n self.indices = np.argsort(self.importance)[::-1]\n\n feature_num = len(self.importance)\n\n for f in range(feature_num):\n print(\"%d | feature %d | %d\" % (f + 1, self.indices[f], self.importance[self.indices[f]]))\n","sub_path":"src/models/classifiers.py","file_name":"classifiers.py","file_ext":"py","file_size_in_byte":50178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"466147952","text":"\"\"\"\nTo complete this assignment, you should use this API endpoint that has a static\nsubset of the Google Data:\nhttp://python-data.dr-chuck.net/geojson\n\nThis API uses the same parameters (sensor and address) as the Google API. This\nAPI also has no rate limit so you can test as often as you like. 
If you visit\nthe URL with no parameters, you get a list of all of the address values which\ncan be used with this API.\n\nTo call the API, you need to provide a sensor=false parameter and the address\nthat you are requesting as the address= parameter that is properly URL encoded\nusing the urllib.urlencode() fuction as shown in\nhttp://www.pythonlearn.com/code/geojson.py\n\nTEST DATA / SAMPLE EXECUTION\nYou can test to see if your program is working with a location of \"South Federal\nUniversity\" which will have a place_id of \"ChIJJ8oO7_B_bIcR2AlhC8nKlok\".\n\"\"\"\n\n\"\"\"\nJSON Response\n{\n \"results\": [\n {\n \"access_points\": [],\n \"address_components\": [\n {\n \"long_name\": \"#300\",\n \"short_name\": \"#300\",\n \"types\": [\n \"subpremise\"\n ]\n },\n {\n \"long_name\": \"4001\",\n \"short_name\": \"4001\",\n \"types\": [\n \"street_number\"\n ]\n },\n {\n \"long_name\": \"700 East\",\n \"short_name\": \"700 E\",\n \"types\": [\n \"route\"\n ]\n },\n {\n \"long_name\": \"Salt Lake City\",\n \"short_name\": \"Salt Lake City\",\n \"types\": [\n \"locality\",\n \"political\"\n ]\n },\n {\n \"long_name\": \"Salt Lake County\",\n \"short_name\": \"Salt Lake County\",\n \"types\": [\n \"administrative_area_level_2\",\n \"political\"\n ]\n },\n {\n \"long_name\": \"Utah\",\n \"short_name\": \"UT\",\n \"types\": [\n \"administrative_area_level_1\",\n \"political\"\n ]\n },\n {\n \"long_name\": \"United States\",\n \"short_name\": \"US\",\n \"types\": [\n \"country\",\n \"political\"\n ]\n },\n {\n \"long_name\": \"84107\",\n \"short_name\": \"84107\",\n \"types\": [\n \"postal_code\"\n ]\n }\n ],\n \"formatted_address\": \"4001 700 E #300, Salt Lake City, UT 84107, USA\",\n \"geometry\": {\n \"location\": {\n \"lat\": 40.6849285,\n \"lng\": -111.8700525\n },\n \"location_type\": \"ROOFTOP\",\n \"viewport\": {\n \"northeast\": {\n \"lat\": 40.68627748029149,\n \"lng\": -111.8687035197085\n },\n \"southwest\": {\n \"lat\": 40.6835795197085,\n \"lng\": -111.8714014802915\n }\n }\n },\n \"place_id\": \"ChIJBVZvCm6KUocRoh4bYfH-h2M\",\n \"plus_code\": {\n \"compound_code\": \"M4MH+XX Salt Lake City, Utah, United States\",\n \"global_code\": \"85GCM4MH+XX\"\n },\n \"types\": [\n \"establishment\",\n \"point_of_interest\",\n \"university\"\n ]\n }\n ],\n \"status\": \"OK\"\n}\n\"\"\"\n\nimport urllib.request as ur\nimport urllib.parse as up\nimport json\n\nservice_url = \"http://py4e-data.dr-chuck.net/json?\"\n\naddress_input = input(\"Enter location: \")\nparams = {\"sensor\": \"false\", \"address\": address_input, \"key\": 42}\nurl = service_url + up.urlencode(params)\n\nprint(\"Receiving:\", url)\ndata = ur.urlopen(url).read()\nprint(\"Retrieved\", len(data), \"characters\")\njson_obj = json.loads(data)\nplace_id = json_obj[\"results\"][0][\"place_id\"]\nprint(\"Place ID:\", place_id)\n","sub_path":"Access Web Data/Using_GeoJSON_API.py","file_name":"Using_GeoJSON_API.py","file_ext":"py","file_size_in_byte":3566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"604100435","text":"# 给你一个整数数组 nums ,请你找出数组中乘积最大的连续子数组(该子数组中至少包含一个数字),并返回该子数组所对应的乘积。 \n# \n# \n# \n# 示例 1: \n# \n# 输入: [2,3,-2,4]\n# 输出: 6\n# 解释: 子数组 [2,3] 有最大乘积 6。\n# \n# \n# 示例 2: \n# \n# 输入: [-2,0,-1]\n# 输出: 0\n# 解释: 结果不能为 2, 因为 [-2,-1] 不是子数组。 \n# Related Topics 数组 动态规划 \n# 👍 829 👎 0\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nclass Solution:\n def maxProduct(self, nums: List[int]) -> int:\n # dp[i][0] 最小值,dp[i][1]最大值\n if not nums: return 0\n 
dp_max,dp_min,max_res = nums[0],nums[0],nums[0]\n        for i in range(1,len(nums)):\n            if nums[i]<0: dp_max,dp_min = dp_min,dp_max\n            dp_max = max(dp_max * nums[i],nums[i])\n            dp_min = min(dp_min * nums[i],nums[i])\n            max_res = max(dp_max,max_res)\n        return max_res\n\n# leetcode submit region end(Prohibit modification and deletion)\n","sub_path":"Week_06/[152]乘积最大子数组.py","file_name":"[152]乘积最大子数组.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"550388916","text":"import numpy as np\nimport paddle.fluid as fluid\nimport paddle.fluid.dygraph as dygraph # load the dygraph (dynamic graph) module\nfrom paddle.fluid.dygraph import Linear\nimport sklearn.datasets as sd\nclass Regressor(fluid.dygraph.Layer):\n    def __init__(self):\n        super(Regressor, self).__init__()\n        self.fc = Linear(input_dim=13,output_dim=1,act=None)\n    # forward computation of the network\n    def forward(self, inputs):\n        x = self.fc(inputs)\n        return x\ndef load_data():\n    data = sd.load_boston()\n    size = int(len(data.data)*0.8)\n    x = data.data[:size,:]\n    y = data.target[:size]\n    data = np.column_stack((x, y))\n    maximum = data.max(axis=0)\n    minimum = data.min(axis=0)\n    avgs = data.sum(axis=0)/data.shape[0]\n    global max_values\n    global min_values\n    global avg_values\n    max_values = maximum\n    min_values = minimum\n    avg_values = avgs\n    for i in range(data.shape[1]):\n        data[:,i] = (data[:,i] - avg_values[i]) / (max_values[i] - min_values[i])\n    print(data.shape)\n    return data\ndef process():\n    data = sd.load_boston()\n    size = int(len(data.data) * 0.8)\n    x = data.data[size:, :]\n    y = data.target[size:]\n    data = np.column_stack((x, y))\n    for i in range(data.shape[1]):\n        data[:, i] = (data[:, i] - avg_values[i]) / (max_values[i] - min_values[i])\n    return data\nwith dygraph.guard():\n    one = load_data()\n    data = process()\n    data = np.array(data).astype('float32')\n    test_x = data[:,:-1]\n    test_y = data[:,-1]\n    x = fluid.dygraph.to_variable(test_x)\n    model = Regressor()\n    # the argument is the file path the model parameters were saved under\n    model_dict,_ = fluid.load_dygraph('LR_model')\n    model.load_dict(model_dict)\n    model.eval()\n    # the argument is the file path of the dataset\n    # test_data,label = load_one_example()\n    # convert the data into the dygraph variable format\n    results = model(x)\n    # de-normalize the results\n    y = test_y * (max_values[-1] - min_values[-1]) + avg_values[-1]\n    results = results * (max_values[-1] - min_values[-1]) + avg_values[-1]\n    for result,real in zip(results,y):\n        print(\"Inference result is {}, the corresponding label is {}\".format(result.numpy(), real))\n","sub_path":"Paddle/day01/demo02.py","file_name":"demo02.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"103436586","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/5/8 11:19\n# @Author : WuxieYaYa\n\"\"\"\nIn a 2D matrix made up of 0s and 1s, find the largest square containing only 1s and return its area.\n\nExample:\n\nInput:\n\n1 0 1 0 0\n1 0 1 1 1\n1 1 1 1 1\n1 0 0 1 0\n\nOutput: 4\n\nLink: https://leetcode-cn.com/problems/maximal-square\n\"\"\"\n\n\ndef maximalSquare(matrix):\n    if len(matrix) == 0 or len(matrix[0]) == 0:\n        return 0\n    c, r = len(matrix), len(matrix[0])\n    dp = [[0] * r for _ in range(c)] # independent rows (deep copy)\n    ans = 0\n    for i in range(c):\n        for j in range(r):\n            if matrix[i][j] == 1:\n                if i == 0 or j == 0:\n                    dp[i][j] = 1\n                else:\n                    dp[i][j] = min(dp[i - 1][j], dp[i - 1][j - 1], dp[i][j - 1]) + 1\n\n                if dp[i][j] > ans:\n                    ans = dp[i][j]\n\n    return ans * ans\n\n\nif __name__ == '__main__':\n    ma = [[1, 0, 1, 0, 0],\n          [1, 0, 1, 1, 1],\n          [1, 1, 1, 1, 1],\n          [1, 0, 0, 1, 0]]\n    # ma = [[1,1],[1,1]]\n    
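# For the 4x5 sample matrix the expected output is 4 (a 2x2 all-ones square).\n    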
print(maximalSquare(ma))\n","sub_path":"dp动态规划/221. 最大正方形.py","file_name":"221. 最大正方形.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"482903076","text":"# Shared crawler imports: these must live at module level, because imports\n# made inside init() would be local names invisible to MainPage() and News().\nimport requests\nfrom bs4 import BeautifulSoup\nimport json\nimport re\n\n# Crawler initialization function\ndef init():\n    pass\n\n# Scrape the headline links from the 163 portal page\ndef MainPage(url):\n    Result = []\n    res = BeautifulSoup((requests.get(url)).text,'html.parser')\n    for Res in res.select('.cm_fb'):\n        Result.append(Res.select('a')[0].text)\n        Result.append(Res.select('a')[0]['href'])\n    return Result\n\n# Scrape the body of a 163 news article\ndef News(newsurl):\n    result = {}\n    news = BeautifulSoup((requests.get(newsurl)).text,'html.parser')\n    result['title'] = news.select('h1')[0].text\n    result['time'] = news.select('.post_time_source')[0].contents[0].rstrip('\u3000来源: ')\n    result['article'] = news.select('.post_text')[0].text\n    return result\n\ninit()\n","sub_path":"Python/Spider/文章提取.py","file_name":"文章提取.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"618456790","text":"# -*- mode: python ; coding: utf-8 -*-\n\n\nblock_cipher = None\n\n\na = Analysis(['proyecto.py'],\n             pathex=['C:\\\\Users\\\\eduar_000.EDUARDO\\\\Desktop\\\\py_inventari'],\n             binaries=[],\n             datas=[('mydatabase.db','.'),('mydatabaseClientes.db','.'),('mydatabaseCOMPRAS.db','.'),('mydatabaseHistorial.db','.'),('mydatabaseUsuarios.db','.'),('basura.png','.'),('carro.png','.'),('editar.png','.'),('enviar.png','.'),('escaner.png','.'),('historial.png','.'),('renvolso.png','.')],\n             hiddenimports=[],\n             hookspath=[],\n             runtime_hooks=[],\n             excludes=[],\n             win_no_prefer_redirects=False,\n             win_private_assemblies=False,\n             cipher=block_cipher,\n             noarchive=False)\npyz = PYZ(a.pure, a.zipped_data,\n             cipher=block_cipher)\nexe = EXE(pyz,\n          a.scripts,\n          [],\n          exclude_binaries=True,\n          name='proyecto',\n          debug=False,\n          bootloader_ignore_signals=False,\n          strip=False,\n          upx=True,\n          console=True )\ncoll = COLLECT(exe,\n               a.binaries,\n               a.zipfiles,\n               a.datas,\n               strip=False,\n               upx=True,\n               upx_exclude=[],\n               name='proyecto')\n","sub_path":"proyecto.spec","file_name":"proyecto.spec","file_ext":"spec","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"75226462","text":"from __future__ import absolute_import\nimport Cocoa\nimport Foundation\nimport CoreFoundation\nimport os\n\nfrom ..xcrun import *\nfrom ..Path import *\nfrom .PBXResolver import *\nfrom .PBX_Base import *\n\nclass PBX_Base_Reference(PBX_Base):\n    \n    def __init__(self, lookup_func, dictionary, project, identifier):\n        self.name = 'PBX_BASE_REFERENCE';\n        self.identifier = identifier;\n        self.fs_path = None;\n        self.fs_found = False;\n    \n    # Absolute Path = \n    def resolveAbsolutePath(self, project, parent_path):\n        return Path(self.path.obj_path, '');\n    \n    # Relative to Group = \n    def resolveGroupPath(self, project, parent_path):\n        obj_path = '';\n        if self.path != None:\n            obj_path = self.path.obj_path;\n        return Path(os.path.join(parent_path, obj_path), '');\n    \n    # Relative to Project = SOURCE_ROOT\n    def resolveSourceRootPath(self, project, parent_path):\n        obj_path = '';\n        if self.path != None:\n            obj_path = self.path.obj_path;\n        return Path(os.path.join(project.projectRoot.obj_path, obj_path), '');\n    \n    # Relative to Developer Directory = DEVELOPER_DIR\n    def resolveDeveloperDirPath(self, project, parent_path):\n        developer_dir = 
xcrun.resolve_developer_path();\n obj_path = '';\n if self.path != None:\n obj_path = self.path.obj_path;\n return Path(os.path.join(developer_dir, obj_path), '');\n \n # Relative to Build Products = BUILT_PRODUCTS_DIR\n def resolveBuildProductsPath(self, project, parent_path):\n target = project.targetForProductRef(self.identifier)[0];\n default_config = target.buildConfigurationList.defaultBuildConfiguration();\n symroot_path = default_config.buildSettingForKey('CONFIGURATION_BUILD_DIR');\n # default for now\n symroot_path = 'build'; \n build_location = xcrun.BuildLocation(project, symroot_path);\n obj_path = '';\n if self.path != None:\n obj_path = self.path.obj_path;\n # this should change to be the correct CONFIGURATION_BUILD_DIR path\n return Path(os.path.join(build_location, obj_path), '');\n \n # Relative to SDK = SDKROOT\n def resolveSDKPath(self, project, parent_path):\n target = project.targetForProductRef(self.identifier)[0];\n default_config = target.buildConfigurationList.defaultBuildConfiguration();\n sdk_path = xcrun.resolve_sdk_path(default_config.buildSettingForKey('SDKROOT'));\n obj_path = '';\n if self.path != None:\n obj_path = self.path.obj_path;\n return Path(os.path.join(sdk_path, obj_path), '');\n \n def lookupPathType(self, action_name):\n lookup = {\n '': self.resolveAbsolutePath,\n '': self.resolveGroupPath,\n 'SOURCE_ROOT': self.resolveSourceRootPath,\n 'DEVELOPER_DIR': self.resolveDeveloperDirPath,\n 'BUILT_PRODUCTS_DIR': self.resolveBuildProductsPath,\n 'SDKROOT': self.resolveSDKPath\n };\n if action_name in lookup.keys():\n return lookup[action_name];\n else:\n return None;\n \n def resolvePath(self, project, parent_path):\n action = self.lookupPathType(self.sourceTree);\n if action != None:\n self.fs_path = action(project, parent_path.obj_path);\n self.fs_found = os.path.exists(self.fs_path.obj_path);\n \n if hasattr(self, 'children'):\n self.children = list(map(lambda child: child.resolvePath(project, self.fs_path), self.children));\n ","sub_path":"PBX/PBX_Base_Reference.py","file_name":"PBX_Base_Reference.py","file_ext":"py","file_size_in_byte":3631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"8063044","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nimport select\nimport h5py\nfrom scipy import sparse\nimport time\n\nclass Util:\n \n \n def softmax(self, X):\n '''numerically stable softmax function\n '''\n max_row_values = np.matrix(np.max(X,axis=1)).T\n result = np.exp(X - max_row_values)\n sums = np.matrix(np.sum(result,axis=1)) \n return result/sums\n \n \n def evolutionary_optimization(self, weight, X,y, func, percentile=3, population= 500, noise_variance=0.5, direction='max', epochs = 12): \n best_error = 0 if direction == 'max' else 1\n rdm = np.random.RandomState(1234)\n best_noise = 0\n t0 = time.time()\n best_weight = weight\n for epoch in range(epochs):\n best_weights = []\n best_errors = []\n best_noises = []\n for i in range(population):\n noise = rdm.normal(0,noise_variance,(weight.shape))\n #noise = gpu.randn(m,1,1)/5.0\n current = func(X,y, noise, weight)\n #print 'Cross validation error: {0}'.format(current)\n best_weights.append(weight)\n best_errors.append(current)\n best_noises.append(noise)\n if direction == 'max' and current > best_error or (direction == 'min' and current < best_error): \n best_error = current \n best_noise = noise \n best_weight = weight \n \n print ('EPOCH: {0}, best_error: {1}'.format(epoch,best_error)) \n if direction == 'max':\n 
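# Keep the candidates whose scores clear the percentile cutoff; their\n                # perturbed weights are averaged below to seed the next epoch's weight.\n                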
idx = np.where(np.array(best_errors) >= np.percentile(best_errors, q=percentile))[0]\n else:\n idx = np.where(np.array(best_errors) <= np.percentile(best_errors, q=percentile))[0]\n \n weight = np.mean((np.array(best_weights)[idx] + np.array(best_noises)[idx]),axis=0) \n \n \n print (best_error)\n print (best_noise.T) \n print (best_weight.T)\n print (time.time() - t0)\n \n def strings_to_classes(self, strings):\n ret_classes = []\n dict_classes = {}\n i = 0\n for val in strings:\n if val not in dict_classes.keys():\n dict_classes[val] = i \n ret_classes.append(i)\n i+=1\n else:\n ret_classes.append(dict_classes[val]) \n \n return np.array(ret_classes)\n \n def hyperparameter_fitting(self, fun, data, means, lower_vals, upper_vals, positive=True, iter=20): \n def get_new_params(data, means, lower_vals, upper_vals, positive=True):\n data = np.array(data) \n ret_params = np.zeros_like(np.array(means))\n if data.shape[0] > 5:\n best_result_idx = np.argmax(data[:,-1])\n means = data[best_result_idx,:-1] \n \n for i, mean in enumerate(means):\n lower = lower_vals[i]\n upper = upper_vals[i] \n if data.shape[0] > 10: \n for j in range(len(means)):\n upper = np.percentile(data[:,-1], 75) \n variance = np.var(data[data[:,-1] > upper,j],axis=0) \n #mean = np.mean(data[data[:,-1] > upper,j],axis=0)\n else: \n variance = ((upper - lower)/ (2* 1.96))**2\n \n rdm_value = np.random.normal(mean,variance)\n if positive: \n while rdm_value <= 0:\n rdm_value = np.random.normal(mean,variance) \n \n ret_params[i] = rdm_value \n \n return ret_params\n \n params = get_new_params(data, means, lower_vals, upper_vals)\n param_data = []\n for epoch in range(iter):\n cv_score = fun(params)\n print ('CV score: {0}'.format(cv_score))\n param_data.append(params.tolist() + [cv_score])\n params = get_new_params(param_data,means, lower_vals, upper_vals)\n \n print ('Best parameter: {0}'.format(get_new_params(param_data,means, lower_vals, upper_vals)))\n \n def create_t_matrix(self, y):\n classes = np.max(y)\n t = np.zeros((y.shape[0], classes+1))\n for i in range(y.shape[0]):\n t[i, y[i]] = 1\n \n return t\n \n def create_balanced_set_index(self, y, X): \n labels_and_cases = []\n labels = np.max(y)\n a = np.zeros((labels+1,))\n for i in range(a.shape[0]):\n a[i] = np.sum(y==i) \n labels_and_cases.append(np.where(y==i)[0].tolist())\n \n a_original = a.copy() \n X_new = np.zeros((X.shape)) \n y_new = np.zeros((X.shape[0]))\n for row in range(X.shape[0]):\n next_label = np.argmax(a)\n if len(labels_and_cases[next_label]) > 0: \n y_new[row] = next_label\n X_new[row] = X[labels_and_cases[next_label].pop()]\n a += a_original*(np.arange(0,labels+1)!=next_label)\n \n return y_new, X_new\n \n def create_balanced_index_vector(self, y): \n labels_and_cases = []\n labels = np.max(y)\n a = np.zeros((labels+1,))\n for i in range(a.shape[0]):\n a[i] = np.sum(y==i) \n labels_and_cases.append(np.where(y==i)[0].tolist())\n \n a_original = a.copy() \n y_idx = []\n for row in range(y.shape[0]):\n next_label = np.argmax(a)\n if len(labels_and_cases[next_label]) > 0: \n y_idx.append(labels_and_cases[next_label].pop()) \n a += a_original*(np.arange(0,labels+1)!=next_label)\n \n return np.array(y_idx)\n \n def save_sparse_matrix(self, filename,x): \n x = sparse.csr_matrix(x)\n data=x.data\n indices=x.indices\n indptr=x.indptr\n shape=x.shape\n file = h5py.File(filename,'w')\n file.create_dataset(\"indices\", data=indices)\n file.create_dataset(\"indptr\", data=indptr)\n file.create_dataset(\"data\", data=data)\n file.create_dataset(\"shape\", 
data=shape)\n        file.close()\n\n    def load_sparse_matrix(self, filename):\n        f = h5py.File(filename,'r')\n        z = sparse.csr_matrix( (f['data'],f['indices'],f['indptr']), shape=f['shape'])\n        return z\n    \n    def create_batches(self, X, size):\n        count = int(np.round(X.shape[0]/(1.0*size),0))\n        return np.array(np.split(X,count))\n    \n    def create_sparse_weight(self, input_size, output_size, sparsity = 15): \n        rdm = np.random.RandomState(1234) \n        weight = np.zeros((input_size, output_size))\n        for axon in range(output_size): \n            idxes = rdm.randint(0,input_size, (sparsity,))\n            rdm_weights = rdm.randn(sparsity)\n            for idx, rdm_weight in zip(idxes, rdm_weights):\n                weight[idx,axon] = rdm_weight \n        return weight\n    \n    def create_uniform_rdm_weight(self,input_size,output_size):\n        rdm = np.random.RandomState(1234) \n        return rdm.uniform(low=-4*np.sqrt(6./(input_size+output_size)),\n                high=4*np.sqrt(6./(input_size+output_size)),\n                size=(input_size,output_size))\n    \n    \n    def create_t_dataset(self, y): \n        if y is not None:\n            Y = np.matrix(y)\n            Y = Y.T if Y.shape[0] == 1 else Y\n            \n            no_labels = np.max(y)\n            t = np.zeros((Y.shape[0],no_labels+1))\n            for i in range(Y.shape[0]):\n                t[i,Y[i,0]] = 1\n            \n            return t\n        else:\n            return None \n    \n    def shuffle_set(self, data_set_X, data_set_y, data_set_t):\n        n = data_set_X.shape[0]\n        rdm_idx = np.arange(0,n)\n        np.random.shuffle(rdm_idx)\n        new_X = np.zeros((data_set_X.shape))\n        new_y = np.zeros((data_set_y.shape))\n        new_t = np.zeros((data_set_t.shape))\n        for i in range(n):\n            new_X[i,:] = data_set_X[rdm_idx[i],:]\n            new_y[i] = data_set_y[rdm_idx[i]]\n            new_t[i,:] = data_set_t[rdm_idx[i],:]\n        # hand the shuffled copies back to the caller\n        return new_X, new_y, new_t\n        \n        \n    def plot_results(self, valid, train, epochs, filename):\n        # plt.hold() was removed from matplotlib; axes accumulate plots by default\n        print ('Printing result...')\n        plt.axis([0,epochs,0,0.05])\n        plt.title('Epochs: ' + str(epochs) + ', ' +'Hidden layer units: ')\n        plt.plot(range(epochs),valid,color='blue')\n        plt.plot(range(epochs),train,color='red')\n        plt.tight_layout()\n        plt.savefig(filename +'.png')\n    \n    def plot_weights(self, weight, filename):\n        print ('Printing weights...')\n        hist, bins = np.histogram(weight,bins = 50)\n        width = 0.7*(bins[1]-bins[0])\n        center = (bins[:-1]+bins[1:])/2\n        plt.bar(center, hist, align = 'center', width = width)\n        plt.savefig(filename + '.png')\n    \n    def heardEnter(self):\n        i,o,e = select.select([sys.stdin],[],[],0.0001)\n        for s in i:\n            if s == sys.stdin:\n                sys.stdin.readline()\n                return True\n        return False ","sub_path":"util_tweet.py","file_name":"util_tweet.py","file_ext":"py","file_size_in_byte":9535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"50035909","text":"# built-in libraries\nimport collections\nimport functools\nimport itertools\nimport json\nimport logging\n\n# external libraries\n# ...\n\n# internal libraries\n# ...\n\n# exports\n__all__ = (\"coroutine\", \"default\", \"object_hook\")\n\n# constants\nCLOUD = {} # image catalog\nSTONE = {} # type catalog\n\n\ndef coroutine(func):\n    @functools.wraps(func)\n    def wrapper(*args, **kwargs):\n        gen = func(*args, **kwargs)\n        next(gen)\n        return gen\n    wrapper.__name__ = func.__name__\n    wrapper.__dict__ = func.__dict__\n    wrapper.__doc__ = func.__doc__\n    return wrapper\n\n\ndef default(obj):\n    \"\"\"Return a serializable version of `object`\"\"\"\n    try:\n        return next({key: type.default(obj)}\n                    for ((key, cls), type)\n                    in STONE.items()\n                    if isinstance(obj, cls))\n    except StopIteration:\n        raise TypeError\n\n\ndef object_hook(dct):\n    \"\"\"Return value instead of the `dict`\"\"\"\n    return 
next((type.object_hook(obj)\n for ((key, cls), type)\n in STONE.items()\n if key in dct), dct)\n\n\nclass Type(collections.namedtuple\n (\"Type\", (\"default\", \"object_hook\"))):\n\n def __new__(cls, key, type, default, object_hook):\n obj = super(Type, cls).__new__(cls, default, object_hook)\n STONE[key, type] = obj\n return obj\n\n\nclass Event(object):\n __slots__ = (\"cbs\",)\n\n def __init__(self, cbs=None):\n self.cbs = cbs or []\n\n\nclass Item(collections.namedtuple\n (\"Item\", (\"data\", \"ctrl\"))):\n\n def __new__(cls, data, ctrl):\n ctrl = {key: Event()\n for key in ctrl}\n return super(Item, cls).__new__(cls, data, ctrl)\n \n\nclass Mask(collections.namedtuple\n (\"Mask\", (\"gets\", \"sets\"))):\n pass\n\n\nclass Mode(collections.namedtuple\n (\"Mode\", (\"data\", \"ctrl\"))):\n\n def __new__(cls, ins, outs, reqs, pros):\n data = Mask(reqs, pros)\n ctrl = Mask(ins, outs)\n return super(Mode, cls).__new__(cls, data, ctrl)\n\n\nclass Node(collections.namedtuple\n (\"Node\", (\"init\", \"main\"))):\n\n def __new__(cls, evs, args, ins, reqs, outs, pros):\n init = Mode(evs, (), args, ())\n main = Mode(ins, outs, reqs, pros)\n return super(Node, cls).__new__(cls, init, main)\n\n\nclass Edge(collections.namedtuple\n (\"Edge\", (\"data\", \"ctrl\"))):\n\n def __new__(cls, data=None, ctrl=None):\n data = data or {}\n ctrl = ctrl or {}\n return super(Edge, cls).__new__(cls, data, ctrl)\n \n\nclass Face(collections.namedtuple\n (\"Face\", (\"data\", \"ctrl\"))):\n\n def __new__(cls, node, edge, item):\n data = iterdata(node, edge, item)\n ctrl = iterctrl(node, edge, item)\n return super(Face, cls).__new__(cls, data, ctrl)\n\n@coroutine\ndef iterdata(node, edge, item):\n pros = yield\n \n # XXX takes advantage of there currently being no `data` provided\n # ... while in `init` mode\n mode = node.init\n if len(mode.data.gets) > 0:\n reqs = (logging.debug(\"get data: %s\", key)\n or item.data.get(edge.data.get(key, key))\n for key in mode.data.gets)\n pros = yield reqs\n \n mode = node.main\n while True:\n if len(mode.data.gets) > 0:\n reqs = (logging.debug(\"get data: %s\", key)\n or item.data.get(edge.data.get(key, key))\n for key in mode.data.gets)\n pros = yield reqs\n \n if len(mode.data.sets) > 0:\n item.data.update({edge.data.get(key, key):\n logging.debug(\"set data: %s=%s\", key, pro)\n or pro\n for key, pro\n in zip(mode.data.sets, pros)\n if pro is not None}\n if pros is not None\n else {})\n pros = yield\n\n\n@coroutine\ndef iterctrl(node, edge, item):\n yield\n \n # XXX takes advantage of there currently being no `ctrl` output\n # ... 
while in `init` mode\n mode = node.init\n ins = (logging.debug(\"get ctrl: %s\", key)\n or item.ctrl.get(edge.ctrl.get(key, key))\n for key in mode.ctrl.gets)\n outs = yield ins # always called\n \n mode = node.main\n while True:\n if len(mode.ctrl.gets) > 0:\n ins = (logging.debug(\"get ctrl: %s\", key)\n or item.ctrl.get(edge.ctrl.get(key, key))\n for key in mode.ctrl.gets)\n outs = yield ins\n \n if len(mode.ctrl.sets) > 0:\n evs = (((logging.debug(\"set ctrl: %s=%s\", key, out)\n or item.ctrl.get(edge.ctrl.get(key, key)), out)\n for key, out in zip(mode.ctrl.sets, outs)\n if out is not None)\n if outs is not None\n else ())\n outs = yield evs\n\n \nclass Task(collections.namedtuple\n (\"Task\", (\"p\", \"gen\"))):\n pass\n\n\nclass Image(object):\n __slots__ = (\"tag\", \"nodes\", \"proc\")\n\n def __init__(self, tag, **nodes):\n self.tag = tag\n self.nodes = nodes\n\n def __call__(self, func):\n func = coroutine(func)\n \n @coroutine\n @functools.wraps(func)\n def wrapper(**args):\n yield\n try:\n logging.debug(\"exec %s init\", self.tag)\n gen = func(**args) # create generator\n evs = yield\n while True:\n logging.debug(\"exec %s main\", self.tag)\n yield gen.send(evs)\n except StopIteration:\n return\n finally:\n pass\n \n self.proc = wrapper\n CLOUD[self.tag] = self\n return wrapper\n\n\ndef run(task, model):\n img = CLOUD[task[\"tag\"]]\n faces = {arg: Face(node,\n Edge(**task[\"maps\"].get(arg, {})),\n model[task[\"keys\"][arg]])\n for (arg, node)\n in img.nodes.items()}\n gen = img.proc(**faces)\n obj = Task(task[\"p\"], gen)\n any(ev.cbs.append(obj)\n for face in faces.values()\n for ev in next(face.ctrl) or ())\n return obj\n\n\n","sub_path":"ouroboros/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"564423425","text":"# -*- coding: utf-8 -*-\nfrom argh.decorators import arg\n\nimport lain_sdk.mydocker as docker\nfrom lain_cli.utils import check_phase, get_domain, lain_yaml, ClusterConfig\nfrom lain_sdk.util import error, info\n\n\n@arg('phase', help=\"lain cluster phase id, can be added by lain config save\")\n@arg('-r', '--registry', help='registry url')\ndef tag(phase, registry=None):\n \"\"\"\n Tag release and meta images\n \"\"\"\n\n check_phase(phase)\n params = dict(name=phase)\n if registry is not None:\n params['registry'] = registry\n cluster_config = ClusterConfig(**params)\n info(\"Taging meta and relese image ...\")\n yml = lain_yaml(ignore_prepare=True)\n meta_version = yml.meta_version\n if meta_version is None:\n error(\"please git commit.\")\n return None\n meta_tag = \"%s:meta-%s\" % (yml.appname, meta_version)\n release_tag = \"%s:release-%s\" % (yml.appname, meta_version)\n phase_meta_tag = docker.gen_image_name(yml.appname, 'meta', meta_version, cluster_config.registry)\n phase_release_tag = docker.gen_image_name(yml.appname, 'release', meta_version, cluster_config.registry)\n meta_code = docker.tag(meta_tag, phase_meta_tag)\n release_code = docker.tag(release_tag, phase_release_tag)\n if meta_code or release_code:\n error(\"Error lain tag.\")\n else:\n info(\"Done lain tag.\")\n","sub_path":"lain_cli/tag.py","file_name":"tag.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"10147805","text":"from pypi_notifier import db\n\n\nclass ModelMixin(object):\n\n @classmethod\n def get_or_create(cls, **kwargs):\n instance = 
db.session.query(cls).filter_by(**kwargs).first()\n if not instance:\n instance = cls(**kwargs)\n return instance\n","sub_path":"pypi_notifier/models/mixin.py","file_name":"mixin.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"501188055","text":"# Copyright 2016 Jon Wayne Parrott\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport platform\nimport shutil\n\nfrom nox.command import Command\nfrom nox.logger import logger\n\n\nclass ProcessEnv(object):\n \"\"\"A environment with a 'bin' directory and a set of 'env' vars.\"\"\"\n\n def __init__(self, bin=None, env=None):\n self._bin = bin\n self.env = os.environ.copy()\n\n if env is not None:\n self.env.update(env)\n\n if self.bin:\n self.env['PATH'] = ':'.join([self.bin, self.env.get('PATH')])\n\n @property\n def bin(self):\n return self._bin\n\n def run(self, args, in_venv=True):\n \"\"\"Runs a command. By default, the command runs within the\n environment.\"\"\"\n return Command(\n args=args,\n env=self.env if in_venv else None,\n silent=True,\n path=self.bin if in_venv else None).run()\n\n\nclass VirtualEnv(ProcessEnv):\n \"\"\"Virtualenv management class.\"\"\"\n\n def __init__(self, location, interpreter=None, reuse_existing=False):\n self.location = os.path.abspath(location)\n self.interpreter = interpreter\n self.reuse_existing = reuse_existing\n super(VirtualEnv, self).__init__()\n\n def _clean_location(self):\n \"\"\"Deletes any existing virtualenv\"\"\"\n if os.path.exists(self.location):\n if self.reuse_existing:\n return False\n else:\n shutil.rmtree(self.location)\n\n return True\n\n @property\n def bin(self):\n \"\"\"Returns the location of the virtualenv's bin folder.\"\"\"\n if platform.system() == 'Windows':\n return os.path.join(self.location, 'Scripts')\n else:\n return os.path.join(self.location, 'bin')\n\n def create(self):\n \"\"\"Create the virtualenv.\"\"\"\n if not self._clean_location():\n logger.debug('Re-using existing virtualenv.')\n return False\n\n cmd = ['virtualenv', self.location]\n\n if self.interpreter:\n cmd.extend(['-p', self.interpreter])\n\n self.run(cmd, in_venv=False)\n\n return True\n\n def install(self, *args):\n self.run(('pip', 'install', '--upgrade') + args)\n","sub_path":"nox/virtualenv.py","file_name":"virtualenv.py","file_ext":"py","file_size_in_byte":2726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"325302208","text":"'''\nProblem #4 [Hard]: Given an array of integers, find the first missing positive integer in linear time and constant space. \nIn other words, find the lowest positive integer that does not exist in the array. \nThe array can contain duplicates and negative numbers as well.\n\nFor example, the input [3, 4, -1, 1] should give 2. 
The input [1, 2, 0] should give 3.\n\nYou can modify the input array in-place.\n'''\n\n'''\nWithout the linear-time constraint, we could just sort the array while filtering out negative numbers, \nthen iterate over the sorted array and return the first number that doesn't match its index. \nHowever, sorting takes O(n log n), so we can't use that here.\n\nAnother way to solve this problem is to add all the numbers to a set and then use a counter initialized to 1: \nkeep incrementing the counter until its value is missing from the set.\nThis is much simpler, but runs in O(N) time and space, whereas the \"first_missing_positive.py\" algorithm uses no extra space.\n'''\n\ndef first_missing_positive(nums):\n    s = set(nums)\n    print(\"current set = \", s)\n    i = 1\n    while i in s:\n        i += 1\n    return i\n\n\nv1 = [3, 4, -1, 1]\nprint(\"first_missing_positive([3, 4, -1, 1]) = \", first_missing_positive(v1), \"\\n\")\n\nv2 = [1, 2, 0]\nprint(\"first_missing_positive([1, 2, 0]) = \", first_missing_positive(v2), \"\\n\")\n\n\nv3 = [1, 2, 3, 4]\nprint(\"first_missing_positive([1, 2, 3, 4]) = \", first_missing_positive(v3), \"\\n\")\n","sub_path":"solutions_for_coding_problems/1-25/first_missing_positive_using_set.py","file_name":"first_missing_positive_using_set.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"410511917","text":"###\n### TEAM 80 - OMNIVIDA\n### Generic functions for data transformation\n###\n\nimport pandas as pd\n\n### The following function loads the table for module *modulo* from path *ruta*.\n# It uses a dictionary *diccionario* which holds the names of the id and date columns.\n# It also computes the variables 'year', 'month' and 'year_month', which will be used in the monthly consolidation.\n# Returns: dataframe with renamed keys\n# Author: monicarodguev\ndef carga_datos( ruta, diccionario, modulo ) :\n    df = pd.read_excel( ruta + modulo + '.xlsx')\n    df.rename(columns={diccionario[modulo]['id']:'id', diccionario[modulo]['fecha']:'fecha'}, inplace=True)\n    df[ 'fecha' ] = pd.to_datetime(df[\"fecha\"])\n    df['year'] = df[ 'fecha' ].apply( lambda x : x.year )\n    df['month'] = df[ 'fecha' ].apply( lambda x : x.month )\n    df['year_month'] = df[ 'fecha' ].apply( lambda x : x.year * 100 + x.month )\n\n    return df\n\n\n### The following function gets the first character of the specified column of a dataframe.\n# Returns: dataframe with a new code column\n# Author: monicarodguev\ndef letra_codigo( df, columna ):\n    df[ columna + '_cod' ] = df[ columna ].apply( lambda x : str(x)[0].lower() )\n    return df\n\n\n### The following function lower-cases every column whose dtype matches that of the given string column.\n# Returns: dataframe with lower-cased string columns\n# Author: monicarodguev\ndef letra_lower( df, columna ):\n    a = df[columna].dtype\n    for col in df.columns:\n        if df[col].dtype == a:\n            df[col] = df[col].str.lower()\n    return df\n\n\n\n### This function gets the "initial" table. 
That is, all the ids crossed with every period from 201601 to 202012\n# Returns: dataframe with id, year and month columns\n# Author: monicarodguev\ndef base_ids_mensual( ruta ):\n    # All ids\n    ids = pd.read_excel( ruta + 'Datos basicos.xlsx')\n    ids.drop_duplicates( subset = 'ID', inplace= True )\n    ids.rename(columns={'ID':'id'}, inplace=True)\n\n    # All periods\n    dy = pd.DataFrame.from_dict( {'year': list(range(2016,2021))} )\n    dm = pd.DataFrame.from_dict({'month': list(range(1,13) )})\n\n    # Cross join\n    ids['key'] = 1\n    dy['key'] = 1\n    dm['key'] = 1\n\n    ndf = ids.merge(dy, on ='key').merge(dm, on ='key')[['id','year','month']]\n    return ndf\n\n### This function returns a dictionary with key information about the tables\n# Returns: dictionary with the id and date column names of every table\n# Author: monicarodguev\ndef diccionario_llaves():\n    dccio = {\n        'ACT' : { 'id': 'ID', 'fecha': 'FE_RESULTADO', 'fecha_no_ok': False, 'prefi':'act' },\n        'ACT_DESAGREGADO' : { 'id': 'NUMERO IDENTIFICACION', 'fecha': 'FE_RESULTADO', 'fecha_no_ok': False, 'prefi':'acd' },\n        'Adherencia' : { 'id': 'ds_identificacion', 'fecha': 'FE_ENTREVISTA', 'fecha_no_ok': False, 'prefi':'adh' },\n        'Antecedentes_familiares' : { 'id': 'Id', 'fecha': 'FE_ALTA', 'fecha_no_ok': False, 'prefi':'anf' },\n        'Antecedentes_patologicos' : { 'id': 'DS_IDENTIFICACION', 'fecha': 'FE_ACTUALIZA' , 'fecha_no_ok': False, 'prefi':'ant' },\n        'Ayudas_diagnosticas' : { 'id': 'Numero_Identificacion', 'fecha': 'Fecha_Orden', 'fecha_no_ok': False, 'prefi':'ayu' },\n        'Biologicos Asma' : { 'id': 'Identificacion', 'fecha': 'Fecha_Dcto', 'fecha_no_ok': False, 'prefi':'bio' },\n        'Calidad de vida relacioada en salud' : { 'id': 'Identificacion', 'fecha': 'FE_ALTA', 'fecha_no_ok': False, 'prefi':'cal' },\n        #'Datos basicos' : { 'id': 'ID', 'fecha': '', 'fecha_no_ok': False, 'prefi': },\n        'Disnea' : { 'id': 'id', 'fecha': 'FE_ALTA', 'fecha_no_ok': False, 'prefi':'epo' },\n        'Farmacovigilancia RAM' : { 'id': 'NRO_IDENTIFICACION', 'fecha':'FECHA_NOTIFICACION' , 'fecha_no_ok': False, 'prefi':'far' },\n        'Habitos' : { 'id': 'DS_IDENTIFICACION', 'fecha': 'Fe_Registro', 'fecha_no_ok': False, 'prefi':'hab' },\n        'Hospitalizaciones' : { 'id': 'Id', 'fecha': 'Fecha Ingreso', 'fecha_no_ok': False, 'prefi':'hos' },\n        'Incosistencias en reclamacion' : { 'id':'IDENTIFICACIÓN' , 'fecha':'FE_REGISTRO' , 'fecha_no_ok': True, 'formato_fecha': '%Y-%m-%d', 'prefi':'inc' },\n        'Medicamentos' : { 'id':'Id' , 'fecha': 'Fecha_Emision', 'fecha_no_ok': False, 'prefi':'med' },\n        'Mediciones de peso y talla' : { 'id':'DS_IDENTIFICACION' , 'fecha': 'FE_alta' , 'fecha_no_ok': False, 'prefi':'imc' },\n        'Urgencias' : { 'id':'Numero_Identificacion' , 'fecha':'Fecha_Emision' , 'fecha_no_ok': False, 'prefi':'urg' },\n        'Vacunacion' : { 'id':'Numero_de_documento' , 'fecha':'Fecha_Emision' , 'fecha_no_ok': False, 'prefi':'vac' }\n    }\n    return dccio\n\n\n### This function returns a dictionary with the aggregation functions that need to be used to compute the features\n# Returns: dictionary of variables per aggregation function\n# Author: monicarodguev\ndef diccionario_agg_functions():\n    dccio = {\n        'far' : { 'mod' : 'Farmacovigilancia RAM',\n              'sum' : ['far_rea_asma', 'far_rea_total','far_evo_aun','far_cau_def', 'far_cau_otra','far_gra_leve','far_gra_mod'],\n              'flag' : ['far_rea_asma', 'far_rea_total','far_rea_asma', 'far_rea_total','far_rea_asma', 'far_rea_total','far_evo_con','far_des_si','far_des_no','far_mis_si'],\n              'foto' : ['far_rea_asma', 'far_rea_total']\n            },\n\n        'hos' : { 'mod' : 'Hospitalizaciones',\n              'sum' : 
['hos_num','hos_num_j','hos_uci','hos_uci_j','hos_uce','hos_uce_j','hos_est','hos_est_j']\n            },\n\n        'med' : { 'mod' : 'Medicamentos',\n              'sum' : ['med_num_dis','med_flag_j','med_flag_otra'],\n              'avg' : ['med_num_doses_j', 'med_num_doses_otra'],\n              'flag' : ['med_flag_j','med_flag_otra'],\n              'var' : ['med_num_doses_j', 'med_num_doses_otra']\n            },\n\n        'urg' : { 'mod' : 'Urgencias',\n              'sum' : ['urg_urg', 'urg_total','urg_j_urg', 'urg_j_total']\n            },\n\n        'vac' : { 'mod' : 'Vacunacion',\n              'sum' : ['vac_cant'],\n              'flag' : ['vac_cant'],\n            },\n\n        'cal' : { 'mod' : 'Calidad de vida relacioada en salud',\n              'avg' : ['cal_ent','cal_psi', 'cal_rel', 'cal_fis'],\n              'var' : ['cal_ent','cal_psi', 'cal_rel', 'cal_fis'],\n              'foto' : ['cal_ent','cal_psi', 'cal_rel', 'cal_fis'],\n            },\n        'act' : { 'mod' : 'ACT',\n              'avg' : ['act_punt_control'],\n              'var' : ['act_punt_control'],\n            },\n\n        'acd' : { 'mod' : 'ACT_DESAGREGADO',\n              'avg' : ['acd_cont_asma','acd_ef_act_usu', 'acd_ef_sueno', 'acd_falta_aire','acd_uso_inhal'],\n              'var' : ['acd_cont_asma','acd_ef_act_usu', 'acd_ef_sueno', 'acd_falta_aire','acd_uso_inhal'],\n            },\n\n        'anf' : { 'mod' : 'Antecedentes_familiares',\n              'sum' : ['anf_j','anf_otra'],\n            },\n\n        'ant' : { 'mod' : 'Antecedentes_patologicos',\n              'sum' : ['ant_j','ant_otra'],\n            },\n\n        'ayu' : { 'mod' : 'Ayudas_diagnosticas',\n              'avg' : ['ayu_exa'],\n            },\n\n        'bio' : { 'mod' : 'Biologicos Asma',\n              'sum' : ['bio_veces'],\n              'avg' : ['bio_benralizumab', 'bio_dupilumab', 'bio_mepolizumab', 'bio_omalizumab', 'bio_total'],\n            },\n\n        'epo' : { 'mod' : 'Disnea',\n              'sum' : ['epo_1', 'epo_2', 'epo_3', 'epo_4', 'epo_total'],\n            },\n\n        'hab' : { 'mod' : 'Habitos',\n              'avg' : ['hab_alcohol', 'hab_cigarrillo', 'hab_ejercicio'],\n              'var' : ['hab_alcohol', 'hab_cigarrillo', 'hab_ejercicio'],\n            },\n\n        'inc' : { 'mod' : 'Incosistencias en reclamacion',\n              'sum' : ['inc_con', 'inc_inc'],\n            },\n\n        'imc' : { 'mod' : 'Mediciones de peso y talla',\n              'avg' : ['imc'],\n              'foto' : ['imc'],\n            },\n    }\n    return dccio\n","sub_path":"data_transformation/borradores/generic_funcions.py","file_name":"generic_funcions.py","file_ext":"py","file_size_in_byte":7980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"177012480","text":"N = int(input())\nA = list(map(int, input().split()))\n\n\"\"\"TLE\n\nfrom collections import Counter\n\nc = Counter(A)\n\ncounts = []\nfor num, count in c.most_common():\n    num_around_count = 0\n    for a in A:\n        if a+1 == num or a == num or a-1 == num:\n            num_around_count += 1\n    counts.append((num, num_around_count))\n\ncounts.sort(key = lambda a:a[1])\nprint(counts[-1][1])\n\"\"\"\n\ncount = [0 for i in range(100000+10)]\nfor a in A:\n    count[a+1] += 1\n    count[a] +=1\n    if a >= 1:\n        count[a-1] +=1\n\nprint(max(count))\n","sub_path":"80-100/82c.py","file_name":"82c.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"113269866","text":"\nclass DictUtils:\n\n    @staticmethod\n    def inverse_dict(dicti, value):\n        try:\n            keys = list(dicti.keys())\n            values = list(dicti.values())\n            index = values.index(value)\n            return keys[index]\n        except Exception as e:\n            return 
None\n","sub_path":"MetaDataApi/metadata/utils/common_utils/dict_utils.py","file_name":"dict_utils.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"41922044","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n#-----------------------------------------------------------------------------\n# Name: Script phân loại file theo định dạng và dung lượng\n#\n# Purpose:\n#\n# Version: 1.1\n#\n# Author:\n#\n# Created: 06/02/2020\n# Updated: 06/02/2020\n#\n# Copyright: -\n#\n#-----------------------------------------------------------------------------\n\"\"\"\n\n#Import lib\nimport os, re, fnmatch, shutil\nfrom datetime import datetime\nimport pyunpack #pip install pyunpack\n\nclass AnalyzeFolder:\n\n def __init__(self, input_folder, output_folder, pattern, size_group, limit_size_copy = -1):\n self.input_folder = os.path.normpath(input_folder)\n self.output_folder = os.path.normpath(output_folder)\n self.pattern = pattern\n self.size_group = size_group\n self.size_group.sort()\n self.limit_size_copy = limit_size_copy\n \n if not os.path.exists(self.input_folder):\n print(self.input_folder)\n print(\"Folder input not exists\")\n exit()\n\n if not os.path.exists(self.output_folder):\n os.makedirs(self.output_folder)\n\n #Check size file and create folder\n def get_file_size(self, file_path):\n return os.path.getsize(file_path)\n\n #Get list file\n def prepare_data_file(self, target_files, nontarget_files):\n print(\"Prepare data file : Waiting\")\n try:\n for path, subdirs, files in os.walk(self.input_folder):\n for name in files:\n file_path = os.path.join(path, name)\n if re.search(self.pattern, name):\n size_byte = self.get_file_size(file_path)\n size_mb = size_byte/1024/1024 #Convert Byte to Megabyte\n target_files[file_path] = size_mb\n print(\"Prepare data file : Indexing {0}\".format(file_path))\n else:\n nontarget_files[file_path] = \"-\"\n print(\"Prepare data file : Complete\")\n return True\n except:\n return False\n\n def create_and_copy_file(self, dicFile):\n print(\"Copy file : Waiting\")\n limit_copy = self.limit_size_copy * 1024 * 1024 #Byte\n current_copy = 0 #Byte\n for file, size in dicFile.items():\n size_byte = size * 1024 * 1024 #Byte\n iter_size_group = iter(self.size_group)\n next(iter_size_group)\n for limit in size_group:\n limit_end = next(iter_size_group, \"\")\n #print(\"-- {0} ~ {1}\".format(limit, limit_end))\n if size >= limit and (limit_end == \"\" or size < limit_end):\n\n #Check limited copy setting\n if limit_copy > 0 and (limit_copy - size_byte) > 0:\n limit_copy = limit_copy - size_byte\n current_copy = current_copy + size_byte\n #print(limit_copy)\n elif self.limit_size_copy != -1:\n print(\"Stop copy because the next file is {2}MB, but setting copy limit is {0}/{1}MB\".format(round(current_copy/1024/1024, 0), self.limit_size_copy, round(size, 0)))\n print(file)\n exit()\n\n org_file_name = os.path.basename(file)\n filename, file_extension = os.path.splitext(org_file_name)\n \n #Create folder storage\n folder_name = \"Size {0}MB ~ {1}MB\".format(limit, limit_end)\n if limit_end == \"\":\n folder_name = \"Size {0}MB ~\".format(limit) \n folder_path = os.path.join(self.output_folder, folder_name)\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n\n #Create folder extension\n if file_extension == \"\":\n file_extension = \"non_extension_files\"\n folder_path = os.path.join(folder_path, file_extension)\n if not 
os.path.exists(folder_path):\n                        os.makedirs(folder_path)\n\n                    #Copy file\n                    random_file_name = \"File_\" + datetime.now().strftime('%Y%m%d%H%M%S%f')+ \"_(Duplicate with {0})\".format(org_file_name) + file_extension\n                    file_dist_random_path = os.path.join(folder_path, random_file_name)\n                    file_dist_org_path = os.path.join(folder_path, org_file_name)\n\n                    folder_extract_to = \"\"\n                    if os.path.exists(file):\n                        if not os.path.exists(file_dist_org_path):\n                            shutil.copyfile(file, file_dist_org_path)\n                            folder_extract_to = file_dist_org_path\n                            print(\"Created file {0}\".format(file_dist_org_path))\n                        else:\n                            shutil.copyfile(file, file_dist_random_path)\n                            folder_extract_to = file_dist_random_path\n                            print(\"Created file {0}\".format(file_dist_random_path))\n\n                    #if file_extension == \".zip\":\n                    filename, file_extension = os.path.splitext(folder_extract_to)\n                    folder_extract_to = filename\n                    if not os.path.exists(folder_extract_to):\n                        os.makedirs(folder_extract_to)\n                    try:\n                        #with zipfile.ZipFile(file, 'r') as zip_ref:\n                        #    zip_ref.extractall(folder_extract_to)\n                        pyunpack.Archive(file).extractall(folder_extract_to)\n                        print(\"Extracted archive : {0}\".format(file))\n                    except:\n                        os.rmdir(folder_extract_to)\n                        print(\"Extract failed : {0}\".format(file))\n                    \n                    break\n        print(\"Copy file : Complete\")\n\n    \n    def analyze(self):\n        target_files = {}\n        nontarget_files = {}\n        result_get_list = self.prepare_data_file(target_files, nontarget_files)\n\n        if result_get_list == True:\n            #print(target_files)\n            #print(nontarget_files)\n            self.create_and_copy_file(target_files)\n        else:\n            print(\"An error occurred.\")\n\n#Start application\nfolder_input = \"X:\\\\01-WIN10-TMP\\\\Desktop\\\\testPY\\\\in\"\nfolder_output = \"X:\\\\01-WIN10-TMP\\\\Desktop\\\\testPY\\\\out\"\npattern_file = \"^.*.*$\"\nsize_group = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 20, 30, 40, 50, 100, 200, 400, 500, 1000] #MB\nlimit_copy_size = -1 #MB (-1 is unlimited)\ntest = AnalyzeFolder(folder_input, folder_output, pattern_file, size_group, limit_copy_size)\ntest.analyze()\nexit()\n","sub_path":"File_Filter.py","file_name":"File_Filter.py","file_ext":"py","file_size_in_byte":6963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"626635507","text":"#!/usr/bin/env python3\nimport os\nimport json\nimport difflib\nfrom typing import Dict, List\n\ncwd = os.getcwd()\nmisc_file = cwd + '/json/character_misc.json'\n\nshortcuts = {\n    'cd' : 'fnddf',\n    'wr' : 'fff',\n    'hcf' : 'bdbddff',\n    'hcb' : 'fdfddbb',\n    'qcf' : 'ddff',\n    'qcb' : 'ddbb'\n}\n\ndef get_character(char_name : str):\n    contents = None\n\n    with open(misc_file) as char_misc_file:\n        contents = char_misc_file.read()\n        contents = json.loads(contents)\n\n    if contents != None:\n        char_details = filter_dictionary('name', char_name, contents)\n\n        if char_details:\n            return char_details\n        else:\n            names = dict_key_to_list('name', contents)\n\n            guessed_char = difflib.get_close_matches(char_name, names, n=2, cutoff=0.4)\n\n            if guessed_char:\n                guessed_char = guessed_char[0]\n\n                to_return_char = filter_dictionary('name', guessed_char, contents)\n\n                if to_return_char:\n                    return to_return_char\n                else:\n                    return None\n            else:\n                return None\n\n    return None\n\ndef get_move(character_json : str, char_move : str) -> Dict[str, str]:\n    char_move_list = None\n    char_json = cwd + '/json/' + character_json\n    \n    with open(char_json, 'r', encoding=\"utf8\") as char_json_file:\n        char_move_list = char_json_file.read()\n        char_move_list = 
json.loads(char_move_list)\n\n if char_move_list != None:\n if char_move == \"ra\":\n to_return_move = filter_dictionary('Name', 'Rage Art', char_move_list)\n return to_return_move\n\n if char_move == \"rd\":\n to_return_move = filter_dictionary('Name', 'Rage Drive', char_move_list)\n return to_return_move\n\n char_move = replace_shortcuts(char_move)\n\n to_return_move = filter_dictionary('Command', char_move, char_move_list)\n\n if to_return_move:\n return to_return_move\n else:\n move_inputs = dict_key_to_list('Command', char_move_list)\n\n guessed_move = difflib.get_close_matches(char_move, move_inputs, n=2, cutoff=0.4)\n\n if guessed_move:\n guessed_move = guessed_move[0]\n\n to_return_move = filter_dictionary('Command', guessed_move, char_move_list)\n\n if to_return_move:\n return to_return_move\n else:\n return None\n else:\n return None\n\ndef filter_dictionary(to_compare_key : str, to_compare_value : str, dictionary : List[dict]) -> Dict[str, str]:\n to_return_item : dict = None\n value_split = to_compare_value.split(' ')\n\n # First check if move is equal to move list item, then if move is contained in move list item\n for item in dictionary:\n item_clean = item[to_compare_key].lower().strip().replace(' ', '')\n \n if len(value_split) == 1:\n value_clean = to_compare_value.lower().strip().replace(' ', '')\n\n if item_clean == value_clean:\n to_return_item = item\n break\n else:\n value_clean = [value.lower().strip().replace(' ','') for value in value_split]\n\n if all(value in item_clean for value in value_clean):\n to_return_item = item\n break\n\n if to_return_item != None:\n return to_return_item\n else:\n for item in dictionary:\n item_clean = item[to_compare_key].lower().strip().replace(' ', '')\n\n if len(value_split) == 1:\n value_clean = to_compare_value.lower().strip().replace(' ', '')\n\n if value_clean in item_clean:\n to_return_item = item\n break\n else:\n value_clean = [value.lower().strip().replace(' ','') for value in value_split]\n\n if all(value in item_clean for value in value_clean):\n to_return_item = item\n break\n \n return to_return_item\n\ndef replace_shortcuts(char_move : str) -> str:\n for key, item in shortcuts.items():\n if char_move == key or char_move.__contains__(key):\n return char_move.replace(key, item)\n return char_move\n\ndef dict_key_to_list(key : str, dictionary : Dict[str, str]) -> List[str]:\n to_return_list = []\n for item in dictionary:\n to_add_item = item[key].lower().strip().replace(' ', '')\n to_return_list.append(to_add_item)\n\n return to_return_list","sub_path":"infofinder.py","file_name":"infofinder.py","file_ext":"py","file_size_in_byte":4547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"583401573","text":"import time\n\nfrom utils_py3_tfrecord import *\nfrom model_database_DnCNN import *\n\nclass denoiser(object):\n def __init__(self, sess, optimizer='Adam',input_c_dim=3, batch_size=64, patch_size=160):\n self.sess = sess\n self.input_c_dim = input_c_dim\n self.Y_ = tf.placeholder(tf.float32, [None, None, None, self.input_c_dim], name='GroundTruth') # ground truth\n self.X = tf.placeholder(tf.float32, [None, None, None, self.input_c_dim], name='BilinearInitialization') # input of the network\n self.is_training = tf.placeholder(tf.bool, name='is_training')\n self.lr = tf.placeholder(tf.float32, name='learning_rate')\n \n self.Y = subpixel_new(self.X, is_training=self.is_training)\n #loss has to be mean squared error\n self.lossRGB = (1.0 /batch_size / patch_size / 
patch_size) * tf.nn.l2_loss(self.Y_ -self.Y)\n self.reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n self.loss = tf.add_n([self.lossRGB] + self.reg_losses)\n\n self.eva_psnr = tf_psnr(self.Y, self.Y_)\n if optimizer=='Adam':\n optimizer = tf.train.AdamOptimizer(self.lr, name='AdamOptimizer')\n\n #SGD + momentum \n elif optimizer=='SGD':\n #optimizer = tf.keras.optimizers.SGD(self.lr, momentum=0.9, decay=0.0001)\n #optimizer= tf.train.GradientDescentOptimizer(self.lr, name='GradientDescent')\n optimizer=tf.train.MomentumOptimizer(self.lr, momentum=0.9)\n\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) # for BN?\n with tf.control_dependencies(update_ops):\n self.train_op = optimizer.minimize(self.loss)\n init = tf.global_variables_initializer()\n self.sess.run(init)\n print(\"[*] Initialize model successfully...\")\n\n def evaluate(self, iter_num, test_data_gt, test_data_bl, sample_dir, summary_merged, summary_writer):\n print(\"[*] Evaluating...\")\n psnr_sum = 0\n time_sum = 0\n for idx in range(len(test_data_gt)):\n clean_image = test_data_gt[idx].astype(np.float32)\n bayer_image = test_data_bl[idx].astype(np.float32)\n _, w, h, _ = clean_image.shape\n clean_image = clean_image[:, 0:w//2*2, 0:h//2*2, :]\n bayer_image = bayer_image[:, 0:w//2*2, 0:h//2*2, :]\n val_st_time = time.time()\n output_clean_image, noisy_image, psnr_summary = self.sess.run([self.Y, self.X, summary_merged], feed_dict={self.Y_: clean_image, self.X: bayer_image, self.is_training: False})\n val_time = time.time()-val_st_time\n groundtruth = np.clip(clean_image, 0, 255).astype('uint8')\n noisyimage = np.around(np.clip(noisy_image, 0, 255)).astype('uint8')\n outputimage = np.around(np.clip(output_clean_image, 0, 255)).astype('uint8')\n psnr = imcpsnr(groundtruth, outputimage, 255, 10)\n print(\"img%d PSNR: %.2f Time: %.2fs\" % (idx + 1, psnr, val_time))\n psnr_sum += psnr\n time_sum += val_time\n save_images(os.path.join(sample_dir, 'test%d_%d_%.2f.png' % (idx + 1, iter_num, psnr)), groundtruth, noisyimage, outputimage)\n summary_writer.add_summary(psnr_summary, iter_num)\n avg_psnr = psnr_sum / len(test_data_gt)\n print(\"--- Validation ---- Average PSNR %.2fdB , Running Time %.2fs---\" % (avg_psnr, time_sum))\n avg_psnr_summary = tf.Summary(value=[tf.Summary.Value(tag='Average PSNR', simple_value=avg_psnr)])\n summary_writer.add_summary(avg_psnr_summary, iter_num)\n\n def train(self, img_labelBatch, img_bayerBatch, eval_data_gt, eval_data_bl, batch_size , ckpt_dir, lr, sample_dir, eval_every_step):\n # load pretrained model\n numStep = len(lr)\n load_model_status, global_step = self.load(ckpt_dir)\n if load_model_status:\n iter_num = global_step\n print(\"[*] Model restore success!\")\n else:\n iter_num = 0\n print(\"[*] Not find pretrained model!\")\n # make summary\n with tf.variable_scope('Loss'):\n tf.summary.scalar('Overall_loss', self.loss)\n tf.summary.scalar('Stage2_lossRGB', self.lossRGB)\n tf.summary.scalar('lr', self.lr)\n for var in tf.trainable_variables():\n tf.summary.histogram(var.name, var)\n train_writer = tf.summary.FileWriter('./logs/train', self.sess.graph)\n merged_train = tf.summary.merge_all()\n val_writer = tf.summary.FileWriter('./logs/val')\n val_summary_psnr = tf.summary.scalar('eva_psnr', self.eva_psnr)\n print(\"[*] Start training, with start iter %d : \" % (iter_num))\n self.evaluate(iter_num, eval_data_gt, eval_data_bl, sample_dir=sample_dir, summary_merged=val_summary_psnr, summary_writer=val_writer)\n coord = tf.train.Coordinator()\n threads = 
tf.train.start_queue_runners(sess=self.sess,coord=coord)\n try:\n for step in range(iter_num,numStep):\n if coord.should_stop():\n break\n start_time = time.time()\n label_batch, bayer_batch = self.sess.run([img_labelBatch, img_bayerBatch])#get mini-batch\n _, loss, train_summary = self.sess.run([self.train_op, self.loss, merged_train], feed_dict={self.Y_: label_batch, self.X: bayer_batch, self.lr: lr[step], self.is_training: True})\n print(\"Training: [%4d/%4d] Speed: %4.2fimgs/s, loss: %.6f\" % (step + 1, numStep, batch_size/(time.time() - start_time), loss))\n iter_num += 1\n #was set to 1000\n if np.mod(step+1, eval_every_step) == 0:\n train_writer.add_summary(train_summary, iter_num)\n if np.mod(step + 1, eval_every_step) == 0:# save check points, evaluation\n self.evaluate(iter_num, eval_data_gt, eval_data_bl, sample_dir=sample_dir, summary_merged=val_summary_psnr, summary_writer=val_writer)\n self.save(iter_num, ckpt_dir)\n except tf.errors.OutOfRangeError:\n print('epoch limit reached')\n coord.request_stop()\n finally:\n coord.request_stop()\n coord.join(threads)\n train_writer.close()\n val_writer.close()\n print(\"[*] Finish training.\")\n\n def test(self, test_files_gt, test_files_bl, ckpt_dir, save_dir):\n # init variables\n tf.global_variables_initializer().run()\n assert len(test_files_gt) != 0, 'No testing data!'\n load_model_status, global_step = self.load(ckpt_dir)\n assert load_model_status == True, '[!] Load weights FAILED...'\n print(\"[*] Load weights SUCCESS...\")\n for run in range(1): # for accurate running time evaluation, warming-up\n psnr_sum = 0\n psnr_initial_sum = 0\n test_sum = 0\n for idx in range(len(test_files_gt)):\n imagename = os.path.basename(test_files_gt[idx])\n clean_image = load_images(test_files_gt[idx]).astype(np.float32)\n _, w, h, _ = clean_image.shape\n clean_image_crop = clean_image[:, 0:w//2*2, 0:h//2*2, :]\n image_bayer = load_images(test_files_bl[idx]).astype(np.float32)\n image_bayer_crop = image_bayer[:, 0:w//2*2, 0:h//2*2, :]\n test_s_time = time.time()\n output_clean_image = self.sess.run(self.Y, feed_dict={self.Y_: clean_image_crop, self.X: image_bayer_crop, self.is_training: False})\n test_time = time.time()-test_s_time\n\n if np.mod(w,2):\n output_clean_image = np.pad(output_clean_image, pad_width=((0,0),(0,1),(0,0),(0,0)), mode='symmetric')\n if np.mod(h,2):\n output_clean_image = np.pad(output_clean_image, pad_width=((0,0),(0,0),(0,1),(0,0)), mode='symmetric')\n \n groundtruth = np.clip(clean_image, 0, 255).astype('uint8')\n noisyimage = np.around(np.clip(image_bayer, 0, 255)).astype('uint8')\n outputimage = np.around(np.clip(output_clean_image, 0, 255)).astype('uint8')\n psnr_bilinear = imcpsnr(groundtruth, noisyimage, 255, 10)\n csnr_bilinear = impsnr(groundtruth, noisyimage, 255, 10)\n psnr = imcpsnr(groundtruth, outputimage, 255, 10)\n csnr = impsnr(groundtruth, outputimage, 255, 10)\n print(\"Run%d, %s, Bilinear PSNR: %.2fdB, Final PSNR: %.2fdB, Time: %.4fs\" % (run, imagename, psnr_bilinear, psnr, test_time))\n psnr_sum += psnr\n psnr_initial_sum += psnr_bilinear\n test_sum += test_time\n save_images(os.path.join(save_dir, imagename), outputimage)\n avg_psnr = psnr_sum / len(test_files_gt)\n avg_psnr_initial = psnr_initial_sum / len(test_files_gt)\n print(\"--- Test --- Average PSNR Bilinear: %.2fdB, Final: %.2fdB, Running Time: %.4fs ---\" % (avg_psnr_initial, avg_psnr, test_sum))\n\n def self_ensemble_test(self, test_files_gt, test_files_bl, ckpt_dir, save_dir):\n # init variables\n 
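# Self-ensemble: run inference on the 8 flip/rotation variants of each\n        # input, undo each transform on the output, and average the results.\n        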
tf.global_variables_initializer().run()\n assert len(test_files_gt) != 0, 'No testing data!'\n load_model_status, global_step = self.load(ckpt_dir)\n assert load_model_status == True, '[!] Load weights FAILED...'\n print(\"[*] Load weights SUCCESS...\")\n psnr_sum = 0\n msssim_sum = 0\n ssim_sum = 0\n csnr_sum = np.zeros(3)\n for idx in range(len(test_files_gt)):\n imagename = os.path.basename(test_files_gt[idx])\n clean_image = load_images(test_files_gt[idx]).astype(np.float32)\n _, w, h, _ = clean_image.shape\n clean_image = clean_image[:, 0:w//2*2, 0:h//2*2, :]\n image_bayer = load_images(test_files_bl[idx])[:, 0:w//2*2, 0:h//2*2, :].astype(np.float32)\n image_ensemble = np.zeros([8,image_bayer.shape[1],image_bayer.shape[2],3])\n # mode 1-8\n for mode in range(8):\n if mode == 0:\n image_bayerRGGB = image_bayer\n output_clean_image = self.sess.run(self.Y, feed_dict={self.Y_: clean_image, self.X: image_bayerRGGB, self.is_training: False})\n image_ensemble[mode,:,:,:] = output_clean_image\n print(imcpsnr(clean_image, output_clean_image, 255, 10))\n elif mode == 1:\n image_bayer1 = np.flip(image_bayer,2)\n image_bayerRGGB = image_bayer1\n output_clean_image = self.sess.run(self.Y, feed_dict={self.Y_: clean_image, self.X: image_bayerRGGB, self.is_training: False})\n image_ensemble[mode,:,:,:] = np.flip(output_clean_image,2)\n print(imcpsnr(clean_image, np.flip(output_clean_image,2), 255, 10))\n elif mode == 2:\n image_bayer1 = np.flip(image_bayer,1)\n image_bayerRGGB = image_bayer1\n output_clean_image = self.sess.run(self.Y, feed_dict={self.Y_: clean_image, self.X: image_bayerRGGB, self.is_training: False})\n image_ensemble[mode,:,:,:] = np.flip(output_clean_image,1)\n print(imcpsnr(clean_image, np.flip(output_clean_image,1), 255, 10))\n elif mode == 3:\n image_bayer1 = np.rot90(image_bayer,axes=(1,2))\n image_bayerRGGB = image_bayer1\n output_clean_image = self.sess.run(self.Y, feed_dict={self.Y_: clean_image, self.X: image_bayerRGGB, self.is_training: False})\n image_ensemble[mode,:,:,:] = np.rot90(output_clean_image,3,axes=(1,2))\n print(imcpsnr(clean_image, np.rot90(output_clean_image,3,axes=(1,2)), 255, 10))\n elif mode == 4:\n image_bayer1 = np.rot90(image_bayer,2,axes=(1,2))\n image_bayerRGGB = image_bayer1\n output_clean_image = self.sess.run(self.Y, feed_dict={self.Y_: clean_image, self.X: image_bayerRGGB, self.is_training: False})\n image_ensemble[mode,:,:,:] = np.rot90(output_clean_image,2,axes=(1,2))\n print(imcpsnr(clean_image, np.rot90(output_clean_image,2,axes=(1,2)), 255, 10))\n elif mode == 5:\n image_bayer1 = np.rot90(image_bayer,3,axes=(1,2))\n image_bayerRGGB = image_bayer1\n output_clean_image = self.sess.run(self.Y, feed_dict={self.Y_: clean_image, self.X: image_bayerRGGB, self.is_training: False})\n image_ensemble[mode,:,:,:] = np.rot90(output_clean_image,axes=(1,2))\n print(imcpsnr(clean_image, np.rot90(output_clean_image,axes=(1,2)), 255, 10))\n elif mode == 6:\n image_bayer1 = np.flip(np.rot90(image_bayer,axes=(1,2)),2)\n image_bayerRGGB = image_bayer1\n output_clean_image = self.sess.run(self.Y, feed_dict={self.Y_: clean_image, self.X: image_bayerRGGB, self.is_training: False})\n image_ensemble[mode,:,:,:] = np.rot90(np.flip(output_clean_image,2),3,axes=(1,2))\n print(imcpsnr(clean_image, np.rot90(np.flip(output_clean_image,2),3,axes=(1,2)), 255, 10))\n elif mode == 7:\n image_bayer1 = np.flip(np.rot90(image_bayer,3,axes=(1,2)),2)\n image_bayerRGGB = image_bayer1\n output_clean_image = self.sess.run(self.Y, feed_dict={self.Y_: clean_image, self.X: 
image_bayerRGGB, self.is_training: False})\n image_ensemble[mode,:,:,:] = np.rot90(np.flip(output_clean_image,2),axes=(1,2))\n print(imcpsnr(clean_image, np.rot90(np.flip(output_clean_image,2),axes=(1,2)), 255, 10))\n else:\n print('[!]Wrong Mode')\n exit(0)\n groundtruth = np.clip(clean_image, 0, 255).astype('uint8')\n outputimage = np.average(image_ensemble,axis=0)\n outputimage = np.around(np.clip(outputimage, 0, 255)).astype('uint8')\n outputimage = np.expand_dims(outputimage, 0)\n psnr = imcpsnr(groundtruth, outputimage, 255, 10)\n csnr = impsnr(groundtruth, outputimage, 255, 10)\n msssim = MS_SSIM(groundtruth, outputimage, 10)\n ssim = self.sess.run(SSIM(groundtruth, outputimage, 10)) \n print(\"%s, Final PSNR: %.2fdB (R: %.2f, G: %.2f, B: %.2f), MSSSIM: %.5f\" % (imagename, psnr, csnr[0], csnr[1], csnr[2], msssim))\n psnr_sum += psnr\n csnr_sum += csnr\n msssim_sum += msssim\n ssim_sum += ssim\n save_images(os.path.join(save_dir, imagename), outputimage)\n avg_psnr = psnr_sum / len(test_files_gt)\n avg_csnr = csnr_sum / len(test_files_gt)\n avg_msssim = msssim_sum / len(test_files_gt)\n avg_ssim = ssim_sum / len(test_files_gt)\n print(\"--- Test --- Average PSNR Final: %.2fdB (R: %.2f, G: %.2f, B: %.2f), MSSSIM: %.5f, SSIM: %.5f ---\" % (avg_psnr, avg_csnr[0], avg_csnr[1], avg_csnr[2], avg_msssim, avg_ssim))\n\n def save(self, iter_num, ckpt_dir, model_name='CNNCDM-2Stage'):\n saver = tf.train.Saver()\n checkpoint_dir = ckpt_dir\n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n print(\"[*] Saving model...\")\n saver.save(self.sess, os.path.join(checkpoint_dir, model_name), global_step=iter_num)\n\n def load(self, checkpoint_dir):\n print(\"[*] Reading checkpoint...\")\n saver = tf.train.Saver()\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n if ckpt and ckpt.model_checkpoint_path:\n full_path = tf.train.latest_checkpoint(checkpoint_dir)\n global_step = int(full_path.split('/')[-1].split('-')[-1])\n saver.restore(self.sess, full_path)\n return True, global_step\n else:\n return False, 0\n","sub_path":"DnCNN/model_py3_tfrecord_DnCNN.py","file_name":"model_py3_tfrecord_DnCNN.py","file_ext":"py","file_size_in_byte":16034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"262500679","text":"#!/usr/bin/python3\n\"\"\"This is the file storage class for AirBnB\"\"\"\nimport json\nfrom models.base_model import BaseModel\nfrom models.user import User\nfrom models.state import State\nfrom models.city import City\nfrom models.amenity import Amenity\nfrom models.place import Place\nfrom models.review import Review\nfrom sqlalchemy.orm import sessionmaker, scoped_session\nfrom sqlalchemy import create_engine, MetaData\nfrom models.base_model import Base\nimport os\n\n\nclass DBStorage():\n \"\"\"manager of mysql database\"\"\"\n __engine = None\n __session = None\n\n def __init__(self):\n\n self.__engine = create_engine('mysql+mysqldb://{}:{}@{}/{}'.format(\n os.environ.get(\"HBNB_MYSQL_USER\"),\n os.environ.get(\"HBNB_MYSQL_PWD\"),\n os.environ.get(\"HBNB_MYSQL_HOST\"),\n os.environ.get(\"HBNB_MYSQL_DB\")),\n pool_pre_ping=True)\n if (os.environ.get(\"HBNB_MYSQL_USER\") == \"test\"):\n Base.metadata.drop_all(bind=self.__engine)\n\n def all(self, cls=None):\n \"\"\"\n all\n \"\"\"\n current = []\n objects = {}\n my_tables = {'cities': 'City', 'states': 'State', 'users': 'User',\n 'amenities': 'Amenity', 'places': 'Place',\n 'reviews': 'Review'}\n if cls:\n if type(cls) == str:\n current = 
self.__session.query(eval(cls)).all()\n else:\n current = self.__session.query(cls).all()\n else:\n tables = self.__engine.table_names()\n for table in tables:\n current.append(self.__session.query(\n eval(my_tables[table])).all())\n for obj in current:\n if type(obj) == list:\n for o in obj:\n key = \"{}.{}\".format(o.__class__.__name__, o.id)\n objects[key] = o\n else:\n key = \"{}.{}\".format(obj.__class__.__name__, obj.id)\n objects[key] = obj\n return objects\n\n def new(self, obj):\n \"\"\"add the given object to the current database session\n Args:\n obj: given object\n \"\"\"\n\n self.__session.add(obj)\n self.save()\n\n def save(self):\n \"\"\"commit all changes of the current database session\n \"\"\"\n self.__session.commit()\n\n def delete(self, obj=None):\n \"\"\"delete obj from the current database session if it is not None\"\"\"\n if obj:\n self.__session.delete(obj)\n self.save()\n\n def reload(self):\n \"\"\"create all tables in the database and start a new scoped session\n \"\"\"\n Base.metadata.create_all(bind=self.__engine)\n Session = scoped_session(sessionmaker(bind=self.__engine,\n expire_on_commit=False))\n self.__session = Session()\n\n def close(self):\n \"\"\"close the working SQLAlchemy session\"\"\"\n self.__session.close()\n","sub_path":"models/engine/db_storage.py","file_name":"db_storage.py","file_ext":"py","file_size_in_byte":2877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"20011088","text":"import numpy as np\nimport sys\n\ndef restrict(factor, variable, value):\n print(\"Restricted to \" + variable + \" = \" + str(value))\n print_factor(factor, sort=False)\n\n names = factor[0]\n table = factor[1]\n rows = np.shape(table)[0]\n indices = []\n for r in range(rows):\n if table[r][names[variable]] == value:\n indices.append(r)\n restricted_factor = np.delete(np.take(table, indices, 0), names[variable], 1)\n del names[variable]\n new_names = {}\n idx = 0\n for n in sorted(names.iteritems(), key=lambda x: x[1]):\n new_names[n[0]] = idx\n idx += 1\n\n print(\"\")\n print(\"Result:\")\n print_factor((new_names, restricted_factor))\n print(\"______________\")\n return new_names, restricted_factor\n\ndef query_prob(names, row, table):\n for r in table:\n okay = True\n for var in r:\n if var != \"Prob\" and row[names[var]] != r[var]:\n okay = False\n if okay:\n return r[\"Prob\"]\n return 0\n\ndef query_prob2(names, var, val, row, table):\n for r in table:\n okay = True\n for n in names:\n if row[names[n]] != r[n]:\n okay = False\n if r[var] != val:\n okay = False\n if okay:\n return r[\"Prob\"]\n return 0\n\ndef multiply(factor1, factor2):\n print(\"Multiplying:\")\n print_factor(factor1, sort=False)\n print_factor(factor2, sort=False)\n\n names1 = factor1[0]\n table1 = factor1[1]\n names2 = factor2[0]\n table2 = factor2[1]\n\n union = []\n for n in names1:\n if n not in union:\n union.append(n)\n for n in names2:\n if n not in union:\n union.append(n)\n new_names = {}\n idx = 0\n for i in sorted(union):\n new_names[i] = idx\n idx += 1\n\n prob_table1 = []\n prob_table2 = []\n for r in range(np.shape(table1)[0]):\n row = {}\n for n in names1:\n row[n] = table1[r,names1[n]]\n row[\"Prob\"] = table1[r,-1]\n prob_table1.append(row)\n #print(prob_table1)\n for r in range(np.shape(table2)[0]):\n row = {}\n for n in names2:\n row[n] = table2[r,names2[n]]\n row[\"Prob\"] = table2[r,-1]\n prob_table2.append(row)\n #print(prob_table2)\n\n cols = len(union)\n rows = 2**cols\n product = np.zeros(shape=(rows, cols+1))\n for j in range(cols):\n for i in range(rows):\n if i/(2**(cols-j-1)) % 2 == 0:\n product[i,j] = 1\n else:\n product[i,j] = 0\n\n for i in 
range(rows):\n p1 = query_prob(new_names, product[i], prob_table1)\n p2 = query_prob(new_names, product[i], prob_table2)\n product[i,-1] = p1*p2\n\n print(\"\")\n print(\"Result:\")\n print_factor((new_names, product))\n print(\"______________\")\n return new_names, product\n\ndef sumout(factor, variable):\n print(\"Sumout \" + variable + \" from:\")\n print_factor(factor, sort=False)\n\n names = factor[0]\n table = factor[1]\n rows = np.shape(table)[0]\n cols = np.shape(table)[1]\n prob_table = []\n for r in range(rows):\n row = {}\n for n in names:\n row[n] = table[r,names[n]]\n row[\"Prob\"] = table[r,-1]\n prob_table.append(row)\n #print(prob_table)\n\n result = np.zeros(shape=(rows/2, cols-1))\n for j in range(cols-2):\n for i in range(rows/2):\n if i/(2**(cols-j-3)) % 2 == 0:\n result[i,j] = 1\n else:\n result[i,j] = 0\n #print(result)\n \n new_names = {}\n idx = 0\n for n in names:\n if n != variable:\n new_names[n] = idx\n idx += 1\n #print(new_names)\n\n for i in range(np.shape(result)[0]):\n p1 = query_prob2(new_names, variable, 1, result[i], prob_table)\n p0 = query_prob2(new_names, variable, 0, result[i], prob_table)\n result[i,-1] = p1+p0\n\n print(\"\")\n print(\"Result:\")\n print_factor((new_names, result))\n print(\"______________\")\n return new_names, result\n\ndef normalize(factor):\n print(\"Normalize:\")\n print_factor(factor)\n\n names = factor[0]\n table = factor[1]\n rows = np.shape(table)[0]\n s = 0.\n for r in range(rows):\n s += table[r][1]\n result_factor = np.copy(table)\n for r in range(rows):\n result_factor[r][1] = table[r][1]/s\n\n print(\"\")\n print(\"Result:\")\n print_factor((names, result_factor))\n print(\"______________\")\n return names, result_factor\n\ndef inference(factor_list, query_variables, ordered_hidden_var_list, evidence_vars):\n for i in range(len(factor_list)):\n names = factor_list[i][0]\n for e in evidence_vars:\n if e not in names:\n continue\n factor_list[i] = restrict(factor_list[i], e, evidence_vars[e])\n #print(factor_list)\n \n for h in ordered_hidden_var_list:\n #print(\"Hidden variable \" + h)\n common = []\n remaining_factors = []\n for f in factor_list:\n names = f[0]\n if h in names:\n #print(f)\n #print(\"Adding \" + h + \" to common\")\n common.append(f)\n else:\n remaining_factors.append(f)\n new_factor = common[0]\n for c in common:\n if c != new_factor:\n new_factor = multiply(new_factor, c)\n new_factor = sumout(new_factor, h)\n remaining_factors.append(new_factor)\n factor_list = remaining_factors\n \n remaining = factor_list[0]\n for i in range(1, len(factor_list)):\n remaining = multiply(remaining, factor_list[i])\n\n final = normalize(remaining)\n return final\n\ndef print_factor(factor, sort=True):\n names = factor[0]\n table = factor[1]\n for n in sorted(names.iteritems(), key=lambda x: x[1]):\n print(n[0] + \"\\t\"),\n print(\"Prob\")\n rows = np.shape(table)[0]\n cols = np.shape(table)[1]\n for i in range(rows):\n for j in range(cols):\n if j < cols-1:\n print(int(table[i,j])),\n print(\"\\t\"),\n else:\n print(float(table[i,j]))\n\n# AB\nf1 = ({\"AB\": 0, \"AS\": 1},\n np.array([[1, 0, 0.1],\n [1, 1, 0.6],\n [0, 0, 0.9],\n [0, 1, 0.4]]))\n\n# AH\nf2 = ({\"AH\": 0, \"AS\": 1, \"M\": 2, \"NH\": 3},\n np.array([[1, 0, 0, 0, 0],\n [1, 0, 0, 1, 0.2],\n [1, 0, 1, 0, 0.4],\n [1, 0, 1, 1, 0.65],\n [1, 1, 0, 0, 0.5],\n [1, 1, 0, 1, 0.75],\n [1, 1, 1, 0, 0.9],\n [1, 1, 1, 1, 0.99],\n [0, 0, 0, 0, 1],\n [0, 0, 0, 1, 0.8],\n [0, 0, 1, 0, 0.6],\n [0, 0, 1, 1, 0.35],\n [0, 1, 0, 0, 0.5],\n [0, 1, 0, 1, 0.25],\n [0, 1, 1, 
0, 0.1],\n [0, 1, 1, 1, 0.01]]))\n\n# AS\nf3 = ({\"AS\": 0},\n np.array([[1, 0.05],\n [0, 0.95]]))\n\n# M\nf4 = ({\"M\": 0},\n np.array([[1, 0.03571428571],\n [0, 0.96428571428]]))\n\n# NA\nf5 = ({\"NA\": 0},\n np.array([[1, 0.3],\n [0, 0.7]]))\n\n# AH\nf6 = ({\"NH\": 0, \"M\": 1, \"NA\": 2},\n np.array([[1, 0, 0, 0],\n [1, 0, 1, 0.5],\n [1, 1, 0, 0.4],\n [1, 1, 1, 0.8],\n [0, 0, 0, 1],\n [0, 0, 1, 0.5],\n [0, 1, 0, 0.6],\n [0, 1, 1, 0.2]]))\n\nfactors = [f1, f2, f3, f4, f5, f6]\nfactors2 = [f1, f2, f3, f5, f6]\n\nif sys.argv[1] == \"a\":\n inference(factors, \"AH\", [\"AB\", \"AS\", \"M\", \"NA\", \"NH\"], {})\nelif sys.argv[1] == \"b\":\n inference(factors2 , \"AS\", [\"AB\", \"NA\", \"NH\"], {\"AH\": 1, \"M\": 1})\nelif sys.argv[1] == \"c\":\n inference(factors2 , \"AS\", [\"NA\", \"NH\"], {\"AB\": 1, \"AH\": 1, \"M\": 1})\nelif sys.argv[1] == \"d\":\n inference(factors2 , \"AS\", [\"NH\"], {\"AB\": 1, \"AH\": 1, \"M\": 1, \"NA\": 1})\nelse:\n print(\"Incorrect command line arguments\")\n\n# Constructing a Bayesian Network\n\n#f11 = ({\"E\": 0},\n# np.array([[1, 0.0003],\n# [0, 0.9997]]))\n#\n#f12 = ({\"B\": 0},\n# np.array([[1, 0.0001],\n# [0, 0.9999]]))\n#\n#f13 = ({\"W\": 0, \"A\": 1},\n# np.array([[1, 0, 0.4],\n# [1, 1, 0.8],\n# [0, 0, 0.6],\n# [0, 1, 0.2]]))\n#\n#f14 = ({\"G\": 0, \"A\": 1},\n# np.array([[1, 0, 0.04],\n# [1, 1, 0.4],\n# [0, 0, 0.96],\n# [0, 1, 0.6]]))\n#\n#f15 = ({\"A\": 0, \"B\": 1, \"E\": 2},\n# np.array([[1, 0, 0, 0.01],\n# [1, 0, 1, 0.2],\n# [1, 1, 0, 0.95],\n# [1, 1, 1, 0.96],\n# [0, 0, 0, 0.99],\n# [0, 0, 1, 0.8],\n# [0, 1, 0, 0.05],\n# [0, 1, 1, 0.04]]))\n\n#inference([f11, f12, f13, f14, f15], \"W\", [\"A\", \"B\", \"E\", \"G\"], {})\n#inference([f11, f12, f13, f14, f15], \"G\", [\"A\", \"B\", \"E\"], {\"W\": 1})\n#inference([f11, f12, f13, f14, f15], \"G\", [\"A\", \"B\", \"E\"], {\"W\": 0})\n#inference([f11, f12, f13, f14, f15], \"A\", [\"B\", \"E\"], {\"W\": 0, \"G\": 0})\n#inference([f11, f12, f13, f14, f15], \"A\", [\"B\", \"E\"], {\"W\": 0, \"G\": 1})\n#inference([f11, f12, f13, f14, f15], \"A\", [\"B\", \"E\"], {\"W\": 1, \"G\": 0})\n#inference([f11, f12, f13, f14, f15], \"A\", [\"B\", \"E\"], {\"W\": 1, \"G\": 1})\n#inference([f11, f12, f13, f14, f15], \"E\", [\"G\", \"W\"], {\"A\": 0, \"B\": 0})\n#inference([f11, f12, f13, f14, f15], \"E\", [\"G\", \"W\"], {\"A\": 0, \"B\": 1})\n#inference([f11, f12, f13, f14, f15], \"E\", [\"G\", \"W\"], {\"A\": 1, \"B\": 0})\n#inference([f11, f12, f13, f14, f15], \"E\", [\"G\", \"W\"], {\"A\": 1, \"B\": 1})\n","sub_path":"a3/bayesnet.py","file_name":"bayesnet.py","file_ext":"py","file_size_in_byte":9650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"633020674","text":"from __future__ import unicode_literals\nimport time\nfrom base import Progress\n\n\nclass Bar(Progress):\n width = 32\n suffix = '%(index)d:%(max)d'\n bar_prefix = ' |'\n bar_suffix = '| '\n empty_fill = ' '\n fill = '#'\n\n def update(self):\n filled_length = int(self.width * self.progress)\n empty_length = self.width - filled_length\n\n message = self.message % self\n\n bar = self.fill * filled_length\n empty = self.empty_fill * empty_length\n suffix = self.suffix % self\n # print(type(suffix))\n # the % operator performs the string formatting here\n # e.g. '%(index)d:%(max)d' % self with self.index=1, self.max=20\n line = ''.join([message, self.bar_prefix, bar, empty, self.bar_suffix, suffix])\n self.writeln(line)\n\n\ndef work(s):\n time.sleep(s)\n\n\nif __name__ == '__main__':\n\n bar = Bar(\"message\", max=20)\n for i in range(20):\n # do 
some work\n work(0.1)\n bar.next()\n bar.finish()\n","sub_path":"bili_video_download/bar.py","file_name":"bar.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"205443957","text":"from __future__ import print_function\nn, d = map(int, raw_input().split(' '))\npenge = list(map(int, raw_input().split(' ')))\npenge.append('Stopp')\n\npeningar = 0\nreitur = 0\nwhile True:\n if penge[reitur] != 'Stopp':\n peningar += penge[reitur]\n else:\n break\n\n reitur += d\n if reitur > n:\n reitur -= n\n\nprint(peningar)","sub_path":"Forritunarkeppnir/2020/Peningar.py","file_name":"Peningar.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"159677055","text":"# pylint: disable=no-member\nfrom typing import Dict, List, Tuple, Any, Optional\nfrom copy import deepcopy\nimport math\n\nimport numpy\nfrom overrides import overrides\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torch.nn.modules.linear import Linear\n\nfrom allennlp.common.checks import ConfigurationError\nfrom allennlp.common.util import START_SYMBOL, END_SYMBOL\nfrom allennlp.data.vocabulary import Vocabulary\nfrom allennlp.modules import TextFieldEmbedder, Seq2SeqEncoder\nfrom allennlp.modules.seq2seq_decoders import DecoderNet\nfrom allennlp.models.model import Model\nfrom allennlp.modules.token_embedders import Embedding\nfrom allennlp.nn.initializers import InitializerApplicator\nfrom allennlp.nn import util\nfrom allennlp.nn.beam_search import BeamSearch\nfrom allennlp.modules.seq2seq_encoders.bidirectional_language_model_transformer import (\n subsequent_mask,\n PositionwiseFeedForward,\n SublayerConnection,\n PositionalEncoding,\n MultiHeadedAttention,\n)\n\nfrom xlwomt.metrics import TokenSequenceAccuracy\nfrom xlwomt.models.combiner import TransformerCombiner, AttentionCombiner\n\n\n@Model.register(\"ensemble_transformer_single_encoder\")\nclass EnsembleSequenceSingleEncoderTransformer(Model):\n def __init__(self,\n vocab: Vocabulary,\n source_embedder: TextFieldEmbedder,\n target_embedder: Embedding,\n encoder: Seq2SeqEncoder,\n max_decoding_steps: int,\n decoding_dim: int,\n feedforward_hidden_dim: int,\n num_layers: int,\n num_attention_heads: int,\n combiner_module: TransformerCombiner,\n use_positional_encoding: bool = True,\n positional_encoding_max_steps: int = 5000,\n dropout_prob: float = 0.1,\n residual_dropout_prob: float = 0.2,\n attention_dropout_prob: float = 0.2,\n beam_size: int = 1,\n target_namespace: str = \"tokens\",\n label_smoothing_ratio: Optional[float] = None,\n initializer: Optional[InitializerApplicator] = None) -> None:\n super(EnsembleSequenceSingleEncoderTransformer, self).__init__(vocab)\n\n self._target_namespace = target_namespace\n self._label_smoothing_ratio = label_smoothing_ratio\n self._start_index = self.vocab.get_token_index(START_SYMBOL, self._target_namespace)\n self._end_index = self.vocab.get_token_index(END_SYMBOL, self._target_namespace)\n self._token_based_metric = TokenSequenceAccuracy()\n\n # Beam Search\n self._max_decoding_steps = max_decoding_steps\n self._beam_search = BeamSearch(self._end_index, max_steps=max_decoding_steps, beam_size=beam_size)\n\n # Encoder\n self._encoder = encoder\n\n # Vocabulary and embedder\n self._source_embedder = source_embedder\n self._target_embedder = target_embedder\n\n target_vocab_size = 
self.vocab.get_vocab_size(self._target_namespace)\n assert target_vocab_size == self._target_embedder.num_embeddings\n\n target_embedding_dim = self._target_embedder.get_output_dim()\n\n self._decoding_dim = decoding_dim\n # Sequence Decoder Features\n self._output_projection_layer = Linear(\n self._decoding_dim, target_vocab_size\n )\n\n self._decoder = Decoder(\n num_layers=num_layers,\n decoding_dim=decoding_dim,\n target_embedding_dim=target_embedding_dim,\n feedforward_hidden_dim=feedforward_hidden_dim,\n num_attention_heads=num_attention_heads,\n use_positional_encoding=use_positional_encoding,\n positional_encoding_max_steps=positional_encoding_max_steps,\n dropout_prob=dropout_prob,\n residual_dropout_prob=residual_dropout_prob,\n attention_dropout_prob=attention_dropout_prob,\n combiner=combiner_module,\n num_sources=3\n )\n\n # Parameter checks and cleanup\n if self._target_embedder.get_output_dim() != self._decoder.target_embedding_dim:\n raise ConfigurationError(\n \"Target Embedder output_dim doesn't match decoder module's input.\"\n )\n if self._encoder.get_output_dim() != self._decoder.get_output_dim():\n raise ConfigurationError(\n f\"Encoder output dimension {self._encoder.get_output_dim()} should be\"\n f\" equal to decoder dimension {self._decoder.get_output_dim()}.\"\n )\n\n if initializer:\n initializer(self)\n\n # Print the model\n print(self)\n\n def take_step(self,\n last_predictions: torch.Tensor,\n state: Dict[str, torch.Tensor]) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:\n \"\"\"\n Take a decoding step. This is called by the beam search class.\n # Parameters\n last_predictions : `torch.Tensor`\n A tensor of shape `(group_size,)`, which gives the indices of the predictions\n during the last time step.\n state : `Dict[str, torch.Tensor]`\n A dictionary of tensors that contain the current state information\n needed to predict the next step, which includes the encoder outputs,\n the source mask, and the decoder hidden state and context. 
Each of these\n tensors has shape `(group_size, *)`, where `*` can be any other number\n of dimensions.\n # Returns\n Tuple[torch.Tensor, Dict[str, torch.Tensor]]\n A tuple of `(log_probabilities, updated_state)`, where `log_probabilities`\n is a tensor of shape `(group_size, num_classes)` containing the predicted\n log probability of each class for the next step, for each item in the group,\n while `updated_state` is a dictionary of tensors containing the encoder outputs,\n source mask, and updated decoder hidden state and context.\n Notes\n -----\n We treat the inputs as a batch, even though `group_size` is not necessarily\n equal to `batch_size`, since the group may contain multiple states\n for each source sentence in the batch.\n \"\"\"\n # shape: (group_size, num_classes)\n output_projections, state = self._decoder_step(last_predictions, state)\n\n # shape: (group_size, num_classes)\n class_log_probabilities = F.log_softmax(output_projections, dim=-1)\n\n return class_log_probabilities, state\n\n @overrides\n def forward(self, # type: ignore\n source_tokens_0: Dict[str, torch.LongTensor],\n source_tokens_1: Dict[str, torch.LongTensor],\n source_tokens_2: Dict[str, torch.LongTensor],\n metadata: List[Dict[str, Any]],\n target_tokens: Dict[str, torch.LongTensor] = None) -> Dict[str, torch.Tensor]:\n \"\"\"\n Make forward pass with decoder logic for producing the entire target sequence.\n Parameters\n ----------\n source_tokens_0 : ``Dict[str, torch.LongTensor]``\n The output of `TextField.as_array()` applied on the source `TextField`. This will be\n passed through a `TextFieldEmbedder` and then through an encoder.\n source_tokens_1 : ``Dict[str, torch.LongTensor]``\n The output of `TextField.as_array()` applied on the source `TextField`. This will be\n passed through a `TextFieldEmbedder` and then through an encoder.\n source_tokens_2 : ``Dict[str, torch.LongTensor]``\n The output of `TextField.as_array()` applied on the source `TextField`. This will be\n passed through a `TextFieldEmbedder` and then through an encoder.\n metadata: List[Dict[str, Any]]\n Additional information for prediction\n target_tokens : ``Dict[str, torch.LongTensor]``, optional (default = None)\n Output of `Textfield.as_array()` applied on target `TextField`. We assume that the\n target tokens are also represented as a `TextField`.\n Returns\n -------\n Dict[str, torch.Tensor]\n \"\"\"\n state = self._encode(source_tokens=[source_tokens_0,\n source_tokens_1,\n source_tokens_2])\n\n if target_tokens:\n # state = self._decoder.init_decoder_state(state)\n # The `_forward_loop` decodes the input sequence and computes the loss during training\n # and validation.\n output_dict = self._forward_loop(state, target_tokens)\n else:\n output_dict = {}\n\n if not self.training:\n # state = self._init_decoder_state(state)\n predictions = self._forward_beam_search(state)\n output_dict.update(predictions)\n if target_tokens:\n # shape: (batch_size, max_predicted_sequence_length)\n predicted_tokens = self.decode(output_dict)[\"predicted_tokens\"]\n\n self._token_based_metric(predicted_tokens, [x[\"target_tokens\"] for x in metadata])\n\n return output_dict\n\n @overrides\n def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:\n \"\"\"\n Finalize predictions.\n This method overrides ``Model.decode``, which gets called after ``Model.forward``, at test\n time, to finalize predictions. 
The logic for the decoder part of the encoder-decoder lives\n within the ``forward`` method.\n This method trims the output predictions to the first end symbol, replaces indices with\n corresponding tokens, and adds a field called ``predicted_tokens`` to the ``output_dict``.\n \"\"\"\n predicted_indices = output_dict[\"predictions\"]\n if not isinstance(predicted_indices, numpy.ndarray):\n predicted_indices = predicted_indices.detach().cpu().numpy()\n all_predicted_tokens = []\n for indices in predicted_indices:\n # Beam search gives us the top k results for each source sentence in the batch\n # but we just want the single best.\n if len(indices.shape) > 1:\n indices = indices[0]\n indices = list(indices)\n # Collect indices till the first end_symbol\n if self._end_index in indices:\n indices = indices[:indices.index(self._end_index)]\n predicted_tokens = [self.vocab.get_token_from_index(x, namespace=self._target_namespace)\n for x in indices]\n all_predicted_tokens.append(predicted_tokens)\n output_dict[\"predicted_tokens\"] = all_predicted_tokens # type: ignore\n return output_dict\n\n def _encode(self, source_tokens: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:\n \"\"\"\n Make forward pass on the encoder.\n # Parameters\n source_tokens : `List[Dict[str, torch.Tensor]]`\n List of the output of `TextField.as_array()` applied on the source `TextField`. This will be\n passed through a `TextFieldEmbedder` and then through an encoder.\n # Returns\n Dict[str, torch.Tensor]\n Map consisting of the key `source_mask` with the mask over the\n `source_tokens` text field,\n and the key `encoder_outputs` with the output tensor from\n forward pass on the encoder.\n \"\"\"\n # shape: n_srcs list of (batch_size, max_input_sequence_length, encoder_input_dim)\n embedded_inputs = [self._source_embedder(src_toks) for src_toks in source_tokens]\n # print(embedded_inputs[0].shape)\n # shape: (batch_size, n_srcs, max_input_sequence_length, encoder_input_dim)\n embedded_inputs = torch.nn.utils.rnn.pad_sequence([e.permute(1, 0, 2) for e in embedded_inputs]).permute(2, 1, 0, 3)\n # print(embedded_inputs.shape)\n # shape: n_src size list of (batch_size, max_input_sequence_length)\n source_masks = [util.get_text_field_mask(src_toks) for src_toks in source_tokens]\n\n # shape: (batch_size, n_srcs, max_input_sequence_length)\n source_masks = torch.nn.utils.rnn.pad_sequence([s.permute(1, 0) for s in source_masks]).permute(2, 1, 0)\n # print(source_masks.shape)\n # import pdb; pdb.set_trace()\n # shape: List(batch_size, max_input_sequence_length, encoder_output_dim)\n encoder_outputs = [self._encoder(embedded_inputs[:, src_idx, :, :], source_masks[:, src_idx, :])\n for src_idx in range(3)]\n\n # shape: (batch_size, n_srcs, max_input_sequence_length, encoder_output_dim)\n encoder_outputs = torch.stack(encoder_outputs, dim=1)\n\n return {\"source_mask\": source_masks, \"encoder_outputs\": encoder_outputs}\n\n def _forward_loop(self,\n state: Dict[str, torch.Tensor],\n target_tokens: Dict[str, torch.LongTensor] = None) -> Dict[str, torch.Tensor]:\n\n # shape: (batch_size, n_srcs, max_input_sequence_length, encoder_output_dim)\n encoder_outputs = state[\"encoder_outputs\"]\n\n # shape: (batch_size, n_srcs, max_input_sequence_length)\n source_mask = state[\"source_mask\"]\n\n # shape: (batch_size, max_target_sequence_length)\n targets = target_tokens[\"tokens\"]\n\n _, target_sequence_length = targets.size()\n\n # Prepare embeddings for targets. 
They will be used as gold embeddings during decoder training\n # shape: (batch_size, max_target_sequence_length, embedding_dim)\n target_embedding = self._target_embedder(targets)\n\n # shape: (batch_size, max_target_batch_sequence_length)\n target_mask = util.get_text_field_mask(target_tokens)\n\n _, decoder_output = self._decoder(\n previous_state=state,\n previous_steps_predictions=target_embedding[:, :-1, :],\n encoder_outputs=encoder_outputs,\n source_mask=source_mask,\n previous_steps_mask=target_mask[:, :-1]\n )\n\n # shape: (group_size, max_target_sequence_length, num_classes)\n logits = self._output_projection_layer(decoder_output).type(torch.FloatTensor)\n\n # Compute loss.\n loss = self._get_loss(logits, targets, target_mask)\n output_dict = {\"loss\": loss}\n\n return output_dict\n\n def _forward_beam_search(self, state: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:\n \"\"\"\n Prepare inputs for the beam search, does beam search and returns beam search results.\n \"\"\"\n batch_size = state[\"source_mask\"].size(dim=0)\n start_predictions = state[\"source_mask\"][:, 0, :].new_full((batch_size,), fill_value=self._start_index)\n\n # shape (all_top_k_predictions): (batch_size, beam_size, num_decoding_steps)\n # shape (log_probabilities): (batch_size, beam_size)\n all_top_k_predictions, log_probabilities = self._beam_search.search(\n start_predictions, state, self.take_step\n )\n\n output_dict = {\n \"class_log_probabilities\": log_probabilities,\n \"predictions\": all_top_k_predictions,\n }\n return output_dict\n\n def _decoder_step(\n self, last_predictions: torch.Tensor, state: Dict[str, torch.Tensor]\n ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:\n \"\"\"\n Decode current state and last prediction to produce produce projections\n into the target space, which can then be used to get probabilities of\n each target token for the next step.\n Inputs are the same as for `take_step()`.\n \"\"\"\n # shape: (batch_size, n_srcs, max_input_sequence_length, encoder_output_dim)\n encoder_outputs = state[\"encoder_outputs\"]\n\n # shape: (batch_size, n_srcs, max_input_sequence_length)\n source_mask = state[\"source_mask\"]\n\n # shape: (group_size, steps_count, decoder_output_dim)\n previous_steps_predictions = state.get(\"previous_steps_predictions\")\n\n # shape: (batch_size, 1, target_embedding_dim)\n last_predictions_embeddings = self._target_embedder(last_predictions).unsqueeze(1)\n\n if previous_steps_predictions is None or previous_steps_predictions.shape[-1] == 0:\n # There is no previous steps, except for start vectors in `last_predictions`\n # shape: (group_size, 1, target_embedding_dim)\n previous_steps_predictions = last_predictions_embeddings\n else:\n # shape: (group_size, steps_count, target_embedding_dim)\n previous_steps_predictions = torch.cat(\n [previous_steps_predictions, last_predictions_embeddings], 1\n )\n\n decoder_state, decoder_output = self._decoder(\n previous_state=state,\n encoder_outputs=encoder_outputs,\n source_mask=source_mask,\n previous_steps_predictions=previous_steps_predictions,\n )\n state[\"previous_steps_predictions\"] = previous_steps_predictions\n\n # Update state with new decoder state, override previous state\n state.update(decoder_state)\n\n if self._decoder.decodes_parallel:\n decoder_output = decoder_output[:, -1, :]\n\n # shape: (group_size, num_classes)\n output_projections = self._output_projection_layer(decoder_output)\n\n return output_projections, state\n\n def _get_loss(self,\n logits: torch.FloatTensor,\n targets: 
torch.LongTensor,\n target_mask: torch.LongTensor) -> torch.Tensor:\n \"\"\"\n Compute loss.\n Takes logits (unnormalized outputs from the decoder) of size (batch_size,\n num_decoding_steps, num_classes), target indices of size (batch_size, num_decoding_steps+1)\n and corresponding masks of size (batch_size, num_decoding_steps+1) steps and computes cross\n entropy loss while taking the mask into account.\n The length of ``targets`` is expected to be greater than that of ``logits`` because the\n decoder does not need to compute the output corresponding to the last timestep of\n ``targets``. This method aligns the inputs appropriately to compute the loss.\n During training, we want the logit corresponding to timestep i to be similar to the target\n token from timestep i + 1. That is, the targets should be shifted by one timestep for\n appropriate comparison. Consider a single example where the target has 3 words, and\n padding is to 7 tokens.\n The complete sequence would correspond to <S> w1 w2 w3 <E> <P> <P>\n and the mask would be 1 1 1 1 1 0 0\n and let the logits be l1 l2 l3 l4 l5 l6\n We actually need to compare:\n the sequence w1 w2 w3 <E> <P> <P>\n with masks 1 1 1 1 0 0\n against l1 l2 l3 l4 l5 l6\n (where the input was) <S> w1 w2 w3 <E> <P>
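\n (<S>, <E> and <P> above stand for the start-of-sequence, end-of-sequence and padding tokens.)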
\n \"\"\"\n # shape: (batch_size, num_decoding_steps)\n relevant_targets = targets[:, 1:].contiguous().to(logits.device)\n\n # shape: (batch_size, num_decoding_steps)\n relevant_mask = target_mask[:, 1:].contiguous().to(logits.device)\n\n return util.sequence_cross_entropy_with_logits(logits,\n relevant_targets,\n relevant_mask)\n\n @overrides\n def get_metrics(self, reset: bool = False) -> Dict[str, float]:\n all_metrics: Dict[str, float] = {}\n if not self.training:\n all_metrics.update(self._token_based_metric.get_metric(reset=reset))\n return all_metrics\n\n\ndef _clones(module: nn.Module, num_layers: int):\n \"\"\"Produce N identical layers.\"\"\"\n return nn.ModuleList([deepcopy(module) for _ in range(num_layers)])\n\n\nclass Decoder(DecoderNet):\n \"\"\"\n Transformer N layer decoder with masking.\n Code taken from http://nlp.seas.harvard.edu/2018/04/03/attention.html\n \"\"\"\n\n def __init__(self,\n num_layers: int,\n decoding_dim: int,\n target_embedding_dim: int,\n feedforward_hidden_dim: int,\n num_attention_heads: int,\n combiner: TransformerCombiner,\n num_sources: int,\n use_positional_encoding: bool = True,\n positional_encoding_max_steps: int = 5000,\n dropout_prob: float = 0.1,\n residual_dropout_prob: float = 0.2,\n attention_dropout_prob: float = 0.2,\n ) -> None:\n super().__init__(decoding_dim, target_embedding_dim, decodes_parallel=True)\n\n self._decoding_dim = decoding_dim\n self._embed_scale = math.sqrt(decoding_dim)\n\n self._positional_embedder = (\n PositionalEncoding(input_dim=decoding_dim, max_len=positional_encoding_max_steps)\n if use_positional_encoding\n else None\n )\n self._dropout = nn.Dropout(dropout_prob)\n\n generic_attn = MultiHeadedAttention(num_attention_heads, decoding_dim, attention_dropout_prob)\n combined_attn = AttentionCombiner(num_sources, generic_attn, combiner)\n feed_forward = PositionwiseFeedForward(decoding_dim, feedforward_hidden_dim, dropout_prob)\n\n layer = DecoderLayer(\n size=decoding_dim,\n self_attn=deepcopy(generic_attn),\n src_attn=deepcopy(combined_attn),\n feed_forward=feed_forward,\n dropout=residual_dropout_prob\n )\n\n self._self_attention_layers = _clones(layer, num_layers)\n self.norm = nn.LayerNorm(layer.size)\n\n def init_decoder_state(\n self, encoder_out: Dict[str, torch.LongTensor]\n ) -> Dict[str, torch.Tensor]:\n return {}\n\n @overrides\n def forward(\n self,\n previous_state: Dict[str, torch.Tensor],\n encoder_outputs: torch.Tensor,\n source_mask: torch.Tensor,\n previous_steps_predictions: torch.Tensor,\n previous_steps_mask: Optional[torch.Tensor] = None,\n ) -> Tuple[Dict[str, torch.Tensor], torch.Tensor]:\n\n # shape: (batch_size, n_srcs, max_input_sequence_length) ->\n # (batch_size, n_srcs, 1, max_input_sequence_length)\n source_mask = source_mask.unsqueeze(-2)\n future_mask = Variable(subsequent_mask(previous_steps_predictions.size(-2),\n device=source_mask.device)\n .type_as(source_mask.data))\n\n if previous_steps_mask is None:\n previous_steps_mask = future_mask\n else:\n previous_steps_mask = previous_steps_mask.unsqueeze(-2) & future_mask\n\n previous_steps_predictions = previous_steps_predictions * self._embed_scale\n if self._positional_embedder:\n previous_steps_predictions = self._positional_embedder(previous_steps_predictions)\n previous_steps_predictions = self._dropout(previous_steps_predictions)\n\n for layer in self._self_attention_layers:\n previous_steps_predictions = layer(previous_steps_predictions,\n encoder_outputs,\n source_mask,\n previous_steps_mask)\n\n decoded = 
self.norm(previous_steps_predictions)\n return {}, decoded\n\n\nclass DecoderLayer(nn.Module):\n \"\"\"\n A single layer of transformer decoder.\n Code taken from http://nlp.seas.harvard.edu/2018/04/03/attention.html\n \"\"\"\n def __init__(\n self,\n size: int,\n self_attn: MultiHeadedAttention,\n src_attn: AttentionCombiner,\n feed_forward: F,\n dropout: float,\n ) -> None:\n super().__init__()\n self.size = size\n self.self_attn = self_attn\n self.src_attn = src_attn\n self.feed_forward = feed_forward\n self.sublayer = _clones(SublayerConnection(size, dropout), 3)\n\n def forward(\n self, x: torch.Tensor, memory: torch.Tensor, src_mask: torch.Tensor, tgt_mask: torch.Tensor\n ) -> torch.Tensor:\n\n \"\"\"Follow Figure 1 (right) for connections.\"\"\"\n x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask))\n x = self.sublayer[1](x, lambda x: self.src_attn(x, memory, memory, src_mask))\n return self.sublayer[2](x, self.feed_forward)\n","sub_path":"code/models/transformer_ensemble_single_enc.py","file_name":"transformer_ensemble_single_enc.py","file_ext":"py","file_size_in_byte":24642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"141692291","text":"import torch\nfrom torch.utils.data import DataLoader\nfrom dataset.dataset import Landsat8Dataset, Landsat8DatasetHDF5\nfrom dataset.dataset import LocalRandomSampler\nfrom dataset.customTransform import DenormalizeS2\nfrom torchvision import transforms\n\nimport argparse\nfrom IPython import embed\n\nimport gc\nimport gdal\nfrom gdalconst import GA_ReadOnly\nfrom osgeo import osr\n\nimport numpy as np\nfrom PIL import Image\n\nparser = argparse.ArgumentParser(description='PyTorch Super Res Example')\n# hyper-parameters\nparser.add_argument('--batchSize', type=int, default=1, help='training batch size')\nparser.add_argument('--testBatchSize', type=int, default=1, help='testing batch size')\nparser.add_argument('--nEpochs', type=int, default=20, help='number of epochs to train for')\nparser.add_argument('--lr', type=float, default=0.001, help='Learning Rate. Default=0.01')\nparser.add_argument('--seed', type=int, default=123, help='random seed to use. 
Default=123')\n\n# model configuration\nparser.add_argument('--upscale_factor', '-uf', type=int, default=4, help=\"super resolution upscale factor\")\nparser.add_argument('--model', '-m', type=str, default='sub', help='choose which model is going to use')\n\nargs = parser.parse_args()\n\ndef main():\n train_csv = \"../dataset/l8s2-train.csv\"\n val_csv = \"../dataset/l8s2-val.csv\"\n test_csv = \"../dataset/l8s2-test.csv\"\n single_csv = \"../dataset/l8s2-predict-single.csv\"\n\n #====================================================================================================\n # Dataloader with HDF5\n #====================================================================================================\n input_transform = transforms.Compose([\n transforms.ToTensor()\n ])\n\n target_transform = transforms.Compose([\n transforms.Lambda(lambda x: [x[i].astype('float32') for i in range(13)]),\n transforms.Lambda(lambda x: [transforms.ToTensor()(x[i]) for i in range(13)])\n ])\n\n # train_set = Landsat8DatasetHDF5(train_csv,\n # input_transform = input_transform,\n # target_transform=target_transform)\n # train_data_loader = DataLoader(dataset=train_set, batch_size=args.batchSize, sampler = LocalRandomSampler(train_set))\n # train_data_loader = DataLoader(dataset=train_set, batch_size=args.batchSize, shuffle=False)\n\n # val_set = Landsat8DatasetHDF5(val_csv,\n # input_transform = input_transform,\n # target_transform=target_transform)\n # val_data_loader = DataLoader(dataset=val_set, batch_size=args.testBatchSize, shuffle=False)\n\n # test_set = Landsat8DatasetHDF5(test_csv,\n # input_transform = input_transform,\n # target_transform=target_transform)\n # test_data_loader = DataLoader(dataset=test_set, batch_size=args.testBatchSize, shuffle=False)\n\n single_set = Landsat8DatasetHDF5(single_csv,\n input_transform = input_transform,\n target_transform=target_transform)\n single_data_loader = DataLoader(dataset=single_set, batch_size=args.testBatchSize, shuffle=False)\n #====================================================================================================\n\n # L8\n # means = [489.7118, 591.63416, 826.2221, 948.7332, 1858.4872, 1864.6527, 1355.4669]\n # sds = [338.75378, 403.48727, 572.8161, 784.2508, 1208.3722, 1436.1204, 1138.7588]\n\n # S2\n means = [1440.2627, 1258.3445, 1214.9252, 1325.0135, 1486.8649, 1866.3961, 2085.1528, 2070.0884, 2272.1758, 931.276, 21.306807, 2370.4104, 1701.286]\n sds = [366.68463, 378.73654, 512.0519, 771.2212, 791.2124, 874.36127, 989.072, 1001.9915, 1093.7765, 552.87885, 28.292986, 1379.6288, 1097.3044]\n\n modelname = 'SubPixelCNN'\n modelname = 'SubPixelMaxPoolCNN'\n modelname = 'TransConvCNN'\n modelname = 'TransConvMaxPoolCNN'\n model = torch.load('save/'+modelname+'/model_path.pth')\n\n # model = torch.load('save/SubPixelCNN/model_path.pth')\n # model = torch.load('save/SubPixelMaxPoolCNN/model_path.pth')\n # model = torch.load('save/TransConvCNN/model_path.pth')\n # model = torch.load('save/TransConvMaxPoolCNN/model_path.pth')\n\n s2_path = 'S2A_MSIL1C_20171230T183751_N0206_R027_T11SLU_20171230T202151/T11SLU_20171230T183751'\n\n model.eval()\n\n iter_loader = iter(single_data_loader)\n for i in range(1):\n input, target = next(iter_loader)\n out = model(input.cuda())\n denorm = DenormalizeS2(means, sds)\n out_denorm = denorm(out)\n patch01 = out_denorm[0].reshape(500,500) \n patch09 = out_denorm[9].reshape(500,500)\n patch10 = out_denorm[10].reshape(500,500)\n\n patch05 = out_denorm[4].reshape(1500,1500) \n patch06 = 
out_denorm[5].reshape(1500,1500) \n patch07 = out_denorm[6].reshape(1500,1500) \n patch8A = out_denorm[8].reshape(1500,1500)\n patch11 = out_denorm[11].reshape(1500,1500)\n patch12 = out_denorm[12].reshape(1500,1500)\n\n patch02 = out_denorm[1].reshape(3000,3000) \n patch03 = out_denorm[2].reshape(3000,3000) \n patch04 = out_denorm[3].reshape(3000,3000) \n patch08 = out_denorm[7].reshape(3000,3000) \n\n #====================================================================================================\n # 01, 09, 10\n #====================================================================================================\n xstart = 475\n ystart = 475\n geotransform = (300000.0+(60.0*xstart), 60.0, 0.0, 3900000.0-(60.0*ystart), 0.0, -60.0)\n\n # 01\n print(\"Predicting B01\")\n nx = patch01.shape[0]\n ny = patch01.shape[1]\n\n ds = gdal.Open('/mnt/Storage2/Projects/dikti2019PakSani/dataset/sentinel2fim/la2017/'+s2_path+'_B01.tif')\n img = np.array(ds.GetRasterBand(1).ReadAsArray())\n projection = ds.GetProjection()\n\n dst_ds = gdal.GetDriverByName('GTiff').Create('../save/tif/'+modelname+'/pred_B01.tif', ny, nx, 1, gdal.GDT_Int16)\n dst_ds.SetGeoTransform(geotransform) # specify coords\n srs = osr.SpatialReference(wkt=ds.GetProjection()) # establish encoding\n dst_ds.SetProjection(srs.ExportToWkt()) # export coords to file\n dst_ds.GetRasterBand(1).WriteArray(patch01) # write band to the raster \n dst_ds.FlushCache() # write to disk\n dst_ds = None # save, close \n\n minval = patch01.min()\n maxval = patch01.max()\n patch01 = ((patch01.astype(np.float32)-minval)/(maxval-minval))*256\n patch01 = patch01.astype(np.uint8)\n pil_img = Image.fromarray(patch01)\n pil_img.save('../save/jpg/'+modelname+'/pred_B01.jpg')\n\n\n # 09\n print(\"Predicting B09\")\n nx = patch09.shape[0]\n ny = patch09.shape[1]\n\n ds = gdal.Open('/mnt/Storage2/Projects/dikti2019PakSani/dataset/sentinel2fim/la2017/'+s2_path+'_B09.tif')\n img = np.array(ds.GetRasterBand(1).ReadAsArray())\n projection = ds.GetProjection()\n\n dst_ds = gdal.GetDriverByName('GTiff').Create('../save/tif/'+modelname+'/pred_B09.tif', ny, nx, 1, gdal.GDT_Int16)\n dst_ds.SetGeoTransform(geotransform) # specify coords\n srs = osr.SpatialReference(wkt=ds.GetProjection()) # establish encoding\n dst_ds.SetProjection(srs.ExportToWkt()) # export coords to file\n dst_ds.GetRasterBand(1).WriteArray(patch09) # write band to the raster \n dst_ds.FlushCache() # write to disk\n dst_ds = None # save, close \n\n minval = patch09.min()\n maxval = patch09.max()\n patch09 = ((patch09.astype(np.float32)-minval)/(maxval-minval))*256\n patch09 = patch09.astype(np.uint8)\n pil_img = Image.fromarray(patch09)\n pil_img.save('../save/jpg/'+modelname+'/pred_B09.jpg')\n\n\n\n # 10\n print(\"Predicting B10\")\n nx = patch10.shape[0]\n ny = patch10.shape[1]\n\n ds = gdal.Open('/mnt/Storage2/Projects/dikti2019PakSani/dataset/sentinel2fim/la2017/'+s2_path+'_B10.tif')\n img = np.array(ds.GetRasterBand(1).ReadAsArray())\n projection = ds.GetProjection()\n\n dst_ds = gdal.GetDriverByName('GTiff').Create('../save/tif/'+modelname+'/pred_B10.tif', ny, nx, 1, gdal.GDT_Int16)\n dst_ds.SetGeoTransform(geotransform) # specify coords\n srs = osr.SpatialReference(wkt=ds.GetProjection()) # establish encoding\n dst_ds.SetProjection(srs.ExportToWkt()) # export coords to file\n dst_ds.GetRasterBand(1).WriteArray(patch10) # write band to the raster \n dst_ds.FlushCache() # write to disk\n dst_ds = None # save, close \n\n minval = patch10.min()\n maxval = patch10.max()\n patch10 = 
((patch10.astype(np.float32)-minval)/(maxval-minval))*256\n patch10 = patch10.astype(np.uint8)\n pil_img = Image.fromarray(patch10)\n pil_img.save('../save/jpg/'+modelname+'/pred_B10.jpg')\n\n\n\n #====================================================================================================\n # 02, 03, 04, 08\n #====================================================================================================\n xstart = 2850\n ystart = 2850\n geotransform = (300000.0+(10.0*xstart), 10.0, 0.0, 3900000.0-(10.0*ystart), 0.0, -10.0)\n\n # 02\n print(\"Predicting B02\")\n nx = patch02.shape[0]\n ny = patch02.shape[1]\n\n ds = gdal.Open('/mnt/Storage2/Projects/dikti2019PakSani/dataset/sentinel2fim/la2017/'+s2_path+'_B02.tif')\n img = np.array(ds.GetRasterBand(1).ReadAsArray())\n projection = ds.GetProjection()\n\n dst_ds = gdal.GetDriverByName('GTiff').Create('../save/tif/'+modelname+'/pred_B02.tif', ny, nx, 1, gdal.GDT_Int16)\n dst_ds.SetGeoTransform(geotransform) # specify coords\n srs = osr.SpatialReference(wkt=ds.GetProjection()) # establish encoding\n dst_ds.SetProjection(srs.ExportToWkt()) # export coords to file\n dst_ds.GetRasterBand(1).WriteArray(patch02) # write band to the raster \n dst_ds.FlushCache() # write to disk\n dst_ds = None # save, close \n\n minval = patch02.min()\n maxval = patch02.max()\n patch02 = ((patch02.astype(np.float32)-minval)/(maxval-minval))*256\n patch02 = patch02.astype(np.uint8)\n pil_img = Image.fromarray(patch02)\n pil_img.save('../save/jpg/'+modelname+'/pred_B02.jpg')\n\n # 03\n print(\"Predicting B03\")\n nx = patch03.shape[0]\n ny = patch03.shape[1]\n\n ds = gdal.Open('/mnt/Storage2/Projects/dikti2019PakSani/dataset/sentinel2fim/la2017/'+s2_path+'_B03.tif')\n img = np.array(ds.GetRasterBand(1).ReadAsArray())\n projection = ds.GetProjection()\n\n dst_ds = gdal.GetDriverByName('GTiff').Create('../save/tif/'+modelname+'/pred_B03.tif', ny, nx, 1, gdal.GDT_Int16)\n dst_ds.SetGeoTransform(geotransform) # specify coords\n srs = osr.SpatialReference(wkt=ds.GetProjection()) # establish encoding\n dst_ds.SetProjection(srs.ExportToWkt()) # export coords to file\n dst_ds.GetRasterBand(1).WriteArray(patch03) # write band to the raster \n dst_ds.FlushCache() # write to disk\n dst_ds = None # save, close \n\n minval = patch03.min()\n maxval = patch03.max()\n patch03 = ((patch03.astype(np.float32)-minval)/(maxval-minval))*256\n patch03 = patch03.astype(np.uint8)\n pil_img = Image.fromarray(patch03)\n pil_img.save('../save/jpg/'+modelname+'/pred_B03.jpg')\n\n\n # 04\n print(\"Predicting B04\")\n nx = patch04.shape[0]\n ny = patch04.shape[1]\n\n ds = gdal.Open('/mnt/Storage2/Projects/dikti2019PakSani/dataset/sentinel2fim/la2017/'+s2_path+'_B04.tif')\n img = np.array(ds.GetRasterBand(1).ReadAsArray())\n projection = ds.GetProjection()\n\n dst_ds = gdal.GetDriverByName('GTiff').Create('../save/tif/'+modelname+'/pred_B04.tif', ny, nx, 1, gdal.GDT_Int16)\n dst_ds.SetGeoTransform(geotransform) # specify coords\n srs = osr.SpatialReference(wkt=ds.GetProjection()) # establish encoding\n dst_ds.SetProjection(srs.ExportToWkt()) # export coords to file\n dst_ds.GetRasterBand(1).WriteArray(patch04) # write band to the raster \n dst_ds.FlushCache() # write to disk\n dst_ds = None # save, close \n\n minval = patch04.min()\n maxval = patch04.max()\n patch04 = ((patch04.astype(np.float32)-minval)/(maxval-minval))*256\n patch04 = patch04.astype(np.uint8)\n pil_img = Image.fromarray(patch04)\n pil_img.save('../save/jpg/'+modelname+'/pred_B04.jpg')\n\n\n # 08\n print(\"Predicting 
B08\")\n nx = patch08.shape[0]\n ny = patch08.shape[1]\n\n ds = gdal.Open('/mnt/Storage2/Projects/dikti2019PakSani/dataset/sentinel2fim/la2017/'+s2_path+'_B08.tif')\n img = np.array(ds.GetRasterBand(1).ReadAsArray())\n projection = ds.GetProjection()\n\n dst_ds = gdal.GetDriverByName('GTiff').Create('../save/tif/'+modelname+'/pred_B08.tif', ny, nx, 1, gdal.GDT_Int16)\n dst_ds.SetGeoTransform(geotransform) # specify coords\n srs = osr.SpatialReference(wkt=ds.GetProjection()) # establish encoding\n dst_ds.SetProjection(srs.ExportToWkt()) # export coords to file\n dst_ds.GetRasterBand(1).WriteArray(patch08) # write band to the raster \n dst_ds.FlushCache() # write to disk\n dst_ds = None # save, close \n\n minval = patch08.min()\n maxval = patch08.max()\n patch08 = ((patch08.astype(np.float32)-minval)/(maxval-minval))*256\n patch08 = patch08.astype(np.uint8)\n pil_img = Image.fromarray(patch08)\n pil_img.save('../save/jpg/'+modelname+'/pred_B08.jpg')\n\n\n #====================================================================================================\n # 05, 06, 07, 8A, 11, 12\n #====================================================================================================\n xstart = 1425\n ystart = 1425\n geotransform = (300000.0+(20.0*xstart), 20.0, 0.0, 3900000.0-(20.0*ystart), 0.0, -20.0)\n\n\n # 05\n print(\"Predicting B05\")\n nx = patch05.shape[0]\n ny = patch05.shape[1]\n\n ds = gdal.Open('/mnt/Storage2/Projects/dikti2019PakSani/dataset/sentinel2fim/la2017/'+s2_path+'_B05.tif')\n img = np.array(ds.GetRasterBand(1).ReadAsArray())\n projection = ds.GetProjection()\n\n dst_ds = gdal.GetDriverByName('GTiff').Create('../save/tif/'+modelname+'/pred_B05.tif', ny, nx, 1, gdal.GDT_Int16)\n dst_ds.SetGeoTransform(geotransform) # specify coords\n srs = osr.SpatialReference(wkt=ds.GetProjection()) # establish encoding\n dst_ds.SetProjection(srs.ExportToWkt()) # export coords to file\n dst_ds.GetRasterBand(1).WriteArray(patch05) # write band to the raster \n dst_ds.FlushCache() # write to disk\n dst_ds = None # save, close \n\n minval = patch05.min()\n maxval = patch05.max()\n patch05 = ((patch05.astype(np.float32)-minval)/(maxval-minval))*256\n patch05 = patch05.astype(np.uint8)\n pil_img = Image.fromarray(patch05)\n pil_img.save('../save/jpg/'+modelname+'/pred_B05.jpg')\n\n\n # 06\n print(\"Predicting B06\")\n nx = patch06.shape[0]\n ny = patch06.shape[1]\n\n ds = gdal.Open('/mnt/Storage2/Projects/dikti2019PakSani/dataset/sentinel2fim/la2017/'+s2_path+'_B06.tif')\n img = np.array(ds.GetRasterBand(1).ReadAsArray())\n projection = ds.GetProjection()\n\n dst_ds = gdal.GetDriverByName('GTiff').Create('../save/tif/'+modelname+'/pred_B06.tif', ny, nx, 1, gdal.GDT_Int16)\n dst_ds.SetGeoTransform(geotransform) # specify coords\n srs = osr.SpatialReference(wkt=ds.GetProjection()) # establish encoding\n dst_ds.SetProjection(srs.ExportToWkt()) # export coords to file\n dst_ds.GetRasterBand(1).WriteArray(patch06) # write band to the raster \n dst_ds.FlushCache() # write to disk\n dst_ds = None # save, close \n\n minval = patch06.min()\n maxval = patch06.max()\n patch06 = ((patch06.astype(np.float32)-minval)/(maxval-minval))*256\n patch06 = patch06.astype(np.uint8)\n pil_img = Image.fromarray(patch06)\n pil_img.save('../save/jpg/'+modelname+'/pred_B06.jpg')\n\n\n\n # 07\n print(\"Predicting B07\")\n nx = patch07.shape[0]\n ny = patch07.shape[1]\n\n ds = gdal.Open('/mnt/Storage2/Projects/dikti2019PakSani/dataset/sentinel2fim/la2017/'+s2_path+'_B07.tif')\n img = 
np.array(ds.GetRasterBand(1).ReadAsArray())\n projection = ds.GetProjection()\n\n dst_ds = gdal.GetDriverByName('GTiff').Create('../save/tif/'+modelname+'/pred_B07.tif', ny, nx, 1, gdal.GDT_Int16)\n dst_ds.SetGeoTransform(geotransform) # specify coords\n srs = osr.SpatialReference(wkt=ds.GetProjection()) # establish encoding\n dst_ds.SetProjection(srs.ExportToWkt()) # export coords to file\n dst_ds.GetRasterBand(1).WriteArray(patch07) # write band to the raster \n dst_ds.FlushCache() # write to disk\n dst_ds = None # save, close \n\n minval = patch07.min()\n maxval = patch07.max()\n patch07 = ((patch07.astype(np.float32)-minval)/(maxval-minval))*256\n patch07 = patch07.astype(np.uint8)\n pil_img = Image.fromarray(patch07)\n pil_img.save('../save/jpg/'+modelname+'/pred_B07.jpg')\n\n\n\n # 8A\n print(\"Predicting B8A\")\n nx = patch8A.shape[0]\n ny = patch8A.shape[1]\n\n ds = gdal.Open('/mnt/Storage2/Projects/dikti2019PakSani/dataset/sentinel2fim/la2017/'+s2_path+'_B8A.tif')\n img = np.array(ds.GetRasterBand(1).ReadAsArray())\n projection = ds.GetProjection()\n\n dst_ds = gdal.GetDriverByName('GTiff').Create('../save/tif/'+modelname+'/pred_B8A.tif', ny, nx, 1, gdal.GDT_Int16)\n dst_ds.SetGeoTransform(geotransform) # specify coords\n srs = osr.SpatialReference(wkt=ds.GetProjection()) # establish encoding\n dst_ds.SetProjection(srs.ExportToWkt()) # export coords to file\n dst_ds.GetRasterBand(1).WriteArray(patch8A) # write band to the raster \n dst_ds.FlushCache() # write to disk\n dst_ds = None # save, close \n\n minval = patch8A.min()\n maxval = patch8A.max()\n patch8A = ((patch8A.astype(np.float32)-minval)/(maxval-minval))*256\n patch8A = patch8A.astype(np.uint8)\n pil_img = Image.fromarray(patch8A)\n pil_img.save('../save/jpg/'+modelname+'/pred_B8A.jpg')\n\n\n\n # 11\n print(\"Predicting B11\")\n nx = patch11.shape[0]\n ny = patch11.shape[1]\n\n ds = gdal.Open('/mnt/Storage2/Projects/dikti2019PakSani/dataset/sentinel2fim/la2017/'+s2_path+'_B11.tif')\n img = np.array(ds.GetRasterBand(1).ReadAsArray())\n projection = ds.GetProjection()\n\n dst_ds = gdal.GetDriverByName('GTiff').Create('../save/tif/'+modelname+'/pred_B11.tif', ny, nx, 1, gdal.GDT_Int16)\n dst_ds.SetGeoTransform(geotransform) # specify coords\n srs = osr.SpatialReference(wkt=ds.GetProjection()) # establish encoding\n dst_ds.SetProjection(srs.ExportToWkt()) # export coords to file\n dst_ds.GetRasterBand(1).WriteArray(patch11) # write band to the raster \n dst_ds.FlushCache() # write to disk\n dst_ds = None # save, close \n\n minval = patch11.min()\n maxval = patch11.max()\n patch11 = ((patch11.astype(np.float32)-minval)/(maxval-minval))*256\n patch11 = patch11.astype(np.uint8)\n pil_img = Image.fromarray(patch11)\n pil_img.save('../save/jpg/'+modelname+'/pred_B11.jpg')\n\n\n\n # 12\n print(\"Predicting B12\")\n nx = patch12.shape[0]\n ny = patch12.shape[1]\n\n ds = gdal.Open('/mnt/Storage2/Projects/dikti2019PakSani/dataset/sentinel2fim/la2017/'+s2_path+'_B12.tif')\n img = np.array(ds.GetRasterBand(1).ReadAsArray())\n projection = ds.GetProjection()\n\n dst_ds = gdal.GetDriverByName('GTiff').Create('../save/tif/'+modelname+'/pred_B12.tif', ny, nx, 1, gdal.GDT_Int16)\n dst_ds.SetGeoTransform(geotransform) # specify coords\n srs = osr.SpatialReference(wkt=ds.GetProjection()) # establish encoding\n dst_ds.SetProjection(srs.ExportToWkt()) # export coords to file\n dst_ds.GetRasterBand(1).WriteArray(patch12) # write band to the raster \n dst_ds.FlushCache() # write to disk\n dst_ds = None # save, close \n\n minval = patch12.min()\n 
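# The min-max stretch to 8 bits is only for the JPEG preview; the GeoTIFF\n # written above keeps the unscaled Int16 values.\n 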
maxval = patch12.max()\n patch12 = ((patch12.astype(np.float32)-minval)/(maxval-minval))*256\n patch12 = patch12.astype(np.uint8)\n pil_img = Image.fromarray(patch12)\n pil_img.save('../save/jpg/'+modelname+'/pred_B12.jpg')\n\n\nif __name__ == '__main__':\n main()","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":20574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"376038598","text":"__author__ = 'Lynn'\r\n__email__ = 'lynnn.hong@gmail.com'\r\n__date__ = '4/6/2016'\r\n\r\nimport math\r\nimport re\r\nimport json\r\nimport time\r\nfrom datetime import datetime, timedelta\r\nfrom newsCrawler3.newsCrawler import NewsCrawler\r\nfrom newsCrawler3.requestsHandler import Req\r\n\r\n\r\nclass NaverNews(NewsCrawler):\r\n # represent Naver news crawler\r\n\r\n def __init__(self, cnfDict, dbConnection, re_dict):\r\n NewsCrawler.__init__(self)\r\n self.cnfDict = cnfDict\r\n self.mysql = dbConnection\r\n self.re_dict = re_dict\r\n self.r = Req()\r\n self.url_format = \"http://news.naver.com/main/main.nhn?mode=LSD&mid=shm&sid1=%i#&date=%s 00:00:00&page=%i\"\r\n self.post_url_format = \"http://news.naver.com/main/mainNews.nhn?componentId=%i&date=%s 00:00:00&page=%i\"\r\n self.news_url_format = \"http://news.naver.com/main/read.nhn?mode=LSD&mid=shm&sid1=%i&oid=%s&aid=%s\"\r\n\r\n def search_news(self, query):\r\n print(\"The entire date range in %s to %s\\r\\n\" % (self.cnfDict['start_date'], self.cnfDict['end_date']))\r\n url_format = \"http://news.naver.com/main/search/search.nhn?refresh=&so=%s&stPhoto=&stPaper=&stRelease=&detail=0&rcsection=&query=%s&x=24&y=9&sm=all.basic&pd=4&startDate=%s&endDate=%s&page=%i\"\r\n restStart = start_date = self.cnfDict['start_date']\r\n restEnd = end_date = self.cnfDict['end_date']\r\n while True:\r\n while True:\r\n page = 1\r\n url = url_format % (self.cnfDict['order'], str(query.replace(\" \", \"+\").encode(\"cp949\")).strip(\"b\").replace(\"\\\\x\", \"%\").strip(\"'\"), start_date, end_date, page)\r\n err_code, soup = self.r.access_page(url, self.cnfDict['retry'])\r\n if err_code != 1:\r\n print(\"Can't load the page %i\" % page)\r\n continue\r\n try:\r\n resultCheck = soup.find('div', {'class': 'result_header'}).find('span', {'class': 'result_num'}).text.strip()\r\n except Exception as e:\r\n print(e)\r\n print(\"There's a problem with page %i\" % page)\r\n continue\r\n entireCnt = int(re.search(r\"\\(.+ / (\\d+)건\\)\", resultCheck.replace(\",\", \"\")).group(1))\r\n if entireCnt <= 4000 or start_date == end_date:\r\n if start_date == end_date:\r\n print(\"The start date and the end date is now same. 
entire count: %i\\r\\n\" % entireCnt)\r\n print(\"Range in %s to %s\\r\\n\" % (start_date, end_date))\r\n last_page = self.check_page(url)\r\n print(\"There're %i pagelists...\" % last_page)\r\n print(str(datetime.now()) + \"\\r\\n\")\r\n for page in list(range(1, last_page + 1)):\r\n if page == 1:\r\n print(\"Start page: %i\" % page, end=\"\")\r\n else:\r\n print(\", %i\" % page, end=\"\")\r\n url = url_format % (self.cnfDict['order'], str(query.replace(\" \", \"+\").encode(\"cp949\")).strip(\"b\").replace(\"\\\\x\", \"%\").strip(\"'\"), start_date, end_date, page)\r\n err_code, soup = self.r.access_page(url, self.cnfDict['retry'])\r\n if err_code != 1:\r\n print(\"Can't load the page %i\" % page)\r\n continue\r\n for item in soup.findAll(\"ul\", {\"class\": \"srch_lst\"}):\r\n a_id = self.get_article(query, item, 0, 0, \"\")\r\n restStart = str((datetime.strptime(end_date, \"%Y-%m-%d\") + timedelta(days=1)).date())\r\n break\r\n else:\r\n print(\"Reduce date interval...\")\r\n dateInterval = datetime.strptime(end_date, \"%Y-%m-%d\") - datetime.strptime(start_date, \"%Y-%m-%d\")\r\n dateInterval = math.floor(int(dateInterval.days)/2)\r\n end_date = str((datetime.strptime(start_date, \"%Y-%m-%d\") + timedelta(days=dateInterval)).date())\r\n start_date = restStart\r\n end_date = restEnd\r\n\r\n if start_date > end_date:\r\n break\r\n\r\n def get_article(self, query, item, sub=0, mother_id=0, mother_url=\"\"):\r\n content, title, press, a_date, url1, url2, related_group = self.get_basic_info(item, sub)\r\n # if naver news, url1 is go_naver link. if external news, url1 is external news link.\r\n rep_url = url1\r\n if sub == 0 or (sub in (1, 2) and rep_url != mother_url):\r\n # not sub news OR (sub news AND not same with mother url)\r\n check = self.mysql.check_inserted_or_not(url1)\r\n if url2 is not None: # if there is naver news url\r\n a_id = self.getNewsAndComment(\"\", url1, check)\r\n #print(\"no article... 
skip....\") #STOP here\r\n #a_id = 0 # for related news\r\n else: # only has outer url on the press\r\n if check[0] == 1:\r\n # already retrieved url\r\n a_id = check[1][0]\r\n self.mysql.insert_srch_query(query, a_id)\r\n if sub in (1, 2):\r\n self.mysql.update_rel_article(mother_id, a_id) # STOP here\r\n else:\r\n # external news AND not yet retrieved\r\n err_code, soup = self.r.access_page(url1, self.cnfDict['retry'])\r\n if err_code != 1:\r\n a_id = self.mysql.insert_url_n_srch_query(query, url1, err_code) # STOP\r\n else: # there's no error\r\n a_cat = \"\"\r\n a_id = self.mysql.insert_url_n_srch_query(query, url1, err_code)\r\n isNaver = 0\r\n dsc = self.get_dsc(sub, content)\r\n from newsCrawler3.externalNews import ExternalNews\r\n ext = ExternalNews()\r\n err_code, a_body = ext.get_external_news(soup, dsc, url1, self.re_dict)\r\n if err_code != 1:\r\n self.mysql.update_error_code(a_id, err_code) # STOP\r\n else:\r\n r_datetime = datetime.now()\r\n var_tuple = (a_id, \"naver\", press, title.strip('\"'), a_body.replace(\"\\n\", \" \").strip(), a_date, a_cat, isNaver, r_datetime)\r\n self.mysql.insert_news(var_tuple)\r\n if sub in (1, 2):\r\n return a_id\r\n\r\n # if there're related news list, go ahead\r\n if sub == 0 and related_group is not None and related_group.find(\"span\", {\"class\": \"ico_bu\"}):\r\n self.get_related_news(query, related_group, a_id, rep_url)\r\n return 0\r\n\r\n\r\n def get_related_news(self, query, related_group, mother_id, mother_url):\r\n btn_more = related_group.find(\"a\", {\"class\": \"btn_more\"})\r\n\r\n # if there're more related news than 4\r\n if btn_more is not None:\r\n more_link = \"http://news.naver.com/main/search/search.nhn%s\" % btn_more.get('href')\r\n err_code, soup = self.r.access_page(more_link, self.cnfDict['retry'])\r\n if err_code != 1:\r\n pass\r\n else:\r\n for item in soup.findAll(\"ul\", {\"class\": \"srch_lst\"}):\r\n a_id= self.get_article(query, item, 2, mother_id, mother_url)\r\n # go to redirect again(this is new list page)\r\n if a_id is None:\r\n continue\r\n self.mysql.update_rel_article(mother_id, a_id)\r\n # if there're more related news equal or less than 4\r\n else:\r\n for item in related_group.findAll(\"li\"): # each related news items\r\n a_id = self.get_article(query, item, 1, mother_id, mother_url)\r\n self.mysql.update_rel_article(mother_id, a_id)\r\n\r\n\r\n def getNewsAndComment(self, section, newsLink, check):\r\n if check[0] == 1:\r\n # already retrieved url\r\n a_id = check[1][0]\r\n else:\r\n a_id = self.mysql.insert_url(newsLink)\r\n var_tuple = self.getNewsBody(a_id, section, newsLink)\r\n if var_tuple[0] in (404, 90, 91, 95): #404 error, sports, entertain, else\r\n # error\r\n self.mysql.update_error_code(a_id, var_tuple[0])\r\n else:\r\n self.mysql.insert_news(var_tuple)\r\n if self.cnfDict['comment'] is True:\r\n oid = re.findall(r\"oid=(\\d+)\", newsLink)[0]\r\n aid = re.findall(r\"aid=(\\d+)\", newsLink)[0]\r\n gno = \"news\" + oid + \",\" + aid\r\n while True:\r\n headers = {\r\n 'accept-encoding': 'gzip, deflate, sdch',\r\n 'accept-language': 'ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4',\r\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.111 Safari/537.36',\r\n 'accept': '*/*',\r\n 'referer': 'http://news.naver.com/main/ranking/read.nhn?mid=etc&sid1=111&rankingType=memo_week&oid=421&aid=0001843772&date=20160120&type=1&rankingSectionId=100&rankingSeq=1',\r\n 'cookie': 'NNB=QECI6G5YG4HFM; 
npic=WXjITWIJ4tuGHKilzfw6Nljt9irLqDTyXcPPQPdLWDv9+/zMx8hTr42F7iTT73h0CA==; BMR=s=1449599642101&r=http%3A%2F%2Fnstore.naver.com%2Fappstore%2Fweb%2Fdetail.nhn%3FproductNo%3D1483986&r2=https%3A%2F%2Fwww.google.co.kr%2F; nid_iplevel=1; nid_inf=-2131057769; page_uid=SqW7SdpyLflsscEivZossssssud-397948; _naver_usersession_=DSLHgTQaLsQL5PF8I4FVLQ==',\r\n }\r\n timeout = 10.0\r\n var_sql, var_tuple = self.getComment(a_id, gno, headers, timeout)\r\n break\r\n if var_tuple == ():\r\n #print(\"There's no comment. skip...\")\r\n pass\r\n else:\r\n #print(\"Insert to DB\")\r\n self.mysql.insert_comment(var_sql, var_tuple)\r\n time.sleep(2)\r\n return a_id #temp\r\n\r\n\r\n def getNewsBody(self, a_id, section, newsLink):\r\n err_code, soup = self.r.access_page(newsLink, self.cnfDict['retry'])\r\n if err_code == 1:\r\n try:\r\n header = soup.find('div', {'class': 'article_header'}).find('div', {'class': 'article_info'})\r\n except AttributeError: # naver entertainment news page\r\n if soup.find(\"div\", {\"class\": \"error_msg 404\"}) is not None:\r\n return(404,)\r\n elif newsLink.startswith(\"http://sports.news.naver.com\"):\r\n return(90,)\r\n else:\r\n try:\r\n redirect_url = soup.find('meta', {'property': 'og:url'})['content']\r\n if redirect_url.startswith(\"http://entertain.naver.com\"):\r\n return(91,) # just for now...\r\n else:\r\n print(\"Another naver child news site\")\r\n print(newsLink)\r\n return(95,)\r\n except Exception as e:\r\n print(\"Another exception page...\")\r\n print(e)\r\n print(newsLink)\r\n return(95,)\r\n press = soup.find('meta', {'property': 'me2:category1'})['content']\r\n title = header.find('h3', {'id': 'articleTitle'}).text\r\n a_datetime = header.find('span', {'class': 't11'}).text\r\n a_datetime = datetime.strptime(a_datetime, '%Y-%m-%d %H:%M')\r\n a_body = soup.find('div', {'id': 'articleBodyContents'}).text.strip() # should remove link\r\n isNaver = 1\r\n r_datetime = datetime.now()\r\n return a_id, \"naver\", press, title.replace(\"'\", \"\\'\"), a_body.replace(\"'\", \"\\'\").replace(\"\\n\", \" \"), \\\r\n a_datetime, section, isNaver, r_datetime\r\n else:\r\n return(404,)\r\n\r\n\r\n def getComment(self, a_id, gno, headers, timeout):\r\n var_sql_list = list()\r\n var_tuple = tuple()\r\n page = 1\r\n rdic = dict()\r\n rdic['count'] = dict()\r\n rdic['count']['comment'] = 0\r\n url = \"https://apis.naver.com/commentBox/cbox5/web_naver_list_jsonp.json?ticket=news&templateId=default_politics&_callback=window.__cbox_jindo_callback._8858&lang=ko&country=KR&objectId=\" + gno + \"&categoryId=&pageSize=20&indexSize=10&groupId=&page=%i&initialize=true&useAltSort=true&replyPageSize=100&moveTo=&sort=favorite&userType=\"\r\n while True:\r\n if page % 10 == 1:\r\n if page == 1:\r\n pass\r\n else:\r\n print(\", %i\" % page, end=\"\")\r\n e_num, res = self.r.access_page(url % page, self.cnfDict['retry'], headers=headers)\r\n if e_num == 1:\r\n try:\r\n t = re.search(r\"window\\.__cbox_jindo_callback\\._8858\\((.+)\\)\", res).group(1)\r\n rdic = json.loads(t)['result']\r\n anch = True\r\n except Exception as e:\r\n print(e)\r\n print(\"comment parsing error with gno %s...\" % gno)\r\n anch = False\r\n pass\r\n if anch is True:\r\n if 'commentList' not in rdic.keys():\r\n pass\r\n else:\r\n for reply in rdic['commentList']:\r\n maskUserId = reply['maskedUserId']\r\n encodedUserId = reply['userIdNo']\r\n if encodedUserId is None:\r\n encodedUserId = \"\"\r\n commentReplyNo = reply['commentNo']\r\n sRegDate = reply['regTime']\r\n if \"오전\" in sRegDate:\r\n sRegDate = 
sRegDate.replace(\"오전\", \"AM\")\r\n elif \"오후\" in sRegDate:\r\n sRegDate = sRegDate.replace(\"오후\", \"PM\")\r\n sRegDate = datetime.strptime(sRegDate, \"%Y-%m-%dT%H:%M:%S+0900\")\r\n snsType = \"\"\r\n incomingType = \"\"\r\n badCnt = reply['antipathyCount']\r\n goodCnt = reply['sympathyCount']\r\n likeCnt = goodCnt-badCnt\r\n replyCnt = reply['replyCount']\r\n content = reply['contents'].replace(\"\\n\\r\", \" \").replace(\"\\n\", \" \")\r\n if reply['best'] is False:\r\n isBest = 0\r\n elif reply['best'] is True:\r\n isBest = 1\r\n c_grade = \"\"\r\n c_pnt = 0\r\n c_nextGradePnt = 0\r\n var_sql_list.append(\"(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\")\r\n var_tuple += (a_id, commentReplyNo, maskUserId, encodedUserId, sRegDate, content, badCnt, goodCnt, likeCnt, replyCnt, incomingType, snsType, isBest, c_grade, c_pnt, c_nextGradePnt)\r\n else:\r\n pass\r\n else:\r\n pass\r\n try:\r\n if page < math.ceil(rdic['count']['comment']/20):\r\n page += 1\r\n time.sleep(0.1)\r\n else:\r\n break\r\n except KeyError:\r\n print(\"KeyError...\")\r\n break\r\n return(\",\".join(var_sql_list).strip(\",\"), var_tuple)\r\n\r\n\r\n def get_dsc(self, sub, content):\r\n if sub == 0:\r\n # get news description part: to find text body easily\r\n dsc_list = content.find(\"p\", {\"class\": \"dsc\"}).get_text(strip=False).strip().split(\"...\")\r\n dsc_list = [x for x in dsc_list if x != \"\" and x != \" \" and x != \".\"]\r\n # if there's description, split it using '다.'\r\n if len(dsc_list) != 0:\r\n try:\r\n dsc = dsc_list[-1].split(\"다.\")[-2].strip() + \"다.\"\r\n except:\r\n dsc = dsc_list[-1].split(\"다.\")[0].strip() + \"다.\"\r\n # if there's no descripton, then use '다.' instead\r\n else:\r\n dsc = \"다.\"\r\n else: # sub in (1, 2)\r\n dsc = \"다.\"\r\n return dsc\r\n\r\n\r\n def get_basic_info(self, item, sub):\r\n if sub in (0, 2):\r\n content = item.find(\"div\", {\"class\": \"ct\"})\r\n info = content.find(\"div\", {\"class\": \"info\"})\r\n title = content.find(\"a\", {\"class\": \"tit\"}).get_text(strip=True)\r\n press = info.find(\"span\", {\"class\": \"press\"}).get_text(strip=True).replace(\"\\'\", \"\\\\'\")\r\n date_text = info.find(\"span\", {\"class\": \"time\"}).get_text(strip=True).strip(\"전\")\r\n date = self.get_news_date(date_text)\r\n url1 = content.find(\"a\", {\"class\": \"tit\"}).get('href')\r\n go_naver = info.find(\"a\", {\"class\": \"go_naver\"})\r\n if sub == 0:\r\n related_group = content.find(\"div\", {\"class\": \"related_group\"})\r\n else:# sub == 2:\r\n related_group = \"\"\r\n else: #sub == 1\r\n title = item.find(\"a\").get_text(strip=True).replace(\"\\'\", \"\\\\'\")\r\n press = item.find(\"span\", {\"class\": \"press\"}).get_text(strip=True).replace(\"\\'\", \"\\\\'\")\r\n date_text = item.find(\"span\", {\"class\": \"time\"}).get_text(strip=True).strip(\"전\")\r\n date = self.get_news_date(date_text)\r\n url1 = item.find(\"a\").get('href')\r\n go_naver = item.find(\"a\", {\"class\": \"go_naver\"})\r\n content = \"\"\r\n related_group = \"\"\r\n if go_naver is not None:\r\n url2 = url1\r\n url1 = go_naver.get('href')\r\n else:\r\n url2 = None\r\n return (content, title, press, date, url1, url2, related_group)\r\n\r\n\r\n def get_news_date(self, date_text):\r\n if date_text.endswith(\"분\"):\r\n date = datetime.now() - timedelta(minutes=int(date_text.split(\"분\")[0]))\r\n elif date_text.endswith(\"시간\"):\r\n date = datetime.now() - timedelta(hours=int(date_text.split(\"시간\")[0]))\r\n elif date_text.endswith(\"일\"):\r\n date = datetime.today() - 
timedelta(days=int(date_text.split(\"일\")[0]))\r\n else:\r\n date = date_text.replace(\".\", \"-\") + \" 00:00:00\"\r\n return date\r\n\r\n\r\n def check_page(self, nextUrl):\r\n print(\"start counting the entire pages...\")\r\n print(str(datetime.now()) + \"\\n\")\r\n current_no = 1\r\n next_no = 1\r\n while True:\r\n if current_no > next_no:\r\n break\r\n err_code, soup = self.r.access_page(nextUrl, self.cnfDict['retry'])\r\n current_no = next_no\r\n\r\n try:\r\n nextUrl = soup.find(\"div\", {\"class\": \"paging\"}).findAll(\"a\")[-1].get('href')\r\n nextUrl = \"http://news.naver.com\" + nextUrl\r\n next_no = int(nextUrl.split(\"&page=\")[1])\r\n except:\r\n current_no = 1\r\n break\r\n return current_no # int\r\n\r\n def search_all_section(self, section, sectionDict, sectionDictKo):\r\n start_date = datetime.strptime(self.cnfDict['start_date'], \"%Y-%m-%d\").date()\r\n end_date = datetime.strptime(self.cnfDict['end_date'], \"%Y-%m-%d\").date()\r\n while start_date <= end_date:\r\n print(\"start date %s\" % str(start_date))\r\n print(datetime.now())\r\n page = 1\r\n url = self.url_format % (sectionDict[section], str(start_date), page)\r\n err_code, soup = self.r.access_page(url, self.cnfDict['retry'])\r\n if err_code != 1:\r\n print(\"Error occurred on date: %s\" % str(start_date))\r\n else:\r\n comp_id = int(soup.find(\"a\", {\"id\": \"mainNewsComponentId\"})['name'])\r\n url = self.post_url_format % (comp_id, str(start_date), page)\r\n err_code, html = self.r.access_page(url, self.cnfDict['retry'], headers=None, isSoup=False)\r\n if err_code != 1:\r\n print(\"Error occurred on date: %s\" % str(start_date))\r\n else:\r\n lastPage = json.loads(html)['pagerInfo']['lastPage']\r\n while page <= lastPage:\r\n url = self.post_url_format % (comp_id, str(start_date), page)\r\n err_code, html = self.r.access_page(url, self.cnfDict['retry'], headers=None, isSoup=False)\r\n if err_code != 1:\r\n print(\"Error occurred on date %s and page %i\" % (str(start_date), page))\r\n else:\r\n for item in json.loads(html)['itemList']:\r\n aid = item['articleId']\r\n oid = item['officeId']\r\n news_url = self.news_url_format % (sectionDict[section], oid, aid)\r\n self.getNews(sectionDictKo[section], news_url)\r\n page += 1\r\n start_date = start_date + timedelta(days=1)\r\n\r\n def getNews(self, section, newsLink):\r\n check = self.mysql.check_inserted_or_not(newsLink)\r\n a_id = self.getNewsAndComment(section, newsLink, check)\r\n '''if check[0] == 1:\r\n # already retrieved url\r\n a_id = check[1][0]\r\n else:\r\n a_id = self.mysql.insert_url(newsLink)\r\n var_tuple = self.getNewsBody(a_id, section, newsLink)\r\n if var_tuple[0] in (404, 90, 91, 95): #404 error, sports, entertain, else\r\n # error\r\n self.mysql.update_error_code(a_id, var_tuple[0])\r\n else:\r\n self.mysql.insert_news(var_tuple)'''\r\n","sub_path":"crawler/newsCrawler3/naverNews.py","file_name":"naverNews.py","file_ext":"py","file_size_in_byte":22423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"515029659","text":"from bandits.bandit import Bandit\nfrom distributions.betadistribution import BetaDistribution\nimport numpy as np\nimport copy\nfrom scipy.stats import beta\nimport matplotlib.pyplot as plt\n\nclass InfluenceLimiter_study():\n def __init__(self, bandit, agency, reward_reports, initial_reputation, track_reputation= True):\n self.bandit = bandit\n self.agency = agency\n self.posterior_history = {}\n self.prediction_history = {}\n self.reward_reports = reward_reports\n 
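# note (added): reward_reports is stored here but never read again anywhere else in this file\n        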
self.initial_reputation = initial_reputation\n self.track_reputation = track_reputation\n super().__init__()\n \n def reset(self):\n self.bandit.reset()\n self.posterior_history = {}\n self.prediction_history = {}\n self.__initialize_reputations()\n\n def __initialize_reputations(self):\n self.agent_reputations = [self.initial_reputation for agent in self.agency.agents]\n # self.agent_reputations = [int(agent.trustworthy == True) for agent in self.agency.agents]\n if self.track_reputation:\n self.agent_reputations_track = [[self.initial_reputation] for agent in self.agency.agents]\n\n def plot_posterior_history(self, arm):\n x = np.linspace(0, 1.0, 100)\n for (index, dist) in enumerate(self.prediction_history[arm]):\n a, b = dist.get_params()\n y = beta.pdf(x, a, b)\n plt.plot(x, y, label=index)\n plt.legend()\n plt.show()\n \n def _compute_IL_posterior(self, t):\n # print(\"reputations:\", self.agent_reputations)\n for (arm_index, arm) in enumerate(self.bandit.arms):\n # self.posterior_history[arm_index] = [BetaDistribution(1, 1)]\n self.prediction_history[arm_index]=[]\n\n pre_alpha, pre_beta = copy.deepcopy(arm.reward_dist.get_params())\n new_mean = copy.deepcopy(arm.reward_dist.mean())\n weight = 1\n running_weighted_sum = weight * new_mean\n q_tilde = running_weighted_sum/weight\n\n self.posterior_history[arm_index] = [BetaDistribution(q_tilde, 1-q_tilde)]\n k = 2/(len(self.agency.agents) + 1)\n prev_ema = self._compute_SMA(arm_index)\n \n #iterate through each agent and process their report\n for agent_index, agent in enumerate(self.agency.agents):\n gamma = min(1, self.agent_reputations[agent_index])\n current_ema = (self.agency.agent_reports[agent_index][arm_index] - prev_ema) * k + prev_ema\n alpha_j = current_ema * (agent.num_reports) \n beta_j = (1-current_ema) * (agent.num_reports)\n\n self.prediction_history[arm_index].append(BetaDistribution(alpha_j, beta_j))\n\n q_j = copy.deepcopy(current_ema)\n\n running_weighted_sum += gamma * q_j\n weight += gamma\n\n q_tilde = running_weighted_sum/weight\n\n alpha_tilde = q_tilde * (agent.num_reports) \n beta_tilde = (1-q_tilde) * (agent.num_reports)\n self.posterior_history[arm_index].append(BetaDistribution(alpha_tilde, beta_tilde))\n \n # print(\"final:\", alpha_tilde + pre_alpha, beta_tilde + pre_beta)\n arm.influence_reward_dist.set_params(alpha_tilde + pre_alpha, beta_tilde + pre_beta)\n\n def select_arm(self, t, influence_limit = True):\n self._compute_IL_posterior(t)\n return self.bandit.select_arm(t, influence_limit = influence_limit)\n\n def _update_reputations(self, arm, reward):\n for index, agent in enumerate(self.agency.agents):\n gamma = min(1, self.agent_reputations[index])\n q_tile_j_1 = self.posterior_history[arm][index].mean()\n q_j = self.prediction_history[arm][index].mean()\n \n self.agent_reputations[index] += gamma * (self.scoring_rule(reward, q_tile_j_1) - self.scoring_rule(reward, q_j))\n if self.track_reputation == True:\n self.agent_reputations_track[index].append(self.agent_reputations[index])\n \n def _compute_T_posterior(self, selected_arm, reward):\n self.bandit.arms[selected_arm].reward_dist.update(reward)\n\n def update(self, arm, reward):\n # print(\"pre_rep update:\", self.agent_reputations)\n self._update_reputations(arm, reward)\n # print(\"post_rep update:\", self.agent_reputations)\n self._compute_T_posterior(arm, reward)\n \n def plot_reputations(self):\n for (index, reputations) in enumerate(self.agent_reputations_track):\n plt.plot(reputations, label=index)\n plt.legend()\n plt.xlabel(\"Round 
(t)\")\n        plt.ylabel(\"Reputation\")\n        plt.show()\n\n    def scoring_rule(self, r, q, rule = \"quadratic\"):\n        if r == 1:\n            return (1-q)**2\n        else:\n            return (q)**2\n\n","sub_path":"influencelimiters/influencelimiter_study_4.py","file_name":"influencelimiter_study_4.py","file_ext":"py","file_size_in_byte":4861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"62323617","text":"# What is a generator?\nimport sys\n\n# a million elements in a list take up real memory\nnums = []\nfor num in range(1, 10 ** 6 + 1, 2):\n    nums.append(num ** 2)\nprint(type(nums), sys.getsizeof(nums))\n\n# a million objects are not produced all at once, so memory use stays minimal\nnums_gen = (num ** 2 for num in range(1, 10 ** 6 + 1, 2))\nprint(type(nums_gen), sys.getsizeof(nums_gen))\n\n\n# profile to check that using a generator does not hurt performance\nfrom time import perf_counter\n\nstart = perf_counter()\nnums_sum = sum(nums)\nprint(nums_sum, perf_counter() - start)\n\nstart = perf_counter()\nnums_gen_sum = sum(nums_gen)\nprint(nums_gen_sum, perf_counter() - start)\n\n\n# second profiling example - what is the difference?\nfrom time import perf_counter\n\nstart = perf_counter()\nnums = []\nfor num in range(1, 10 ** 5 + 1, 2):\n    nums.append(num ** 2)\nnums_sum = sum(nums)\nprint(nums_sum, perf_counter() - start)\n\nstart = perf_counter()\nnums_gen = (num ** 2 for num in range(1, 10 ** 5 + 1, 2))\nnums_gen_sum = sum(nums_gen)\nprint(nums_gen_sum, perf_counter() - start)\n\n\n# t = list(1, 2, 3, 4, 5, 6)[left:right:step]\n\n# generators do not support slices\nnums = []\nfor num in range(1, 10 ** 6 + 1, 2):\n    nums.append(num ** 2)\n\nnums_gen = (num ** 2 for num in range(1, 10 ** 6 + 1, 2))\n\nprint(nums[:3])\nprint(next(nums_gen), next(nums_gen), next(nums_gen), sep=', ')\n\n\n# but we can fetch the next few values - a generator 'remembers its state'\nfrom itertools import islice\n\nprint(*islice(nums_gen, 3))\nprint(*islice(nums_gen, 3))\n\n# break\n\n# generators are single-use\nnums_gen_sum = sum(nums_gen)\nprint(nums_gen_sum)\n\nnums_gen_sum = sum(nums_gen)\nprint(nums_gen_sum)\n\n\ndef letters_generator(start, end):\n    for code in range(ord(start), ord(end) + 1):\n        yield chr(code)\n    print('end func generator')\n\n\neng_uppercase_letters = letters_generator('A', 'Z')\nprint(*eng_uppercase_letters, sep='')\n\n# a list comprehension is not a generator\nnums_cube = [num ** 3 for num in range(5 + 1)]\nprint(type(nums_cube), *nums_cube)\n\nweather_data = [\n    [-17.5, -18.9, -21.0, -16.1],\n    [-9.3, -11.7, -14.3, -15.8],\n]\nflat_weather_data = [el for row in weather_data for el in row if el > -19]\nprint(flat_weather_data)\n\n\n# Dict Comprehensions\neng_ru_nums = {'one': 'один', 'first': 'один', 'two': 'два'}\nru_eng_nums = {val: key for key, val in eng_ru_nums.items()}\nprint(ru_eng_nums)\n\n\n# Sets in Python (hash tables)\nbasket = ['apple', 'dell', 'samsung', 'apple', 'huawei', 'asus', 'samsung']\nunique_brands = [el for el in basket if basket.count(el) == 1]\nprint(unique_brands)\n\nunique_brands = set()\ntmp = set()\nfor el in basket:\n    if el not in tmp:\n        unique_brands.add(el)\n    else:\n        unique_brands.discard(el)\n    tmp.add(el)\nprint(unique_brands)\n\n\n# preserving the original order of the elements\nunique_brands_ord = [el for el in basket if el in unique_brands]\nprint(unique_brands_ord)\n\n\n# more set methods\nchat_1 = {'user_1', 'user_5', 'user_7', 'user_8', 'user_11'}\nchat_2 = {'user_1', 'user_2', 'user_2', 'user_7', 'user_9', 'user_10'}\n# intersection of the two sets
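\n# note (added): '&', '|' and '-' below are the operator forms of intersection(), union() and difference()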
\nchats_common = chat_1.intersection(chat_2)\nprint(chats_common) # {'user_1', 'user_7'}\nprint(chat_1 & chat_2)\n\n\n# users that appear in only one specific chat\nchat_1_only = chat_1 - chat_2\nchat_2_only = chat_2 - chat_1\nprint(chat_1_only) # {'user_11', 'user_5', 'user_8'}\nprint(chat_1.difference(chat_2))\nprint(chat_2_only) # {'user_9', 'user_10', 'user_2'}\nprint(chat_2.difference(chat_1))\n# union of the users from both sets\nboth_chats = chat_1.union(chat_2)\nprint(both_chats)\nprint(chat_1 | chat_2)\n\n# Sets again: frozenset\nchat_1 = frozenset(('user_1', 'user_5', 'user_7', 'user_8', 'user_11'))\nchat_2 = frozenset(('user_1', 'user_2', 'user_2', 'user_7', 'user_9'))\n\nchats_common = chat_1.intersection(chat_2)\nprint(chats_common)\n\n\n# Set Comprehensions\nimport random\n\nrandom_nums = {random.randint(1, 100) for _ in range(10)}\nprint(len(random_nums), random_nums)\n\n\n\nprint('end')","sub_path":"Shishkin_Anatoliy_lesson_5/code_5.py","file_name":"code_5.py","file_ext":"py","file_size_in_byte":4501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"575296403","text":"from websocket_server import WebsocketServer\nimport ipget # pip3 install ipget\nimport firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import db\n\n# Define this device's ID\nDEVICE_ID = \"D0001\"\n\n# Firebase configuration\ncred = credentials.Certificate(\"key/halms-49316-firebase-adminsdk-y7wsu-6a5942aa12.json\")\n\n# Initialize the Realtime DB\nfirebase_admin.initialize_app(cred, {\n    'databaseURL': 'https://halms-49316-default-rtdb.firebaseio.com/'\n})\n\n# Handler for new client connections\ndef new_client(client, server):\n    print(\"New client has joined\")\n\n# Handler for newly received messages\ndef send_msg_allclient(client, server, message):\n    print(message)\n    server.send_message_to_all(message)\n\n# Get this device's IP address\nhost = ipget.ipget().ipaddr(\"wlan0\")\n# host = ipget.ipget().ipaddr(\"eth0\")\nhost_address = host[:host.find('/')]\n\n# Save the IP address to the DB\nref = db.reference('/devices')\nusers_ref = ref.child(DEVICE_ID)\nusers_ref.set({\n    'server': host_address\n})\n\nprint(host_address)\n\n# Create the websocket server\nserver = WebsocketServer(50000, host=host_address)\n\n# Handler called when a new client connects\nserver.set_fn_new_client(new_client)\n\n# Handler called when a client sends a message\nserver.set_fn_message_received(send_msg_allclient)\nserver.run_forever()\n","sub_path":"raspi_serve/socket_serve/serve.py","file_name":"serve.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"149777099","text":"#!/usr/bin/python\n#\n# (C) 2018 Riad S. 
Wahby \n#\n# test runner for pylaurent\n\ntry:\n    import sys\n    import os\n    import os.path as OP\nexcept:\n    assert False\nelse:\n    sys.path.insert(0, OP.abspath(OP.join(sys.path[0], os.pardir)))\n\nimport pylaurent.pyl_test as pyltest\n\nDEFAULT_NUM_TESTS = 8\nif len(sys.argv) > 1:\n    try:\n        num_tests = int(sys.argv[1])\n    except:\n        num_tests = DEFAULT_NUM_TESTS\nelse:\n    num_tests = DEFAULT_NUM_TESTS\n\nfor t in [pyltest]:\n    t.run_tests(num_tests)\n","sub_path":"python/pylaurent/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"190709448","text":"\nclass PhotoModel:\n\n    def __init__(self, id, description, link, principal, album_id):\n        self.id = id\n        self.description = description\n        self.link = link\n        self.principal = principal\n        self.album_id = album_id\n\n    def serialize(self):\n        return {\n            \"id\": self.id,\n            \"description\": self.description,\n            \"link\": self.link,\n            \"principal\": self.principal,\n            \"album_id\": self.album_id\n        }\n","sub_path":"albuns/model/photo_model.py","file_name":"photo_model.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"533097867","text":"\n#!/usr/bin/python\nimport paho.mqtt.client as mqtt\n# get the credentials of the devices\nfrom not_show import intro_json, UPDATE_TOPICS, MQTT_CREDENTIALS\nimport sys\nimport os\nimport json\nimport zlib\nimport hashlib\nimport multiprocessing\nimport time\nimport base64\nimport ssl\n\"\"\"\nFor converting to Windows:\n    pip install pyinstaller\nGo to your program’s directory and run:\n    pyinstaller yourprogram.py\n\"\"\"\n\n# The parameters introduced by the user:\nbusiness = \"\" # The name of the enterprise to make the update\ndevise = \"\" # The model, either ESP32 or ATMEGA\nupdate_file_name = \"\" # The file path of the update\n\n# Check if the update is possible:\nif (len(sys.argv) != 4):\n    print(\"Invalid number of parameters !!\")\n    sys.exit()\nelse:\n    business = str(sys.argv[1]).lower()\n    devise = str(sys.argv[2]).lower()\n    update_file_name = str(sys.argv[3])\n    # print(update_file_name)\n    # Check that the words introduced make sense:\n    if (business == 'infrico' or business == 'solidy'):\n        if (devise == 'esp32' or devise == 'atmega'):\n            print(\"Starting the update process...\")\n        else: \n            print(\"Invalid parameters!!\")\n    else:\n        print(\"Invalid parameters!!\")\n        sys.exit()\n\n# First I open the file:\nfile = open(update_file_name, \"rb\")\nupdate_bin = file.read()\n# Obtain the md5 checksum:\nmd5_value = hashlib.md5(update_bin).hexdigest()\n# Compress the file contents with zlib and base64-encode them:\nupdate_zlib = zlib.compress(update_bin, level=9)\nupdate_zlib_encoded = base64.b64encode(update_zlib)\n# and close the file\nfile.close()\n\n\ndef received_message(mqttc, obj, msg):\n    # Get the firmware version so it can be incremented once the update finishes\n    intro_info = str(msg.payload)\n    offset = intro_info.find('firmware') + len('firmware\": \"')\n    version = ''\n    for i in range(0, 8):\n        version += intro_info[offset + i]\n    version = int(version[5:])\n    # Copy the current version and increment it by one\n    intro_json['firmware'] = intro_json['firmware'][:5] + str(version + 1)\n    # Fill the json correctly:\n    intro_json['md5'] = md5_value\n    intro_json['model'] = devise\n    topic_to_update = UPDATE_TOPICS['esp32_intro'].replace('x', business)\n    json_msg = json.dumps(intro_json)\n    
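# note (added): unsubscribing before republishing (presumably) keeps this client from reacting to its own retained intro update\n    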
mqttc.unsubscribe(topic_to_update, 0)\n    # Publish the new firmware binary first, then the updated intro json\n    print('Increase the version')\n    mqttc.publish(UPDATE_TOPICS['esp32_bin'].replace('x', business), update_zlib_encoded, 0, True)\n    print('Publish the firmware in /bin ')\n    mqttc.publish(topic_to_update, json_msg, 0, True)\n\n# Configure the mqtt client:\nmqttc = mqtt.Client()\nmqttc.on_message = received_message\n# Connect to the broker and set all the permissions:\nmqttc.tls_set('C:/Users/Asus/Desktop/ESP_32_noob/MQTT_auto_replay/ca.crt', tls_version=ssl.PROTOCOL_TLSv1_2)\nmqttc.tls_insecure_set(True)\nmqttc.username_pw_set(MQTT_CREDENTIALS['USER'],MQTT_CREDENTIALS['PASS'])\nmqttc.connect(MQTT_CREDENTIALS['HOST'], 8883, 60)\nmqttc.publish('Start', '{\"Start_ALL\":1}', 0, False)\n\n# Subscribe to the intro topic to generate the first update json:\nmqttc.subscribe(UPDATE_TOPICS['esp32_intro'].replace('x', business), 0)\nmqttc.loop_forever()\n","sub_path":"MQTT_auto_replay/mqtt.py","file_name":"mqtt.py","file_ext":"py","file_size_in_byte":3211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"228637968","text":"\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport sys\nimport msgpack\nimport json\nimport numpy as np\nfrom autolab_core import RigidTransform\nimport os\nimport cv2\nimport imageio\n\n\n\n\n\ndef main(bin_fn, video_path, dest_fn, plot):\n\n    # Read file as binary and unpack data using MessagePack library\n    with open(bin_fn, \"rb\") as f:\n        data = msgpack.unpackb(f.read(), use_list=False, raw=False)\n\n    # The keyframe data is tagged \"keyframes\"\n    key_frames = data[\"keyframes\"]\n\n    print(\"Point cloud has {} points.\".format(len(key_frames)))\n\n    key_frame = {int(k): v for k, v in key_frames.items()}\n\n    if plot:\n        x = []\n        y = []\n        z = []\n        t = []\n        for key in sorted(key_frame.keys()):\n            point = key_frame[key]\n            trans_cw = np.asarray(point[\"trans_cw\"])\n            rot_cw = np.asarray(point[\"rot_cw\"])\n\n            rigid_cw = RigidTransform(rot_cw, trans_cw)\n\n            pos = np.matmul(rigid_cw.rotation, trans_cw)\n\n            x.append(pos[0])\n            y.append(pos[1])\n            z.append(pos[2])\n            t.append(float(point[\"ts\"]))\n\n\n        plt.xlabel('X')\n        plt.ylabel('Y')\n        plt.scatter(x, z)\n        plt.show()\n\n        plt.ylabel('Height')\n        plt.xlabel('Time')\n        plt.scatter(x=t, y=y)\n        print(t)\n\n\n        # create a new figure (3d axes were intended here but never added)\n        fig = plt.figure()\n\n        plt.show()\n\n        \n    else:\n        # Write point coordinates into file, one point for one line\n        with open(dest_fn, \"w\") as f:\n            video_name = video_path.split(\"/\")[-1][:-4]\n            if not os.path.exists(video_name):\n                os.mkdir(video_name)\n\n            vidcap = cv2.VideoCapture(video_path)\n            fps = int(vidcap.get(cv2.CAP_PROP_FPS)) + 1\n            print(fps)\n            count = 0\n\n            for key in sorted(key_frame.keys()):\n                point = key_frame[key]\n\n                # position capture\n                trans_cw = np.asarray(point[\"trans_cw\"])\n                rot_cw = np.asarray(point[\"rot_cw\"])\n\n                rigid_cw = RigidTransform(rot_cw, trans_cw)\n\n                pos = np.matmul(rigid_cw.rotation, trans_cw)\n\n                f.write(\"{}, {}, {}\\n\".format(pos[0], pos[1], pos[2]))\n\n                vidcap.set(cv2.CAP_PROP_POS_FRAMES, fps * float(point[\"ts\"]))\n\n\n                # image capture\n                success, image = vidcap.read()\n\n                if not success:\n                    print(\"capture failed\")\n                else:\n                    cv2.imwrite(os.path.join(video_name, str(count) +\".jpg\"), image)\n\n                count+=1\n\n\n\n\n    print(\"Finished\")\n\n\nif __name__ == \"__main__\":\n\n    bin_fn = '/home/paulo/catkin_ws/openvslam/build/mapa_direita.msg'\n    video_path = 
'/home/paulo/catkin_ws/openvslam/build/testes/direita.MP4'\n dest_fn = '/home/paulo/catkin_ws/openvslam/build/try'\n main(bin_fn, video_path,dest_fn, plot=False)\n","sub_path":"util/map_location_extractor.py","file_name":"map_location_extractor.py","file_ext":"py","file_size_in_byte":3042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"224101786","text":"import os\nimport json\n\nimport pygments\nimport pygments.lexers\nimport pygments.formatters\n\nfrom azcat.guess_ext import guess_ext_by_contents, guess_ext_by_filename\n\ndef load_module (type_, name):\n try:\n m = getattr(getattr(__import__(\"azcat.{0}.{1}\".format(type_, name)), type_), name)\n except ImportError:\n return None\n return m\n\ndef _load_formatter (name):\n return load_module(\"formatters\", name)\n\ndef _load_highlighter (name):\n return load_module(\"highlighters\", name)\n\n\ndef pretty_print (src, s, out, with_formatter, ext=None):\n \"\"\" `src' is a filepath to be formatted. `out' is a file object\n to be written.\"\"\"\n\n if ext == \"h\":\n ext = \"c\" # XXX: Pygments does not highlight .h files\n elif ext is None:\n ext = guess_ext_by_filename(src)\n if ext == \"\":\n ext = guess_ext_by_contents(s)\n\n # format\n if with_formatter:\n f = _load_formatter(ext)\n if f is not None:\n ext,s = f.format(s)\n\n # highlight\n h = _load_highlighter(ext)\n if h is None:\n try:\n lexer = pygments.lexers.get_lexer_by_name(ext)\n except pygments.util.ClassNotFound:\n lexer = pygments.lexers.get_lexer_for_mimetype(\"text/plain\")\n fmt = pygments.formatters.Terminal256Formatter(encoding=\"utf-8\")\n pygments.highlight(s, lexer, fmt, out)\n else:\n h.highlight(out, s)\n out.close()\n","sub_path":"azcat/pretty_print.py","file_name":"pretty_print.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"184658353","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n\tCopyright (c) 2020 Jarosław Stańczyk \n\tSource code presented in the lectures \"Python programming language\"\n\n\tblackjack_srv3.py - version based on SocketServer (with threads)\n\"\"\"\nimport socketserver\nfrom threading import Thread\nfrom time import sleep\nimport os\n\n\nclass ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):\n\tallow_reuse_address = True\n\n\nclass ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):\n\n\tdef handle(self):\n\t\tprint(\"connection with %s:%d\\n\" % (str(self.client_address[0]), self.client_address[1]))\n\t\tsock = self.request\n\t\tsock.send(b\"\\nWelcome to the Blackjack server!\\n\")\n\t\tsleep(5)\n\t\tsock.send(b\"bye!\\n\\n\")\n\t\tsock.close()\n\n\nif __name__ == \"__main__\":\n\tprint(\"Blackjack server (PID=%d)\" % os.getpid())\n\taddress = (\"\", 6789)\n\tserver = ThreadedTCPServer(address, ThreadedTCPRequestHandler)\n\tserver.allow_reuse_address = True\n\tthread = Thread(target=server.serve_forever)\n\tthread.start()\n\n# eof.\n","sub_path":"blackjack/src/servers/blackjack_srv3.py","file_name":"blackjack_srv3.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"542134997","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\nimport numpy as np\nimport os.path as osp\nimport sys\n\n\ndef load_cifar_testset(fn, n_classes=10, n_per_classes=1000):\n fp = open(fn, 'r')\n fp.readline()\n\n cls_indexes_mat = 
np.zeros((n_classes, n_per_classes), dtype=np.int32)\n index_pos_mat = np.zeros(n_classes, dtype=np.int32)\n\n for line in fp:\n splits = line.strip().split()\n idx = int(splits[0])\n label = int(splits[1])\n\n cls_indexes_mat[label][index_pos_mat[label]] = idx\n index_pos_mat[label] += 1\n\n print('===> index_pos_mat: \\n', index_pos_mat)\n print('===> cls_indexes_mat: \\n', cls_indexes_mat)\n\n return cls_indexes_mat\n\n\ndef convert_cifar_pairs(label_fn, pairs_fn):\n\n if 'cifar100' in label_fn:\n n_classes = 100\n n_per_classes = 100\n else:\n n_classes = 10\n n_per_classes = 1000\n\n cls_indexes_mat = load_cifar_testset(label_fn, n_classes, n_per_classes)\n\n fp = open(pairs_fn, 'r')\n\n prefix, ext = osp.splitext(pairs_fn)\n save_fn = prefix + '_real_idx' + ext\n fp2 = open(save_fn, 'w')\n\n for line in fp:\n spl = line.strip().split()\n\n idx1 = int(spl[0])\n idx2 = int(spl[1])\n label = int(spl[2])\n\n cls1 = idx1 / n_per_classes\n idx11 = idx1 % n_per_classes\n\n cls2 = idx2 / n_per_classes\n idx22 = idx2 % n_per_classes\n\n real_idx1 = cls_indexes_mat[cls1][idx11]\n real_idx2 = cls_indexes_mat[cls2][idx22]\n\n write_line = '%4d\\t%4d\\t%4d\\n' % (real_idx1, real_idx2, label)\n\n fp2.write(write_line)\n\n fp.close()\n fp2.close()\n\n print('===> Conversion finished!')\n\n\nif __name__ == '__main__':\n # label_fn = './cifar10_testset_labels.txt'\n # n_classes = 10\n # n_per_classes = 1000\n\n # cls_indexes_mat = load_cifar_testset(label_fn, n_classes, n_per_classes)\n\n label_fn = sys.argv[1]\n pairs_fn = sys.argv[2]\n convert_cifar_pairs(label_fn, pairs_fn)\n","sub_path":"test_list/convert_cifar_pairs_to_real_idx.py","file_name":"convert_cifar_pairs_to_real_idx.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"285161168","text":"from django import forms\nfrom django.forms import ValidationError\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import authenticate\nfrom django.contrib import messages\nfrom .models import camera\nimport re\nregex1=\"[\\w\\-]+\"\n\n\nclass CameraForm(forms.Form):\n camName = forms.CharField(max_length=64,label=\"Kamera Adı\",required=True,\n help_text='Kamera Adınızı Sadece \"A-Z\" \"a-z\" \"0-9\" \"-\" ve \"_\" Kullanarak Yazınız!',\n widget=forms.TextInput(attrs={'class': 'form-control rounded-0'}))\n camUrl = forms.CharField(max_length=256,label=\"Kamera URL' si\",required=True,\n help_text='Kamera Adresinizi \"http://mycam12345.com\" Şeklinde Yazınız',\n widget=forms.TextInput(attrs={'class': 'form-control rounded-0'}))\n\n\n def clean(self):\n cam_name=self.cleaned_data.get(\"camName\")\n cam_url=self.cleaned_data.get(\"camUrl\")\n cameracount=camera.objects.filter(cam_name=cam_name).count()\n print (cameracount)\n Errors=\"\"\n g=0\n if cameracount==0:\n if cam_name==None or cam_name==\"\" or cam_url==None or cam_url==\"\" :\n Errors+=\"Kamera Adı Alanı Boş Geçilemez?\\n\"\n g+=1\n else:\n if not re.search(regex1,cam_name) :\n Errors+=\"Kamera Adı İçin Geçersiz Karakter!\\n\"\n g+=1\n \n if g>0:\n values={\n \"Errors\":Errors,\n \"Durum\":\"0\"\n }\n return values\n\n\n\n values = {\n \"camName\": cam_name,\n \"camUrl\": cam_url,\n \"Durum\":\"1\"\n }\n\n return values\n else:\n Errors+=\"Bu Ada Sahip Bir Kamera Daha Önce Eklenmiş!\"\n values={\n \"Errors\":Errors,\n \"Durum\":\"0\"\n }\n return values\n\n\n\nclass AlertForm(forms.Form):\n a_start = forms.TimeField(label=\"Başlangıç Saati\",required=True,\n 
help_text='Saat:Dakika Şeklinde Giriş Yapınız',\r\n                            widget=forms.TimeInput(attrs={'class': 'form-control rounded-0','placeholder':'Saat Seçin'}))\r\n    a_end = forms.TimeField(label=\"Bitiş Saati\",required=True,\r\n                            help_text='Saat:Dakika Şeklinde Giriş Yapınız',\r\n                            widget=forms.TimeInput(attrs={'class': 'form-control rounded-0','placeholder':'Saat Seçin'}))\r\n\r\n    def clean(self):\r\n        starttime=self.cleaned_data.get(\"a_start\")\r\n        endtime=self.cleaned_data.get(\"a_end\")\r\n        Errors=\"\"\r\n        g=0\r\n        if starttime==None or starttime==\"\" or endtime==None or endtime==\"\" :\r\n            Errors+=\"Saat:Dakika Formatını Doğru Girdiğinizden Emin olun\\n\"\r\n            g+=1\r\n        \r\n        if g>0:\r\n            values={\r\n                \"Errors\":Errors,\r\n                \"Durum\":\"0\"\r\n                \r\n            }\r\n            return values\r\n\r\n\r\n\r\n        values = {\r\n            \"Durum\":\"1\",\r\n            \"starttime\":starttime,\r\n            \"endtime\":endtime\r\n        }\r\n\r\n        return values\r\n    ","sub_path":"camera553/Kamera553/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"414716615","text":"import pymysql\nimport redis\n\n# Connect to MySQL\nCONN_MYSQL = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='mysql', db='hlj', charset='utf8')\nCURSOR_MYSQL = CONN_MYSQL.cursor()\n# Connect to Redis\nPOOL = redis.ConnectionPool(host='127.0.0.1', port=6379)\nCONN_REDIS = redis.Redis(connection_pool=POOL)\n\nRedisSetName = \"hlj:name\"\n\ntry:\n    while True:\n        name = CONN_REDIS.spop(RedisSetName)\n        if name:\n            name = name.decode(\"utf-8\")\n            print(\"-> \", name)\n            sql = \"INSERT INTO name (name) VALUES ('%s')\" % name\n            CURSOR_MYSQL.execute(sql)\n            CONN_MYSQL.commit()\n        else:\n            print(\"No data -> stop!\")\n            break\nexcept Exception as e:\n    raise e\nfinally:\n    CURSOR_MYSQL.close()\n    CONN_MYSQL.close()\n    print(\"Database connection closed\")\n","sub_path":"MixNotes/other/转移redis数据到mysql.py","file_name":"转移redis数据到mysql.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"624884099","text":"# Copyright 2018 Novo Nordisk Foundation Center for Biosustainability, DTU.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom schema_collector.app import collect_schemas\nfrom venom.rpc.reflect.stubs import OpenAPISchema, InfoMessage, \\\n    OperationMessage, SchemaMessage, TagMessage\n\ncurrent_schema = OpenAPISchema(\n    info=InfoMessage(\n        title='API',\n        version='3.1.4',\n    ),\n)\nschema1 = OpenAPISchema(\n    swagger='2.0',\n    consumes=['application/json'],\n    produces=['application/json'],\n    info=InfoMessage(\n        title='First',\n        version='1.1.1',\n    ),\n    paths={\n        'a': {'get': OperationMessage()},\n        'b': {'post': OperationMessage()},\n    },\n    definitions={\n        'm': SchemaMessage(),\n        'n': SchemaMessage(),\n    }\n)\nschema2 = OpenAPISchema(\n    swagger='2.0',\n    consumes=['application/json'],\n    produces=['application/json'],\n    tags=[TagMessage(name='pet')],\n    info=InfoMessage(\n        title='Second',\n        version='2.2.2',\n    ),\n    paths={\n        'b': {'get': OperationMessage(tags=['pet'])},\n        'c': {'post': 
OperationMessage()},\n },\n definitions={\n 'l': SchemaMessage(),\n 's': SchemaMessage(),\n }\n)\n\n\ndef test_collect_schema():\n schema = collect_schemas(current_schema, schema1, schema2)\n assert schema.info.description == \\\n 'Collected from: First: 1.1.1, Second: 2.2.2'\n assert set(schema.paths.keys()) == {'a', 'b', 'c'}\n assert set(schema.paths['b'].keys()) == {'get'}\n assert set(schema.definitions.keys()) == {'m', 'n', 'l', 's'}\n assert list(schema.tags) == [\n TagMessage(name='pet'),\n TagMessage(name='First-1.1.1', description=''),\n TagMessage(name='Second-2.2.2', description='')\n ]\n assert list(schema.paths['a']['get'].tags) == ['First-1.1.1']\n assert list(schema.paths['b']['get'].tags) == ['pet', 'Second-2.2.2']\n","sub_path":"tests/test_unit.py","file_name":"test_unit.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"460045981","text":"# -*- coding:utf-8 -*-\n\n\n# Given the head of a linked list, remove the nth node from the end of the list and return its head.\n#\n# Follow up: Could you do this in one pass?\n#\n#  \n# Example 1:\n#\n#\n# Input: head = [1,2,3,4,5], n = 2\n# Output: [1,2,3,5]\n#\n#\n# Example 2:\n#\n#\n# Input: head = [1], n = 1\n# Output: []\n#\n#\n# Example 3:\n#\n#\n# Input: head = [1,2], n = 1\n# Output: [1]\n#\n#\n#  \n# Constraints:\n#\n#\n# \tThe number of nodes in the list is sz.\n# \t1 <= sz <= 30\n# \t0 <= Node.val <= 100\n# \t1 <= n <= sz\n#\n#\n\n\n# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def removeNthFromEnd(self, head, n):\n \"\"\"\n :type head: ListNode\n :type n: int\n :rtype: ListNode\n \"\"\"\n p = head\n i=0\n while p:\n p=p.next\n i+=1\n \n if (i-n==0):\n return head.next\n else:\n p = head\n for ii in xrange (0,i-n-1):\n p=p.next\n p.next = p.next.next\n return head\n","sub_path":"0019-remove-nth-node-from-end-of-list/remove-nth-node-from-end-of-list.py","file_name":"remove-nth-node-from-end-of-list.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"169749659","text":"import pytest\nimport random\n\nfrom cs506 import det\n\ndef test_det():\n # sanity checks\n try:\n det.det([[1, 2], [2, 3], [3, 4]])\n except ValueError as e:\n assert str(e) == \"A must be a square matrix\"\n \n assert det.Aij([[1, 2, 3], [4, 5, 6], [7, 8, 9]], i=1, j=1) == [[1, 3], [7, 9]]\n assert det.det([[4, 3], [6, 3]]) == -6\n\n # all elements of a row is 0, determinant is 0\n assert det.det([[0, 0], [6, 3]]) == 0\n\n # determinant of identity matrix is 1\n assert det.det([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) == 1\n\n # det(AT) == det(A)\n A = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\n AT = [[1, 4, 7], [2, 5, 8], [3, 6, 9]]\n assert det.det(A) == det.det(AT)","sub_path":"02-library/tests/test_det.py","file_name":"test_det.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"275895543","text":"from common.gAPI import GoogleAPI\nimport pandas as pd\nfrom Edelweiss.scrapEdDB import ScrapData\nfrom common.common import CommonFunctions\nfrom common.DBOperations import DatabaseOp\nfrom Edelweiss.helpEdDB import HelpEdDB\nimport time\nimport os\nfrom pytz import timezone\nimport datetime\nimport threading\nimport Edelweiss.edleConfig as edleConfig\nimport config\nimport 
warnings\nwarnings.filterwarnings(\"ignore\")\n\n\nclass ProcessEd(threading.Thread):\n\n def concate(self, df_now, previous_df):\n objCommon = CommonFunctions()\n fixed_columns = ['ID', 'ScrapedDate', 'ScripName', 'IndexORStocks', 'StrikePrice', 'OptionType', 'StrTradeDateTime', 'TradeDateTime', 'ExpiryDate', 'OI',\n 'COI', 'IV', 'VOL', 'MinuteOI', 'Flag']\n try:\n previous_df = objCommon.drop_extra_columns(previous_df, fixed_columns)\n df_now = objCommon.drop_extra_columns(df_now, fixed_columns)\n final = df_now.append(previous_df)\n #final.reset_index(inplace=True)\n except Exception as e:\n final = previous_df\n print('concat exception: ', e)\n\n return final\n\n def save_to_drive(self, folder_id, name_of_file, destination):\n objGAPI = GoogleAPI()\n try:\n service = objGAPI.intiate_gdAPI()\n # Search file id to check it is exists or not\n # def search_file(service, file_name, mime_type, folder_id, search_in_folder=False):\n file_id = objGAPI.search_file(service, str(name_of_file), 'text/csv', folder_id, True)\n if type(file_id) is int:\n objGAPI.upload_file(service, str(name_of_file), destination, folder_id, 'text/csv')\n if type(file_id) is str:\n objGAPI.delete_file(service, file_id)\n # time.sleep(1)\n objGAPI.upload_file(service, str(name_of_file), destination, folder_id, 'text/csv')\n return True\n except Exception as e:\n print('Exception while saving files on drive', e)\n return False\n\n def endupload(self, symbol, expiry_date, table_name, folder_id):\n try:\n exd = expiry_date.replace(' ', '_')\n file_name = symbol + '_' + exd + '.csv'\n objHDB = HelpEdDB()\n objGAPI = GoogleAPI()\n objCommon = CommonFunctions()\n result_df, st = objHDB.DB2CSV(symbol, table_name)\n destination = os.getcwd() + '/Edelweiss/sample_data/' + file_name\n\n result_df.to_csv(os.getcwd() + '/Edelweiss/sample_data/' + file_name, index=False)\n service = objGAPI.intiate_gdAPI()\n isDataAvailable, file_id = objCommon.check_pdata_exist(file_name, folder_id)\n if isDataAvailable == True:\n objGAPI.delete_file(service, file_id)\n objGAPI.upload_file(service, str(file_name), destination, folder_id, 'text/csv')\n except Exception as e:\n print('Exception while saving files on drive at the end of day', e)\n return False\n\n def process(self, symbol, table_name, expiry_date, iterations, threshold, pVtime):\n #objGAPI = GoogleAPI()\n objScrap = ScrapData()\n objCommon = CommonFunctions()\n try:\n exd = expiry_date.replace(' ', '_')\n file_name = symbol + '_' + exd + '.csv'\n status, pVtime = objScrap.start_scraping(str(symbol), expiry_date, threshold, pVtime)\n if iterations == 30:\n objHDB = HelpEdDB()\n if status == True:\n result_df, st = objHDB.DB2CSV(symbol, table_name)\n # if os.path.exists(os.getcwd() + '/Edelweiss/d_csv/' + file_name):\n # previous_df = pd.read_csv(os.getcwd() + '/Edelweiss/d_csv/' + file_name, index_col=0)\n # result_df = self.concate(current_df, previous_df)\n # else:\n # result_df = current_df\n\n destination = os.getcwd() + '/Edelweiss/sample_data/' + file_name\n\n result_df.to_csv(os.getcwd() + '/Edelweiss/sample_data/' + file_name, index=False)\n #service = objGAPI.intiate_gdAPI()\n #isDataAvailable, file_id = objCommon.check_pdata_exist(file_name, folder_id)\n # if isDataAvailable == True:\n # objGAPI.delete_file(service, file_id)\n # objGAPI.upload_file(service, str(file_name), destination, folder_id, 'text/csv')\n else:\n print(f\"Scrapping df empty for : {symbol}\")\n return False, pVtime\n return True, pVtime\n except Exception as e:\n print('Exception in Edle Scrapping 
Process:', e)\n return False, pVtime\n\n def start(self, q, result, isMarketON, diction): #FolderIDs\n while not q.empty():\n work = q.get()\n if work[1] == 'UPLOAD_THREAD':\n pass\n # print('In upload thread')\n # while True:\n # strcurrentTime = datetime.datetime.now(timezone('Asia/Calcutta')).strftime('%H:%M')\n # strcurrentTime = strcurrentTime.replace(':', '.')\n # print(\"strcurrentTime===\",strcurrentTime)\n # #if float(strcurrentTime) > float(15.30):\n # if float(strcurrentTime) > float(15.30):\n # print('Market is not ON. Try tomorrow or change isMarketON flag')\n # break\n # for a in range(1800):\n # sT = datetime.datetime.now(timezone('Asia/Calcutta')).strftime('%H:%M')\n # sT = sT.replace(':', '.')\n # #if float(sT) > float(15.30):\n # print(\"st===\", sT)\n # if float(sT) > float(15.30):\n # print('Market is not ON. Try tomorrow or change isMarketON flag')\n # break\n # time.sleep(1)\n # objGAPI = GoogleAPI()\n # service = objGAPI.intiate_gdAPI()\n # file_id = objGAPI.search_file(service, config.DB_Name, 'mime_type', '1llZZacQjhf2iNPjjpCBSSD4AdKFc5Con', True)\n # if file_id != 0:\n # objGAPI.delete_file(service, file_id)\n # objGAPI.upload_file(service, config.DB_Name, os.getcwd() + '/DB/' + config.DB_Name,\n # '1llZZacQjhf2iNPjjpCBSSD4AdKFc5Con', 'application/vnd.sqlite3')\n\n else:\n try:\n if isMarketON == 'TRUE':\n threshold = 1.0\n ns = threading.local()\n ns.iterations = 0\n\n #Get threshold\n ScrapedFor = work[1]\n if diction[ScrapedFor] == 'FALSE':\n ScrapedFor = ScrapedFor.split('_')\n expDate = ScrapedFor[1]\n symbol = ScrapedFor[2]\n else:\n ScrapedFor = ScrapedFor.split('_')\n expDate = ScrapedFor[0]\n symbol = ScrapedFor[1]\n # objDB = DatabaseOp()\n # conn = objDB.create_connection()\n # print(\"query====issue\")\n # ed = expDate.replace(' ', '-')\n # ed = ed.replace('20', '')\n # que = \"SELECT Threshold FROM Threshold WHERE ScripName='\"+symbol+\"' AND ExpiryDate='\"+str(ed)+\"'\"\n # #que = 'SELECT Threshold FROM Threshold WHERE ScripName=? AND ExpiryDate=?'\n # cur = conn.cursor()\n # #cur.execute(que, [symbol, str(ed)])\n # cur.execute(que)\n # rr = cur.fetchone()\n # if len(rr) != 0:\n # threshold = rr[0]\n # else:\n # threshold = 1.0\n # #print('No threshold existed for given expiry date')\n # conn.close()\n pVtime = ''\n while True:\n print('******************* Iterations : ', ns.iterations)\n strcurrentTime = datetime.datetime.now(timezone('Asia/Calcutta')).strftime('%H:%M')\n strcurrentTime = strcurrentTime.replace(':', '.')\n #print(\"else====vala ==strcurrentTime\",strcurrentTime)\n #if float(strcurrentTime) > float(15.30):\n if float(strcurrentTime) > float(15.30):\n print('Market is not ON. 
Try tomorrow or change isMarketON flag')\n exd = expDate.replace(' ', '_')\n table_name = config.TableName + exd\n #folder_ID = FolderIDs[expDate]\n #self.endupload(symbol, expDate, table_name, folder_ID)\n break\n ScrapedFor = work[1]\n if diction[ScrapedFor] == 'FALSE':\n ScrapedFor = ScrapedFor.split('_')\n expDate = ScrapedFor[1]\n symbol = ScrapedFor[2]\n #folder_ID = FolderIDs[expDate]\n exd = expDate.replace(' ', '_')\n table_name = config.TableName + exd\n status, pVtime = self.process(symbol, table_name, expDate, ns.iterations, threshold, pVtime)\n else:\n ScrapedFor = ScrapedFor.split('_')\n expDate = ScrapedFor[0]\n symbol = ScrapedFor[1]\n #folder_ID = FolderIDs[expDate]\n exd = expDate.replace(' ', '_')\n table_name = config.TableName + exd\n status, pVtime = self.process(symbol, table_name, expDate, ns.iterations, threshold, pVtime)\n if status == True:\n ns.iterations += 1\n if ns.iterations == 31:\n ns.iterations = 0\n #Sleep for a minute before next scrapping\n time.sleep(59)\n\n else:\n it = 0\n pVtime = ''\n strcurrentTime = datetime.datetime.now(timezone('Asia/Calcutta')).strftime('%H:%M')\n strcurrentTime = strcurrentTime.replace(':', '.')\n #print(\"third====vala ==strcurrentTime\", strcurrentTime)\n #if float(strcurrentTime) > float(15.30):\n if float(strcurrentTime) > float(15.30):\n print('Market is not ON. Try tomorrow or change isMarketON flag')\n break\n ScrapedFor = work[1]\n if diction[ScrapedFor] == 'FALSE':\n ScrapedFor = ScrapedFor.split('_')\n expDate = ScrapedFor[1]\n symbol = ScrapedFor[2]\n #folder_ID = FolderIDs[expDate]\n exd = expDate.replace(' ', '_')\n table_name = config.TableName + exd\n status, pVtime = self.process(symbol, table_name, expDate, it, threshold, pVtime)\n else:\n ScrapedFor = ScrapedFor.split('_')\n expDate = ScrapedFor[0]\n symbol = ScrapedFor[1]\n #folder_ID = FolderIDs[expDate]\n exd = expDate.replace(' ', '_')\n table_name = config.TableName + exd\n status, pVtime = self.process(symbol, table_name, expDate, it, threshold, pVtime)\n\n except Exception as e:\n print(e)\n result[work[0]] = {}\n # signal to the queue that task has been processed\n q.task_done()\n return True\n\n\n#\n# obj = ProcessEd()\n# name_of_file = 'NIFTY_29_Apr_2021.csv'\n# previous_df = pd.read_csv(os.getcwd() + '/d_csv/' + name_of_file, index_col=0)\n# print(previous_df.head(1))\n# df_now = pd.read_csv(os.getcwd() + '/csv/' + name_of_file, index_col=0)\n# print(df_now.head(1))\n# d = obj.concate(previous_df, df_now)\n# print(d.head())\n# print(d.tail())","sub_path":"Edelweiss_MYSQL_DB/Edelweiss/pEDDB.py","file_name":"pEDDB.py","file_ext":"py","file_size_in_byte":13017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"238548150","text":"test1 = [5, 1, 7, 10, 5, 5, 1000, 99, 1]\ntest2 = [0]\ntest3 = [9,9,9,9,9,9,9,9,1]\ntest4 = [1,9,9,9,9,9,9,9]\n\ndef remove_dublicates(arr):\n table = {}\n res = []\n\n for item in arr:\n if item not in table:\n table[item] = False\n else:\n table[item] = True\n\n # for item in arr:\n # if table[item] is False:\n # res.append(item)\n\n return [ item for item in arr if table[item] is False ]\n\n \nassert(remove_dublicates(test1) == [7,10,1000,99])\nassert(remove_dublicates(test2) == [0])\nassert(remove_dublicates(test3) == [1])\nassert(remove_dublicates(test4) == 
[1])","sub_path":"Various/data_structures/zadachki_zakachki/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"526722333","text":"import numpy as np\nimport pytest\n\nimport pylas\n\n\n@pytest.fixture(params=['simple.las', 'simple.laz'])\ndef las(request):\n return pylas.open(request.param)\n\n\ndef test_classification_overflows(las):\n if not pylas.lib.USE_UNPACKED:\n c = las.classification\n c[0] = 54\n with pytest.raises(OverflowError):\n las.classification = c\n else:\n las.classification[0] = 54\n with pytest.raises(OverflowError):\n las.points_data.repack_sub_fields()\n\n\ndef test_classification_change(tmpdir, las):\n c = las.classification\n c[:] = 10\n\n las.classification = c\n assert np.allclose(c, las.classification)\n\n out = tmpdir.join('tmp.las').open('wb')\n las.write_to(out)\n out.close()\n\n out = tmpdir.join('tmp.las').open('rb')\n las = pylas.open(out)\n\n assert np.allclose(c, las.classification)\n\n\ndef test_synthetic_change(tmpdir, las):\n s = las.synthetic\n s[:] = False\n s[17] = True\n\n las.synthetic = s\n assert np.allclose(s, las.synthetic)\n\n out = tmpdir.join('tmp.las').open('wb')\n las.write_to(out)\n out.close()\n\n out = tmpdir.join('tmp.las').open('rb')\n las = pylas.open(out)\n\n assert np.allclose(s, las.synthetic)\n\n\ndef test_key_point_change(tmpdir, las):\n kp = las.key_point\n kp[:] = False\n kp[25] = True\n\n las.key_point = kp\n assert np.allclose(kp, las.key_point)\n\n out = tmpdir.join('tmp.las').open('wb')\n las.write_to(out)\n out.close()\n\n out = tmpdir.join('tmp.las').open('rb')\n las = pylas.open(out)\n\n assert np.allclose(kp, las.key_point)\n\n\ndef test_withheld_changes(tmpdir, las):\n withheld = las.withheld\n withheld[:] = False\n withheld[180] = True\n\n las.withheld = withheld\n assert np.allclose(withheld, las.withheld)\n\n out = tmpdir.join('tmp.las').open('wb')\n las.write_to(out)\n out.close()\n\n out = tmpdir.join('tmp.las').open('rb')\n las = pylas.open(out)\n\n assert np.allclose(withheld, las.withheld)\n\n\ndef dim_does_not_exists(las, dim_name):\n try:\n _ = getattr(las, dim_name)\n except ValueError:\n return True\n return False\n\n\ndef test_change_format(las):\n assert las.points_data.point_format_id == 3\n assert las.header.point_data_format_id == 3\n\n las.to_point_format(2)\n assert las.points_data.point_format_id == 2\n assert las.header.point_data_format_id == 2\n assert dim_does_not_exists(las, 'gps_time')\n\n las.to_point_format(1)\n assert las.points_data.point_format_id == 1\n assert las.header.point_data_format_id == 1\n assert dim_does_not_exists(las, 'red')\n assert dim_does_not_exists(las, 'green')\n assert dim_does_not_exists(las, 'blue')\n\n las.to_point_format(0)\n assert las.points_data.point_format_id == 0\n assert las.header.point_data_format_id == 0\n assert dim_does_not_exists(las, 'red')\n assert dim_does_not_exists(las, 'green')\n assert dim_does_not_exists(las, 'blue')\n assert dim_does_not_exists(las, 'gps_time')\n","sub_path":"tests/test_modif_1_2.py","file_name":"test_modif_1_2.py","file_ext":"py","file_size_in_byte":2984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"142260402","text":"# Standard Library Imports\nfrom __future__ import division, print_function\nfrom time import time\n\n# External Imports\nimport numpy as np\nfrom sklearn.decomposition import TruncatedSVD\nimport matplotlib.pyplot as plt\n# 
%matplotlib inline # use when in JUPYTER NOTEBOOK (or risk hang)\n# plt.ion() # allow ipython %run to terminate without closing figure\n\n# Internal Imports\nfrom utils import load_data\n\n# User Parameters\nDATASET = '/Users/Andy/Google Drive/Development/ML/kaggle/leaf-classification/KAGGLE_LEAF.mat'\n# DATASET = 'usps'\nVALIDATION_PERCENTAGE = .2\nTESTING_PERCENTAGE = .2\n# RANKS2TRY = range(10)\nRANKS2TRY = 'all' # must be 'all' or list of integers\nTIMER_ON = False\nassert 0 < VALIDATION_PERCENTAGE + TESTING_PERCENTAGE < 1\n\n# Load dataset\nX_train, Y_train, X_valid, Y_valid, X_test, Y_test = \\\n load_data(DATASET, VALIDATION_PERCENTAGE, TESTING_PERCENTAGE)\n\n# only used for score, has no affect on approximation\nbatch_size = len(Y_train) // 10\n\n# Classification by distance from best k-dimensional subspace \n# approximation from SVD of each label's example set\nif RANKS2TRY == 'all':\n ranks2try = list(range(1, X_train.shape[1])) # all must be less than full rank\nelse:\n ranks2try = RANKS2TRY\nvalid_accuracy = []\nfor rnk in ranks2try:\n start_time = time()\n print(\"\")\n print(\"rank = {} -- \".format(rnk), end='')\n distinct_labels = list(set(Y_train))\n svd = {}\n for l in distinct_labels:\n examples_labeled_l = \\\n np.array([x for x, y in zip(X_train, Y_train) if y == l])\n svd[l] = TruncatedSVD(n_components=rnk)\n svd[l].fit(examples_labeled_l)\n\n # Training Set Accuracy\n def svd_predict(X_, Y_, svd_dict_):\n X_, Y_ = np.array(X_), np.array(Y_)\n distinct_labels = svd_dict_.keys()\n distances = {}\n for l in distinct_labels:\n X_appr = svd_dict_[l].inverse_transform(\n svd_dict_[l].transform(X_))\n distances[l] = np.linalg.norm(X_ - X_appr, axis=1)\n distances = np.array(distances.values()).transpose()\n distance_minimizers = np.argmin(distances, axis=1)\n Y_predictions = [distinct_labels[idx] for idx in distance_minimizers]\n number_correct_ = np.sum(Y_predictions == Y_)\n return number_correct_\n\n batches = [(k*batch_size, (k+1)*batch_size) for k in\n range(len(Y_train) // batch_size)]\n ct = 0\n number_correct = 0\n for i0, i1 in batches:\n ct += 1\n number_correct_batch = \\\n svd_predict(X_train[i0: i1], Y_train[i0: i1], svd)\n # print(\"Training Batch {}/{} Accuracy: {}\"\n # \"\".format(ct, len(batches), number_correct_batch/(i1 - i0)))\n number_correct += number_correct_batch\n if len(Y_train) % batch_size:\n i0, i1 = i1, len(Y_train)\n number_correct_batch = \\\n svd_predict(X_train[i0: i1], Y_train[i0: i1], svd)\n # print(\"Training Remainder Batch Accuracy: {}\"\n # \"\".format(ct, len(batches), number_correct_batch/(i1 - i0)))\n number_correct += number_correct_batch\n print(\"Training / Validation Accuracy: {:.2f}% / \"\n \"\".format(100 * number_correct / len(Y_train)), end='')\n\n # Validation Set Accuracy\n if batch_size < len(Y_valid):\n batches = [(k*batch_size, (k+1)*batch_size) for k in\n range(len(Y_valid) // batch_size)]\n ct = 0\n number_correct = 0\n for i0, i1 in batches:\n ct += 1\n number_correct_batch = \\\n svd_predict(X_valid[i0: i1], Y_valid[i0: i1], svd)\n # print(\"valid Batch {}/{} Accuracy: {}\"\n # \"\".format(ct, len(batches), \n # number_correct_batch/(i1 - i0)))\n number_correct += number_correct_batch\n if len(Y_valid) % batch_size:\n i0, i1 = i1, len(Y_valid)\n number_correct_batch = \\\n svd_predict(X_valid[i0: i1], Y_valid[i0: i1], svd)\n # print(\"valid Remainder Batch Accuracy: {}\"\n # \"\".format(ct, len(batches), \n # number_correct_batch/(i1 - i0)))\n number_correct += number_correct_batch\n else:\n number_correct = 
svd_predict(X_valid, Y_valid, svd)\n    if not number_correct/len(Y_valid):\n        raise Exception()\n    valid_accuracy.append(number_correct / len(Y_valid))\n    print(\"{:.2f}%\".format(100*valid_accuracy[-1]))\n    if TIMER_ON:\n        print(\"Time to Train and Validate with this rank: {:.2f} seconds\"\n              \"\".format(time() - start_time))\nprint(\"\\nWinner winner chicken dinner goes to rank =\",\n      ranks2try[np.argmax(valid_accuracy)])\n\nplt.grid(True)\nplt.plot(ranks2try, valid_accuracy)\n\n# Now that we've found the best rank to use.\nrnk = ranks2try[np.argmax(valid_accuracy)]\ndistinct_labels = list(set(Y_train))\nsvd = {}\nfor l in distinct_labels:\n    examples_labeled_l = np.array([x for x, y in\n                                   zip(X_train, Y_train) if y == l])\n    svd[l] = TruncatedSVD(n_components=rnk)\n    svd[l].fit(examples_labeled_l)\n\n# Test Set Accuracy\nif batch_size < len(Y_test):\n    batches = [(k*batch_size, (k+1)*batch_size) for k in\n               range(len(Y_test) // batch_size)]\n    ct = 0\n    number_correct = 0\n    for i0, i1 in batches:\n        ct += 1\n        number_correct_batch = \\\n            svd_predict(X_test[i0: i1], Y_test[i0: i1], svd)\n        # print(\"Test Batch {}/{} Accuracy: {}\"\n        #       \"\".format(ct, len(batches), number_correct_batch/(i1 - i0)))\n        number_correct += number_correct_batch\n    if len(Y_test) % batch_size:\n        i0, i1 = i1, len(Y_test)\n        number_correct_batch = \\\n            svd_predict(X_test[i0: i1], Y_test[i0: i1], svd)\n        # print(\"Test Remainder Batch Accuracy: {}\"\n        #       \"\".format(ct, len(batches), number_correct_batch/(i1 - i0))) \n        number_correct += number_correct_batch\nelse:\n    number_correct = svd_predict(X_test, Y_test, svd)\nprint(\"Test Accuracy with winner: {:.2f}%\"\n      \"\".format(100 * number_correct / len(Y_test)))\n\nplt.show() # prevent python from terminating and closing figure","sub_path":"experiments/svd_sklearn.py","file_name":"svd_sklearn.py","file_ext":"py","file_size_in_byte":6163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"67725811","text":"# python - problem 7\n\nimport math\nimport time\nt = time.time()\n\nprime = [2]\nprime_count = 1\nx = 3\n\nwhile len(prime) < 10001:\n    prime_flag = True\n    for a in prime:\n        if(x%a==0):\n            prime_flag = False\n            break\n    if prime_flag == True:\n        prime.append(x)\n    x += 1\n\nprint(prime[-1])\n\nt = time.time()-t\nprint(\"Time spent:\", t, \"sec\")","sub_path":"problem 007.py","file_name":"problem 007.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"423305411","text":"# -*- coding: utf-8 -*-\nfrom scrapy.selector import Selector\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.linkextractors import LinkExtractor\n\nfrom items import TopicItem, AuthorInfo, ReplyItem\nclass KiwiSpider(CrawlSpider):\n    name = \"kiwi\"\n    allowed_domains = [\"douban.com\"]\n\n    anchorTitleXPath = 'a/text()'\n    anchorHrefXPath = 'a/@href'\n\n    start_urls = [\n        \"https://www.douban.com/group/topic/90895393/?start=0\",\n    ]\n    rules = (\n        Rule(\n            LinkExtractor(allow=(r'/group/[^/]+/discussion\\\\?start=\\\\d+',)),\n            callback='parse_topic_list',\n            follow=True\n        ),\n        Rule(\n            LinkExtractor(allow=(r'/group/topic/\\\\d+/$',)), # topic content page\n            callback='parse_topic_content',\n            follow=True\n        ),\n        Rule(\n            LinkExtractor(allow=(r'/group/topic/\\\\d+/\\\\?start=\\\\d+',)), # topic content page\n            callback='parse_topic_content',\n            follow=True\n        ),\n    )\n\n    # topic detail page\n    def parse_topic_content(self, response):\n        # title XPath\n        titleXPath = '//html/head/title/text()'\n        # topic content XPath\n        contentXPath = 
'//div[@class=\"topic-content\"]/p/text()'\n        # post time XPath\n        timeXPath = '//div[@class=\"topic-doc\"]/h3/span[@class=\"color-green\"]/text()'\n        # post author XPath\n        authorXPath = '//div[@class=\"topic-doc\"]/h3/span[@class=\"from\"]'\n\n        item = TopicItem()\n        # current page URL\n        item['url'] = response.url\n        # title\n        titleFragment = Selector(response).xpath(titleXPath)\n        item['title'] = str(titleFragment.extract()[0]).strip()\n\n        # topic content\n        contentFragment = Selector(response).xpath(contentXPath)\n        strs = [line.extract().strip() for line in contentFragment]\n        item['content'] = '\\n'.join(strs)\n        # post time\n        timeFragment = Selector(response).xpath(timeXPath)\n        if timeFragment:\n            item['time'] = timeFragment[0].extract()\n\n        # post author info\n        authorInfo = AuthorInfo()\n        authorFragment = Selector(response).xpath(authorXPath)\n        if authorFragment:\n            authorInfo['authorName'] = authorFragment[0].xpath(self.anchorTitleXPath).extract()[0]\n            authorInfo['authorUrl'] = authorFragment[0].xpath(self.anchorHrefXPath).extract()[0]\n\n        item['author'] = dict(authorInfo)\n\n        # reply list XPath\n        replyRootXPath = r'//div[@class=\"reply-doc content\"]'\n        # reply time XPath\n        replyTimeXPath = r'div[@class=\"bg-img-green\"]/h4/span[@class=\"pubtime\"]/text()'\n        # reply author XPath\n        replyAuthorXPath = r'div[@class=\"bg-img-green\"]/h4'\n\n        replies = []\n        itemsFragment = Selector(response).xpath(replyRootXPath)\n        for replyItemXPath in itemsFragment:\n            replyItem = ReplyItem()\n            # reply content\n            contents = replyItemXPath.xpath('p/text()')\n            strs = [line.extract().strip() for line in contents]\n            replyItem['content'] = '\\n'.join(strs)\n            # reply time\n            timeFragment = replyItemXPath.xpath(replyTimeXPath)\n            if timeFragment:\n                replyItem['time'] = timeFragment[0].extract()\n            # reply author\n            replyAuthorInfo = AuthorInfo()\n            authorFragment = replyItemXPath.xpath(replyAuthorXPath)\n            if authorFragment:\n                replyAuthorInfo['authorName'] = authorFragment[0].xpath(self.anchorTitleXPath).extract()[0]\n                replyAuthorInfo['authorUrl'] = authorFragment[0].xpath(self.anchorHrefXPath).extract()[0]\n\n            replyItem['author'] = dict(replyAuthorInfo)\n            # append to the reply list\n            replies.append(dict(replyItem))\n\n        item['reply'] = replies\n        yield item\n\n    # topic list page\n    def parse_topic_list(self, response):\n        # topic list XPath (skip the header row)\n        topicRootXPath = r'//table[@class=\"olt\"]/tr[position()>1]'\n        # single topic entry XPath\n        titleXPath = r'td[@class=\"title\"]'\n        # post author XPath\n        authorXPath = r'td[2]'\n        # reply count XPath\n        replyCountXPath = r'td[3]/text()'\n        # post time XPath\n        timeXPath = r'td[@class=\"time\"]/text()'\n\n        topicsPath = Selector(response).xpath(topicRootXPath)\n        for topicItemPath in topicsPath:\n            item = TopicItem()\n            titlePath = topicItemPath.xpath(titleXPath)\n            item['title'] = titlePath.xpath(self.anchorTitleXPath).extract()[0]\n            item['url'] = titlePath.xpath(self.anchorHrefXPath).extract()[0]\n            # post time\n            timePath = topicItemPath.xpath(timeXPath)\n            if timePath:\n                item['time'] = timePath[0].extract()\n            # post author\n            authorPath = topicItemPath.xpath(authorXPath)\n            authInfo = AuthorInfo()\n            authInfo['authorName'] = authorPath[0].xpath(self.anchorTitleXPath).extract()[0]\n            authInfo['authorUrl'] = authorPath[0].xpath(self.anchorHrefXPath).extract()[0]\n            item['author'] = dict(authInfo)\n            # reply count\n            replyCountPath = topicItemPath.xpath(replyCountXPath)\n            item['replyCount'] = replyCountPath[0].extract()\n\n            item['content'] = ''\n            yield item\n\n    parse_start_url = 
parse_topic_content","sub_path":"python-demo/data-scrapy/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":5450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"481856599","text":"import serial\nfrom serial.tools import list_ports\n\n# Get list of available ports\navailablePorts = list_ports.comports()\n\n# Print list by name\nportNumber = 0\nfor port in availablePorts:\n print(portNumber, port.device)\n portNumber += 1\n\n# Connect to the port we want\nser = serial.Serial(availablePorts[2].device, 9600)\nwhile True:\n print(str(ser.read(5)))\n ser.flush()\n\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"641411374","text":"from django import forms\nfrom apps.tools.models import Calendar\n\n\nclass CalendarForm(forms.ModelForm):\n class Meta:\n model = Calendar\n\n fields = [\n 'id',\n 'title',\n 'color',\n 'allDay',\n 'start',\n 'startTimer',\n 'end',\n 'endTimer',\n ]\n labels = {\n 'title': 'Title to Events:',\n 'color': 'Category:',\n 'allDay': 'is All day:',\n 'start': 'Start Date:',\n 'startTimer': 'Start Timer:',\n 'end': 'End Date:',\n 'endTimer': 'End Timer',\n }\n widgets = {\n 'title': forms.TextInput(attrs={'placeholder': 'Title', 'class': 'form-control input-md'}),\n 'color': forms.Select(attrs={'class': 'form-control input-md'}, choices=(('#E74C3C','Very Importan'), ('#DC7633', 'Importan'), ('#27AE60', 'Event'))),\n 'allDay': forms.CheckboxInput(attrs={'data-off-color':\"danger\", 'class':\"switch\", 'data-size':\"mini\", 'data-on-text':\"YES\", 'data-off-text': \"NO\"}),\n 'start': forms.DateInput(attrs={'class': 'form-control input-md'}),\n 'startTimer': forms.TimeInput(attrs={'class': 'form-control'}),\n 'end': forms.DateInput(attrs={'class': 'form-control input-md'}),\n 'endTimer': forms.TimeInput(attrs={'class': 'form-control'}),\n }","sub_path":"apps/tools/components/CalendarForm.py","file_name":"CalendarForm.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"266014807","text":"import os\nfrom os import system \n\nsystem('clear')\n\n# To create a set, you need to use {}\nmySet = {'apple', 'apple', 'mango', 'kiwi'}\n\n# Print each element of the set - Order is not guaranteed\n# where fruit is each fruit\nfor fruit in mySet:\n print(fruit)\n\nmyNameSet = {'Dunieski', 'Yanet', 'Martha', 'Orlando', 'Yara'}\n\nprint(myNameSet)\n\n# Print each element of the set - Order is not guaranteed\n# where name is each name\n# for (every) name in mySet:\n# print the name\nfor name in myNameSet:\n print(name)\n\n\n\n","sub_path":"Collections/Sets/sets.py","file_name":"sets.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"490181817","text":"# -*- coding: utf-8 -*-\r\nimport cv2\r\n#Import OpenCV\r\n#import cv2.cv as cv\r\n#Import Numpy\r\nimport numpy as np\r\n\r\ncamera_feed = cv2.VideoCapture(0)\r\n\r\ndef nothing(x):\r\n 
pass\r\n\r\n\r\ncv2.namedWindow('image')\r\ncv2.createTrackbar('h_max','image',179,179,nothing)\r\ncv2.createTrackbar('h_min','image',0,179,nothing)\r\ncv2.createTrackbar('s_max','image',255,255,nothing)\r\ncv2.createTrackbar('s_min','image',0,255,nothing)\r\ncv2.createTrackbar('v_max','image',255,255,nothing)\r\ncv2.createTrackbar('v_min','image',0,255,nothing)\r\n\r\n\r\nwhile(1):\r\n\r\n    _,frame = camera_feed.read()\r\n    #Convert the current frame to HSV\r\n    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\r\n\r\n    # Set the HSV threshold for blue\n    h_max=cv2.getTrackbarPos('h_max','image')\r\n    h_min=cv2.getTrackbarPos('h_min','image')\r\n    s_max=cv2.getTrackbarPos('s_max','image')\r\n    s_min=cv2.getTrackbarPos('s_min','image')\r\n    v_max=cv2.getTrackbarPos('v_max','image')\r\n    v_min=cv2.getTrackbarPos('v_min','image')\r\n    \r\n    # -----------------pink------------------\r\n    #lower_blue=np.array([110,50,50])\r\n    lower_blue=np.array([h_min,s_min,v_min])\r\n    #upper_blue=np.array([130,255,255])\r\n    upper_blue=np.array([h_max,s_max,v_max])\r\n    \r\n    \r\n    '''\r\n    #Define the threshold for finding a blue object with hsv\r\n    lower_blue = np.array([120,69,0])\r\n    upper_blue = np.array([179,224,255])\r\n    '''\r\n    #Create a binary image, where anything blue appears white and everything else is black\r\n    mask = cv2.inRange(hsv, lower_blue, upper_blue)\r\n    res=cv2.bitwise_and(frame,frame,mask=mask)\r\n    #Get rid of background noise using erosion, fill in the holes using dilation, and erode the final image one last time\r\n    element = cv2.getStructuringElement(cv2.MORPH_RECT,(9,9))\r\n    mask = cv2.erode(mask,element, iterations=2)\r\n    mask = cv2.dilate(mask,element,iterations=2)\r\n\r\n    mask = cv2.erode(mask,element)\r\n    \r\n    #Create Contours for all blue objects\r\n    image1,contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n    #image1,contours, hierarchy = cv2.findContours(mask, 1, 2)\r\n    maximumArea = 0\r\n    bestContour = None\r\n\r\n    # for all objects\r\n    for contour in contours:\r\n        '''\r\n        #Straight Bounding Rectangle\r\n        x,y,w,h = cv2.boundingRect(contour)\r\n        cv2.rectangle(frame, (x,y),(x+w,y+h), (0,0,255), 3)\r\n        '''\r\n\r\n        #Rotated Rectangle\r\n        rect = cv2.minAreaRect(contour)\r\n        box = cv2.boxPoints(rect)\r\n        box = np.int0(box)\r\n        cv2.drawContours(frame,[box],0,(0,0,255),2)\r\n\r\n\r\n    # #only for the biggest object\r\n    # for contour in contours:\r\n    #     currentArea = cv2.contourArea(contour)\r\n    #     if currentArea > maximumArea:\r\n    #         bestContour = contour\r\n    #         maximumArea = currentArea\r\n    # #Create a bounding box around the biggest blue object\r\n    # if bestContour is not None:\r\n    #     x,y,w,h = cv2.boundingRect(bestContour)\r\n    #     cv2.rectangle(frame, (x,y),(x+w,y+h), (0,0,255), 3)\r\n    \r\n\r\n    #Show the original camera feed with a bounding box overlaid \r\n    cv2.imshow('frame',frame)\r\n\r\n    #cv2.imshow('hsv', hsv)\r\n\r\n    #Show the contours in a separate window\r\n    #cv2.imshow('mask',mask)\r\n\r\n    cv2.imshow('res',res)\r\n    #Use this command to prevent freezes in the feed\r\n    k = cv2.waitKey(5) & 0xFF\r\n    #If escape is pressed close all windows\r\n    if k == 27:\r\n        break\r\n\r\n\r\ncv2.destroyAllWindows() ","sub_path":"코드/PythonCode/opencv_basic.py","file_name":"opencv_basic.py","file_ext":"py","file_size_in_byte":3442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"233088320","text":"\n\nfrom batchinsert import *\n\n\nif __name__ == '__main__':\n\n    if len(sys.argv) < 2:\n        print(\"Please provide an output filename\")\n        exit()\n\n    s = 
RelatedDataSource('lottery_prize_pool2.csv')\n    i = Insert('hd_lottery_prize_pool', \n            prize_id=None,\n            prize_type=s.get_generator('prize_type'),\n            prize_title=s.get_generator('prize_title'),\n            prize_weight=[1],\n            virtual_prize_type=s.get_generator('virtual_prize_type'),\n            virtual_prize_amount=s.get_generator('virtual_prize_amount'),\n            display_position=s.get_generator('display_position'),\n            lottery_id=[1],\n            member_id=[0],\n            prize_status=[0],\n            status=[1],\n            create_time=['2016-09-20 13:00:00'],\n            last_modified=['2016-09-20 13:00:00'],\n            )\n\n    i.set_related_data_source(s)\n    i.set_fields_order([\n        'prize_id', 'prize_type', 'prize_title', 'prize_weight', \n        'virtual_prize_type', 'virtual_prize_amount', 'display_position', \n        'lottery_id', 'member_id', 'prize_status',\n        'status', 'create_time', 'last_modified'])\n\n    i.set_fields_order(['display_position', 'prize_title', 'prize_type', 'virtual_prize_type', 'virtual_prize_amount', 'lottery_id'])\n\n    filename = sys.argv[1]\n    i.perform(10, filename, 'csv')\n\n\n","sub_path":"scripts/batchinsert/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"273352243","text":"\"\"\"\n/***************************************************************************\n Geo360 viewer Plugin \n ***************************************************************************/\n\"\"\"\nimport os\n\nfrom PyQt4 import QtGui, QtCore\nfrom PyQt4.QtCore import Qt, QCoreApplication\n\nfrom qgis.gui import QgsRubberBand, QgsMessageBar\n\nfrom EquirectangularViewer.utils.log import log\n\ntry:\n\t_fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n\tdef _fromUtf8(s):\n\t\treturn s\n\t\ntry:\n\timport sys\n\tfrom pydevd import *\nexcept:\n\tpass\n\nclass qgsutils(object):\n \n\t@staticmethod\n\tdef getAttributeFromFeature(feature, columnName):\n\t\treturn feature.attribute(columnName)\n\n\t@staticmethod\t\n\tdef zoomToFeature(canvas, layer, id):\n\t\tif layer:\n\t\t\tfor feature in layer.getFeatures():\n\t\t\t\tif feature.id()== id:\n\t\t\t\t\tcanvas.setExtent(feature.geometry().boundingBox())\n\t\t\t\t\tcanvas.refresh()\n\t\t\t\t\treturn True\n\t\treturn False\n\n\t@staticmethod\n\tdef removeAllHighlightFeaturesFromCanvasScene(canvas):\n\t\tvertex_items = [ i for i in canvas.scene().items() if issubclass(type(i), QgsRubberBand)]\n\t\tfor ver in vertex_items:\n\t\t\tif ver in canvas.scene().items():\n\t\t\t\tcanvas.scene().removeItem(ver)\n\n\t#Show user & log info/warning/error messages\n\t@staticmethod\n\tdef showUserAndLogMessage(parent, before, text, level, duration = 3, onlyLog = False):\n\t\tif not onlyLog:\t \n\t\t\tparent.iface.messageBar().popWidget()\n\t\t\tparent.iface.messageBar().pushMessage(_fromUtf8(before), _fromUtf8(text), level = level, duration = duration) \n\t\t\tQtGui.qApp.processEvents()\n\t\tif level == QgsMessageBar.INFO:\n\t\t\tlog.info(text)\n\t\telif level == QgsMessageBar.WARNING:\n\t\t\tlog.warning(text)\n\t\telif level == QgsMessageBar.CRITICAL:\n\t\t\tlog.error(text)\n\t\t#QgsMessageLog.logMessage(text, level = level)\n\t\treturn\n\n\t@staticmethod\t\n\tdef getToFeature(canvas, layer, id):\n\t\tif layer:\n\t\t\tfor feature in layer.getFeatures():\n\t\t\t\tif feature.id()== id:\n\t\t\t\t\treturn feature\n\t\treturn False\n ","sub_path":"utils/qgsutils.py","file_name":"qgsutils.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"84046292","text":"from libIOWUtils import rst_table\nfrom classIOWGeneratorSourceBase import iow_generator_source_base\n\nclass iow_generator_source_confluence2(iow_generator_source_base):\n\n    def __init__(self, config, source, extensions):\n        super(iow_generator_source_confluence2, self).__init__(config, source, extensions)\n\n    def generate_iow(self):\n        super(iow_generator_source_confluence2, self).generate_iow()\n\n        #Configuration\n        self.__output_general_configuration()\n        #mappings\n        self.output_mappings()\n        #extensions\n        self.output_extensions()\n\n        self.save()\n\n    def __output_general_configuration(self):\n        self.output_header('Configuration', 1)\n        table = rst_table()\n        table.headers = ['Name', 'Value']\n        table.rows.append(['Instance URL', self.source._raw['configuration']['startingAddresses'][0]])\n        table.rows.append(['Content security', self.format_source_content_security()])\n        \n        table.rows.append(['Authentication: Username', self.source._raw['configuration']['userIdentities']['UserIdentity']['userName']])\n        \n        self.__generate_source_advanced_authentication_options(table)\n        self.__generate_source_content_to_include_spacetype(table)\n        self.__generate_source_content_to_include_spacestatus(table)\n        table.rows.append(['Content to include: space filter', self.source._raw['configuration']['parameters'].get('FilterSpaceRegex', {}).get('value','')])\n        table.rows.append(['Content to include: options: Attachments', self.source._raw['configuration']['parameters']['IndexAttachments']['value']])\n        table.rows.append(['Content to include: options: Comments', self.source._raw['configuration']['parameters']['IndexAttachments']['value']])\n\n        self.output_table(table)\n\n    def __generate_source_advanced_authentication_options(self, table):\n        UseKerberosAuthentication = self.source._raw['configuration']['parameters']['UseKerberosAuthentication']['value'] == \"true\"\n        UseRequestParametersAuth = self.source._raw['configuration']['parameters']['UseRequestParametersAuth']['value'] == \"true\"\n\n        if UseKerberosAuthentication or UseRequestParametersAuth:\n            table.rows.append(['Authentication: Advanced Authentication options','CHECKED'])\n        else:\n            table.rows.append(['Authentication: Advanced Authentication options','UNCHECKED'])\n\n        if UseKerberosAuthentication:\n            table.rows.append(['Authentication: Use Kerberos authentication','SELECTED'])\n        if UseRequestParametersAuth:\n            table.rows.append(['Authentication: Bypass Single Sign-On (SSO)','SELECTED'])\n\n    def __generate_source_content_to_include_spacetype(self, table):\n        IndexOnlyGlobalSpaces = self.source._raw['configuration']['parameters']['IndexOnlyGlobalSpaces']['value'] == \"true\"\n        IndexOnlyPersonalSpaces = self.source._raw['configuration']['parameters']['IndexOnlyPersonalSpaces']['value'] == \"true\"\n\n        if IndexOnlyGlobalSpaces and IndexOnlyPersonalSpaces:\n            table.rows.append(['Content to include: space type','Both'])\n        elif IndexOnlyGlobalSpaces:\n            table.rows.append(['Content to include: space type','Global'])\n        elif IndexOnlyPersonalSpaces:\n            table.rows.append(['Content to include: space type','Personal'])\n\n    def __generate_source_content_to_include_spacestatus(self, table):\n        IndexCurrentSpaces = self.source._raw['configuration']['parameters']['IndexCurrentSpaces']['value'] == \"true\"\n        IndexArchivedSpaces = self.source._raw['configuration']['parameters']['IndexArchivedSpaces']['value'] == \"true\"\n\n        if IndexCurrentSpaces and IndexArchivedSpaces:\n            
table.rows.append(['Content to include: space status','Both'])\n        elif IndexCurrentSpaces:\n            table.rows.append(['Content to include: space status','Current'])\n        elif IndexArchivedSpaces:\n            table.rows.append(['Content to include: space status','Archived'])","sub_path":"api/bin/classIOWGeneratorSourceConfluence2.py","file_name":"classIOWGeneratorSourceConfluence2.py","file_ext":"py","file_size_in_byte":4083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"411842852","text":"import torch\nfrom torchvision.models import mobilenet\n\nMODEL_FILE_NAME = \"mobilenet_v2.pt\"\n\n\ndef create_model(out_dir=\"./\"):\n    model = mobilenet.mobilenet_v2(pretrained=True)\n    model.eval()\n    traced_model = torch.jit.trace(model, torch.randn(1, 3, 224, 224))\n    traced_model.save(out_dir + MODEL_FILE_NAME)\n\n\nif __name__ == \"__main__\":\n    create_model()\n    print(f\"{MODEL_FILE_NAME} model file is created.\")\n","sub_path":"model/create_mobilenet_v2_model.py","file_name":"create_mobilenet_v2_model.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"208533132","text":"import json\r\nfrom sklearn import model_selection, preprocessing, linear_model, naive_bayes, metrics, svm\r\nimport pandas, numpy, textblob, string\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\n\r\nfrom sklearn import metrics, cross_validation\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.model_selection import cross_val_score\r\n\r\nfrom sklearn.externals import joblib\r\n\r\nclass Model:\r\n\tdef __init__(self, xtrain_count, train_y, xvalid_count, valid_y):\r\n\t\tself.xtrain_count = xtrain_count\r\n\t\tself.train_y = train_y\r\n\t\tself.xvalid_count = xvalid_count\r\n\t\tself.valid_y = valid_y\r\n \r\n\tdef train_model(self, classifier):\r\n\t\tclassifier.fit(self.xtrain_count, self.train_y)\r\n\t\tpredictions = classifier.predict(self.xvalid_count)\r\n\t\treturn metrics.accuracy_score(predictions, self.valid_y)\r\n \r\n\tdef cross_validation(self, classifier):\r\n\t\tscores_accuracy = cross_val_score(classifier, self.xvalid_count, self.valid_y, cv=10, scoring='accuracy')\r\n\t\tscores_log_loss = cross_val_score(classifier, self.xvalid_count, self.valid_y, cv=10, scoring='neg_log_loss')\r\n\t\t\r\n\t\tprint('K-fold cross-validation results: ' + classifier.__class__.__name__)\r\n\t\tprint(classifier.__class__.__name__+\" average accuracy is %2.3f\" % scores_accuracy.mean())\r\n\t\tprint(classifier.__class__.__name__+\" average log_loss is %2.3f\" % -scores_log_loss.mean())\r\n\t\t\r\n \r\n\tdef confusion_matrix(self, classifier):\r\n\t\tclassifier.fit(self.xtrain_count, self.train_y)\r\n\t\tpredictions = classifier.predict(self.xvalid_count)\r\n\t\tconf_matrix = confusion_matrix(predictions, self.valid_y)\r\n\t\tprint('confusion matrix:' + classifier.__class__.__name__ )\r\n\t\tprint(conf_matrix)\r\n\r\n\tdef save_model(self, classifier, model_file):\r\n\t\tclassifier.fit(self.xtrain_count, self.train_y)\r\n\t\tjoblib.dump(classifier, model_file)\r\n \r\n\t\t\r\n\t\t","sub_path":"core/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"156466961","text":"import psycopg2, psycopg2.extras\nimport os\nimport glob\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom matplotlib import patches\nfrom matplotlib.pyplot 
import figure\nfrom datetime import timedelta, date\n\n\n\n\n\ntask = pd.read_csv(\"task.csv\") \n\n\ndef draw():\n\n\tfor index, row in task.iterrows():\n\n\t\ttask_id = int(row['task_id'])\n\n\t\ttask_dir = os.path.join(os.getcwd(), 'result/'+str(task_id))\n\t\tif not os.path.isdir(task_dir):\n\t\t\tcontinue\n\n\t\timage_dir = os.path.join(task_dir, 'image')\n\t\tif not os.path.isdir(image_dir):\n\t\t\tos.makedirs(image_dir)\n\n\t\tload_dir = os.path.join(task_dir, 'output')\n\t\tif not os.path.isdir(load_dir):\n\t\t\tcontinue\n\n\t\ttrain_loss = np.load(os.path.join(load_dir, 'train_loss.npy'))\n\t\ttrain_acc = np.load(os.path.join(load_dir, 'train_acc.npy'))\n\t\ttrain_f1= np.load(os.path.join(load_dir, 'train_f1.npy'))\n\t\ttrain_precision = np.load(os.path.join(load_dir, 'train_precision.npy'))\n\t\ttrain_recall = np.load(os.path.join(load_dir, 'train_recall.npy'))\n\n\t\tvalid_loss = np.load(os.path.join(load_dir, 'valid_loss.npy'))\n\t\tvalid_acc = np.load(os.path.join(load_dir, 'valid_acc.npy'))\n\t\tvalid_f1= np.load(os.path.join(load_dir, 'valid_f1.npy'))\n\t\tvalid_precision = np.load(os.path.join(load_dir, 'valid_precision.npy'))\n\t\tvalid_recall = np.load(os.path.join(load_dir, 'valid_recall.npy'))\n\n\n\t\t#plot train and validation loss\n\t\tplt.plot(train_loss)\n\t\tplt.plot(valid_loss)\n\t\tplt.title('Loss')\n\t\tplt.ylabel('Loss')\n\t\tplt.xlabel('Epoch')\n\t\tplt.legend(['Train', 'Valid'])\n\t\tplt.savefig(os.path.join(image_dir, 'loss.png'))\n\t\tplt.clf()\n\n\n\t\t#plot train and validation accuracy\n\t\tplt.plot(train_acc)\n\t\tplt.plot(valid_acc)\n\t\tplt.title('Accuracy')\n\t\tplt.ylabel('Accuracy')\n\t\tplt.xlabel('Epoch')\n\t\tplt.legend(['Train', 'Valid'])\n\t\tplt.savefig(os.path.join(image_dir, 'accuracy.png'))\n\t\tplt.clf()\n\n\t\t#plot train and validation f1\n\t\tplt.plot(train_f1)\n\t\tplt.plot(valid_f1)\n\t\tplt.title('F1')\n\t\tplt.ylabel('F1')\n\t\tplt.xlabel('Epoch')\n\t\tplt.legend(['Train', 'Valid'])\n\t\tplt.savefig(os.path.join(image_dir, 'f1.png'))\n\t\tplt.clf()\n\n\t\t#plot train and validation precision\n\t\tplt.plot(train_precision)\n\t\tplt.plot(valid_precision)\n\t\tplt.title('Precision')\n\t\tplt.ylabel('Precision')\n\t\tplt.xlabel('Epoch')\n\t\tplt.legend(['Train', 'Valid'])\n\t\tplt.savefig(os.path.join(image_dir, 'precision.png'))\n\t\tplt.clf()\n\n\t\t#plot train and validation recall\n\t\tplt.plot(train_recall)\n\t\tplt.plot(valid_recall)\n\t\tplt.title('Recall')\n\t\tplt.ylabel('Recall')\n\t\tplt.xlabel('Epoch')\n\t\tplt.legend(['Train', 'Valid'])\n\t\tplt.savefig(os.path.join(image_dir, 'recall.png'))\n\t\tplt.clf()\n\n\t\tplt.plot(train_acc)\n\t\tplt.plot(train_recall)\n\t\tplt.plot(train_precision)\n\t\tplt.plot(train_f1)\n\t\tplt.title('Train evaluation')\n\t\tplt.ylabel('Score')\n\t\tplt.xlabel('Epoch')\n\t\tplt.legend(['Accuracy', 'Recall', 'Precision', 'F1'])\n\t\tplt.savefig(os.path.join(image_dir, 'train_evaluation.png'))\n\t\tplt.clf()\n\n\t\tplt.plot(valid_acc)\n\t\tplt.plot(valid_recall)\n\t\tplt.plot(valid_precision)\n\t\tplt.plot(valid_f1)\n\t\tplt.title('Valid evaluation')\n\t\tplt.ylabel('Score')\n\t\tplt.xlabel('Epoch')\n\t\tplt.legend(['Accuracy', 'Recall', 'Precision', 'F1'])\n\t\tplt.savefig(os.path.join(image_dir, 'valid_evaluation.png'))\n\t\tplt.clf()\n\ndef get_loss():\n\n\tfor index, row in task.iterrows():\n\n\t\ttask_id = int(row['task_id'])\n\n\t\ttask_dir = os.path.join(os.getcwd(), 'result/'+str(task_id))\n\t\tif not os.path.isdir(task_dir):\n\t\t\tcontinue\n\n\t\tload_dir = 
os.path.join(task_dir, 'output')\n\t\tif not os.path.isdir(load_dir):\n\t\t\tcontinue\n\n\t\ttrain_f1= np.load(os.path.join(load_dir, 'train_f1.npy'))\n\t\tvalid_f1= np.load(os.path.join(load_dir, 'valid_f1.npy'))\n\n\t\ttrain_loss = np.load(os.path.join(load_dir, 'train_loss.npy'))\n\t\tvalid_loss = np.load(os.path.join(load_dir, 'valid_loss.npy'))\n\n\t\ttrain_loss_min = np.min(train_loss)\n\t\tvalid_loss_min = np.min(valid_loss)\n\n\t\ttrain_f1_max = np.max(train_f1)\n\t\tvalid_f1_max = np.max(valid_f1)\n\n\t\t# 8047 valid loss min = 0.599\n\t\t#print('task id: '+str(task_id)+' train loss min: '+str(train_loss_min)+' valid loss min: '+str(valid_loss_min))\n\n\t\t# 8056 valid f1 max = 0.594\n\t\tprint('task id: '+str(task_id)+' train f1 max: '+str(train_f1_max)+' valid f1 max: '+str(valid_f1_max))\n\n\nif __name__ == '__main__':\n    get_loss()","sub_path":"experiment/experiment1/draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":4211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"230503509","text":"#!/usr/bin/env python3\n# agent.py\n#This program is implemented with evaluation and tree search. First, it searches all the possibilities, such as\n#searching the mist, blowing up a wall, making a boat, or blowing up a door. It finds every possible step.\n#Second, it prunes the tree; for instance, it records a harbour for an island, which reduces the number of\n#possibilities in the tree.\n#Third, it works out which walls need to be blown up; considering every wall would be exponential.\n#We record the places already searched and try to search the unknown places.\n#The evaluation part customizes the score by state. For example, holding dynamite scores high, while a\n#dynamite used wrongly scores low.\n#\n#This program can be invoked with some hidden parameters: [-print] shows every step, and --w [float] selects the\n#algorithm between greedy and uniform search; when the float value is one, it is A* search.\n#We finished this assignment 7 days after the assignment was released.\n#Huang,Wei z5119435 ChengWen, Peng z5103407\n#\n#\nimport socket\nfrom argparse import ArgumentParser\nfrom collections import deque\nimport os\nimport sys\nimport math\n\nview=[[str('m') for i in range(5) ] for _ in range(5)]\nview[2][2]='I'\nimap=15\nw=1\nbest_path=[]\ntemp_path=[]\nused_wall=[]\nused_tree=[]\nused_ground_mist=[]\nused_ocean_mist=[]\nisland_ocean_index=[]\ncenter_x=round(imap/2)\ncenter_y=round(imap/2)\noff_x=0\noff_y=0\nreduce_mark=0\nsys.setrecursionlimit(2147483640)\n\nparser = ArgumentParser()\nparser.add_argument('-p', type=int,dest = 'port', required = True)\nparser.add_argument('--imap', type=int,dest = 'imap', required = False)\nparser.add_argument('--w', type=float,dest = 'w', required = False)\nparser.add_argument('-print', dest = 'print', action='store_true',required = False)\nargs = parser.parse_args()\nport = args.port\nif(args.imap):\n    imap=args.imap\nif(args.w):\n    w=args.w\n#port=12344\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsock.connect(('localhost',port))\n\nirow=round(imap/2)-1\nicol=round(imap/2)\nrow=round(imap/2)\ncol=round(imap/2)\neast = 2\nnorth = 1\nwest = 0\nsouth = 3\ndirn = 1\nmap=[['m' for _ in range(imap+1)] for _ in range(imap+1)]\nmap[round(imap/2)][round(imap/2)]=' '\nhave_axe = 0\nhave_key = 0\nhave_raft = 0\ngame_won = False\ngame_lost = False\nhave_treasure = 0\nnum_dynamites_held = 0\nsimulate_mark=0\n\n\n\n\ndef action(a):\n    if(a):\n        #print('take 
action:',a)\n sock.send(bytes(a,encoding='utf-8'))\n\ndef print_view():\n global view\n for i in view:\n print(i)\n\ndef manhattan_dist(x, y, dx, dy):\n return abs(x - dx) + abs(y - dy)\n\ndef read_view():\n global view\n for i in range(5):\n for j in range(5):\n if ((i == 2) and (j == 2)):\n continue\n k = str(sock.recv(1), encoding='utf-8')\n view[i][j]=k\ndef draw_map():\n global map\n global view\n global row\n global col\n temp=[[str(i) for i in range(5) ] for _ in range(5)]\n for i in range(5):\n for j in range(5):\n if(dirn==0):\n temp[4 - j][ i] = view[i][j]\n if (dirn == 1):\n temp[i][j] = view[i][j]\n if (dirn == 2):\n temp[j][4 - i] = view[i][j]\n if (dirn == 3):\n temp[4 - i][4 - j] = view[i][j]\n for i in range(-2,3):\n for j in range(-2,3):\n if(i!=0 or j!=0):\n map[row + i][col + j] = temp[2+i][2+j]\n\ndef forward_step():\n global irow\n global icol\n if(dirn==0):\n irow = row;\n icol = col - 1;\n if (dirn == 1):\n irow = row - 1;\n icol = col;\n if (dirn == 2):\n irow = row;\n icol = col + 1;\n if (dirn == 3):\n irow = row + 1;\n icol = col;\n\ndef judge_move(x,y,dx,dy,action=None):\n dd=0\n global dirn\n op = [['', 'R', 'RR', 'L'],\n ['L', '', 'R', 'RR'],\n ['RR', 'L', '', 'R'],\n ['R', 'RR', 'L', '']]\n if(x==dx and y==dy ):\n return ''\n if(x-1==dx and y==dy):#1\n dd=1\n if(x+1==dx and y==dy):#3\n dd=3\n if(x==dx and y+1==dy):#2\n dd=2\n if(x==dx and y-1==dy):#0\n dd=0\n if(action in {'C','U','B'}):\n return op[dirn][dd]+action+'F'\n return op[dirn][dd]+'F'\n\ndef change_dirn(a):\n global dirn\n if(a=='L'):\n dirn = (dirn-1)%4\n if(a=='R'):\n dirn = (dirn+1)%4\n\n################################################################################\ndef find_all_continents(x,y):\n options = find_ocean_options(x,y)\n temp_all_continents =[]\n all_continents=[]\n def looking_for_land(x,y):\n nonlocal temp_all_continents\n nonlocal all_continents\n new_land=[]\n temp=[]\n for i in range(len(temp_all_continents)):\n for j in range(len(temp_all_continents[i])):\n if(temp_all_continents[i][j]==[x,y]):\n all_continents[i].append([[x,y],' '])\n return\n new_land = find_ground_options(x,y)\n for n in new_land:\n temp.append(n[0])\n temp_all_continents.append(temp)\n all_continents.append([[[x,y],' ']])\n for opt in options:\n for o in opt[1:]:\n if(o[2]==' '):\n looking_for_land(o[0],o[1])\n return all_continents\n\ndef find_all_oceans(x,y):\n options = find_ground_options(x,y)\n temp_all_ocean = []\n all_oceans=[]\n def looking_for_ocean(gx,gy,x, y):\n nonlocal all_oceans\n nonlocal temp_all_ocean\n new_ocean = []\n temp = []\n for i in range(len(temp_all_ocean)):\n for j in range(len(temp_all_ocean[i])):\n if (temp_all_ocean[i][j] == [x, y]):\n all_oceans[i].append([[gx,gy],[x, y],'~'])\n return\n new_ocean = find_ocean_options(x, y)\n for n in new_ocean:\n temp.append(n[0])\n temp_all_ocean.append(temp)\n all_oceans.append([[[gx,gy],[x, y],'~']])\n\n for opt in options:\n for o in opt[1:]:\n if (o[2] == '~'):\n looking_for_ocean(opt[0][0],opt[0][1],o[0], o[1])\n return all_oceans\n################################################################################\ndef ground_neighbor(x,y):\n neighbor=[[x,y]]\n if (map[x - 1][y] not in {' ','.'}):\n neighbor.append([x - 1,y,map[x - 1][y]])\n if (map[x][y + 1] not in {' ','.'}):\n neighbor.append([x, y+1, map[x][y+1]])\n if (map[x + 1][y] not in {' ','.'}):\n neighbor.append([x+1, y, map[x+1][y]])\n if (map[x][y - 1] not in {' ','.'}):\n neighbor.append([x, y-1, map[x][y-1]])\n\n for i in range(-2,3):\n for j in range(-2,3):\n if (map[x 
+ i][y +j] == 'm' and [x + i, y + j, map[x + i][y + j]] not in neighbor) and (i!=0 or j!=0):\n neighbor.append([x + i, y + j, map[x + i][y + j]])\n if len(neighbor)>1:\n return neighbor\n else:\n return []\n\ndef find_ground_options(x,y):\n u = deque()\n temp=[]\n option=[]\n used=[]\n global map\n if(map[x][y] ==' '):\n u.append([x,y])\n else :\n return []\n\n while(u):\n [x, y]=u.popleft()\n temp=ground_neighbor(x,y)\n if (temp):\n option.append(temp)\n if([x,y]==[center_x,center_y] and [x,y] not in used and [x,y] not in u):\n option.append([[x,y]])\n used.append([x, y])\n if (map[x-1][y] == ' ' and [x-1,y] not in used and [x-1,y] not in u):\n u.append([x-1, y])\n if (map[x][y+1] == ' ' and [x,y+1] not in used and [x,y+1] not in u):\n u.append([x, y+1])\n if (map[x+1][y] == ' ' and [x+1,y] not in used and [x+1,y] not in u):\n u.append([x+1, y])\n if (map[x][y-1] == ' ' and [x,y-1] not in used and [x,y-1] not in u):\n u.append([x, y-1])\n return option\n\ndef ocean_neighbor(x,y):\n neighbor=[[x,y]]\n if (map[x - 1][y] not in {'~','.'}):\n neighbor.append([x - 1, y, map[x - 1][y]])\n if (map[x][y + 1] not in {'~','.'}):\n neighbor.append([x, y + 1, map[x][y + 1]])\n if (map[x + 1][y] not in {'~','.'}):\n neighbor.append([x + 1, y, map[x + 1][y]])\n if (map[x][y - 1] not in {'~','.'}):\n neighbor.append([x, y - 1, map[x][y - 1]])\n\n for i in range(-2,3):\n for j in range(-2,3):\n if (map[x + i][y +j] == 'm' and [x + i, y + j, map[x + i][y + j]] not in neighbor)and (i!=0 or j!=0):\n neighbor.append([x + i, y + j, map[x + i][y + j]])\n if len(neighbor)>1:\n return neighbor\n else:\n return []\n\ndef find_ocean_options(x,y):\n u = deque()\n option=[]\n temp=[]\n used=[]\n global map\n\n if(map[x][y] =='~'):\n u.append([x,y])\n else :\n return []\n\n while(u):\n [x, y]=u.popleft()\n temp=ocean_neighbor(x,y)\n if (temp ):\n option.append(temp)\n used.append([x,y])\n if (map[x-1][y] =='~' and [x-1,y] not in used and [x-1,y] not in u):\n u.append([x-1,y])\n if (map[x][y+1] =='~' and [x,y+1] not in used and [x,y+1] not in u):\n u.append([x,y+1])\n if (map[x+1][y] =='~' and [x+1,y] not in used and [x+1,y] not in u):\n u.append([x+1, y])\n if (map[x][y-1] =='~' and [x,y-1] not in used and [x,y-1] not in u):\n u.append([x, y-1])\n return option\n\n\n\ndef find_path(x,y,dx,dy):\n\n used_path = []\n path1 = []\n global map\n temp1=''\n temp2=''\n if (map[x][y] in {'d', 'k', 'a', '$'}):\n temp1 = map[x][y]\n map[x][y] = ' '\n if (map[dx][dy] in {'d', 'k', 'a', '$'}):\n temp2 = map[dx][dy]\n map[dx][dy] = ' '\n current_state = map[x][y]\n\n if (current_state != map[dx][dy] and current_state in {' ','~'} and map[dx][dy] in {' ','~'} and\n ((dx - 1 == x and dy == y) or (dx + 1 == x and dy == y) or (dx == x and dy - 1 == y)\n or (dx == x and dy + 1 == y))):\n return [[x, y], [dx, dy]]\n def recursive_path(cx,cy):\n nonlocal path1\n nonlocal used_path\n nonlocal current_state\n nonlocal x\n nonlocal y\n nonlocal dx\n nonlocal dy\n global w\n path1.append([cx,cy])\n used_path.append([cx,cy])\n if([cx-1,cy]==[dx,dy]):\n path1.append([cx-1, cy])\n return True\n if([cx,cy+1]==[dx,dy]):\n path1.append([cx,cy+1])\n return True\n if([cx+1,cy]==[dx,dy]):\n path1.append([cx+1,cy])\n return True\n if([cx,cy-1]==[dx,dy]):\n path1.append([cx,cy-1])\n return True\n for _ in range(4):\n a_star = []\n temp = []\n if(map[cx-1][cy] == current_state and [cx-1,cy] not in used_path):\n a_star.append([cx-1,cy,w*manhattan_dist(x,y,cx-1,cy)+(2-w)*manhattan_dist(dx,dy,cx-1,cy)])\n if(map[cx][cy+1] == current_state and [cx,cy+1] not 
in used_path):\n a_star.append([cx,cy+1,w*manhattan_dist(x,y,cx,cy+1)+(2-w)*manhattan_dist(dx,dy,cx,cy+1)])\n if(map[cx+1][cy] == current_state and [cx+1,cy] not in used_path):\n a_star.append([cx+1,cy,w*manhattan_dist(x,y,cx+1,cy)+(2-w)*manhattan_dist(dx,dy,cx+1,cy)])\n if(map[cx][cy-1] == current_state and [cx,cy-1] not in used_path):\n a_star.append([cx,cy-1,w*manhattan_dist(x,y,cx,cy-1)+(2-w)*manhattan_dist(dx,dy,cx,cy-1)])\n if(a_star):\n temp=min(a_star,key=lambda x:x[2])\n if(temp):\n if( recursive_path(temp[0],temp[1])):\n return True\n else:\n path1.pop()\n return False\n\n if(recursive_path(x,y)):\n if (temp1):\n map[x][y]=temp1\n if (temp2):\n map[dx][dy]=temp2\n return path1\n else:\n if (temp1):\n map[x][y]=temp1\n if (temp2):\n map[dx][dy]=temp2\n return []\n\ndef ground_evaluate(x,y):\n option=[]\n global used1\n global num_dynamites_held#mark=100\n global have_raft#mark=20\n global have_axe#mark=5\n global have_key#mark=10\n global have_treasure # mark=99999\n global game_won\n global reduce_mark\n mist=[]\n mark=0\n explored_mist=[]\n dynamites=[]\n tree=[]\n key=False\n axe=False\n treasure=[]\n temp_raft=0\n if(map[x][y]==' '):\n option=find_ground_options(x,y)\n for o in option:\n for op in o[1:]:\n if (op[2]=='m' and [op[0],op[1]] not in mist):\n mist.append([op[0],op[1]])\n if (op[2] == 'd' and [op[0], op[1]] not in dynamites):\n dynamites.append([op[0], op[1]])\n if (op[2] == 'T' and [op[0], op[1]] not in tree):\n tree.append([op[0], op[1]])\n if (op[2] == '$' and [op[0], op[1]] not in treasure):\n treasure.append([op[0], op[1]])\n if (op[2] == 'X' and [op[0], op[1]] not in explored_mist):\n explored_mist.append([op[0], op[1]])\n if (op[2] == 'k'):\n key=True\n if(op[2]=='a' ):\n axe=True\n #if([[center_x,center_y]] in option and have_treasure ):\n # mark=99999\n # return mark\n mark=len(dynamites)*100+num_dynamites_held*200+((axe or have_axe) and len(tree)>=1)*20\\\n +axe*5+ bool(have_axe)*20+bool(have_raft)*4+bool(have_key)*20\\\n +key*10+len(explored_mist)*3+len(mist)*2+reduce_mark\n return mark\n\ndef ocean_evaluate(x,y):\n global used1\n global num_dynamites_held#mark=100\n global have_raft#mark=20\n global have_axe#mark=5\n global have_key#mark=10\n global reduce_mark\n used1=[]\n explored_mist=[]\n mist=[]\n mark = 0\n if(map[x][y]=='~'):\n option = find_ocean_options(x,y)\n for o in option:\n for op in o[1:]:\n if (op[2] == 'm' and [op[0],op[1]] not in mist):\n mist.append([op[0],op[1]])\n if (op[2] == 'X' and [op[0], op[1]] not in explored_mist):\n explored_mist.append([op[0], op[1]])\n mark = len(mist) + len(explored_mist) * 1.2 + num_dynamites_held * 200 +bool(have_axe) * 20 \\\n + bool(have_raft) * 4 + len(mist)*2 + len(explored_mist) * 3 + bool(have_key) * 20 +reduce_mark\n return mark\ndef is_value_tree(x,y):\n global map\n for i in range(-2,3):\n for j in range(-2, 3):\n if(map[x+i][y+j] in {'d', 'k', 'a', '$','m'}):\n return True\n return False\ndef is_value(x1,y1):\n used_wall1 = []\n def is_v(x,y,x0,y0,depth=num_dynamites_held):\n global num_dynamites_held\n global map\n nonlocal used_wall1\n used_wall1.append([x, y])\n if (map[x][y] in {'d', 'k', 'a', '$','m'}):\n return True\n if(depth<1):\n return False\n if(manhattan_dist(x,y,x0,y0)>4):\n return False\n for i in {-1,1}:\n if ([x + i,y] not in used_wall1 and map[x + i][y] not in {'.','~'}):\n if(map[x + i][y]=='*' and num_dynamites_held>1):\n num_dynamites_held-=1\n if(is_v(x + i,y,x0,y0)):\n num_dynamites_held+=1\n return True\n num_dynamites_held += 1\n elif(map[x + i][y]!='*' and is_v(x + i, 
y,x0,y0)):\n return True\n if ([x,y + i] not in used_wall1 and map[x][y + i] not in {'.','~'}):\n if(map[x][y + i]=='*' and num_dynamites_held>1):\n num_dynamites_held-=1\n if(is_v(x, y + i,x0,y0)):\n num_dynamites_held+=1\n return True\n num_dynamites_held += 1\n elif(map[x][y + i]!='*' and is_v(x, y + i,x0,y0)):\n return True\n used_wall1.pop()\n return False\n return is_v(x1,y1,x1,y1)\n\ndef init_value():\n global simulate_mark\n global best_path\n global used_wall\n global used_tree\n global used_ground_mist\n global used_ocean_mist\n simulate_mark=0\n best_path = []\n used_wall = []\n used_tree = []\n used_ground_mist=[]\n used_ocean_mist=[]\n\ndef clear_mist(x,y):\n global map\n temp=[]\n for i in range(-2, 3):\n for j in range(-2, 3):\n if (map[x + i][y + j] == 'm'):\n temp.append([x + i,y + j])\n map[x + i][y + j] = 'X'\n return temp\n\ndef recover_mist(mist):\n global map\n for temp in mist:\n if (map[temp[0]][temp[1]] == 'X'):\n map[temp[0]][temp[1]]='m'\n\ndef simulate(x,y):\n global temp_path\n global best_path\n global used_tree\n global used_wall\n global used_ground_mist\n global used_ocean_mist\n global simulate_mark\n global have_axe\n global have_key\n global have_raft\n global game_won\n global have_treasure\n global num_dynamites_held\n global island_ocean_index\n global reduce_mark\n option=[]\n if(map[x][y] == ' '):\n g_option=find_ground_options(x,y)\n if (map[x][y] == '~'):\n o_option=find_ocean_options(x,y)\n if( best_path == []):# do nothing ,just waiting for death\n simulate_mark=-99999\n if (game_won):\n return\n if (map[x][y] == ' '): # find the treasure and go home\n if(have_treasure and [[center_x,center_y]] in g_option):\n temp1=[[center_x,center_y],' ']\n ##print('[center_x,center_y]:',[[center_x,center_y],' '])\n temp_path.append(temp1)\n #print('game_won:',game_won)\n game_won = True\n temp_mark = 99999\n if (temp_mark>simulate_mark or(temp_mark==simulate_mark and len(temp_path)manhattan_dist(x,y,opt[0][0],opt[0][1]))\n or not temp1):\n temp1=opt[0]\n if(temp1):\n used_ground_mist.append(temp1)\n mist=clear_mist(temp1[0],temp1[1])\n temp_path.append([temp1,'m'])\n temp_mark = ground_evaluate(temp1[0],temp1[1])\n if(temp_mark > simulate_mark or (temp_mark == simulate_mark and len(temp_path)manhattan_dist(x,y,opt[0][0],opt[0][1]))\n or not temp1):\n temp1=opt[0]\n temp2=[o[0],o[1]]\n if(temp1 and temp2 ):\n mist1=clear_mist(temp1[0], temp1[1])\n mist2=clear_mist(temp2[0], temp2[1])\n if(map[temp2[0]][temp2[1]]=='-'):\n map[temp2[0]][temp2[1]] = ' '\n temp_path.append([temp1,temp2,'U'])\n temp_mark = ground_evaluate(x,y)\n if(temp_mark > simulate_mark or (temp_mark == simulate_mark and len(temp_path)manhattan_dist(x,y,opt[0][0],opt[0][1]))or not temp2):\n temp2=o\n if(temp2):\n mist=clear_mist(temp2[0], temp2[1])\n if(map[temp2[0]][temp2[1]]=='d'):\n num_dynamites_held+=1\n map[temp2[0]][temp2[1]] = ' '\n temp_path.append([temp2,'d'])\n if(map[temp2[0]][temp2[1]]=='k'):\n have_key+=1\n map[temp2[0]][temp2[1]] = ' '\n temp_path.append([temp2,'k'])\n if(map[temp2[0]][temp2[1]]=='a'):\n have_axe+=1\n map[temp2[0]][temp2[1]] = ' '\n temp_path.append([temp2,'a'])\n if(map[temp2[0]][temp2[1]]=='$'):\n have_treasure+=1\n map[temp2[0]][temp2[1]] = ' '\n temp_path.append([temp2,'$'])\n temp_mark = ground_evaluate(temp2[0],temp2[1])\n if(temp_mark > simulate_mark or (temp_mark == simulate_mark and len(temp_path) simulate_mark or (temp_mark == simulate_mark and len(temp_path) simulate_mark or (temp_mark == simulate_mark and 
len(temp_path)manhattan_dist(x,y,opt[0][0],opt[0][1]))\n or not temp1):\n temp1=opt[0]\n if(temp1 ):\n used_ocean_mist.append(temp1)\n mist=clear_mist(temp1[0], temp1[1])\n temp_path.append([temp1,'m'])\n temp_mark = ocean_evaluate(x,y)\n if(temp_mark > simulate_mark or (temp_mark == simulate_mark and len(temp_path)manhattan_dist(x,y,opt[0][0],opt[0][1]))or not temp1):\n temp1=opt[0]\n temp2=[o[0],o[1]]\n if (temp1 and temp2):\n mist1=clear_mist(temp1[0], temp1[1])\n mist2=clear_mist(temp2[0], temp2[1])\n if(map[temp2[0]][temp2[1]]=='-'):\n map[temp2[0]][temp2[1]] = ' '\n temp_path.append([temp1,temp2,'U'])\n temp_mark = ground_evaluate(x,y)\n if(temp_mark > simulate_mark or (temp_mark == simulate_mark and len(temp_path) simulate_mark or (temp_mark == simulate_mark and len(temp_path) simulate_mark or (temp_mark == simulate_mark and len(temp_path) simulate_mark or (temp_mark == simulate_mark and len(temp_path) simulate_mark or (temp_mark == simulate_mark and len(temp_path) simulate_mark or (temp_mark == simulate_mark and len(temp_path) simulate_mark or (temp_mark == simulate_mark and len(temp_path) simulate_mark or (temp_mark == simulate_mark and len(temp_path) simulate_mark or (temp_mark == simulate_mark and len(temp_path)manhattan_dist(x,y,opt[0][0],opt[0][1]))\n or not temp1)):\n temp1 = opt[0]\n temp2 = [o[0], o[1]]\n if(temp1):\n used_tree.append([temp2[0],temp2[1]])\n mist1=clear_mist(temp1[0], temp1[1])\n mist2=clear_mist(temp2[0], temp2[1])\n map[temp2[0]][temp2[1]] = ' '\n temp_path.append([temp1,temp2,'C'])\n have_raft += 1\n temp_mark = ground_evaluate(temp2[0],temp2[1])\n if(temp_mark > simulate_mark or (temp_mark == simulate_mark and len(temp_path)= len(map)):\n temp_map = [['m' for _ in range(len(map[0]))] for _ in range(len(map) + 2)]\n for i in range(len(map)):\n for j in range(len(map[0])):\n temp_map[i][j] = map[i][j]\n map = [[i for i in temp_map[j]] for j in range(len(temp_map))]\n elif (row - 6 < 0):\n temp_map = [['m' for _ in range(len(map[0]))] for _ in range(len(map) + 2)]\n for i in range(len(map)):\n for j in range(len(map[0])):\n temp_map[i + 2][j] = map[i][j]\n map = [[i for i in temp_map[j]] for j in range(len(temp_map))]\n center_x += 2\n off_x += 2\n elif (col + 6 >= len(map[0])):\n temp_map = [['m' for _ in range(len(map[0]) + 2)] for _ in range(len(map))]\n for i in range(len(map)):\n for j in range(len(map[0])):\n temp_map[i][j] = map[i][j]\n map = [[i for i in temp_map[j]] for j in range(len(temp_map))]\n elif (col - 6 < 0):\n temp_map = [['m' for _ in range(len(map[0]) + 2)] for _ in range(len(map))]\n for i in range(len(map)):\n for j in range(len(map[0])):\n temp_map[i][j + 2] = map[i][j]\n map = [[i for i in temp_map[j]] for j in range(len(temp_map))]\n center_y += 2\n off_y += 2\n [row, col] = [p[0]+off_x, p[1]+off_y]\n #print('off_x,off_y:',off_x,off_y)\n action(a_s)\n change_dirn(a_s)\n read_view()\n draw_map()\n forward_step()\n #print_view()#################\n if(args.print):\n print_map()###################\n [x, y] = [x + off_x, y + off_y]\n [dx, dy] = [dx + off_x, dy + off_y]\n if bool(act):\n #print('row,col,dx,dy,act:',row,col,dx,dy,act)\n action_str=judge_move(row,col,dx,dy,action=act)\n #print('action_str:', action_str)\n for a_s in action_str:\n #print('current_direction:',dirn)\n if(a_s == 'F'):\n if (row + 6 > len(map)):\n temp_map = [['m' for _ in range(len(map[0]))] for _ in range(len(map) + 2)]\n for i in range(len(map)):\n for j in range(len(map[0])):\n temp_map[i][j] = map[i][j]\n map = [[i for i in temp_map[j]] for j in 
range(len(temp_map))]\n elif (row - 6 < 0):\n temp_map = [['m' for _ in range(len(map[0]))] for _ in range(len(map) + 2)]\n for i in range(len(map)):\n for j in range(len(map[0])):\n temp_map[i + 2][j] = map[i][j]\n map = [[i for i in temp_map[j]] for j in range(len(temp_map))]\n center_x += 2\n off_x += 2\n row+=1\n elif (col + 6 > len(map[0])):\n temp_map = [['m' for _ in range(len(map[0]) + 2)] for _ in range(len(map))]\n for i in range(len(map)):\n for j in range(len(map[0])):\n temp_map[i][j] = map[i][j]\n map = [[i for i in temp_map[j]] for j in range(len(temp_map))]\n elif (col - 6 < 0):\n temp_map = [['m' for _ in range(len(map[0]) + 2)] for _ in range(len(map))]\n for i in range(len(map)):\n for j in range(len(map[0])):\n temp_map[i][j + 2] = map[i][j]\n map = [[i for i in temp_map[j]] for j in range(len(temp_map))]\n center_y += 2\n off_y += 2\n col+=1\n else:\n [row,col]=[dx,dy]\n action(a_s)\n change_dirn(a_s)\n read_view()\n draw_map()\n forward_step()\n #print_view()###############\n #print_map()#############\n #print('find path~:',temp)\n if(map[row][col]=='d'):\n map[row][col]=' '\n num_dynamites_held += 1\n if(map[row][col]=='k'):\n map[row][col]=' '\n have_key += 1\n if(map[row][col]=='a'):\n map[row][col]=' '\n have_axe += 1\n if(map[row][col]=='$'):\n map[row][col]=' '\n have_treasure += 1\n #print('have boat:',have_raft)\n return\nmain_option=[]\nmain_path=[]\nmain_temp1=[]\ng_think = False\no_think = False\nread_view()\ndraw_map()\ntry:\n while(not game_won):\n #print('direction: ',dirn)\n #print_view()\n #print_map()\n #print('boat##############################',have_raft)\n #print('num_dynamites_held:',num_dynamites_held)\n main_temp1=[]\n main_temp2=[]\n off_x = 0\n off_y = 0\n #print('current location:',map[row][col])\n if (map[row][col] ==' '):\n main_option=find_ground_options(row,col)\n #print('options:',main_option)\n #print(main_option)\n #print('current_location:'[row,col])\n for opt in main_option:\n for o in opt[1:]:\n if (o[2] == 'm'):\n if ((main_temp1 and manhattan_dist(row, col, main_temp1[0], main_temp1[1])\n > manhattan_dist(row, col, opt[0][0], opt[0][1])) or not main_temp1):\n main_temp1 = opt[0]\n g_think=True\n break\n if(main_temp1):\n break\n if(main_temp1):\n move(row,col,main_temp1[0],main_temp1[1])\n continue\n elif(g_think):\n g_think=False\n for i in find_all_oceans(row, col):\n if (i[0] not in island_ocean_index):\n island_ocean_index.append(i[0])\n if(have_axe):\n main_temp1 = []\n main_temp2 = []\n for opt in main_option:\n for o in opt[1:]:\n if (o[2] == 'T' and is_value_tree(o[0],o[1])):\n if ((main_temp1 and manhattan_dist(row, col, main_temp1[0], main_temp1[1])\n > manhattan_dist(row, col, opt[0][0], opt[0][1])) or not main_temp1):\n main_temp1 = opt[0]\n main_temp2 = [o[0],o[1]]\n break\n if (main_temp1):\n break\n if(main_temp1):\n move(main_temp1[0], main_temp1[1], main_temp2[0], main_temp2[1],act='C')\n have_raft=1\n continue\n for opt in main_option:\n for o in opt[1:]:\n if (o[2] in { 'd','a','$','k' }):\n if ((main_temp1 and manhattan_dist(row, col, main_temp1[0], main_temp1[1])\n > manhattan_dist(row, col, opt[0][0], opt[0][1])) or not main_temp1):\n main_temp1 = [o[0],o[1]]\n break\n if (main_temp1):\n move(row, col,main_temp1[0], main_temp1[1])\n continue\n if (map[row][col] == '~'):\n main_option=find_ocean_options(row,col)\n for opt in main_option:\n for o in opt[1:]:\n if (o[2] == 'm'):\n if ((main_temp1 and manhattan_dist(row, col, main_temp1[0], main_temp1[1])\n > manhattan_dist(row, col, opt[0][0], opt[0][1])) or not 
main_temp1):\n main_temp1 = opt[0]\n o_think=True\n break\n if(main_temp1):\n move(row,col,main_temp1[0],main_temp1[1])\n continue\n elif(o_think):\n o_think=False\n for i in find_all_continents(row, col):\n if (i[0] not in island_ocean_index):\n island_ocean_index.append(i[0])\n\n if (not main_temp1):\n #print('have_axe:',str(have_axe))\n #print(find_ground_options(row,col))\n init_value()\n temp_off_x=0\n temp_off_y=0\n simulate(row,col)\n #print('best_path:', best_path, 'simulate_mark:', simulate_mark)\n for i in range(len(best_path)):\n off_x=0\n off_y=0\n path=best_path[i]\n if(path[-1] not in {'U','B','C'}):\n #print('path[' + str(i) + ']:', path[0], path[1])\n move(row,col,path[0][0]+temp_off_x,path[0][1]+temp_off_y)\n temp_off_x += off_x\n temp_off_y += off_y\n if(path[-1]=='~'):\n move(row, col, path[1][0]+temp_off_x, path[1][1]+temp_off_y)\n temp_off_x += off_x\n temp_off_y += off_y\n if(path[0]==[center_x,center_y]):\n sock.close()\n if(path[-1] in {'U','B','C'}):\n #print('path[0][0], path[0][1],path[1][0],path[1][1],a=path[2]:',path[0][0], path[0][1], path[1][0], path[1][1],path[-1])\n move(path[0][0]+temp_off_x, path[0][1]+temp_off_y, path[1][0]+temp_off_x, path[1][1]+temp_off_y,act=path[-1])\n temp_off_x += off_x\n temp_off_y += off_y\n #print('main_best_path:', best_path)\nexcept ConnectionResetError:\n os.system('clear')\n sys.exit()\n\n\n\n","sub_path":"all/agent1.py","file_name":"agent1.py","file_ext":"py","file_size_in_byte":50686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"99630676","text":"#!/usr/bin/python\n\nimport os\nimport sys\nimport quippy as qp\nimport numpy as np\nimport argparse\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('-input', type=str, default=None,\n help='File to extract atoms from')\nparser.add_argument('-output', type=str, default='atoms.dat',\n help='Output file')\nparser.add_argument('-Z', type=int, default=[], nargs='+',\n help='Atomic numbers of atoms to list')\nparser.add_argument('-sp', type=str, default=[], nargs='+',\n help='Structure properties to extract')\nparser.add_argument('-ap', type=str, default=[], nargs='+',\n help='Atom properties to extract')\n\nargs = parser.parse_args()\n\n# Load atoms list\nal = qp.AtomsReader(args.input)\n\n# Open output file for writing\nf = open(args.output, 'w')\n\n# Central atoms\nZ = set(args.Z)\n\n# Initialize atom number and stucture number\nna = 0\nns = 0\n\n# Loop over atoms list\nfor i, at in enumerate(al):\n\n # Parse the desired structure properties\n structureProperties = []\n for sp in args.sp:\n structureProperties.append(at.params[sp])\n v = np.linalg.det(at.cell)\n\n # Parse the desired atom properties (Fortran indexed)\n atomProperties = []\n for ap in args.ap:\n atomProperties.append(at.properties[ap])\n\n for j, aa in enumerate(at):\n line = []\n\n # Write out the atom numbers\n if aa.number in Z:\n f.write('%6d %6d %6d %2s %12.8f %12.8f %12.8f ' % (na, aa.index,\n aa.number, aa.symbol, \n aa.position[0], aa.position[1], aa.position[2]))\n\n # Write out the atom properties\n for ap in atomProperties:\n aap = ap[j+1]\n f.write('%10s ' % str(aap))\n\n # Write out the structure properties\n f.write('%6d %12.8f ' % (ns, v))\n for sp in structureProperties:\n sp = str(sp)\n f.write('%10s ' % sp)\n f.write('\\n')\n na += 1\n ns += 
1\n\nf.close()\n","sub_path":"Scripts/atomLabels.py","file_name":"atomLabels.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"255698673","text":"import os\nimport inspect\nimport multiprocessing\nimport Spartacus.Database\n\nfrom .import custom_exceptions\nfrom .import utils\n\n\ndef inserted_callback(p_queue=None, p_columns=None, p_row=None, p_key=None):\n \"\"\"Callback executed when a table fk was created in second database. Sends a row by queue to master process.\n\n Args:\n p_queue (multiprocessing.managers.BaseProxy): queue used to communicate to parent process. Created from a multiprocessing.Manager instance. Defaults to None.\n p_columns (list): list of columns that are present in p_row parameter.\n p_row (list): the row that was inserted in the database 2.\n p_key (list): the key used for comparison.\n\n Raises:\n custom_exceptions.InvalidParameterTypeException.\n \"\"\"\n\n if not isinstance(p_queue, multiprocessing.managers.BaseProxy):\n raise custom_exceptions.InvalidParameterTypeException('\"p_queue\" parameter must be a \"multiprocessing.managers.BaseProxy\" instance.', p_queue)\n\n if not isinstance(p_columns, list):\n raise custom_exceptions.InvalidParameterTypeException('\"p_columns\" parameter must be a \"list\" instance.', p_columns)\n\n if not isinstance(p_row, list):\n raise custom_exceptions.InvalidParameterTypeException('\"p_row\" parameter must be a \"list\" instance.', p_row)\n\n if not isinstance(p_key, list):\n raise custom_exceptions.InvalidParameterTypeException('\"p_key\" parameter must be a \"list\" instance.', p_key)\n\n p_queue.put({\n 'type': 'tables_fks',\n 'row': {\n 'schema_name': p_row['namespace'],\n 'table_name': p_row['class_name'],\n 'constraint_name': p_row['constraint_name'],\n 'status': 'INSERTED',\n 'sql': inspect.cleandoc(doc=p_row['add_fk_ddl'])\n }\n })\n\n\ndef updated_callback(p_queue=None, p_columns=None, p_row_1=None, p_row_2=None, p_key=None, p_all_diffs=None):\n \"\"\"Callback executed when a table fk was updated in second database. Sends a row by queue to master process.\n\n Args:\n p_queue (multiprocessing.managers.BaseProxy): queue used to communicate to parent process. Created from a multiprocessing.Manager instance. Defaults to None.\n p_columns (list): list of columns that are present in p_row_1 and p_row_2 parameters.\n p_row_1 (list): the row as it is in database 1.\n p_row_2 (list): the row as it is in database 2.\n p_key (list): the key used for comparison.\n p_all_diffs (list): list of diffs. 
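atomLabels.py above collects repeated flag values with argparse's nargs='+'; a self-contained check of that pattern:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-Z', type=int, default=[], nargs='+',
                    help='Atomic numbers of atoms to list')
args = parser.parse_args(['-Z', '1', '8'])
assert args.Z == [1, 8]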
Each item has the following structure:\n {\n 'column' (str): the column that differs,\n 'old_value' (object): value in database 1,\n 'new_value' (object): value in database 2.\n }\n\n Raises:\n custom_exceptions.InvalidParameterTypeException.\n \"\"\"\n\n if not isinstance(p_queue, multiprocessing.managers.BaseProxy):\n raise custom_exceptions.InvalidParameterTypeException('\"p_queue\" parameter must be a \"multiprocessing.managers.BaseProxy\" instance.', p_queue)\n\n if not isinstance(p_columns, list):\n raise custom_exceptions.InvalidParameterTypeException('\"p_columns\" parameter must be a \"list\" instance.', p_columns)\n\n if not isinstance(p_row_1, list):\n raise custom_exceptions.InvalidParameterTypeException('\"p_row_1\" parameter must be a \"list\" instance.', p_row_1)\n\n if not isinstance(p_row_2, list):\n raise custom_exceptions.InvalidParameterTypeException('\"p_row_2\" parameter must be a \"list\" instance.', p_row_2)\n\n if not isinstance(p_key, list):\n raise custom_exceptions.InvalidParameterTypeException('\"p_key\" parameter must be a \"list\" instance.', p_key)\n\n if not isinstance(p_all_diffs, list):\n raise custom_exceptions.InvalidParameterTypeException('\"p_all_diffs\" parameter must be a \"list\" instance.', p_all_diffs)\n\n for v_diff in p_all_diffs:\n p_queue.put({\n 'type': 'tables_fks',\n 'row': {\n 'schema_name': p_row_2['namespace'],\n 'table_name': p_row_2['class_name'],\n 'constraint_name': p_row_2['constraint_name'],\n 'status': 'UPDATED',\n 'sql': inspect.cleandoc(\n doc='''\\\n {p_drop}\n {p_add}\n '''.format(\n p_drop=p_row_2['drop_fk_ddl'],\n p_add=p_row_2['add_fk_ddl']\n )\n )\n }\n })\n\n\ndef deleted_callback(p_queue=None, p_columns=None, p_row=None, p_key=None):\n \"\"\"Callback executed when a table fk was dropped from second database. Sends a row by queue to master process.\n\n Args:\n p_queue (multiprocessing.managers.BaseProxy): queue used to communicate to parent process. Created from a multiprocessing.Manager instance. Defaults to None.\n p_columns (list): list of columns that are present in p_row parameter.\n p_row (list): the row that was inserted in the database 2.\n p_key (list): the key used for comparison.\n\n Raises:\n custom_exceptions.InvalidParameterTypeException.\n \"\"\"\n\n if not isinstance(p_queue, multiprocessing.managers.BaseProxy):\n raise custom_exceptions.InvalidParameterTypeException('\"p_queue\" parameter must be a \"multiprocessing.managers.BaseProxy\" instance.', p_queue)\n\n if not isinstance(p_columns, list):\n raise custom_exceptions.InvalidParameterTypeException('\"p_columns\" parameter must be a \"list\" instance.', p_columns)\n\n if not isinstance(p_row, list):\n raise custom_exceptions.InvalidParameterTypeException('\"p_row\" parameter must be a \"list\" instance.', p_row)\n\n if not isinstance(p_key, list):\n raise custom_exceptions.InvalidParameterTypeException('\"p_key\" parameter must be a \"list\" instance.', p_key)\n\n p_queue.put({\n 'type': 'tables_fks',\n 'row': {\n 'schema_name': p_row['namespace'],\n 'table_name': p_row['class_name'],\n 'constraint_name': p_row['constraint_name'],\n 'status': 'DELETED',\n 'sql': inspect.cleandoc(doc=p_row['drop_fk_ddl'])\n }\n })\n\n\ndef compare_tables_fks(p_database_1=None, p_database_2=None, p_block_size=None, p_queue=None, p_is_sending_data_array=None, p_worker_index=None):\n \"\"\"Used to compare tables fks between databases.\n\n Args:\n p_database_1 (Spartacus.Database.PostgreSQL): the first database. 
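updated_callback above squeezes the drop/add DDL through inspect.cleandoc; what cleandoc does to such an indented, backslash-continued template:

import inspect

sql = inspect.cleandoc('''\
    ALTER TABLE t DROP CONSTRAINT t_fk;
    ALTER TABLE t ADD CONSTRAINT t_fk FOREIGN KEY (a) REFERENCES r (a);
    ''')
print(sql)  # both lines come out flush left, trailing blank line removed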
Defaults to None.\n p_database_2 (Spartacus.Database.PostgreSQL): the second database. Defaults to None.\n p_block_size (int): Number of data records that the comparer will deal with at the same time. Defaults to None.\n p_queue (multiprocessing.managers.BaseProxy): queue used to communicate to parent process. Created from a multiprocessing.Manager instance. Defaults to None.\n p_is_sending_data_array (multiprocessing.managers.ArrayProxy): array used to control process that are still sending data. Defaults to None.\n p_worker_index (int): the worker sub process index. Defaults to None.\n\n Raises:\n custom_exceptions.InvalidParameterTypeException.\n custom_exceptions.InvalidParameterValueException\n \"\"\"\n\n try:\n if not isinstance(p_database_1, Spartacus.Database.PostgreSQL):\n raise custom_exceptions.InvalidParameterTypeException('\"p_database_1\" parameter must be a \"Spartacus.Database.PostgreSQL\" instance.', p_database_1)\n\n if not isinstance(p_database_2, Spartacus.Database.PostgreSQL):\n raise custom_exceptions.InvalidParameterTypeException('\"p_database_2\" parameter must be a \"Spartacus.Database.PostgreSQL\" instance.', p_database_2)\n\n if not isinstance(p_block_size, int):\n raise custom_exceptions.InvalidParameterTypeException('\"p_block_size\" parameter must be an \"int\" instance.', p_block_size)\n\n if p_block_size < 1:\n raise custom_exceptions.InvalidParameterValueException('\"p_block_size\" parameter must be a positive \"int\" instance.', p_block_size)\n\n if not isinstance(p_queue, multiprocessing.managers.BaseProxy):\n raise custom_exceptions.InvalidParameterTypeException('\"p_queue\" parameter must be a \"multiprocessing.managers.BaseProxy\" instance.', p_queue)\n\n if not isinstance(p_is_sending_data_array, multiprocessing.managers.ArrayProxy):\n raise custom_exceptions.InvalidParameterTypeException('\"p_is_sending_data_array\" parameter must be an \"multiprocessing.managers.ArrayProxy\" instance.', p_is_sending_data_array)\n\n if not isinstance(p_worker_index, int):\n raise custom_exceptions.InvalidParameterTypeException('\"p_worker_index\" parameter must be an \"int\" instance.', p_worker_index)\n\n if p_worker_index < 0:\n raise custom_exceptions.InvalidParameterTypeException('\"p_worker_index\" parameter must be an \"int\" instance greater than or equal to 0.', p_worker_index)\n\n #Prepare table query\n v_sql = '''\\\n WITH constraints AS (\n SELECT nc.nspname AS namespace,\n r.relname AS class_name,\n c.conname AS constraint_name,\n PG_GET_CONSTRAINTDEF(c.oid, true) AS constraint_definition,\n c.condeferrable AS is_deferrable,\n c.condeferred AS initially_deferred,\n r.oid AS regclass,\n c.oid AS sysid\n FROM pg_namespace nc,\n pg_namespace nr,\n pg_constraint c,\n pg_class r\n WHERE nc.oid = c.connamespace AND nr.oid = r.relnamespace AND c.conrelid = r.oid\n AND c.contype = 'f'\n AND nc.nspname NOT IN (\n 'information_schema',\n 'pg_catalog',\n 'pg_toast'\n )\n AND nc.nspname NOT LIKE 'pg%%temp%%'\n ),\n cs AS (\n SELECT namespace,\n class_name,\n QUOTE_IDENT(constraint_name) AS constraint_name,\n 'ALTER TABLE ' || TEXT(REGCLASS(regclass)) ||\n ' ADD CONSTRAINT ' || QUOTE_IDENT(constraint_name) ||\n E'\\n ' || constraint_definition || ';' AS sql\n FROM constraints\n ORDER BY sysid\n )\n SELECT namespace,\n class_name,\n constraint_name,\n sql AS add_fk_ddl,\n FORMAT(\n 'ALTER TABLE %s.%s DROP CONSTRAINT %s;',\n QUOTE_IDENT(namespace),\n QUOTE_IDENT(class_name),\n QUOTE_IDENT(constraint_name)\n ) AS drop_fk_ddl\n FROM cs\n ORDER BY 1,\n 2,\n 3\n '''\n\n 
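compare_tables_fks repeats one isinstance/raise block per parameter; the same guard as a helper, using the builtin TypeError in place of the project's custom_exceptions class (require_type is a hypothetical name):

def require_type(value, expected, name):
    if not isinstance(value, expected):
        raise TypeError('"{0}" parameter must be a "{1}" instance.'.format(
            name, expected.__name__))
    return value

require_type(4096, int, 'p_block_size')  # returns 4096; a str here would raise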
utils.compare_datatables(\n p_database_1=p_database_1,\n p_database_2=p_database_2,\n p_block_size=p_block_size,\n p_key=['namespace', 'class_name', 'constraint_name'],\n p_sql=v_sql,\n p_inserted_callback=lambda p_columns, p_row, p_key: inserted_callback(p_queue=p_queue, p_columns=p_columns, p_row=p_row, p_key=p_key),\n p_updated_callback=lambda p_columns, p_row_1, p_row_2, p_key, p_all_diffs: updated_callback(p_queue=p_queue, p_columns=p_columns, p_row_1=p_row_1, p_row_2=p_row_2, p_key=p_key, p_all_diffs=p_all_diffs),\n p_deleted_callback=lambda p_columns, p_row, p_key: deleted_callback(p_queue=p_queue, p_columns=p_columns, p_row=p_row, p_key=p_key)\n )\n finally:\n p_queue.put(None)\n p_is_sending_data_array[p_worker_index] = False\n\n\ndef get_compare_tables_fks_tasks():\n \"\"\"Get list of tasks that will compare tables fks between databases.\n\n Args:\n\n Returns:\n list: list of tasks to be executed in a process pool. Each item is a dict instance with following strucutre:\n {\n 'function' (function): the function to be executed.\n 'kwds': keyworded args to be passed to the function.\n }\n \"\"\"\n\n return [{\n 'function': compare_tables_fks,\n 'kwds': {}\n }]\n","sub_path":"workers/compare_tables_fks.py","file_name":"compare_tables_fks.py","file_ext":"py","file_size_in_byte":12484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"327831653","text":"import numpy as np\nimport sys\nsys.setrecursionlimit(9500)\nfrom MoleculeClassify.hoomd_mols import hoomd_mols\nfrom Parser.hoomd_xml_pd import hoomd_xml\nfrom sys import argv\n\n\nclass Graph:\n\tdef __init__(self):\n\t\tself.neighbors = {}\n \n\tdef add_vertex(self, v):\n\t\tif v not in self.neighbors:\n\t\t\tself.neighbors[v] = []\n \n\tdef add_edge(self, u, v):\n\t\tself.neighbors[u].append(v)\n\t\t# if u == v, do not connect u to itself twice\n\t\t#if u != v: # Unconmment if unidirect bond hash was built.\n\t\t\t#self.neighbors[v].append(u)\n \n\tdef vertices(self):\n\t\treturn list(self.neighbors.keys())\n \n\tdef vertex_neighbors(self, v):\n\t\treturn self.neighbors[v]\n\n\t@staticmethod\n\tdef is_cycl(G):\n\t\tQ = []\n\t\tV = G.vertices()\n\t\t# initially all vertices are unexplored\n\t\tlayer = { v: -1 for v in V }\n\t\tfor v in V:\n\t\t\t# v has already been explored; move on\n\t\t\tif layer[v] != -1:\n\t\t\t\tcontinue\n\t\t\t# take v as a starting vertex\n\t\t\tlayer[v] = 0\n\t\t\tQ.append(v)\n\t\t\t# as long as Q is not empty\n\t\t\twhile len(Q) > 0:\n\t\t\t\t# get the next vertex u of Q that must be looked at\n\t\t\t\tu = Q.pop(0)\n\t\t\t\tC = G.vertex_neighbors(u)\n\t\t\t\tfor z in C:\n\t\t\t\t\t# if z is being found for the first time\n\t\t\t\t\tif layer[z] == -1:\n\t\t\t\t\t\tlayer[z] = layer[u] + 1\n\t\t\t\t\t\tQ.append(z)\n\t\t\t\t\telif layer[z] >= layer[u]:\n\t\t\t\t\t\treturn True\n\t\treturn False\n\ndef ggm(bond_hash, molecule): # Dual bond_hash\n\tmol_graph = Graph()\n\tfor atom in molecule:\n\t\tmol_graph.add_vertex(atom)\n\tfor atom in molecule:\n\t\tfor btom in bond_hash[atom]:\n\t\t\tmol_graph.add_edge(atom, btom)\n\treturn mol_graph\n\n\nfrom sys import argv\n\nxml = hoomd_xml(argv[1])\nmol = hoomd_mols(xml)\n\ndef loop_linear(mol):\n\tloop = []\n\tlinear = []\n\tfor m in mol.mol_idxes:\n\t\tm_graph = ggm(mol.bond_hash_nn, m) # Turn molecules into graphs\n\t\tif Graph.is_cycl(m_graph): # check if loop in molecule\n\t\t\tloop.append(m)\n\t\t\t#print(m)\n\t\telse:\n\t\t\tlinear.append(m)\n\n\tlloop = [ len(x) for x in loop ]\n\tllinear = [ len(x) for x in 
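Graph.is_cycl above is a layered BFS: meeting a neighbour whose layer is not strictly below the current vertex closes a cycle. An equivalent rewrite with collections.deque, assuming the dual (both-direction) adjacency that ggm builds:

from collections import deque

def has_cycle(neighbors):
    layer = {v: -1 for v in neighbors}
    for start in neighbors:
        if layer[start] != -1:
            continue
        layer[start] = 0
        q = deque([start])
        while q:
            u = q.popleft()
            for z in neighbors[u]:
                if layer[z] == -1:
                    layer[z] = layer[u] + 1
                    q.append(z)
                elif layer[z] >= layer[u]:
                    return True
    return False

assert has_cycle({0: [1, 2], 1: [0, 2], 2: [0, 1]})  # triangle
assert not has_cycle({0: [1], 1: [0, 2], 2: [1]})    # chain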
linear ]\n\treturn lloop,llinear\nlloop, llinear = loop_linear(mol)\n\nfor f in argv[2:]:\n\txml = hoomd_xml(f, needed = ['bond','type'])\n\tmol = hoomd_mols(xml)\n\to1, l1 = loop_linear(mol)\n\tlloop += o1\n\tllinear += l1\n\n\n#\n## remove the unreacted part\n#\n#while 5 in llinear:\n#\tllinear.remove(5)\n\nfrom pylab import *\n#import seaborn as sns\n#sns.set(color_codes=True)\nf = figure(figsize=(18,9))\nbinsize = 1\nbins_loop = int((max(lloop) - min(lloop))/binsize)\nbins_linear = int((max(llinear)-min(llinear))/binsize)\nax1 = f.add_subplot(121)\nax1.hist(lloop, bins=bins_loop, label='Loop')\n#sns.distplot(lloop, ax=ax1, label='Loop', rug=1)\nax1.legend()\nax2 = f.add_subplot(122)\nax2.hist(llinear, bins=bins_linear, label='Linear')\n#sns.distplot(llinear, ax=ax2, label='Linear', rug=1)\nax2.legend()\nax1.set_xscale('log')\nax2.set_xscale('log')\nshow()\nprint(llinear)\n","sub_path":"MoleCuleDist.py","file_name":"MoleCuleDist.py","file_ext":"py","file_size_in_byte":2739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"346287640","text":"import pickle\nfrom pathlib import Path\nimport os\n\nimport numpy as np\n\nPROJECT_DIR = Path(__file__).resolve().parents[2]\n\nDATA_DIR = os.path.join(PROJECT_DIR, \"data\")\nDATA_RAW_DIR = os.path.join(DATA_DIR, \"raw\")\nDATA_INTERIM_DIR = os.path.join(DATA_DIR, \"interim\")\nDATA_FEATURES_DIR = os.path.join(DATA_DIR, \"features\")\nDATA_DIMRED_DIR = os.path.join(DATA_DIR, \"dimred\")\n\nDATASET_CONFIGS_DIR = os.path.join(PROJECT_DIR, \"dataset_configs\")\n\n\ndef write_ndarray(path: str, array: np.ndarray, overwrite=False) -> bool:\n \"\"\"\n Wrapper for writing writing numpy array to file\n \"\"\"\n if os.path.isfile(path):\n if overwrite or str.lower(\n input(f\"There is already a file at {path}. Overwrite? [y/n]\")\n ) in (\"yes\", \"y\", \"t\"):\n np.save(path, array)\n return True\n return False\n\n\ndef stringify_funcall(func, *args, **kwargs):\n # Use protocol = 0 for ascii encoded bytes object:\n # https://stackoverflow.com/questions/30469575/how-to-pickle-and-unpickle-to-portable-string-in-python-3\n return pickle.dumps((func, args, kwargs), protocol=0).decode(\"ASCII\")\n\n\ndef unpickle_funcall(string: str):\n return pickle.loads(bytes(string, \"ASCII\"))\n","sub_path":"src/common/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"343574594","text":"\n\nfrom xai.brain.wordbase.nouns._omelette import _OMELETTE\n\n#calss header\nclass _OMELETTES(_OMELETTE, ):\n\tdef __init__(self,): \n\t\t_OMELETTE.__init__(self)\n\t\tself.name = \"OMELETTES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"omelette\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_omelettes.py","file_name":"_omelettes.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"401897099","text":"'''\nCreated on 2010. 10. 
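stringify_funcall/unpickle_funcall in utils.py lean on pickle protocol 0 being ASCII-safe, so a deferred call can travel inside a plain string; a quick round trip of that property:

import pickle

def add(a, b=0):
    return a + b

s = pickle.dumps((add, (1,), {'b': 2}), protocol=0).decode('ASCII')
func, args, kwargs = pickle.loads(bytes(s, 'ASCII'))
assert func(*args, **kwargs) == 3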
9.\n\n@author: hsh\n'''\n\nimport logging\nimport thread\n\nimport Pyro.core \nimport Pyro.naming \nfrom Pyro.errors import NamingError \n\nimport util.const;\n\nimport util.ready_logging;\n\nimport nexus.nexus_service_for_interceptor as nis\nimport nexus.nexus_service_for_carrier as ncs\n\ndef run_carrier_service():\n daemon=Pyro.core.Daemon()\n ns=Pyro.naming.NameServerLocator().getNS()\n daemon.useNameServer(ns)\n \n nexus_name = util.const.NEXUS_SERVICE_NAME_FOR_CARRIER\n\n try:\n ns.unregister(nexus_name)\n except NamingError:\n pass\n \n uri = daemon.connect(ncs.NexusServiceForCarrier(), nexus_name)\n logging.info(\"carrier service connect: %s\" % uri)\n \n while True:\n daemon.handleRequests(10.0)\n ncs.CarrierManager.ins().update_carrier_keys()\n\ndef run_interceptor_service():\n daemon=Pyro.core.Daemon()\n ns=Pyro.naming.NameServerLocator().getNS()\n daemon.useNameServer(ns)\n \n nexus_name = util.const.NEXUS_SERVICE_NAME_FOR_INTERCEPTOR\n\n try:\n ns.unregister(nexus_name)\n except NamingError:\n pass\n \n uri = daemon.connect(nis.NexusServiceForInterceptor(), nexus_name)\n \n logging.info(\"interceptor service connect return: %s\" % uri)\n while True:\n daemon.handleRequests(10.0)\n nis.InterceptorManager.ins().update_interceptor_keys()\n\ndef main():\n thread.start_new(run_carrier_service, ())\n run_interceptor_service()\n \nif __name__ == '__main__':\n util.ready_logging.ready_logging(\"nexus_main_log.txt\")\n \n logging.info(\"start nexus\")\n main()\n logging.info(\"end nexus\")\n","sub_path":"kb_codes/sandbox/syscarrier/src/nexus/nexus_main.py","file_name":"nexus_main.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"210765795","text":"class Employee:\n # Empid=0\n # Age=0\n # Ename=\"\"\n # Address=\"\"\n def getEmpdata(self):\n print(\"enter employee data\")\n Employee.Empid = int(input(\"Enter Empid : \"))\n Employee.Age = int(input(\"Enter Age : \"))\n Employee.Ename = input(\"Enter Name : \")\n Employee.Address = input(\"Enter Address : \")\n def displayEmpdata(self):\n print(\"Empid : \", Employee.Empid)\n print(\"Age : \", Employee.Age)\n print(\"Ename : \", Employee.Ename)\n print(\"Address : \", Employee.Address)\nobj=Employee()\nobj.getEmpdata()\nobj.displayEmpdata()\n","sub_path":"Empdata.py","file_name":"Empdata.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"355767834","text":"from flask import (Blueprint, render_template, redirect, url_for, \n jsonify, abort, request)\nfrom flask_security import current_user\nfrom flask_security.decorators import login_required, roles_required\nfrom .forms import DepotForm, ArretForm, RemisageForm\nfrom ..config import BaseConfig\n\ndepots = Blueprint('depots', __name__, url_prefix='/depots')\n\n\n# --------------------------------------------------\n# -------------------- DEPOT ---------------------\n# --------------------------------------------------\n@depots.route('/')\n@login_required\ndef indexDepots():\n from ..models import Depot, Arret\n if current_user.has_role('admin'):\n depots = Depot.getAll()\n else:\n idGroupeDeLigne = request.cookies['idGroupeDeLigne']\n depots = Depot.getAll(idGroupeDeLigne)\n arrets = Arret.getAll()\n \n return render_template('depots/index-depots.html', \n depots=depots,\n arrets=arrets)\n\n@depots.route('/ajouter', methods=['GET', 'POST'])\n@login_required\n@roles_required('admin')\ndef 
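A side note on Empdata.py above: its methods assign to Employee.Empid and friends, i.e. class attributes, so every instance shares one record. A sketch of the per-instance form (original field names kept, prompts shortened):

class Employee:
    def getEmpdata(self):
        # Binding to self keeps one record per object.
        self.Empid = int(input('Enter Empid : '))
        self.Ename = input('Enter Name : ')

    def displayEmpdata(self):
        print('Empid :', self.Empid)
        print('Ename :', self.Ename)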
ajouterDepot():\n f = DepotForm()\n\n from ..models import Vehicule\n f.famille.choices = [(x[0], x[0]) for x in Vehicule.getDiscriminants()]\n\n from ..models import Depot\n from ..extensions import db\n if f.validate_on_submit():\n depot = Depot(codeDepot=f.code.data,\n nom=f.nom.data,\n familleVehicule=f.famille.data)\n db.session.add(depot)\n db.session.commit()\n if f.gererRemisage.data:\n return redirect(url_for('depots.editerRemisageDepot', id=depot.idDepot))\n return redirect(url_for('depots.indexDepots'))\n\n return render_template('depots/ajouter.html', form=f)\n\n@depots.route('/editer/', methods=['GET', 'POST'])\n@login_required\n@roles_required('admin')\ndef editerDepot(id):\n from ..models import Depot\n depot = Depot.getById(id)\n aRemisage = True if depot.voie_collection else False\n\n f = DepotForm(code=depot.codeDepot, nom=depot.nom,\n famille=depot.familleVehicule, gererRemisage=aRemisage)\n from ..models import Vehicule\n f.famille.choices = [(x[0], x[0]) for x in Vehicule.getDiscriminants()]\n\n\n from ..extensions import db\n if f.validate_on_submit():\n depot.codeDepot = f.code.data\n depot.nom = f.nom.data\n depot.familleVehicule = f.famille.data\n db.session.commit()\n\n if aRemisage and not f.gererRemisage.data:\n for v in depot.voie_collection:\n db.session.delete(v)\n for pa in depot.pointacces_collection:\n db.session.delete(pa)\n db.session.commit()\n\n if f.gererRemisage.data:\n return redirect(url_for('depots.editerRemisageDepot', id=depot.idDepot))\n return redirect(url_for('depots.indexDepots'))\n\n return render_template('depots/editer.html', form=f)\n\n@depots.route('/supprimer/', methods=['DELETE'])\ndef supprimerDepot(id):\n from ..models import Depot\n depot = Depot.getById(id)\n if not depot:\n abort(404)\n Depot.remove(depot)\n return jsonify({'result': True}), 200\n\n@depots.route('/depot/')\n@login_required\ndef afficherDepot(id):\n from ..models import Depot\n depot = Depot.getById(id)\n\n return render_template('depots/depot.html',\n depot=depot)\n\n@depots.route('/depot//remisage/editer', methods=['GET', 'POST'])\n@roles_required('admin')\ndef editerRemisageDepot(id):\n f = RemisageForm()\n\n from ..models import Depot, Voie, Place, Arret, PointAcces\n depot = Depot.getById(id)\n if not depot:\n abort(404)\n aRemisage = True if depot.voie_collection else False\n\n from ..extensions import db\n if f.validate_on_submit():\n # S'il y a un remisage, on le supprime\n if aRemisage:\n for v in depot.voie_collection:\n db.session.delete(v)\n for pa in depot.pointacces_collection:\n db.session.delete(pa)\n\n # On ajoute les voies et places\n for voie in f.voies.data:\n v = Voie(idDepot=id, libelleVoie=voie['libelle'])\n db.session.add(v)\n db.session.flush()\n for i in range(0, voie['nbPlaces']):\n db.session.add(Place(idVoie=v.idVoie, \n position=(i+1),\n type=voie['places'][i]['typePlace']))\n\n # On ajoute les points d'accès\n for pointAcces in f.pointsAcces.data:\n idArret = None\n if pointAcces['idArret'] != -1:\n idArret = pointAcces['idArret']\n db.session.add(PointAcces(idDepot=id,\n estEntree=pointAcces['estEntree'],\n estSortie=pointAcces['estSortie'],\n idArret=idArret,\n cote=pointAcces['cote']))\n db.session.commit()\n return redirect(url_for('depots.indexDepots'))\n\n # Si le dépot a un remisage\n if aRemisage:\n f.voies.pop_entry()\n for v in depot.voie_collection:\n f.voies.append_entry()\n f.voies.entries[-1].libelle.data = v.libelleVoie\n f.voies.entries[-1].nbPlaces.data = len(v.place_collection)\n for p in v.place_collection:\n 
f.voies.entries[-1].places.append_entry()\n f.voies.entries[-1].places.entries[-1].typePlace.data = int(p.type) if p.type else 0\n f.pointsAcces.pop_entry()\n for pa in depot.pointacces_collection:\n f.pointsAcces.append_entry()\n f.pointsAcces.entries[-1].estEntree.data = pa.estEntree\n f.pointsAcces.entries[-1].estSortie.data = pa.estSortie\n f.pointsAcces.entries[-1].cote.data = int(pa.cote)\n f.pointsAcces.entries[-1].idArret.data = pa.idArret if pa.idArret else -1\n\n return render_template('depots/editer-remisage.html', \n form=f,\n maxPlaces=BaseConfig.MAXIMUM_PLACES_PAR_VOIE)\n\n\n# --------------------------------------------------\n# -------------------- ARRET ---------------------\n# --------------------------------------------------\n@depots.route('/ajouter/arret', methods=['GET', 'POST'])\n@login_required\n@roles_required('admin')\ndef ajouterArret():\n f = ArretForm()\n\n from ..models import Arret\n from ..extensions import db\n if f.validate_on_submit():\n arret = Arret(codeArret=f.code.data,\n libelleArret=f.libelle.data)\n db.session.add(arret)\n db.session.commit()\n return redirect(url_for('depots.indexDepots'))\n\n return render_template('depots/ajouter-arret.html', form=f)\n\n@depots.route('/editer/arret/', methods=['GET', 'POST'])\n@login_required\n@roles_required('admin')\ndef editerArret(id):\n from ..models import Arret\n arret = Arret.getById(id)\n\n f = ArretForm(code=arret.codeArret,\n libelle=arret.libelleArret)\n\n from ..extensions import db\n if f.validate_on_submit():\n arret.codeArret = f.code.data\n arret.libelleArret = f.libelle.data\n db.session.commit()\n return redirect(url_for('depots.indexDepots'))\n\n return render_template('depots/editer-arret.html', form=f)\n\n@depots.route('/supprimer/arret/', methods=['DELETE'])\ndef supprimerArret(id):\n from ..models import Arret\n arret = Arret.getById(id)\n if not arret:\n abort(404)\n Arret.remove(arret)\n return jsonify({'result': True}), 200\n","sub_path":"main/depots/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"493343556","text":"from __future__ import print_function\n\nimport sys\nimport re\nfrom pyspark import SparkContext\nfrom csv import reader\nfrom csv import writer\nfrom StringIO import StringIO\n\n#filter if there is no id key code\n#filter if offense description is '' or key code is ''\n#filter if start,end date and time are ''\n#filter if borough or precint is ''\nnull_indices = (0,6,8,14,7,9,19,20,21,22,23)\nint_indices = (0,6,8,14,19,20)\nfloat_indices = (21,22)\ndef mark_null(line):\n for i in null_indices:\n if line[i] == '':\n line[i] = \"null\"\n return line\n\ndef mark_invalid_int(line):\n for i in int_indices:\n if line[i] == \"null\":\n pass\n else:\n reg = \"^[-]?\\d+$\"\n if re.match(reg, line[i]) is None:\n line[i] = \"invalid\"\n return line\n\ndef mark_invalid_float(line):\n for i in float_indices:\n if line[i] == \"null\":\n pass\n else:\n reg = \"^[-]?\\d+?\\.\\d+?$\"\n if re.match(reg, line[i]) is None:\n line[i] = \"invalid\"\n return line\n\ndef mark_invalid_coord(line):\n if line[23] == \"null\":\n return line\n reg = \"^(\\([-+]?\\d{1,2}[.]\\d+),\\s*([-+]?\\d{1,3}[.]\\d+\\))$\"\n res = re.match(reg, line[23])\n if res is None:\n line[23] = \"invalid\"\n return line\n\ndef mark_invalid(line):\n line = mark_null(line)\n line = mark_invalid_int(line)\n line = mark_invalid_float(line)\n return mark_invalid_coord(line)\n\n\n# def 
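editerRemisageDepot above repopulates its form with pop_entry()/append_entry(); the bare WTForms FieldList mechanics in isolation (form and field names hypothetical):

from wtforms import FieldList, Form, StringField

class VoiesForm(Form):
    voies = FieldList(StringField('libelle'))

f = VoiesForm()
f.voies.append_entry()            # grow the list by one blank entry
f.voies.entries[-1].data = 'A'    # then fill it, as the view does
assert len(f.voies.entries) == 1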
filter_file(line):\n# for index in indices:\n# if line[index] == '':\n# return False\n# return True\n# #return (line[1] == '' and line[2] == '') or (line[3] == '' and line[4] == '')\n\ndef repack(line):\n res = StringIO(\"\")\n writer(res).writerow(line)\n return res.getvalue().strip()\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n print(\"Usage: bigram \", file=sys.stderr)\n exit(-1)\n sc = SparkContext()\n lines = sc.textFile(sys.argv[1], 1)\n\n lines = lines.mapPartitions(lambda line: reader(line))\\\n .map(lambda line: mark_invalid(line))\\\n .map(lambda line: repack(line))\\\n .saveAsTextFile(\"filter_lines\")\n sc.stop()\n","sub_path":"clean/mark_null_invalid_columns.py","file_name":"mark_null_invalid_columns.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"293128523","text":"from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n\turl(r'^$', views.index, name=\"index\"),\n\turl(r'^show/(?P\\w+)$', views.show),\n\turl(r'^createproduct$', views.createProduct, name=\"create_product\"),\n\turl(r'^createcategory$', views.createCategory, name=\"create_category\"),\n\t# url(r'^upload_image$', views.upload_pic, name=\"upload_pic\"),\n\turl(r'^item_description/(?P\\w+)$', views.item_description, name=\"item_description\"),\n]\n","sub_path":"apps/products/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"124325876","text":"import json\nimport socket\nfrom threading import Thread\nfrom rdt import *\n\nclass Client:\n\tdef __init__(self, msgSize=1024):\n\t\tself.msgSize = msgSize\n\t\tself.ip = \"\"\n\n\tdef __call__(self):\n\t\tif(self.retrieveIP()):\n\t\t\twhile True:\n\t\t\t\tcommand = self.menu()\n\t\t\t\tif command == \"1\":\n\t\t\t\t\tself.requestFile()\n\t\t\t\telif command == \"2\":\n\t\t\t\t\tself.requestList()\n\t\t\t\telif command == \"3\":\n\t\t\t\t\tself.closeConnection()\n\t\t\t\t\tbreak\n\n\tdef requestFile(self):\n\t\tpair = json.dumps({\"METHOD\": \"GET\", \"FILENAME\": input(\"Please insert the file name: \")})\n\t\tsender.send(pair, client_rx_address)\n\n\t\twhile True:\n\t\t\tglobal rcv\n\t\t\tif rcv:\n\t\t\t\tmsg = json.loads(rcv)\n\t\t\t\tif msg[\"FILENAME\"] != \"\":\n\t\t\t\t\tprint(\"Retrieved file content: \" + msg[\"BODY\"])\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tprint(\"Error retrieving file\")\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tbreak\n\t\t# s.close()\n\t\trcv = None\n\n\tdef requestList(self):\n\t\tpair = json.dumps({\"METHOD\": \"LIST\"})\n\t\tsender.send(pair, client_rx_address)\t\t\n\t\twhile True:\n\t\t\tglobal rcv\n\t\t\tif rcv:\n\t\t\t\tmsg = json.loads(rcv)\n\n\t\t\t\tif len(msg) > 0:\n\t\t\t\t\tprint(\"Files at server:\")\n\t\t\t\t\tfor file in msg:\n\t\t\t\t\t\tprint(\"- \" + file[\"FILENAME\"])\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tprint(\"Error retrieving list\")\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tbreak\n\t\t# s.close()\n\t\trcv = None\n\t\n\tdef closeConnection(self):\n\t\tpair = json.dumps({\"METHOD\": \"EXIT\"})\n\t\tsender.send(pair, client_rx_address)\n\t\tprint(\"Closing connection with server\")\n\t\tquit(0)\n\t\t\n\tdef getDomain(self):\n\t\tdomain = input(\"To which domain would you like to connect? 
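mark_invalid_coord above hangs on a single regex for "(lat, lon)" pairs; exercising the same pattern on its own:

import re

COORD = r"^(\([-+]?\d{1,2}[.]\d+),\s*([-+]?\d{1,3}[.]\d+\))$"
assert re.match(COORD, "(40.7128, -74.0060)")
assert re.match(COORD, "40.7, -74.0") is None  # parentheses are required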
\")\n\t\treturn domain\n\n\tdef retrieveIP(self):\n\t\tpair = json.dumps({\"METHOD\": \"RETRIEVE\", \"DOMAIN\": self.getDomain()})\n\n\t\ts = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\t\ts.connect((\"localhost\", 4000))\n\t\tprint(\"Retrieving IP from domain\")\n\t\ts.sendall(pair.encode())\n\n\t\tsuccess = False\n\t\twhile True:\n\t\t\trcv = s.recv(self.msgSize).decode(\"utf-8\")\n\n\t\t\tif rcv:\n\t\t\t\tmsg = json.loads(rcv)\n\n\t\t\t\tif msg[\"STATUS\"] == \"OK\":\n\t\t\t\t\tself.ip = msg[\"IP\"]\n\t\t\t\t\tprint(\"IP \" + self.ip + \" was successfully retrieved \")\n\t\t\t\t\tsuccess = True\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tprint(\"Error retrieving IP\")\n\t\t\t\t\tsuccess = False\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tbreak\n\n\t\ts.close()\n\t\treturn success\n\t\t\n\tdef menu(self):\n\t\treturn input(\"\"\"Enter a number and press enter\n1 - Request file\n2 - List files \n3 - Close connection\n\"\"\" )\n\ndef callback(body):\n global rcv\n rcv = body\n\nrcv = None\nrx_port = 5005\ntx_port = 5007\nclient_rx_address = ('localhost', 8080)\n\nreceiver = Receiver(('localhost', rx_port), callback)# address and callback\nsender = Sender(('localhost', tx_port))# address\n\ndef main():\n\tCLIENT = Client()\n\treceiver.listen()\n\tCLIENT()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Client/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"290400032","text":"import pandas as pd\nimport numpy as np\nfrom sklearn import svm, metrics\nimport sklearn\n\ndata = pd.read_csv(\"/Users/alexchandy13/Documents/Programming/gitstuff/pitch-ML/baseballdf.csv\", sep=\",\")\ntarget = 'description'\n\nx = np.array(data.drop(data.columns[[0,13]], 1))\ny = np.array(data[target])\n\nx_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(x, y, test_size=0.1)\n\nclf = svm.SVC(kernel=\"linear\",C=2)\n\nclf.fit(x_train,y_train)\ny_pred = clf.predict(x_test)\nacc = metrics.accuracy_score(y_test,y_pred)\nprint(acc)\nprint(clf.predict([[87,-2,6,3,2,0.63,-0.34,1.17,1,3100,5.8,7]]))\n\n","sub_path":"pitchModel-python.py","file_name":"pitchModel-python.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"97989577","text":"import re, json, requests\nfrom os import path\nfrom mygene import MyGeneInfo\nfrom medoo import Raw, Field\nfrom diot import Diot\nfrom pyppl.utils import always_list\nfrom bioprocs.utils.cache import Cache\nfrom bioprocs.utils.tsvio2 import TsvReader, TsvWriter, TsvRecord\nfrom tempfile import gettempdir\n\n\"\"\"\n`notfound`: What if a symbol is not found. 
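pitchModel-python.py above fits a linear SVC and scores held-out accuracy; the same flow on synthetic data (illustrative only, no CSV needed):

import numpy as np
from sklearn import metrics, svm
from sklearn.model_selection import train_test_split

x = np.random.rand(200, 4)         # fake features
y = (x[:, 0] > 0.5).astype(int)    # fake labels
x_tr, x_te, y_tr, y_te = train_test_split(x, y, test_size=0.1)
clf = svm.SVC(kernel='linear', C=2).fit(x_tr, y_tr)
print(metrics.accuracy_score(y_te, clf.predict(x_te)))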
Default: skip\n\t- skip : skip the record(don't write it to output file)\n\t- ignore: use the original name;\n\t- error : report erro\n\"\"\"\nSPECIES = {\n\t'hg19': 'human',\n\t'hg38': 'human',\n\t'mm9' : 'mouse',\n\t'mm10': 'mouse'\n}\nTAXIDS = {\n\t'hg19': 9606,\n\t'hg38': 9606,\n\t'mm9' : 10090,\n\t'mm10': 10090\n}\n# local to remote\nFIELD_L2M = {\n\t'ensembl_gene' : 'ensembl.gene',\n\t'ensembl_protein' : 'ensembl.protein',\n\t'ensembl_transcript': 'ensembl.transcript',\n\t'refseq_genomic' : 'refseq.genomic',\n\t'refseq_rna' : 'refseq.rna',\n\t'refseq_protein' : 'refseq.protein',\n\t'uniprot_Swiss_Prot': 'uniprot.Swiss-Prot',\n}\n# remote to local\nFIELD_M2L = {\n\t'ensembl.gene' : 'ensembl_gene',\n\t'ensembl.protein' : 'ensembl_protein',\n\t'ensembl.transcript': 'ensembl_transcript',\n\t'refseq.genomic' : 'refseq_genomic',\n\t'refseq.rna' : 'refseq_rna',\n\t'refseq.protein' : 'refseq_protein',\n\t'uniprot.Swiss-Prot': 'uniprot_Swiss_Prot'\n}\nclass RecordNotFound(Exception):\n\tpass\n\ndef replaceList(l, search, replace):\n\tif not isinstance(search, list):\n\t\tsearch = [search]\n\tret = l[:]\n\tfor i, e in enumerate(ret):\n\t\tif e in search:\n\t\t\tret[i] = replace\n\treturn ret\n\ndef querygene(*args, **kwargs):\n\trets = []\n\ttry:\n\t\tmgret = MyGeneInfo().querymany(*args, **kwargs)\n\texcept requests.exceptions.ConnectionError:\n\t\treturn rets\n\tfor ret in mgret:\n\t\tout = {}\n\t\trets.append(out)\n\t\tfor key, val in ret.items():\n\t\t\tif 'ensembl' == key:\n\t\t\t\tensembl = val[0] if isinstance(val, list) else (val or {})\n\t\t\t\tout['ensembl_gene'] = ensembl.get('gene', '')\n\t\t\t\tout['ensembl_protein'] = ensembl.get('protein', [])\n\t\t\t\tout['ensembl_transcript'] = ensembl.get('transcript', [])\n\t\t\telif 'refseq' == key:\n\t\t\t\trefseq = val[0] if isinstance(val, list) else (val or {})\n\t\t\t\tout['refseq_genomic'] = refseq.get('genomic', [])\n\t\t\t\tout['refseq_rna'] = refseq.get('rna', [])\n\t\t\t\tout['refseq_protein'] = refseq.get('protein', [])\n\t\t\telif 'uniprot' == key:\n\t\t\t\tuniprot = val[0] if isinstance(val, list) else (val or {})\n\t\t\t\tout['uniprot_Swiss_Prot'] = uniprot.get('Swiss-Prot', [])\n\t\t\telse:\n\t\t\t\tout[key] = val\n\treturn rets\n\nfields2local = lambda keys: [FIELD_M2L.get(key, key) for key in keys]\nfields2remote = lambda keys: [FIELD_L2M.get(key, key) for key in keys]\n\ndef genenorm(infile, outfile = None, notfound = 'ignore', frm = 'symbol, alias', to = 'symbol', genome = 'hg19', inopts = None, outopts = None, genecol = None, cachedir = gettempdir()):\n\n\t_inopts = Diot(skip = 0, comment = '#', delimit = '\\t')\n\t_inopts.update(inopts or {})\n\tinopts = _inopts\n\n\t_outopts = Diot(delimit = '\\t', append = False, query = False, head = True)\n\t_outopts.update(outopts or {})\n\toutopts = _outopts\n\toutquery = outopts.get('query', False)\n\touthead = outopts.get('head', outopts.get('cnames', True))\n\tif 'query' in outopts:\n\t\toutquery = outopts['query']\n\t\tdel outopts['query']\n\tif 'head' in outopts:\n\t\touthead = outopts['head']\n\t\tdel outopts['head']\n\tif 'cnames' in outopts:\n\t\touthead = outopts['cnames']\n\t\tdel outopts['cnames']\n\n\treader = TsvReader(infile, **inopts)\n\t#if not reader.meta: reader.autoMeta()\n\tgenecol = genecol or 0\n\tgenes = set()\n\tncol = 0\n\tfor r in reader:\n\t\tncol = ncol or len(r)\n\t\tgenes.add(r[genecol].strip())\n\treader.rewind()\n\tif not reader.meta:\n\t\treader.meta.extend(['COL' + str(i + 1) for i in range(ncol)])\n\tgenes = list(genes)\n\n\tdbfile = 
path.join(cachedir, 'geneinfo.db')\n\tcache = Cache(dbfile, 'geneinfo', {\n\t\t'_id' : 'text',\n\t\t'symbol' : 'text',\n\t\t'HGNC' : 'int',\n\t\t'alias' : \"text default ''\",\n\t\t'ensembl_gene' : 'text',\n\t\t'ensembl_protein' : 'text',\n\t\t'ensembl_transcript': 'text',\n\t\t'refseq_genomic' : 'text',\n\t\t'refseq_rna' : 'text',\n\t\t'refseq_protein' : 'text',\n\t\t'entrezgene' : 'int',\n\t\t'genomic_pos' : 'text',\n\t\t'genomic_pos_hg19' : 'text',\n\t\t'genomic_pos_mm9' : 'text',\n\t\t'ipi' : 'text',\n\t\t'pfam' : \"text default ''\",\n\t\t'pdb' : 'text',\n\t\t'type_of_gene' : 'text',\n\t\t'taxid' : 'int',\n\t\t'uniprot_Swiss_Prot': \"text default ''\",\n\t}, '_id')\n\n\tdummies = {\n\t\t'symbol' : 'iplain',\n\t\t'alias' : 'iarray',\n\t\t'pfam' : 'iarray',\n\t\t'uniprot' : 'iarray',\n\t\t'genomic_pos' : 'json',\n\t\t'genomic_pos_hg19' : 'json',\n\t\t'genomic_pos_mm9' : 'json',\n\t\t'ipi' : 'iarray',\n\t\t'pdb' : 'iarray',\n\t\t'refseq_genomic' : 'iarray',\n\t\t'refseq_protein' : 'iarray',\n\t\t'refseq_rna' : 'iarray',\n\t\t'ensembl_protein' : 'iarray',\n\t\t'ensembl_transcript': 'iarray',\n\t\t'uniprot_Swiss_Prot': 'iarray',\n\t}\n\n\t# query from cache\n\ttocols = always_list(to)\n\t# alias\n\ttocols = replaceList(tocols, ['ensg', 'ensemblgene', 'ensembl'], 'ensembl.gene')\n\ttocols = replaceList(tocols, ['uniprot'], 'uniprot.Swiss-Prot')\n\ttocols = replaceList(tocols, ['refseq'], 'refseq.rna')\n\ttocols = fields2local(tocols)\n\n\tfrmcols = always_list(frm)\n\tfrmcols = replaceList(frmcols, ['ensg', 'ensemblgene', 'ensembl'], 'ensembl.gene')\n\tfrmcols = replaceList(frmcols, ['uniprot'], 'uniprot.Swiss-Prot')\n\tfrmcols = replaceList(frmcols, ['refseq'], 'refseq.rna')\n\tfrmcols = fields2local(frmcols)\n\n\tcolumns = list(set(tocols + frmcols + ['taxid']))\n\tfrmkeys = ','.join(frmcols)\n\n\tallfound, allrest = cache.query(columns, {frmkeys: genes, 'taxid': TAXIDS[genome]}, dummies)\n\t# query from api\n\tmgret = querygene(allrest[frmkeys], scopes = fields2remote(frmcols), fields = fields2remote(columns), species = SPECIES[genome])\n\t# get all result for each query\n\tgenetmp = {}\n\tfor gret in mgret:\n\t\tif not gret['query'] in genetmp:\n\t\t\tgenetmp[gret['query']] = []\n\t\tgenetmp[gret['query']].append(gret)\n\n\tgenemap = {}\n\tdata2save = {}\n\tfor query, gret in genetmp.items():\n\t\t# re-score the items if query is entirely matched\n\t\tscore = 0\n\t\tgr = None\n\t\tfor g in gret:\n\t\t\t# not all result returned\n\t\t\tif not all([x in g for x in tocols]): continue\n\n\t\t\tif any([g[x] == query for x in tocols]):\n\t\t\t\tthescore = g['_score'] + 10000\n\t\t\telif any([str(g[x]).upper() == query.upper() for x in tocols]):\n\t\t\t\tthescore = g['_score'] + 5000\n\t\t\telif any([x in g and query.upper() in [str(u).upper() for u in list(g[x])] for x in tocols]):\n\t\t\t\tthescore = g['_score'] + 1000\n\t\t\telse:\n\t\t\t\tthescore = g['_score']\n\t\t\tif thescore > score:\n\t\t\t\tscore = thescore\n\t\t\t\tgr = g\n\n\t\tif not gr: continue\n\t\tdel gr['_score']\n\t\tdel gr['query']\n\t\tgr = Cache._result({x:(gr[x] if x in gr else '') for x in set(columns + list(gr.keys()))}, dummies)\n\t\tfor x, val in gr.items():\n\t\t\tif not x in data2save:\n\t\t\t\tdata2save[x] = []\n\t\t\tdata2save[x].append(val)\n\t\tgenemap[query] = gr\n\n\t# add cached data\n\tfor i, ret in allfound.items():\n\t\tquery = genes[i]\n\t\tgenemap[query] = ret\n\n\t#del genetmp\n\t#print genemap\n\n\t# cache genemap\n\t#cachedata = {}\n\t#querys = genemap.keys()\n\t#for query in querys:\n\t#\tfor k, v 
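The re-scoring loop in gene.py above boosts hits that exactly match the query (+10000) or match case-insensitively (+5000); the same tie-breaking as one max() call (best_hit is a hypothetical name, and the +1000 list-membership tier is omitted for brevity):

def best_hit(query, hits, tocols):
    def boosted(g):
        s = g.get('_score', 0)
        if any(g.get(f) == query for f in tocols):
            return s + 10000
        if any(str(g.get(f, '')).upper() == query.upper() for f in tocols):
            return s + 5000
        return s
    usable = [g for g in hits if all(f in g for f in tocols)]
    return max(usable, key=boosted, default=None)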
in genemap[query].items():\n\t#\t\tif not k in cachedata:\n\t#\t\t\tcachedata[k] = []\n\t#\t\tcachedata[k].append(v)\n\n\t#if cachedata:\n\t#\tcache.save(cachedata, cachefactory)\n\t#\tdel cachedata\n\tif data2save:\n\t\t# make it unique\n\t\tds_keys = list(data2save.keys())\n\t\tdata2save_uniq = {k:[] for k in ds_keys}\n\t\ttmp_container = []\n\t\tfor i in range(len(data2save[ds_keys[0]])):\n\t\t\ttmp = {k:data2save[k][i] for k in ds_keys}\n\t\t\tif not tmp in tmp_container:\n\t\t\t\ttmp_container.append(tmp)\n\t\t\t\tfor k in ds_keys:\n\t\t\t\t\tdata2save_uniq[k].append(data2save[k][i])\n\n\t\tcache.save(data2save_uniq, dummies)\n\n\tif outfile:\n\t\twriter = TsvWriter(outfile, **outopts)\n\t\twriter.meta.extend(reader.meta)\n\t\tif outquery:\n\t\t\twrite.meta.append('_QUERY')\n\n\t\tif len(tocols) > 1:\n\t\t\tgcolidx = genecol if isinstance(genecol, int) else writer.meta.index(genecol)\n\t\t\twriter.meta[(gcolidx+1):(gcolidx+1)] = [(tocol, None) for tocol in tocols[1:]]\n\n\t\tif outhead:\n\t\t\twriter.writeHead()\n\t\t#print writer.meta\n\n\t\t#i = 0\n\t\tfor row in reader:\n\t\t\tr = TsvRecord(row.values(), reader.meta)\n\n\t\t\t#if (i <= 10): print r\n\t\t\tquery = r[genecol].strip()\n\t\t\tif query not in genemap:\n\t\t\t\tif notfound == 'error':\n\t\t\t\t\traise RecordNotFound('Record not found: %s' % query)\n\t\t\t\telif notfound == 'skip':\n\t\t\t\t\tcontinue\n\t\t\t\tif len(tocols) > 1:\n\t\t\t\t\tfor tocol in tocols[1:]:\n\t\t\t\t\t\tr[tocol] = ''\n\t\t\telse:\n\t\t\t\t#if (i <= 10): print genecol\n\t\t\t\tr[genecol] = genemap[query][tocols[0]]\n\t\t\t\t#if (i <= 10): print genemap[query][tocols[0]], r\n\t\t\t\tif len(tocols) > 1:\n\t\t\t\t\tfor tocol in tocols[1:]:\n\t\t\t\t\t\tr[tocol] = genemap[query][tocol]\n\n\t\t\tif outquery:\n\t\t\t\tr._QUERY = query\n\n\t\t\t#if (i <= 10): print r\n\t\t\ti += 1\n\t\t\twriter.write(r)\n\n\treturn genemap\n","sub_path":"bioprocs/utils/gene.py","file_name":"gene.py","file_ext":"py","file_size_in_byte":8851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"307244126","text":"import os\nimport numpy as np\nimport cmaps\nimport pandas as pd\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nimport cartopy.crs as ccrs\nimport cartopy.feature as cfeature\nfrom copy import copy\nfrom cartopy.mpl.gridliner import LATITUDE_FORMATTER, LONGITUDE_FORMATTER\nimport cartopy.io.shapereader as shpreader\nimport shapely.geometry as sgeom\nimport datetime\n\n# Constants\nBIGFONT=18\nMIDFONT=14\nSMFONT=10\n\n\n#--------------Function Defination----------------\n\n\n\ndef find_side(ls, side):\n \"\"\"\n Given a shapely LineString which is assumed to be rectangular, return the\n line corresponding to a given side of the rectangle.\n \"\"\"\n minx, miny, maxx, maxy = ls.bounds\n points = {'left': [(minx, miny), (minx, maxy)],\n 'right': [(maxx, miny), (maxx, maxy)],\n 'bottom': [(minx, miny), (maxx, miny)],\n 'top': [(minx, maxy), (maxx, maxy)],}\n return sgeom.LineString(points[side])\ndef lambert_xticks(ax, ticks):\n \"\"\"Draw ticks on the bottom x-axis of a Lambert Conformal projection.\"\"\"\n te = lambda xy: xy[0]\n lc = lambda t, n, b: np.vstack((np.zeros(n) + t, np.linspace(b[2], b[3], n))).T\n xticks, xticklabels = _lambert_ticks(ax, ticks, 'bottom', lc, te)\n ax.xaxis.tick_bottom()\n ax.set_xticks(xticks)\n ax.set_xticklabels([ax.xaxis.get_major_formatter()(xtick) for xtick in xticklabels], fontsize=MIDFONT)\ndef lambert_yticks(ax, ticks):\n \"\"\"Draw ricks on the 
left y-axis of a Lamber Conformal projection.\"\"\"\n te = lambda xy: xy[1]\n lc = lambda t, n, b: np.vstack((np.linspace(b[0], b[1], n), np.zeros(n) + t)).T\n yticks, yticklabels = _lambert_ticks(ax, ticks, 'left', lc, te)\n ax.yaxis.tick_left()\n ax.set_yticks(yticks)\n ax.set_yticklabels([ax.yaxis.get_major_formatter()(ytick) for ytick in yticklabels], fontsize=MIDFONT)\ndef _lambert_ticks(ax, ticks, tick_location, line_constructor, tick_extractor):\n \"\"\"Get the tick locations and labels for an axis of a Lambert Conformal projection.\"\"\"\n outline_patch = sgeom.LineString(ax.outline_patch.get_path().vertices.tolist())\n axis = find_side(outline_patch, tick_location)\n n_steps = 30\n extent = ax.get_extent(ccrs.PlateCarree())\n _ticks = []\n for t in ticks:\n xy = line_constructor(t, n_steps, extent)\n proj_xyz = ax.projection.transform_points(ccrs.Geodetic(), xy[:, 0], xy[:, 1])\n xyt = proj_xyz[..., :2]\n ls = sgeom.LineString(xyt.tolist())\n locs = axis.intersection(ls)\n if not locs:\n tick = [None]\n else:\n tick = tick_extractor(locs.xy)\n _ticks.append(tick[0])\n # Remove ticks that aren't visible: \n ticklabels = copy(ticks)\n while True:\n try:\n index = _ticks.index(None)\n except ValueError:\n break\n _ticks.pop(index)\n ticklabels.pop(index)\n return _ticks, ticklabels\n\ndef get_station_df(sta_path):\n '''get station info'''\n df = pd.read_excel(sta_path)\n df=df.dropna()\n return(df)\n\ndef conv_deg(deg_str):\n '''convert to degree info'''\n value=int(deg_str)//100\n value=value+(int(deg_str)-value*100)/60\n return(value)\n\n#--------------Function Defination----------------\n\ndef main():\n # Input File\n raw_file='/home/metctm1/array/data/2011-UST-RAP/a_precip_20201113141016.csv'\n\n # Province shp file\n province_shp_file=os.getenv('SHP_LIB')+'/cnmap/cnhimap.dbf'\n county_shp_file=os.getenv('SHP_LIB')+'/cnmap/county_2004.dbf'\n\n south_china_province=['广东', '广西', '海南']\n \n \n \n # deal with raw input\n df = pd.read_csv(raw_file,parse_dates=True) \n df['id']=df['lon']*df['lat']\n df_process=df.groupby('id').sum() # Resample into hourly data\n df_process['lon'] =df_process['lon']/df_process['val2']\n df_process['lat'] =df_process['lat']/df_process['val2']\n \n\n # read shp files\n province_shp=shpreader.Reader(province_shp_file).geometries()\n county_shp = shpreader.Reader(county_shp_file).geometries()\n \n \n \n # Set figure size\n proj = ccrs.Mercator(central_longitude=115., min_latitude=-80.0, max_latitude=84.0, globe=None, \n latitude_true_scale=22.0, false_easting=0.0, false_northing=0.0, scale_factor=None)\n fig = plt.figure(figsize=[10, 8],frameon=True)\n # Set projection and plot the main figure\n ax = fig.add_axes([0.08, 0.01, 0.8, 0.94], projection=proj)\n # Set figure extent\n ax.set_extent([109, 118, 20, 26],crs=ccrs.PlateCarree())\n \n\n # plot shp boundaries\n ax.add_geometries(county_shp, ccrs.PlateCarree(),facecolor='none', edgecolor='gray',linewidth=0.5, zorder = 0)\n ax.add_geometries(province_shp, ccrs.PlateCarree(),facecolor='none', edgecolor='black',linewidth=1., zorder = 1)\n\n # Add ocean, land, rivers and lakes\n #ax.add_feature(cfeature.OCEAN.with_scale('50m'))\n #ax.add_feature(cfeature.LAND.with_scale('50m'))\n # *must* call draw in order to get the axis boundary used to add ticks:\n fig.canvas.draw()\n # Define gridline locations and draw the lines using cartopy's built-in gridliner:\n # xticks = np.arange(80,130,10)\n # yticks = np.arange(15,55,5)\n xticks = range(109, 118, 2)\n yticks = range(20, 26, 2) \n #ax.gridlines(xlocs=xticks, 
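find_side and _lambert_ticks above reduce tick placement to intersecting the axes outline with constructed lines; the underlying shapely operation on a toy rectangle:

import shapely.geometry as sgeom

outline = sgeom.LineString([(0, 0), (10, 0), (10, 10), (0, 10), (0, 0)])
bottom = sgeom.LineString([(0, 0), (10, 0)])
tick = sgeom.LineString([(3, -1), (3, 1)])
print(bottom.intersection(tick))  # POINT (3 0)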
ylocs=yticks,zorder=1,linestyle='--',lw=0.5,color='gray')\n\n # Label the end-points of the gridlines using the custom tick makers:\n ax.xaxis.set_major_formatter(LONGITUDE_FORMATTER) \n ax.yaxis.set_major_formatter(LATITUDE_FORMATTER)\n lambert_xticks(ax, xticks)\n lambert_yticks(ax, yticks)\n\n # Marker size in units of points^2\n cmap=cmaps.precip2_17lev\n sc=ax.scatter( df_process['lon'], df_process['lat'], marker='.', c=df_process['val1'], \n cmap=cmap, norm=matplotlib.colors.BoundaryNorm([0, 1, 2, 5, 10, 20, 30, 40, 50, 70, 100, 150, 200, 250, 300, 400, 500, 600], cmap.N),\n s=15,zorder=1, transform=ccrs.Geodetic(), label='pr')\n\n df_sig=df_process.where(df_process['val1']>250.)\n ax.scatter( df_sig['lon'], df_sig['lat'], marker='.', c=df_sig['val1'], \n cmap=cmap, norm=matplotlib.colors.BoundaryNorm([0, 1, 2, 5, 10, 20, 30, 40, 50, 70, 100, 150, 200, 250, 300, 400, 500, 600], cmap.N),\n s=50,zorder=9, transform=ccrs.Geodetic())\n \n plt.title('Observed Accumulated Rainfall during Mangkhut (1822)')\n cax=fig.add_axes([0.15, 0.02, 0.7, 0.03])#位置[左,下,右,上]\n cbar = fig.colorbar(sc,ticks=[0, 1, 5, 20, 40, 70, 150, 250, 400], cax=cax, orientation='horizontal')\n# cbar = fig.colorbar(sc)\n\n# Show figure\n plt.savefig('../fig/mangkhut_pr.png', dpi=120, bbox_inches='tight')\n# plt.show()\n\n\n\nif __name__ == \"__main__\":\n main()\n\n\n","sub_path":"2011-UST-RAP/script/201119-draw-station-map-prec-mangkhut.py","file_name":"201119-draw-station-map-prec-mangkhut.py","file_ext":"py","file_size_in_byte":6685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"572776070","text":"import logging\nimport re\nimport MySQLdb\nimport sys\nimport os\n\nsys.path.append(os.path.abspath(os.path.dirname(__file__) + '../..'))\n#from config.config import hpo_conn as db\nfrom text.entity import Entity, Entities\nfrom config import config\nfrom text.token2 import Token2\n#from text.offset import Offset, Offsets, perfect_overlap, contained_by\n\nhpo_words = set()\nhpo_stopwords = set() \n\n\nclass HPOEntity(Entity):\n\t\"\"\"HPO entities\"\"\"\n\tdef __init__(self, tokens, sid, *args, **kwargs):\n\t\tsuper(HPOEntity, self).__init__(tokens, *args, **kwargs)\n\t\tself.type = \"hpo\"\n\t\tself.subtype = kwargs.get(\"subtype\")\n\t\tself.nextword = kwargs.get(\"nextword\")\n\t\tself.sid = sid\n\t\tself.hpo_id = None\n\t\tself.hpo_score = 0\n\t\tself.hpo_name = 0\n \n\t#tf_regex = re.compile(r\"\\A[A-Z]+\\d*\\w*\\d*\\Z\")\n\n\tdef get_dic(self):\n\t\tdic = super(HPOEntity, self).get_dic()\n\t\t#dic[\"subtype\"] = self.subtype\n\t\tdic[\"hpo_id\"] = self.hpo_id\n\t\tdic[\"hpo_name\"] = self.hpo_name\n\t\tdic[\"ssm_score\"] = self.ssm_score\n\t\tdic[\"ssm_entity\"] = self.ssm_best_ID\n\t\treturn dic\n\n\n\tdef validate(self, ths, rules):\n\t\t\"\"\"\n\t\tUse rules to validate if the entity was correctly identified\n\t\t:param rules:\n\t\t:return: True if entity does not fall into any of the rules, False if it does\n\t\t\"\"\"\n\t\tif \"stopwords\" in rules:\n\t\t\twords = self.text.split(\" \")\n\t\t\t#words += self.text.split(\"-\")\n\t\t\tstop = False\n\t\t\tfor s in hpo_stopwords:\n\t\t\t\tif any([s == w.lower() for w in words]):\n\t\t\t\t\tlogging.debug(\"ignored stopword %s\" % self.text)\n\t\t\t\t\tstop = True\n\t\t\tif stop:\n\t\t\t\treturn False\n\n\t\tif \"paren\" in rules:\n\t\t\tif (self.text[-1] == \")\" and \"(\" not in self.text) or (self.text[-1] == \"]\" and \"[\" not in self.text) or \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t (self.text[-1] == \"}\" and 
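The scatter calls above discretise rainfall through BoundaryNorm (the script spreads its bins over cmap.N colours); the mapping itself, shown with one colour per bin for clarity:

import matplotlib.colors

bounds = [0, 1, 2, 5, 10, 20, 30, 40, 50, 70, 100, 150, 200, 250, 300, 400, 500, 600]
norm = matplotlib.colors.BoundaryNorm(bounds, len(bounds) - 1)
assert int(norm(35)) == 6   # 35 mm falls in the 30-40 bin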
\"{\" not in self.text):\n\t\t\t\tlogging.debug(\"parenthesis %s\" % self.text)\n\t\t\t\tself.dend -= 1\n\t\t\t\tself.end -= 1\n\t\t\t\tself.text = self.text[:-1]\n\t\treturn True","sub_path":"src/text/text/hpo_entity.py","file_name":"hpo_entity.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"67228768","text":"from wordcloud import WordCloud\r\nimport numpy as np\r\nimport jieba\r\nfrom PIL import Image\r\ndef trans(data):\r\n alice_coloring = np.array(Image.open(\"A.jpg\"))\r\n wordcloud = WordCloud(background_color=\"white\",mask=alice_coloring,font_path = 'WE.TTF',width=1000, height=860, margin=2).generate(data)\r\n wordcloud.to_file(\"qw.jpg\")\r\ndef main():\r\n a=[]\r\n f = open('comment.txt', 'r', encoding=\"utf-8\").read()\r\n words=list(jieba.cut(f))\r\n for word in words:\r\n if len(word)>1:\r\n a.append(word)\r\n txt=r' '.join(a)\r\n trans(txt)\r\nif __name__ == '__main__':\r\n # main()\r\n url = \"https://static.zhihu.com/heifetz/main.app.bcbe6146eb81b5efaede.js\"\r\n import requests\r\n r = requests.get(url)\r\n print(r.text)\r\n f = open(\"js.txt\",\"w\",encoding=\"utf-8\")\r\n f.write(r.text)\r\n\r\n\r\n\r\n\r\n\r\n# width,height,margin可以设置图片属性\r\n\r\n# generate 可以对全部文本进行自动分词,但是他对中文支持不好,对中文的分词处理请看我的下一篇文章\r\n#wordcloud = WordCloud(font_path = r'D:\\Fonts\\simkai.ttf').generate(f)\r\n# 你可以通过font_path参数来设置字体集\r\n\r\n#background_color参数为设置背景颜色,默认颜色为黑色\r\n#\r\n# import matplotlib.pyplot as plt\r\n# plt.imshow(wordcloud)\r\n# plt.axis(\"off\")\r\n# plt.show()\r\n#\r\n# wordcloud.to_file('test.png')\r\n","sub_path":"知乎/zhi_Spider/ICE DATA/ICE_ARTICLE/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"382105965","text":"from general import *\n\n# ture_set_1 = file_to_set(\"175 ture 1.csv\")\n# ture_set_2 = file_to_set(\"175 ture 2.csv\")\n\n# ture_set = ture_set_1 | ture_set_2\n\n# set_to_file(ture_set, 'ture.csv')\n\n# test_set_1 = file_to_set(\"test1.csv\")\n# test_set_2 = file_to_set(\"test2.csv\")\n#\n#\n# test = test_set_1 | test_set_2\n\n\n# set_to_file(test, 'test.csv')\n\n\n# lista = []\n\n# a = 0\n#\n# with open('test.csv', 'r') as f:\n# for line in f:\n# # l = line.split()\n# a+= int(line.split()[4])\n#\n# print(a)\n\n\na = 0\n\nwith open('175 ture.csv', 'r') as f:\n for line in f:\n # l = float(line.strip().split(',')[19].strip('\"'))\n l = line.strip().split(',')[19] # ture_change.csv\n # a+= l #float(a)\n print(type(l), l)\n\n# print(a)\n\n\n\n\n\n\n\n","sub_path":"proba.py","file_name":"proba.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"269204593","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport multiprocessing\n\n\ndef run(pname):\n print(pname)\n\n\ndef main():\n for i in range(10):\n p = multiprocessing.Process(target=run, args=('Process-' + str(i),))\n p.start()\n p.join()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"book_02_Python性能分析与优化/chapter_05_多线程与多进程/code_3_多进程.py","file_name":"code_3_多进程.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"188700839","text":"def main():\n argument_spec = ec2_argument_spec()\n argument_spec.update(dict(name=dict(required=True, type='str'), path=dict(default='/', type='str'), 
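The "paren" rule in hpo_entity.py above drops a trailing bracket whose opener never appears in the entity text; the same check as a standalone function (minus the offset bookkeeping):

def trim_unbalanced(text):
    pairs = {')': '(', ']': '[', '}': '{'}
    if text and text[-1] in pairs and pairs[text[-1]] not in text:
        return text[:-1]
    return text

assert trim_unbalanced('HP:0001250)') == 'HP:0001250'
assert trim_unbalanced('(seizure)') == '(seizure)'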
assume_role_policy_document=dict(type='json'), managed_policy=dict(type='list', aliases=['managed_policies']), state=dict(choices=['present', 'absent'], required=True), description=dict(required=False, type='str', default='')))\n module = AnsibleModule(argument_spec=argument_spec, required_if=[('state', 'present', ['assume_role_policy_document'])])\n if (not HAS_BOTO3):\n module.fail_json(msg='boto3 required for this module')\n (region, ec2_url, aws_connect_params) = get_aws_connection_info(module, boto3=True)\n connection = boto3_conn(module, conn_type='client', resource='iam', region=region, endpoint=ec2_url, **aws_connect_params)\n state = module.params.get('state')\n if (state == 'present'):\n create_or_update_role(connection, module)\n else:\n destroy_role(connection, module)","sub_path":"Data Set/bug-fixing-3/134b9f50c3cb71d8158f5e1d586c0da366ddb8db-
-bug.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"279908344","text":"#There are 2 ways for scraping the website\n# 1. USE API\n# 2. HTML SCRAPING USING SOME TOOL LIKE BS4\n\n#Step: Install all the required packages\n#pip install requests\n#pip install bs4\n#pip install html5lib\n\nimport requests\nimport csv\nfrom bs4 import BeautifulSoup\nurl = \"https://shyamal2411.github.io/TypeTest/\"\n\n#step 1: Get the html\nr = requests.get(url)\nhtmlContent = r.content\n# print(htmlContent) \n# PRINTS THE WHOLE HTML CONTENT TO TERMINAL\n\n#step 2: Parse the html\nsoup = BeautifulSoup(htmlContent, 'html.parser')\n# print(soup.prettify)\n# prettifies the content\n\n#step 3: HTML tree traversal\ntitle = soup.title\n# print(title.string)\n\nsoup.find_all(\"a\")\n# print(soup.find_all)\n\nmeta = soup.find_all(\"meta\")\n# print(meta[0])\n# print(meta[1])\n# print(meta[2])\n# print(meta[3])\n\nfile = open(\"Try.csv\",\"w\")\ncsv_writer = csv.writer(file)\nprint(meta[0])\n\n","sub_path":"Python/webScrap.py","file_name":"webScrap.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"616065058","text":"\"\"\" This module defines the url patters for profiles \"\"\"\n\nfrom django.conf.urls import url\n\nfrom .views import (\n ProfileListApi, UpdateUserAPIView, UserProfileView\n)\n\napp_name = 'profiles'\n\nurlpatterns = [\n url(r'^profiles/(?P[\\w\\-]+)/?$',\n UserProfileView.as_view(), name='profile'),\n url(r'^profiles/(?P[\\w\\-]+)/edit/?$',\n UpdateUserAPIView.as_view(), name='profile_update'),\n url(r'^profiles/?$',\n ProfileListApi.as_view(), name='list_profiles'),\n]\n","sub_path":"authors/apps/profiles/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"401612528","text":"from dutymanager.db.methods import AsyncDatabase\nfrom dutymanager.files.errors import VK_ERROR\nfrom dutymanager.units.utils import *\nfrom module import VKError, types\nfrom module import Blueprint\n\nbot = Blueprint()\ndb = AsyncDatabase.get_current()\n\n\n@bot.event.ban_expired()\nasync def ban_expired(event: types.BanExpired):\n peer_id = db.chats(event.object.chat)\n user_id = event.object.user_id\n try:\n await bot.api.messages.add_chat_user(\n chat_id=int(peer_id - 2e9),\n user_id=user_id\n )\n except VKError as e:\n error = list(e.args)[0]\n await send_msg(\n peer_id=int(peer_id - 2e9),\n message=f\"⚠ Произошла ошибка на этапе добавления забаненого [id{user_id}|пользователя].\"\n f\"\\nВК ответил: {error[1]} ({error[0]})\"\n f\"\\nПричина бана: {event.object.comment[:250]}\"\n )\n\n\n@bot.event.add_user()\nasync def add_user(event: types.AddUser):\n peer_id = db.chats(event.object.chat)\n user_id = event.object.user_id\n try:\n await bot.api.messages.add_chat_user(\n chat_id=int(peer_id - 2e9),\n user_id=user_id\n )\n except VKError as e:\n e = list(e.args)\n await send_msg(\n peer_id=peer_id,\n message=f\"⚠ Произошла ошибка на этапе добавления [id{user_id}|пользователя].\"\n f\"\\nВК ответил: {VK_ERROR.get(e[0])}\"\n )\n","sub_path":"dutymanager/plugins/base_events/return_user.py","file_name":"return_user.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"299442785","text":"# Bouncer\n# Written by aquova, 
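webScrap.py above runs its three steps against a live URL; the same parse and traverse calls work on an in-memory document, which keeps the example network-free:

from bs4 import BeautifulSoup

html = "<html><head><title>TypeTest</title></head><body><a href='#'>x</a></body></html>"
soup = BeautifulSoup(html, 'html.parser')
print(soup.title.string)          # TypeTest
print(len(soup.find_all('a')))    # 1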
2018-2019\n# https://github.com/aquova/bouncer\n\nimport discord, json, sqlite3, datetime, asyncio, os, subprocess, sys\nimport Utils\nfrom User import User\nfrom Utils import DATABASE_PATH\nfrom Hunt import Hunter\n\n# Reading values from config file\nwith open('private/config.json') as config_file:\n    cfg = json.load(config_file)\n\n# Configuring preferences\ndiscordKey = cfg['discord']\n# The first entry in validInputChannels is the one DMs and censor warnings are sent to\nvalidInputChannels = cfg['channels']['listening']\n# Channel to save notes/warns/etc\nlogChannel = cfg['channels']['log']\n# Channel to save system logs - leaves, bans, joins, etc\nsystemLog = cfg['channels']['syslog']\nvalidRoles = cfg['roles']\n\nsendBanDM = (cfg['DM']['ban'].upper() == \"ON\")\nsendWarnDM = (cfg['DM']['warn'].upper() == \"ON\")\n\n# Determine if this is a debugging instance\ndebugBot = (cfg['debug'].upper() == \"TRUE\")\ndebugging = False\n\nclient = discord.Client()\nstartTime = 0\n\ncharLimit = 2000\n\n# Event hunt object\nhunter = Hunter()\n\n# Notes on database structure:\n# Most of the columns are self-explanatory\n# num column is the category of the infraction\n# 0: Ban\n# >0: The number of the warning\n# -1: Note\n# -2: Kick\n# -3: Unban\n\nsqlconn = sqlite3.connect(DATABASE_PATH)\nsqlconn.execute(\"CREATE TABLE IF NOT EXISTS badeggs (dbid INT PRIMARY KEY, id INT, username TEXT, num INT, date DATE, message TEXT, staff TEXT, post INT);\")\nsqlconn.execute(\"CREATE TABLE IF NOT EXISTS blocks (id TEXT);\")\nsqlconn.execute(\"CREATE TABLE IF NOT EXISTS staffLogs (staff TEXT PRIMARY KEY, bans INT, warns INT);\")\nsqlconn.execute(\"CREATE TABLE IF NOT EXISTS monthLogs (month TEXT PRIMARY KEY, bans INT, warns INT);\")\nsqlconn.execute(\"CREATE TABLE IF NOT EXISTS hunters (id INT PRIMARY KEY, username TEXT, count INT);\")\nsqlconn.commit()\nsqlconn.close()\n\nwarnThreshold = 3\nreviewThreshold = 6 # In months\n\n# Containers to store needed information in memory\nrecentBans = {}\nblockList = []\nrecentReply = None\n\nhelpInfo = {\n    '$WARN': '`$warn USER reason`',\n    '$BAN': '`$ban USER reason`',\n    '$UNBAN': '`$unban USER reason`',\n    '$KICK': '`$kick USER reason`',\n    '$SEARCH': '`$search USER`',\n    '$NOTE': '`$note USER message`',\n    '$REMOVE': '`$remove USER [num]`',\n    '$BLOCK': '`$block USER`',\n    '$UNBLOCK': '`$unblock USER`',\n    '$REPLY': '`$reply USER`',\n    '$EDIT': '`$edit USER [num] new_message`'\n}\n\n# This is basically a makeshift enum\nclass LogTypes:\n    UNBAN = -3\n    KICK = -2\n    NOTE = -1\n    BAN = 0\n    WARN = 1\n\n# Searches the database for the specified user, given a message\n# m: Discord message object\nasync def userSearch(m):\n    try:\n        user = User(m, recentBans)\n    except User.MessageError:\n        await m.channel.send(\"I wasn't able to find a user anywhere based on that message. `$search USER`\")\n        return\n\n    searchResults = user.search()\n    try:\n        username = user.getName(recentBans)\n        if searchResults == []:\n            await m.channel.send(\"User {} was not found in the database\\n\".format(username))\n            return\n    except User.MessageError:\n        await m.channel.send(\"That user was not found in the database or the server\\n\")\n        return\n\n    noteTotal = 0\n    criticizeNotes = True\n    out = \"User {} was found with the following infractions\\n\".format(username)\n    for index, item in enumerate(searchResults):\n        n = \"{}. 
\".format(index+1)\n if item[1] == LogTypes.BAN:\n n += \"[{}] **{}** - Banned by {} - {}\\n\".format(Utils.formatTime(item[2]), item[0], item[4], item[3])\n elif item[1] == LogTypes.NOTE:\n n += \"[{}] **{}** - Note by {} - {}\\n\".format(Utils.formatTime(item[2]), item[0], item[4], item[3])\n noteTotal += 1\n elif item[1] == LogTypes.KICK:\n n += \"[{}] **{}** - Kicked by {} - {}\\n\".format(Utils.formatTime(item[2]), item[0], item[4], item[3])\n elif item[1] == LogTypes.UNBAN:\n n += \"[{}] **{}** - Unbanned by {} - {}\\n\".format(Utils.formatTime(item[2]), item[0], item[4], item[3])\n else: # LogTypes.WARN\n n += \"[{}] **{}** - Warning #{} by {} - {}\\n\".format(Utils.formatTime(item[2]), item[0], item[1], item[4], item[3])\n criticizeNotes = False\n\n if item[1] >= warnThreshold:\n n += \"They have received {} warnings, it is recommended that they be banned.\\n\".format(warnThreshold)\n\n if len(out) + len(n) < charLimit:\n out += n\n else:\n await m.channel.send(out)\n out = n\n\n await m.channel.send(out)\n\n# Note a warn or ban for a user\n# m: Discord message object\nasync def logUser(m, state):\n try:\n user = User(m, recentBans)\n except User.MessageError:\n if state == LogTypes.NOTE:\n await m.channel.send(\"I wasn't able to understand that message: `$note USER`\")\n else:\n await m.channel.send(\"I wasn't able to understand that message: `$log USER`\")\n return\n\n sqlconn = sqlite3.connect(DATABASE_PATH)\n if state == LogTypes.WARN:\n count = sqlconn.execute(\"SELECT COUNT(*) FROM badeggs WHERE id=? AND num > 0\", [user.id]).fetchone()[0] + 1\n else:\n count = state\n globalcount = sqlconn.execute(\"SELECT COUNT(*) FROM badeggs\").fetchone()[0]\n currentTime = datetime.datetime.utcnow()\n\n try:\n username = user.getName(recentBans)\n except User.MessageError:\n username = \"ID: \" + str(user.id)\n await m.channel.send(\"I wasn't able to find a username for that user, but whatever, I'll do it anyway.\")\n\n mes = Utils.parseMessage(m.content, username)\n if len(m.attachments) != 0:\n for item in m.attachments:\n mes += '\\n{}'.format(item.url)\n\n if mes == \"\":\n await m.channel.send(\"Please give a reason for why you want to log them.\")\n return\n\n params = [globalcount + 1, user.id, username, count, currentTime, mes, m.author.name]\n\n # Generate message for log channel\n import Visualize\n if state == LogTypes.BAN:\n logMessage = \"[{}] **{}** - Banned by {} - {}\\n\".format(Utils.formatTime(currentTime), params[2], m.author.name, mes)\n Visualize.updateCache(sqlconn, m.author.name, (1, 0), Utils.formatTime(currentTime))\n elif state == LogTypes.WARN:\n logMessage = \"[{}] **{}** - Warning #{} by {} - {}\\n\".format(Utils.formatTime(currentTime), params[2], count, m.author.name, mes)\n Visualize.updateCache(sqlconn, m.author.name, (0, 1), Utils.formatTime(currentTime))\n elif state == LogTypes.KICK:\n logMessage = \"[{}] **{}** - Kicked by {} - {}\\n\".format(Utils.formatTime(currentTime), params[2], m.author.name, mes)\n elif state == LogTypes.UNBAN:\n def unban_check(check_mes):\n if check_mes.author == m.author and check_mes.channel == m.channel:\n # The API is stupid, returning a boolean will keep the check open, you have to return something non-false\n if check_mes.content.upper() == 'YES' or check_mes.content.upper() == 'Y':\n return 'Y'\n else:\n return 'N'\n\n # In the event of an unban, we need to first\n # A. Ask if they are sure they meant to do this\n await m.channel.send(\"In order to log an unban, all old logs will be removed. Are you sure? 
Y/[N]\")\n check = await client.wait_for('message', check=unban_check, timeout=10.0)\n # I have no idea why this returns a message and not just 'Y'\n if check.content.upper() == 'Y':\n # B. If so, clear out all previous logs\n await m.channel.send(\"Very well, removing all old logs to unban\")\n logs = user.search()\n for log in logs:\n sqlconn.execute(\"REPLACE INTO badeggs (dbid, id, username, num, date, message, staff, post) VALUES (?, NULL, NULL, NULL, NULL, NULL, NULL, NULL)\", [log[5]])\n\n # C. Proceed with the unbanning\n logMessage = \"[{}] **{}** - Unbanned by {} - {}\\n\".format(Utils.formatTime(currentTime), params[2], m.author.name, mes)\n Visualize.updateCache(sqlconn, m.author.name, (-1, 0), Utils.formatTime(currentTime))\n else:\n await m.channel.send(\"Unban aborted.\")\n sqlconn.close()\n return\n else: # LogTypes.NOTE\n noteCount = sqlconn.execute(\"SELECT COUNT(*) FROM badeggs WHERE id=? AND num = -1\", [user.id]).fetchone()[0] + 1\n logMessage = \"Note #{} made for {}\".format(noteCount, username)\n\n await m.channel.send(logMessage)\n\n # Send ban recommendation, if needed\n if (state == LogTypes.WARN and count >= warnThreshold):\n await m.channel.send(\"This user has received {} warnings or more. It is recommended that they be banned.\".format(warnThreshold))\n\n logMesID = 0\n if state != LogTypes.NOTE:\n # Send message to log channel\n try:\n chan = client.get_channel(logChannel)\n logMes = await chan.send(logMessage)\n logMesID = logMes.id\n except discord.errors.InvalidArgument:\n await m.channel.send(\"The logging channel has not been set up in `config.json`. In order to have a visual record, please specify a channel ID.\")\n\n # Send a DM to the user\n try:\n u = user.getMember()\n if u != None:\n DMchan = u.dm_channel\n if DMchan == None:\n DMchan = await u.create_dm()\n\n if state == LogTypes.BAN and sendBanDM:\n await DMchan.send(\"Hi there! You've been banned from the Stardew Valley Discord for violating the rules: `{}`. If you have any questions, you can send a message to the moderators via the sidebar at , and they'll forward it to us.\".format(mes))\n elif state == LogTypes.WARN and sendWarnDM:\n await DMchan.send(\"Hi there! You received warning #{} in the Stardew Valley Discord for violating the rules: `{}`. Please review <#445729591533764620> and <#445729663885639680> for more info. If you have any questions, you can reply directly to this message to contact the staff.\".format(count, mes))\n elif state == LogTypes.KICK and sendBanDM:\n await DMchan.send(\"Hi there! You've been kicked from the Stardew Valley Discord for violating the following reason: `{}`. If you have any questions, you can send a message to the moderators via the sidebar at , and they'll forward it to us.\".format(mes))\n\n # I don't know if any of these are ever getting tripped\n except discord.errors.HTTPException as e:\n await m.channel.send(\"ERROR: While attempting to DM, there was an unexpected error. Tell aquova this: {}\".format(e))\n except discord.errors.Forbidden:\n await m.channel.send( \"ERROR: I am not allowed to DM the user. It is likely that they are not accepting DM's from me.\")\n except discord.errors.NotFound:\n await m.channel.send(\"ERROR: I was unable to find the user to DM. 
I'm unsure how this can be the case, unless their account was deleted\")\n\n    # Update database\n    params.append(logMesID)\n    sqlconn.execute(\"INSERT INTO badeggs (dbid, id, username, num, date, message, staff, post) VALUES (?, ?, ?, ?, ?, ?, ?, ?)\", params)\n    sqlconn.commit()\n    sqlconn.close()\n\n# Removes last database entry for specified user\n# m: Discord message object\n# edit: Boolean, signifies if this is a deletion or an edit\nasync def removeError(m, edit):\n    try:\n        user = User(m, recentBans)\n    except User.MessageError:\n        if edit:\n            await m.channel.send(\"I wasn't able to understand that message: `$remove USER [num] new_message`\")\n        else:\n            await m.channel.send(\"I wasn't able to understand that message: `$remove USER [num]`\")\n        return\n\n    # Needed for multi-word usernames\n    try:\n        username = user.getName(recentBans)\n    except User.MessageError:\n        username = str(user.id)\n\n    mes = Utils.parseMessage(m.content, username)\n    if mes == \"\":\n        if edit:\n            await m.channel.send(\"You need to specify an edit message\")\n            return\n        else:\n            mes = \"0\"\n\n    try:\n        index = int(mes.split(\" \")[0]) - 1\n        mes = Utils.strip(mes)\n    except (IndexError, ValueError):\n        index = -1\n\n    # Find most recent entry in database for specified user\n    sqlconn = sqlite3.connect(DATABASE_PATH)\n    searchResults = sqlconn.execute(\"SELECT dbid, id, username, num, date, message, staff, post FROM badeggs WHERE id=?\", [user.id]).fetchall()\n\n    if searchResults == []:\n        await m.channel.send(\"I couldn't find that user in the database\")\n    elif (index > len(searchResults) - 1) or index < -1:\n        await m.channel.send(\"I can't modify item number {}, there aren't that many for this user\".format(index+1))\n    else:\n        item = searchResults[index]\n        import Visualize\n        if edit:\n            if item[3] == LogTypes.NOTE:\n                currentTime = datetime.datetime.utcnow()\n                # Make a copy of the original log, then modify a few fields\n                params = list(item)\n                params[4] = currentTime\n                params[5] = mes\n                params[6] = m.author.name\n                sqlconn.execute(\"REPLACE INTO badeggs (dbid, id, username, num, date, message, staff, post) VALUES (?, ?, ?, ?, ?, ?, ?, ?)\", params)\n                out = \"The following log was edited:\\n[{}] **{}** - Note by {} - {}\\n\".format(Utils.formatTime(item[4]), item[2], item[6], item[5])\n                out += \"The log now reads as follows:\\n[{}] **{}** - Note by {} - {}\\n\".format(Utils.formatTime(params[4]), params[2], params[6], params[5])\n                await m.channel.send(out)\n\n                sqlconn.commit()\n                sqlconn.close()\n                return\n            else:\n                await m.channel.send(\"You can only edit notes for now\")\n                sqlconn.close()\n                return\n\n        # Everything after here is deletion\n        sqlconn.execute(\"REPLACE INTO badeggs (dbid, id, username, num, date, message, staff, post) VALUES (?, NULL, NULL, NULL, NULL, NULL, NULL, NULL)\", [item[0]])\n        out = \"The following log was deleted:\\n\"\n\n        if item[3] == LogTypes.BAN:\n            out += \"[{}] **{}** - Banned by {} - {}\\n\".format(Utils.formatTime(item[4]), item[2], item[6], item[5])\n            Visualize.updateCache(sqlconn, item[6], (-1, 0), Utils.formatTime(item[4]))\n        elif item[3] == LogTypes.NOTE:\n            out += \"[{}] **{}** - Note by {} - {}\\n\".format(Utils.formatTime(item[4]), item[2], item[6], item[5])\n        elif item[3] == LogTypes.UNBAN:\n            out += \"[{}] **{}** - Unbanned by {} - {}\\n\".format(Utils.formatTime(item[4]), item[2], item[6], item[5])\n            Visualize.updateCache(sqlconn, item[6], (1, 0), Utils.formatTime(item[4]))\n        elif item[3] == LogTypes.KICK:\n            out += \"[{}] **{}** - Kicked by {} - {}\\n\".format(Utils.formatTime(item[4]), item[2], item[6], 
item[5])\n else: # LogTypes.WARN\n out += \"[{}] **{}** - Warning #{} by {} - {}\\n\".format(Utils.formatTime(item[4]), item[2], item[3], item[6], item[5])\n Visualize.updateCache(sqlconn, item[6], (0, -1), Utils.formatTime(item[4]))\n await m.channel.send(out)\n\n # Search logging channel for matching post, and remove it\n if item[7] != 0:\n chan = client.get_channel(logChannel)\n m = await chan.fetch_message(item[7])\n await m.delete()\n sqlconn.commit()\n sqlconn.close()\n\n# Prevents DM from a specific user from being forwarded\n# message: Discord message object\n# block: Boolean, true for block, false for unblock\nasync def blockUser(m, block):\n global blockList\n try:\n user = User(m, recentBans)\n except User.MessageError:\n await m.channel.send(\"I wasn't able to understand that message: `$block USER`\")\n return\n\n sqlconn = sqlite3.connect(DATABASE_PATH)\n if block:\n if user.id in blockList:\n await m.channel.send(\"Um... That user was already blocked...\")\n else:\n sqlconn.execute(\"INSERT INTO blocks (id) VALUES (?)\", [user.id])\n blockList.append(user.id)\n await m.channel.send(\"I have now blocked {}. Their messages will no longer display in chat, but they will be logged for later review.\".format(user.id))\n else:\n if user.id not in blockList:\n await m.channel.send(\"That user hasn't been blocked...\")\n else:\n sqlconn.execute(\"DELETE FROM blocks WHERE id=?\", [user.id])\n blockList.remove(user.id)\n await m.channel.send(\"I have now unblocked {}. You will once again be able to hear their dumb bullshit in chat.\".format(user.id))\n sqlconn.commit()\n sqlconn.close()\n\n# Sends a private message to the specified user\nasync def reply(m):\n if m.content.split(\" \")[1] == \"^\":\n if recentReply != None:\n u = recentReply\n else:\n await m.channel.send(\"Sorry, I have no previous user stored. Gotta do it the old fashioned way.\")\n return\n else:\n try:\n user = User(m, recentBans)\n except User.MessageError:\n await m.channel.send(\"I wasn't able to understand that message: `$reply USER`\")\n return\n\n u = user.getMember()\n if u == None:\n await m.channel.send(\"Sorry, but they need to be in the server for me to message them\")\n return\n try:\n mes = Utils.removeCommand(m.content)\n if len(m.attachments) != 0:\n for item in m.attachments:\n mes += '\\n{}'.format(item.url)\n ts = m.created_at.strftime('%Y-%m-%d %H:%M:%S')\n uname = \"{}#{}\".format(u.name, u.discriminator)\n with open(\"private/DMs.txt\", 'a', encoding='utf-8') as openFile:\n openFile.write(\"{} - {} sent a DM to {}: {}\\n\".format(ts, m.author.name, uname, mes))\n\n DMchan = u.dm_channel\n if DMchan == None:\n DMchan = await u.create_dm()\n await DMchan.send(\"A message from the SDV staff: {}\".format(mes))\n await m.channel.send(\"Message sent to {}.\".format(uname))\n\n # I don't know if any of these are ever getting tripped\n except discord.errors.HTTPException as e:\n await m.channel.send(\"ERROR: While attempting to DM, there was an unexpected error. Tell aquova this: {}\".format(e))\n except discord.errors.Forbidden:\n await m.channel.send(\"ERROR: I am not allowed to DM the user. It is likely that they are not accepting DM's from me.\")\n except discord.errors.NotFound:\n await m.channel.send(\"ERROR: I was unable to find the user to DM. 
I'm unsure how this can be the case, unless their account was deleted\")\n\nasync def notebook(m):\n sqlconn = sqlite3.connect(DATABASE_PATH)\n allNotes = sqlconn.execute(\"SELECT * FROM badeggs WHERE num=-1\").fetchall()\n sqlconn.commit()\n sqlconn.close()\n\n with open(\"private/notes.txt\", \"w\") as f:\n for item in allNotes:\n note = \"[{}] **{}** - Note by {} - {}\\n\".format(Utils.formatTime(item[4]), item[2], item[6], item[5])\n f.write(note)\n\n await m.channel.send(\"Your notes, as requested.\")\n with open(\"./private/notes.txt\", \"r\") as f:\n await m.channel.send(file=discord.File(f))\n\n\n# Posts the usernames of all users whose oldest logs are older than reviewThreshold\nasync def userReview(channel):\n # There's probably a clever way to have these first two arrays merged\n usernames = []\n ids = []\n tooNew = []\n sqlconn = sqlite3.connect(DATABASE_PATH)\n # Reverse order so newest logs are checked/eliminated first\n allLogs = sqlconn.execute(\"SELECT id, username, date, num FROM badeggs WHERE num > -1\").fetchall()[::-1]\n\n now = datetime.datetime.now()\n for log in allLogs:\n # Don't want to list users who have been banned\n if log[3] == 0:\n tooNew.append(log[0])\n if log[0] not in ids and log[0] not in tooNew:\n day = log[2].split(\" \")[0]\n dateval = datetime.datetime.strptime(day, \"%Y-%m-%d\")\n testDate = dateval + datetime.timedelta(days=30*reviewThreshold)\n if testDate < now:\n ids.append(log[0])\n usernames.append(log[1])\n else:\n tooNew.append(log[0])\n\n sqlconn.close()\n\n mes = \"These users had their most recent log greater than {} months ago.\\n\".format(reviewThreshold)\n # Reverse order so oldest are first\n for user in usernames[::-1]:\n # This gets past Discord's 2000 char limit\n if len(mes) + len(user) + 2 < charLimit:\n mes += \"`{}`, \".format(user)\n else:\n await channel.send(mes)\n mes = \"`{}`, \".format(user)\n\n await channel.send(mes)\n\nasync def uptime(channel):\n currTime = datetime.datetime.now()\n delta = currTime - startTime\n hours, remainder = divmod(delta.seconds, 3600)\n minutes, seconds = divmod(remainder, 60)\n mes = \"I have been running for {} days, {} hours, and {} minutes\".format(delta.days, hours, minutes)\n\n await channel.send(mes)\n\n@client.event\nasync def on_ready():\n global blockList\n global startTime\n print('Logged in as')\n print(client.user.name)\n print(client.user.id)\n\n startTime = datetime.datetime.now()\n\n sqlconn = sqlite3.connect(DATABASE_PATH)\n blockDB = sqlconn.execute(\"SELECT * FROM blocks\").fetchall()\n blockList = [str(x[0]) for x in blockDB]\n sqlconn.close()\n\n activity_object = discord.Activity(name=\"for your reports!\", type=discord.ActivityType.watching)\n await client.change_presence(activity=activity_object)\n\n@client.event\nasync def on_member_update(before, after):\n if debugBot:\n return\n if before.nick != after.nick:\n if after.nick == None:\n mes = \"**{}#{}** has reset their username\".format(after.name, after.discriminator)\n else:\n new = after.nick\n mes = \"**{}#{}** is now known as `{}`\".format(after.name, after.discriminator, after.nick)\n chan = client.get_channel(systemLog)\n await chan.send(mes)\n elif before.roles != after.roles:\n # Temporary debugging\n try:\n if len(before.roles) > len(after.roles):\n missing = [r for r in before.roles if r not in after.roles]\n mes = \"**{}#{}** had the role `{}` removed.\".format(after.name, after.discriminator, missing[0])\n else:\n newRoles = [r for r in after.roles if r not in before.roles]\n mes = \"**{}#{}** had the 
role `{}` added.\".format(after.name, after.discriminator, newRoles[0])\n chan = client.get_channel(systemLog)\n await chan.send(mes)\n except IndexError as e:\n print(\"Error: Same role indexing issue as before.\")\n print(\"Old roles: {}\".format(before.roles))\n print(\"New roles: {}\".format(after.roles))\n print(\"Error message: {}\".format(e))\n\n@client.event\nasync def on_member_ban(server, member):\n global recentBans\n if debugBot:\n return\n recentBans[member.id] = \"{}#{} : {}\".format(member.name, member.discriminator, member.id)\n mes = \"**{}#{} ({})** has been banned.\".format(member.name, member.discriminator, member.id)\n chan = client.get_channel(systemLog)\n await chan.send(mes)\n\n@client.event\nasync def on_member_remove(member):\n # I know they aren't banned, but still we may want to log someone after they leave\n global recentBans\n if debugBot:\n return\n recentBans[member.id] = \"{}#{} : {}\".format(member.name, member.discriminator, member.id)\n mes = \"**{}#{} ({})** has left\".format(member.name, member.discriminator, member.id)\n chan = client.get_channel(systemLog)\n await chan.send(mes)\n\n@client.event\n# Needs to be raw reaction so it can still get reactions after reboot\nasync def on_raw_reaction_add(payload):\n if debugBot:\n return\n if payload.message_id == cfg[\"gatekeeper\"][\"message\"] and payload.emoji.name == cfg[\"gatekeeper\"][\"emoji\"]:\n # Raw payload just returns IDs, so need to iterate through connected servers to get server object\n # Since each bouncer instance will only be in one server, it should be quick.\n # If bouncer becomes general purpose (god forbid), may need to rethink this\n try:\n server = [x for x in client.guilds if x.id == payload.guild_id][0]\n new_role = discord.utils.get(server.roles, id=cfg[\"gatekeeper\"][\"role\"])\n target_user = discord.utils.get(server.members, id=payload.user_id)\n await target_user.add_roles(new_role)\n except IndexError as e:\n print(\"Something has seriously gone wrong.\")\n print(\"Error: {}\".format(e))\n\n@client.event\nasync def on_message_delete(message):\n if debugBot:\n return\n # Don't allow bouncer to react to its own deleted messages\n if message.author.id == client.user.id:\n return\n mes = \"**{}#{}** deleted in <#{}>: `{}`\".format(message.author.name, message.author.discriminator, message.channel.id, message.content)\n if message.attachments != []:\n for item in message.attachments:\n mes += '\\n' + item.url\n chan = client.get_channel(systemLog)\n await chan.send(mes)\n\n@client.event\nasync def on_message_edit(before, after):\n if debugBot:\n return\n # This is to prevent embedding of content from triggering the log\n if before.content == after.content:\n return\n try:\n if len(before.content) + len(after.content) > 200:\n mes1 = \"**{}#{}** modified in <#{}>: `{}`\".format(before.author.name, before.author.discriminator, before.channel.id, before.content)\n mes2 = \"to `{}`\".format(after.content)\n if before.attachments != []:\n for item in before.attachments:\n mes1 += '\\n' + item.url\n if after.attachments != []:\n for item in after.attachments:\n mes2 += '\\n' + item.url\n chan = client.get_channel(systemLog)\n await chan.send(mes1)\n await chan.send(mes2)\n else:\n mes = \"**{}#{}** modified in <#{}>: `{}` to `{}`\".format(before.author.name, before.author.discriminator, before.channel.id, before.content, after.content)\n if after.attachments != []:\n for item in after.attachments:\n mes += '\\n' + item.url\n chan = client.get_channel(systemLog)\n await chan.send(mes)\n 
except discord.errors.HTTPException as e:\n print(\"Unknown error with editing message. This message was unable to post for this reason: {}\\n\".format(e))\n\n@client.event\nasync def on_member_join(member):\n if debugBot:\n return\n mes = \"**{}#{} ({})** has joined\".format(member.name, member.discriminator, member.id)\n chan = client.get_channel(systemLog)\n await chan.send(mes)\n\n@client.event\nasync def on_voice_state_update(member, before, after):\n if debugBot:\n return\n if (after.channel == None):\n mes = \"**{}#{}** has left voice channel {}\".format(member.name, member.discriminator, before.channel.name)\n chan = client.get_channel(systemLog)\n await chan.send(mes)\n elif (before.channel == None):\n mes = \"**{}#{}** has joined voice channel {}\".format(member.name, member.discriminator, after.channel.name)\n chan = client.get_channel(systemLog)\n await chan.send(mes)\n\n@client.event\nasync def on_reaction_add(reaction, user):\n if user.id == client.user.id:\n return\n\n if hunter.getWatchedChannel() == reaction.message.channel.id:\n hunter.addReaction(user)\n\n@client.event\nasync def on_message(message):\n global recentReply\n global debugging\n if message.author.id == client.user.id:\n return\n try:\n # Enable debugging\n if message.content.startswith(\"$debug\") and message.author.id == cfg['owner']:\n if not debugBot:\n debugging = not debugging\n await message.channel.send(\"Debugging {}\".format(\"enabled\" if debugging else \"disabled\"))\n return\n\n # If debugging, the real bot should ignore the owner\n if debugging and message.author.id == cfg['owner']:\n return\n # The debug bot should only ever obey the owner\n elif debugBot and message.author.id != cfg['owner']:\n return\n\n # If they sent a private DM to bouncer\n if type(message.channel) is discord.channel.DMChannel:\n # Regardless of blocklist or not, log their messages\n ts = message.created_at.strftime('%Y-%m-%d %H:%M:%S')\n\n # Store who the most recent user was, for $reply ^\n recentReply = message.author\n\n mes = \"**{}#{}** (ID: {}): {}\".format(message.author.name, message.author.discriminator, message.author.id, message.content)\n if message.attachments != []:\n for item in message.attachments:\n mes += '\\n' + item.url\n\n with open(\"private/DMs.txt\", 'a', encoding='utf-8') as openFile:\n openFile.write(\"{} - {}\\n\".format(ts, mes))\n\n if str(message.author.id) not in blockList:\n chan = client.get_channel(validInputChannels[0])\n await chan.send(mes)\n\n # Temporary - notify if UB3R-BOT has removed something on its word censor\n elif (message.author.id == 85614143951892480 and message.channel.id == 233039273207529472) and (\"Word Censor Triggered\" in message.content) and not debugBot:\n mes = \"Uh oh, looks like the censor might've been tripped.\\nhttps://discordapp.com/channels/{}/{}/{}\".format(message.guild.id, message.channel.id, message.id)\n chan = client.get_channel(validInputChannels[0])\n await chan.send(mes)\n\n # If a user pings bouncer\n elif client.user in message.mentions:\n mes = \"**{}#{}** (ID: {}) pinged me in <#{}>: {}\".format(message.author.name, message.author.discriminator, message.author.id, message.channel.id, message.content)\n if message.attachments != []:\n for item in message.attachments:\n mes += '\\n' + item.url\n mes += \"\\nhttps://discordapp.com/channels/{}/{}/{}\".format(message.guild.id, message.channel.id, message.id)\n chan = client.get_channel(validInputChannels[0])\n await chan.send(mes)\n\n elif Utils.checkRoles(message.author, validRoles):\n # Special 
case for the egg hunt functions. We want only permitted roles to access them,\n        # but their channel will always be new, so allow any channel access\n        if message.content.startswith(\"$starthunt\"):\n            words = message.clean_content.split(\" \")\n            if len(words) != 2:\n                await message.channel.send(\"Invalid command. `$starthunt EMOJI`\")\n                return\n            hunter.setWatchedChannel(message.channel)\n            mes = await message.channel.send(\"{}\".format(words[1]))\n            try:\n                emoji = words[1].split(\":\")[1]\n                emojiObject = [x for x in message.guild.emojis if x.name == emoji][0]\n                await mes.add_reaction(emojiObject)\n            except IndexError:\n                emoji = words[1].replace(\":\", \"\")\n                await mes.add_reaction(emoji)\n            await message.delete()\n        elif message.content.startswith(\"$endhunt\"):\n            hunter.stopWatching()\n            await message.channel.send(\"I hope your hunt has been victorious!\")\n        elif message.content.startswith(\"$gethunt\"):\n            hunter.export()\n            with open(\"./private/hunters.csv\", \"r\") as f:\n                await message.channel.send(file=discord.File(f))\n\n        # If they have privileges to access bouncer functions\n        elif message.channel.id in validInputChannels:\n            # This if/elif thing isn't ideal, but it's by far the simplest way\n            if message.content.upper() == \"$HELP\":\n                helpMes = (\n                    \"Issue a warning: `$warn USER message`\\n\"\n                    \"Log a ban: `$ban USER reason`\\n\"\n                    \"Log an unbanning: `$unban USER reason`\\n\"\n                    \"Log a kick: `$kick USER reason`\\n\"\n                    \"Search for a user: `$search USER`\\n\"\n                    \"Create a note about a user: `$note USER message`\\n\"\n                    \"Show all notes: `$notebook`\\n\"\n                    \"Remove a user's log: `$remove USER index(optional)`\\n\"\n                    \"Edit a user's note: `$edit USER index(optional) new_message`\\n\"\n                    \"Stop a user from sending DMs to us: `$block/$unblock USERID`\\n\"\n                    \"Reply to a user in DMs: `$reply USERID` - To reply to the most recent DM: `$reply ^`\\n\"\n                    \"Plot warn/ban stats: `$graph`\\nReview which users have old logs: `$review`\\n\"\n                    \"View bot uptime: `$uptime`\\n\"\n                    \"DMing users when they are banned is `{}`\\n\"\n                    \"DMing users when they are warned is `{}`\".format(sendBanDM, sendWarnDM)\n                )\n                await message.channel.send(helpMes)\n            elif message.content.upper() == \"$NOTEBOOK\":\n                await notebook(message)\n            elif message.content.upper() in helpInfo.keys():\n                await message.channel.send(helpInfo[message.content.upper()])\n            elif message.content.upper() == \"$UPDATE\":\n                if message.author.id == cfg[\"owner\"]:\n                    await message.channel.send(\"Updating and restarting...\")\n                    subprocess.call([\"git\", \"pull\"])\n                    sys.exit()\n                else:\n                    await message.channel.send(\"Who do you think you are.\")\n                    return\n            elif message.content.upper() == \"$GRAPH\":\n                import Visualize # Import here to avoid debugger crashing from matplotlib issue\n                Visualize.genUserPlot()\n                Visualize.genMonthlyPlot()\n                with open(\"./private/user_plot.png\", 'rb') as f:\n                    await message.channel.send(file=discord.File(f))\n\n                with open(\"./private/month_plot.png\", 'rb') as f:\n                    await message.channel.send(file=discord.File(f))\n            elif message.content.upper() == \"$REVIEW\":\n                await userReview(message.channel)\n            elif message.content.upper() == \"$UPTIME\":\n                await uptime(message.channel)\n            elif message.content.upper() == \"$GETROLES\":\n                output = await Utils.fetchRoleList(message.guild)\n                await message.channel.send(output)\n            elif message.content.startswith(\"$search\"):\n                await userSearch(message)\n            elif message.content.startswith(\"$warn\"):\n                await logUser(message, LogTypes.WARN)\n            elif message.content.startswith(\"$ban\"):\n                await 
logUser(message, LogTypes.BAN)\n            elif message.content.startswith(\"$kick\"):\n                await logUser(message, LogTypes.KICK)\n            elif message.content.startswith(\"$unban\"):\n                await logUser(message, LogTypes.UNBAN)\n            elif message.content.startswith(\"$remove\"):\n                await removeError(message, False)\n            elif message.content.startswith(\"$block\"):\n                await blockUser(message, True)\n            elif message.content.startswith(\"$unblock\"):\n                await blockUser(message, False)\n            elif message.content.startswith(\"$reply\"):\n                await reply(message)\n            elif message.content.startswith(\"$note\"):\n                await logUser(message, LogTypes.NOTE)\n            elif message.content.startswith(\"$edit\"):\n                await removeError(message, True)\n\n            # Debug functions only to be executed by the owner\n            elif message.content.upper() == \"$DUMPBANS\" and message.author.id == cfg[\"owner\"]:\n                output = await Utils.dumpbans(recentBans)\n                await message.channel.send(output)\n\n    except discord.errors.HTTPException as e:\n        print(\"HTTPException: {}\".format(e))\n        pass\n\nclient.run(discordKey)\n","sub_path":"bouncer.py","file_name":"bouncer.py","file_ext":"py","file_size_in_byte":36892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"427283077","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nfrom ..items import AmazonScraperItem\nfrom ..items import BookOption\n\n\nclass AmazonSpider(scrapy.Spider):\n    name = \"amazon\"\n\n    # \n    with open('results.json') as blackboard_results:\n        classes = json.load(blackboard_results)\n    list_blackboard_ISBNs = []\n    for course in classes:\n        if \"bookList\" in course:\n            for book in course[\"bookList\"]:\n                if book:\n                    list_blackboard_ISBNs.append(book[\"ISBN\"])\n\n    start_urls = [\"https://www.campusbooks.com/search/{0}?condition%5Bnew%5D=new&condition%5Bused%5D=used&rental_period=0&postal_code=08540&buysellrent=buy&op=Apply+Filters&form_build_id=form-JPpsrJqHKfZ7FLJEFh_LL_ibJWGBITegB9RgsE9vejM&form_id=cb_search_filters_form\".format(isbn) for isbn in list_blackboard_ISBNs]\n\n    def parse(self, response):\n\n        allOffers = response.xpath('//div[@class = \"standard-offers\"]')\n        options = allOffers.xpath('.//table[@class = \"table table-condensed\"]')\n\n        buying_options = []\n\n        for option in options:\n            bookOption = BookOption()\n\n            bookOption['condition'] = option.xpath('.//td[@class = \"condition\"]/text()').extract_first().strip()\n            bookOption['price'] = option.xpath('.//td[@class = \"price hidden-xs\"]/text()').extract_first().strip()[1:]\n            bookOption['seller'] = option.xpath('.//span[@class = \"sprite-logo\"]/@title').extract_first()\n            bookOption['link'] = option.xpath('.//button[@class = \"btn orange-btn btn-fit\"]/../@href').extract_first()\n            buying_options.append(dict(bookOption))\n\n        scraperItem = AmazonScraperItem()\n        scraperItem[\"isbn\"] = response.request.url[35:48]\n        scraperItem[\"options\"] = buying_options\n\n        yield scraperItem\n","sub_path":"server/public/scripts/amazon_scraper/amazon_scraper/spiders/amazon.py","file_name":"amazon.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"165657796","text":"import cv2\r\nimport time\r\nimport numpy as np\r\n\r\n#To save the output\r\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\r\noutputfile = cv2.VideoWriter('output.avi', fourcc, 30.0, (640, 480))\r\n\r\n#To start the webcam\r\ncam = cv2.VideoCapture(0)\r\n\r\n#make the code sleep\r\ntime.sleep(5)\r\n\r\n#read 60 frames to capture the background\r\nfor i in range(60):\r\n    ret, bg = 
cam.read()\r\n\r\nbg = np.flip(bg, axis = 1)\r\n\r\n#capturing then flipping\r\nwhile(cam.isOpened()):\r\n    ret, img = cam.read()\r\n    if not ret:\r\n        break","sub_path":"cam.py","file_name":"cam.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"471455803","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 4 03:41:59 2020\n\n@author: Rabbitking\n\"\"\"\n\nfrom tkinter import *\n\nroot = Tk()\n\n\n # function that gives the button its behavior when clicked\ndef myClick():\n    myLabel = Label(root, text=\"I clicked a button!\")\n    myLabel.pack()\n\n    # Creating a button widget\nmyButton1 = Button(root, text=\"click me\", state=DISABLED) # DISABLED deactivates the button\nmyButton1.pack()\n\nmyButton2 = Button(root, text=\"click me\", padx=50,pady=30, fg=\"blue\", bg=\"#000000\") # padx/pady set the width/height, fg the font color, bg the background color\nmyButton2.pack()\n\nmyButton3 = Button(root, text=\"click me\", command=myClick) # command binds the given function to the button\nmyButton3.pack()\n\n\n#myButton4 = Button(root, text='click me2') # grid cannot be applied in a window that already uses pack\n#myButton4.grid(row=1,column=2)\n\nroot.mainloop() # loop forever so the program does not exit\n","sub_path":"03.creating_button.py","file_name":"03.creating_button.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"217313204","text":"\"\"\"layout\"\"\"\n\nfrom snipping import key_bindings\nfrom snipping.prompt_toolkit import layout\nfrom snipping.prompt_toolkit import style\n\nMAIN_WINDOW_SIZE = 100\nSUB_WINDOW_SIZE = 40\n\n\ndef title_tokens(_):\n    return [(style.Title, 'Snipping')]\n\n\ndef footer_tokens(_):\n    tokens = []\n    for token in key_bindings.REGISTER_KEYS:\n        tokens.append((style.Key, \"[%s]\" % token[0]))\n        tokens.append((style.Token, \" %s \" % token[1]))\n    return tokens\n\n\ndef create_layout(contents, key_binding_manager=None):\n    result_windows = []\n    for content in contents:\n        result_windows.append(layout.text_window_with_bar(\n            name=content, width=layout.dim(SUB_WINDOW_SIZE),\n            key_binding_manager=key_binding_manager))\n    result_layout = layout.window_rows(result_windows)\n    editor_window = layout.text_window_with_bar(\n        lineno=True, trailing_space=True, width=layout.dim(MAIN_WINDOW_SIZE),\n        key_binding_manager=key_binding_manager)\n    main_layout = layout.window_columns([\n        editor_window, layout.vertical_line(), result_layout\n    ])\n    screen = layout.window_rows([\n        layout.horizontal_tokenlist_window(title_tokens, align='center'),\n        layout.horizontal_line(),\n        main_layout,\n        layout.horizontal_tokenlist_window(footer_tokens, align='right'),\n    ])\n    return screen\n","sub_path":"snipping/layout.py","file_name":"layout.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"108819504","text":"from torch.nn.utils import clip_grad_norm_\nimport pickle as pkl\nimport torch\nfrom torch.nn import functional as F\nimport os\nfrom os.path import join, exists\nimport json\nimport math\n\ndef meta_save(path, word2id, net_args, train_params):\n    \n    if not exists(path):\n        os.makedirs(path)\n\n    with open(join(path, 'vocab.pkl'), 'wb') as f:\n        pkl.dump(word2id, f, pkl.HIGHEST_PROTOCOL)\n\n    meta = {}\n    meta['net_args'] = net_args\n    meta['training_params'] = train_params\n\n    with open(join(path, 'meta.json'), 'w') as f:\n        json.dump(meta, f, indent=4)\n    \n    return meta\n\ndef get_basic_grad_fn(net, clip_grad, max_grad=1e2):\n    def f():\n        grad_norm = 
clip_grad_norm_(\n            [p for p in net.parameters() if p.requires_grad], clip_grad) \n\n        if max_grad is not None and grad_norm >= max_grad:\n\t\t\t# print('WARNING: Exploding Gradients {:.2f}'.format(grad_norm))\n            grad_norm = max_grad\n        grad_log = {}\n        grad_log['grad_norm'] = grad_norm\n        return grad_log\n    return f\n\n\n#################### LSTM helper #########################\n\ndef reorder_sequence(sequence_emb, order, batch_first=False):\n    \"\"\"\n    sequence_emb: [T, B, D] if not batch_first\n    order: list of sequence length\n    \"\"\"\n    batch_dim = 0 if batch_first else 1\n    assert len(order) == sequence_emb.size()[batch_dim]\n\n    order = torch.LongTensor(order).to(sequence_emb.device)\n    sorted_ = sequence_emb.index_select(index=order, dim=batch_dim)\n\n    return sorted_\n\ndef reorder_lstm_states(lstm_states, order):\n    \"\"\"\n    lstm_states: (H, C) of tensor [layer, batch, hidden]\n    order: list of sequence length\n    \"\"\"\n    assert isinstance(lstm_states, tuple)\n    assert len(lstm_states) == 2\n    assert lstm_states[0].size() == lstm_states[1].size()\n    assert len(order) == lstm_states[0].size()[1]\n\n    order = torch.LongTensor(order).to(lstm_states[0].device)\n    sorted_states = (lstm_states[0].index_select(index=order, dim=1),\n                     lstm_states[1].index_select(index=order, dim=1))\n\n    return sorted_states\n\n#################### general sequence helper #########################\ndef len_mask(lens, device):\n    \"\"\" users are responsible for shaping\n    Return: tensor_type [B, T]\n    \"\"\"\n    # build a 0/1 mask marking which positions actually hold tokens\n    max_len = max(lens)\n    batch_size = len(lens)\n    mask = torch.ByteTensor(batch_size, max_len).to(device)\n    mask.fill_(0)\n    for i, l in enumerate(lens):\n        mask[i, :l].fill_(1) # e.g. shape (34, 81): 1 where a token exists, 0 for padding\n    return mask\n\ndef sequence_mean(sequence, seq_lens, dim=1):\n    if seq_lens:\n        assert sequence.size(0) == len(seq_lens) # batch_size\n        sum_ = torch.sum(sequence, dim=dim, keepdim=False)\n        mean = torch.stack([s/l for s, l in zip(sum_, seq_lens)], dim=0)\n    else:\n        mean = torch.mean(sequence, dim=dim, keepdim=False)\n    return mean\n\ndef sequence_loss(logits, targets, xent_fn=None, pad_idx=0):\n    \"\"\" functional interface of SequenceLoss\"\"\"\n    assert logits.size()[:-1] == targets.size()\n\n    mask = targets != pad_idx\n    target = targets.masked_select(mask)\n    logit = logits.masked_select(\n        mask.unsqueeze(2).expand_as(logits)\n    ).contiguous().view(-1, logits.size(-1))\n    if xent_fn:\n        loss = xent_fn(logit, target)\n    else:\n        loss = F.cross_entropy(logit, target)\n    assert (not math.isnan(loss.mean().item())\n            and not math.isinf(loss.mean().item()))\n    return loss\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"635543278","text":"import fresh_tomatoes\nimport media\n\n# Creates instances of media.Movie with title, movie poster url, and\n# youtube url attributes\n\nnapolean_dynamite = media.Movie(\"Napolean Dynamite\",\n                                \"https://upload.wikimedia.org/wikipedia/en/8/87/Napoleon_dynamite_post.jpg\", # NOQA\n                                \"https://youtu.be/ZHDi_AnqwN4\")\n\ntropic_thunder = media.Movie(\"Tropic Thunder\",\n                             \"https://upload.wikimedia.org/wikipedia/en/thumb/d/d6/Tropic_thunder_ver3.jpg/220px-Tropic_thunder_ver3.jpg\", # NOQA\n                             \"https://youtu.be/T-6YhRZowgc\")\n\nhigh_noon = media.Movie(\"High Noon\",\n                        \"https://upload.wikimedia.org/wikipedia/en/5/54/High_Noon_poster.jpg\", # NOQA\n                        \"https://youtu.be/Gh-vOc-gwZs\")\n\nfight_club = media.Movie(\"Fight Club\",\n                         
\"https://vignette1.wikia.nocookie.net/fightclub/images/6/6a/Fight-club-dvd.jpg/revision/latest?cb=20081116042426\", # NOQA\n \"https://youtu.be/SUXWAEX2jlg\")\n\nnew_hope = media.Movie(\"Star Wars: A New Hope\",\n \"https://upload.wikimedia.org/wikipedia/en/8/87/StarWarsMoviePoster1977.jpg\", # NOQA\n \"https://youtu.be/1g3_CFmnU7k\")\n\nmatrix = media.Movie(\"The Matrix\",\n \"https://upload.wikimedia.org/wikipedia/en/c/c1/The_Matrix_Poster.jpg\", # NOQA\n \"https://youtu.be/vKQi3bBA1y8\")\n\npick_of_destiny = media.Movie(\"Pick of Destiny\",\n \"https://upload.wikimedia.org/wikipedia/en/thumb/b/b6/Tenacious_d_in_the_pick_of_destiny_ver3.jpg/220px-Tenacious_d_in_the_pick_of_destiny_ver3.jpg\", # NOQA\n \"https://youtu.be/TXxQFMG86HA\")\n\ndodgeball = media.Movie(\"Dodgeball\",\n \"https://upload.wikimedia.org/wikipedia/en/thumb/7/70/Movie_poster_Dodgeball_A_True_Underdog_Story.jpg/220px-Movie_poster_Dodgeball_A_True_Underdog_Story.jpg\", # NOQA\n \"https://youtu.be/W-XbDZUnUmw\")\n\ngood_will_hunting = media.Movie(\"Good Will Hunting\",\n \"https://upload.wikimedia.org/wikipedia/en/thumb/b/b8/Good_Will_Hunting_theatrical_poster.jpg/220px-Good_Will_Hunting_theatrical_poster.jpg\", # NOQA\n \"https://youtu.be/nH9LZOXBMUE\")\n\n# open_movies_page function creates HTML page displaying\n# the movies from the movies list\n\nmovies = [napolean_dynamite, tropic_thunder, high_noon,\n fight_club, new_hope, matrix, pick_of_destiny,\n dodgeball, good_will_hunting]\n\nfresh_tomatoes.open_movies_page(movies)\n","sub_path":"entertainment_center.py","file_name":"entertainment_center.py","file_ext":"py","file_size_in_byte":2905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"141984458","text":"'''\r\nCreated on Jun 23, 2020\r\nProgram that reads a line and prints its statics like\u000Buppercase,\r\n lowercase, alphabets and digits.\r\n@author: admin\r\n'''\r\nline = input('Enter a line - ')\r\nlowercount = uppercount = 0\r\ndigitcount = alphacount = 0\r\nspecialcount = 0\r\n\r\nfor a in line:\r\n if a.islower():\r\n lowercount += 1\r\n elif a.isupper():\r\n uppercount += 1\r\n elif a.isdigit():\r\n digitcount += 1\r\n if a.isalpha():\r\n alphacount += 1\r\n if a.isalnum()==False :\r\n specialcount+=1\r\n \r\nprint('Number of uppercase letters - ', uppercount)\r\nprint('Number of lowercase letters - ', lowercount)\r\nprint('Number of alphabets letters - ', alphacount)\r\nprint('Number of digits - ', digitcount)\r\nprint('Number of Special Characters - ', specialcount)","sub_path":"StringWorks/StringCounts.py","file_name":"StringCounts.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"40690719","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.externals import joblib\nfrom sklearn.ensemble import RandomForestRegressor\n\n\n#ファイル読み込み&クリーニング\ndef read_clean(url,raceName,day):\n #alldata = pd.read_csv(\"C:\\\\Users\\\\owner\\\\PycharmProjects\\\\keirin\\\\df_result.csv\")\n alldata = pd.read_csv(url,encoding='s-jis')\n # 不要なカラムを削除\n #デバッグ用にデータ削減\n #msk = np.random.rand(len(alldata)) < 0.00001\n #alldata = alldata[msk]\n\n for col in ['レース日','出身県','名前','競輪場','結果上がりタイム','結果着差(cm)','地名','タイム','出身地方','KEYSTRING','競走時刻','出身県.1','結果SB']:\n del alldata[col]\n\n alldata =alldata[(alldata['レース名'] == raceName)]\n alldata =alldata[(alldata['日数'] == day)]\n\n # カテゴリカル変数にする\n for col in ['レース名','日数','級班','脚質','競輪場コード','バック','競走ランク','レース番号_車番']:\n tmp = 
pd.get_dummies(alldata[col])\n        alldata = pd.concat((alldata,tmp), axis=1)\n\n    for col in ['レース名','日数','級班','脚質','競輪場コード','バック','競走ランク','レース番号_車番']:\n        del alldata[col]\n\n    alldata['res1'] = alldata.結果順位.apply(lambda x: (1 if x == 1 else 0))#create the target variable (1st-place flag)\n    alldata['res3'] = alldata.結果順位.apply(lambda x: (1 if x <= 3 else 0))#create the target variable (top-3 flag)\n    del alldata['結果順位']\n\n    #alldata.to_csv(\"test.csv\")\n\n    #replace invalid strings (Err, inf, \" \")\n    #alldata=alldata.replace({'Err': 0}, regex=True)\n    #alldata=alldata.replace({'inf': 0}, regex=True)\n    #alldata=alldata.replace({'': 0}, regex=True)\n    #alldata=alldata.replace({' ': 0}, regex=True)\n    #alldata=alldata.replace({' ': 0}, regex=True)\n    alldata = alldata.fillna(0)\n    alldata = alldata.replace('inf','0')\n    alldata.replace([np.inf, -np.inf], 0)\n    # np.any(np.isnan(alldata))\n    # np.all(np.isfinite(alldata))\n    # alldata[(alldata==float(\"inf\")) | (alldata==float(\"-inf\"))] = 0.0\n    # alldata[np.isnan(alldata)] = 0\n    # print(alldata.isnan().values.any()) # this doesn't work\n\n    # save once (for debugging)\n    # alldata.to_csv(\"testResult_work_nensyuu.csv\")\n    return alldata\n\n\ndef test(alldata,leanDateRate, testCount,tergetCol1,raceName1,day1):\n    #  split the training data into features and target\n    # msk = np.random.rand(len(alldata)) < 0.05\n\n    # res1 and res3 are strongly correlated, so drop whichever one is not the target\n    if tergetCol1 == 'res1':\n        del alldata['res3']\n    if tergetCol1 == 'res3':\n        del alldata['res1']\n\n    if len(alldata) >= 10000:\n        msk = np.random.rand(len(alldata)) < leanDateRate\n    else:\n        msk = np.random.rand(len(alldata)) < 0.5\n    train = alldata[msk]\n    test = alldata[~msk]\n\n    # show the sizes of the train/test sets\n    print(len(train))\n    print(len(test))\n\n    trainArr = train.drop(tergetCol1, axis=1).as_matrix() # training array\n    trainRes = train.as_matrix([tergetCol1]) # training results\n\n    # trainArr.to_csv(\"train_nensyuu.csv\")\n    # np.savetxt(\"train_nensyuu.csv\", trainArr)\n    # trainRes.to_csv(\"test_nensyuu.csv\")\n    # np.savetxt(\"test_nensyuu.csv\", trainRes)\n\n    #  train the model\n    # rf = RandomForestRegressor(n_estimators=10000) # 100 decision trees is a good enough number\n    rf = RandomForestRegressor(n_estimators=testCount)\n\n    # print(trainArr)\n    # print(trainRes)\n\n    # print(np.isfinite(trainArr))\n    # print(np.isfinite(trainRes))\n    rf.fit(trainArr, trainRes) # finally, we fit the data to the algorithm!!! 
:)\n\n    #  build the feature matrix for the test set\n    testArr = test.drop(tergetCol1, axis=1).as_matrix()\n\n    #  predict the target from the test features\n    results = rf.predict(testArr)\n\n    #  append the predictions to the DataFrame for output\n    test['predictions'] = results\n\n    #show the feature importances\n    print(list(train.columns.values))\n    print(rf.feature_importances_)\n\n    # with open(str(leanDateRate) + \"_\" + str(testCount)+ \"_\" + tergetCol+ \"_\" + tmp_res+\"_\" + raceName+\"_\" + day+\".importance.txt\", 'wb') as f:\n    #     f.writelines([\"%s\\n\" % str(item) for item in list(train.columns.values)])\n    #     f.write(rf.feature_importances_)\n    #     f.close()\n\n    # write the test results to a file\n    test.to_csv(\"testResult_\" + \"_\" + str(leanDateRate) + \"_\" + str(testCount)+ \"_\" + tergetCol1+ \"_\" + raceName1+\"_\" + day1+ \".csv\")\n    # save the model\n    joblib.dump(rf, str(leanDateRate) + \"_\" + str(testCount)+ \"_\" + tergetCol1+ \"_\" + raceName1+\"_\" + day1+\".model\")\n\n    return test\n\n#############################main processing#################################\n# file='C:\\\\Users\\\\owner\\\\PycharmProjects\\\\keirin\\\\testResult_res1-1_10000ken-10000kai-1.5h.csv'\n# tmpdf = pd.read_csv(file,encoding='s-jis',header=0)\n#\n# main = pd.DataFrame({'A': [np.nan]})\n# main.empty\n#\n# main[\"0.95_mean\"] = tmpdf[ (tmpdf['predictions'] >= 0.95)]['res'].mean()\n# main[\"0.90_mean\"] = tmpdf[ (tmpdf['predictions'] >= 0.9)]['res'].mean()\n# main[\"0.85_mean\"] = tmpdf[ (tmpdf['predictions'] >= 0.85)]['res'].mean()\n# main[\"0.80_mean\"] = tmpdf[ (tmpdf['predictions'] >= 0.80)]['res'].mean()\n# main[\"0.75_mean\"] = tmpdf[ (tmpdf['predictions'] >= 0.75)]['res'].mean()\n# main[\"0.70_mean\"] = tmpdf[ (tmpdf['predictions'] >= 0.7)]['res'].mean()\n#\n# main[\"0.95_count\"] = tmpdf[ (tmpdf['predictions'] >= 0.95)]['res'].count()\n# main[\"0.90_count\"] = tmpdf[ (tmpdf['predictions'] >= 0.9)]['res'].count()\n# main[\"0.85_count\"] = tmpdf[ (tmpdf['predictions'] >= 0.85)]['res'].count()\n# main[\"0.80_count\"] = tmpdf[ (tmpdf['predictions'] >= 0.80)]['res'].count()\n# main[\"0.75_count\"] = tmpdf[ (tmpdf['predictions'] >= 0.75)]['res'].count()\n# main[\"0.70_count\"] = tmpdf[ (tmpdf['predictions'] >= 0.7)]['res'].count()\n# with open('testtesttest.csv', 'a') as f:\n#     main.to_csv(f, header=False)\n\n#training data file\nurl = \"C:\\\\Users\\\\owner\\\\PycharmProjects\\\\keirin\\\\df_result.csv\"\n# output file for the results\nmain = pd.DataFrame({'A': [np.nan]})\nmain.empty\n\nisFirst = True\n#test settings\n#for (rate, testCnt) in zip( [0.05, 0.05, 0.05], [1000, 2000, 4000]):\nfor tmp_res in ['res1','res3']:\n    #for (rate, testCnt) in zip([0.01,0.01,0.01,0.01], [100,1000,2000,5000]):\n    for (rate, testCnt) in zip([0.01,0.05,0.1], [2000, 2000, 2000, 2000]):\n        raceNames = [\"A級ガ一般\",\"A級ガ予1\",\"A級ガ予2\",\"A級ガ決勝\",\"A級チ一般\",\"A級チ予選\",\"A級チ決勝\",\"A級チ準決\",\"A級チ選抜\",\"A級一予選\",\"A級一般\",\"A級予選\",\"A級二予選\",\"A級優秀\",\"A級初特選\",\"A級決勝\",\"A級準決勝\",\"A級特一般\",\"A級特予選\",\"A級特選\",\"A級選抜\",\"S級シャイ\",\"S級ローズ\",\"S級一予選\",\"S級一般\",\"S級一般一\",\"S級一般二\",\"S級予選\",\"S級二予選\",\"S級二予A\",\"S級二予B\",\"S級優秀\",\"S級初特選\",\"S級日競杯\",\"S級決勝\",\"S級準決勝\",\"S級特一般\",\"S級特秀\",\"S級特選\",\"S級特選一\",\"S級特選予\",\"S級特選二\",\"S級白虎賞\",\"S級選抜\",\"S級選抜一\",\"S級選抜二\",\"S級青龍賞\",\"S級順位決\",\"S級DRM\",\"S級DS\",\"S級GDR\",\"S級ORI\",\"S級SPR\",\"S級STR\",\"S級WS\",\"SA混合YGP\"]\n        days = [\"初日\", \"2日目\", \"3日目\", \"4日目\", \"5日目\", \"最終日\"]\n\n        for raceName in raceNames:\n            for day in days:\n                # read the file\n                alldata = read_clean(url,raceName,day)\n                #skip if there are 100 rows or fewer\n                if len(alldata) <= 100:\n                    continue\n                #run the test\n                tmpdf = test(alldata,rate,testCnt,tmp_res,raceName,day)\n                #analyze and evaluate the test results, append them to the output file\n                main[\"raceName\"] = raceName\n                
main[\"day\"] = day\n main[\"colcount\"] = len(tmpdf)\n #予測したレースの的中率を算出\n main[\"0.95_mean\"] = tmpdf[ (tmpdf['predictions'] >= 0.95)][tmp_res].mean()\n main[\"0.90_mean\"] = tmpdf[ (tmpdf['predictions'] >= 0.9)][tmp_res].mean()\n main[\"0.85_mean\"] = tmpdf[ (tmpdf['predictions'] >= 0.85)][tmp_res].mean()\n main[\"0.80_mean\"] = tmpdf[ (tmpdf['predictions'] >= 0.80)][tmp_res].mean()\n main[\"0.75_mean\"] = tmpdf[ (tmpdf['predictions'] >= 0.75)][tmp_res].mean()\n main[\"0.70_mean\"] = tmpdf[ (tmpdf['predictions'] >= 0.7)][tmp_res].mean()\n main[\"0.60_mean\"] = tmpdf[(tmpdf['predictions'] >= 0.6)][tmp_res].mean()\n main[\"0.50_mean\"] = tmpdf[(tmpdf['predictions'] >= 0.5)][tmp_res].mean()\n main[\"0.40_mean\"] = tmpdf[(tmpdf['predictions'] >= 0.4)][tmp_res].mean()\n # 予測したレースのレース数を算出\n main[\"0.95_count\"] = tmpdf[ (tmpdf['predictions'] >= 0.95)][tmp_res].count()\n main[\"0.90_count\"] = tmpdf[ (tmpdf['predictions'] >= 0.9)][tmp_res].count()\n main[\"0.85_count\"] = tmpdf[ (tmpdf['predictions'] >= 0.85)][tmp_res].count()\n main[\"0.80_count\"] = tmpdf[ (tmpdf['predictions'] >= 0.80)][tmp_res].count()\n main[\"0.75_count\"] = tmpdf[ (tmpdf['predictions'] >= 0.75)][tmp_res].count()\n main[\"0.70_count\"] = tmpdf[ (tmpdf['predictions'] >= 0.7)][tmp_res].count()\n main[\"0.60_count\"] = tmpdf[(tmpdf['predictions'] >= 0.6)][tmp_res].count()\n main[\"0.50_count\"] = tmpdf[(tmpdf['predictions'] >= 0.5)][tmp_res].count()\n main[\"0.40_count\"] = tmpdf[(tmpdf['predictions'] >= 0.4)][tmp_res].count()\n\n main[\"rate\"] = rate\n main[\"testCnt\"] = testCnt\n with open('keirinAnalisysResult.csv', 'a') as f:\n if isFirst:\n main.to_csv(f, header=True)\n else:\n main.to_csv(f, header=False)\n isFirst = False\n","sub_path":"kring_project/code/sandbox/03_randomF.py","file_name":"03_randomF.py","file_ext":"py","file_size_in_byte":10254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"335722949","text":"import numpy as np\n\n\ndef map_outputs(y, k):\n \"\"\" Maps integer output labels to binary. \"\"\"\n\n m = y.size\n y_map = np.zeros((k, m))\n for i in range(m):\n y_map[y[i], i] = 1\n\n return y_map\n","sub_path":"neural-network/map_outputs.py","file_name":"map_outputs.py","file_ext":"py","file_size_in_byte":207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"298357079","text":"#Segment Tree(Sum of given Range)\nfrom math import ceil,log2\n\ndef getMid(s,e):\n return s+(e-s)//2\n \n\"\"\" A recursive function to update the nodes \nwhich have the given index in their range. \nThe following are parameters st, si, ss and se \nare same as getSumUtil() \ni --> index of the element to be updated. \n This index is in the input array. \ndiff --> Value to be added to all nodes \nwhich have i in range \"\"\"\ndef updateValueUtil(st,ss,se,i,diff,si):\n if ise:\n return\n st[si]+=diff\n if se!=ss:\n mid=getMid(ss,se)\n updateValueUtil(st,ss,mid,i,diff,2*si+1)\n updateValueUtil(st,mid+1,se,i,diff,2*si+2)\n \ndef updateValue(arr,st,n,i,new_val):\n if i<0 or i>n-1:\n print(\"Invalid Input\")\n return \n diff=new_val-arr[i]\n arr[i]=new_val\n updateValueUtil(st,0,n-1,i,diff,0)\n \n\"\"\" A recursive function to get the sum of values \n in the given range of the array. The following \n are parameters for this function. \n \n st --> Pointer to segment tree \n si --> Index of current node in the segment tree. 
\n Initially 0 is passed as root is always at index 0 \n ss & se --> Starting and ending indexes of the segment \n represented by current node, i.e., st[si] \n qs & qe --> Starting and ending indexes of query range \"\"\"\ndef getSumUtil(st,ss,se,qs,qe,si):\n if qs<=ss and qe>=se:\n return st[si]\n \n if seqe:\n return 0\n \n mid=getMid(ss,se)\n return getSumUtil(st,ss,mid,qs,qe,2*si+1)+getSumUtil(st,mid+1,se,qs,qe,2*si+2)\n \ndef getSum(st,n,start,end):\n if start<0 or end>n-1 or start>end:\n print(\"Invalid Input\")\n return -1 \n return getSumUtil(st,0,n-1,start,end,0)\n\n#A recursive function that constructs \n#Segment Tree for array[ss..se]. \n#si is index of current node in segment tree st\ndef constructSTUtil(arr,ss,se,st,si):\n if ss==se:\n st[si]=arr[ss]\n return arr[ss]\n \n mid=getMid(ss,se)\n st[si]=constructSTUtil(arr,ss,mid,st,si*2+1)+constructSTUtil(arr,mid+1,se,st,si*2+2)\n return st[si]\n\ndef constructST(arr,n):\n x=int(ceil(log2(n))) #Height of the segment tree \n max_size=2*int(2**x)-1 #Max size of segment tree \n \n st=[0]*max_size #Allocate memory \n constructSTUtil(arr,0,n-1,st,0) #Filling the allocated memory st \n \n return st \n\narr=[1,3,5,7,9,11]\nn=len(arr)\nst=constructST(arr,n) #Constructing segment tree\n\nprint(\"Sum of values in the given range from 1 to 3: \",getSum(st,n,1,3))\nupdateValue(arr,st,n,1,10) #Updating arr[1]=10\nprint(\"updated sum of values in the given range: \",getSum(st,n,1,3))","sub_path":"Segment tree/Sum of given range.py","file_name":"Sum of given range.py","file_ext":"py","file_size_in_byte":2658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"498077444","text":"# -*- coding: utf-8 -*-\n# @Author : tongyuze\n# @Date & Time : 2019/4/1 0:00\n# @FileName : mpl_squares.py\n# @Software : PyCharm\n# --------------\nimport matplotlib.pyplot as plt\n\nvalues = [1, 2, 3, 4, 5, 6]\nsquares = [1, 4, 9, 16, 25, 36]\nplt.plot(values, squares, linewidth=3)\n\nplt.title('Square Numbers', fontsize=16)\nplt.xlabel('Value', fontsize=14)\nplt.ylabel('Square of Value', fontsize=14)\nplt.tick_params(axis='both', labelsize=14)\nplt.show()\n","sub_path":"chapter15/mpl_squares.py","file_name":"mpl_squares.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"433930362","text":"## Cameron Calv ECE T480\n# Performs the Gauss-Jordan method of solving a system of equations. 
The inputs\n# and the outputs are the same as the gauss elim function\n\ndef pivot(some_matrix, some_rhs, s_vector, k_value, iter):\n n = iter\n A_matrix = []\n b_vector = []\n for i in range(len(some_matrix)):\n A_matrix.append([])\n b_vector.append(some_rhs[i])\n for j in range(len(some_matrix)):\n A_matrix[i].append(some_matrix[i][j])\n pivot = k_value\n big = abs(some_matrix[k_value][k_value]/s_vector[k_value])\n for i in range(k_value, n):\n dummy = abs(some_matrix[i][k_value]/s_vector[i])\n if dummy > big:\n big = dummy\n pivot = i\n if not pivot == k_value:\n for j in range(k_value, n):\n dummy = some_matrix[pivot][j]\n A_matrix[pivot][j] = some_matrix[k_value][j]\n A_matrix[k_value][j] = dummy\n dummy = some_rhs[pivot]\n b_vector[pivot] = some_rhs[k_value]\n b_vector[k_value] = dummy\n dummy = s_vector[pivot]\n s_vector[pivot] = s_vector[k_value]\n s_vector[k_value] = dummy\n return A_matrix, b_vector, s_vector\n\n\ndef eliminate(A_matrix, s_vector, b_vector, tolerance):\n n = len(A_matrix)\n error = 0\n for k in range(n):\n [temp_A, temp_B, temp_S] = pivot(A_matrix, b_vector, s_vector, k, n)\n A_matrix = temp_A\n b_vector = temp_B\n s_vector = temp_S\n if abs(A_matrix[k][k]/s_vector[k]) < tolerance:\n error = -1\n break\n b_vector[k] = b_vector[k] / A_matrix[k][k]\n for i in reversed(range(k, n)):\n A_matrix[k][i] = A_matrix[k][i]/A_matrix[k][k]\n if k == n-1:\n break\n for i in range(n):\n if not i == k:\n factor = A_matrix[i][k]/A_matrix[k][k]\n for j in range(k, n):\n A_matrix[i][j] = A_matrix[i][j] - factor*A_matrix[k][j]\n b_vector[i] = b_vector[i] - factor * b_vector[k]\n else:\n continue\n if abs(A_matrix[n-1][n-1]/s_vector[n-1]) < tolerance:\n error = -1\n return A_matrix, b_vector, error\n\ndef substitute(A_matrix, b_vector, x_vector):\n n = len(A_matrix)\n x_vector[n-1] = b_vector[n-1] / A_matrix[n-1][n-1]\n for i in reversed(range(n-1)):\n total = 0\n for j in range(i, n):\n total += A_matrix[i][j]*x_vector[j]\n x_vector[i] = (b_vector[i] - total)/A_matrix[i][i]\n return A_matrix, b_vector, x_vector\n\ndef gauss_jordan(A_matrix, b_vector, tolerance):\n n = len(A_matrix)\n solution = []\n for row in range(n):\n solution.append(0)\n s_vector = []\n for i in range(n):\n s_vector.append([])\n for i in range(n):\n s_vector[i] = abs(A_matrix[i][0])\n for j in range(1, n):\n if abs(A_matrix[i][j]) > s_vector[i]:\n s_vector[i] = abs(A_matrix[i][j])\n [temp_A, temp_B, error] = eliminate(A_matrix, s_vector, b_vector, tolerance)\n A_matrix = temp_A\n b_vector = temp_B\n if not error == -1:\n [temp_A, temp_B, temp_sol] = substitute(A_matrix, b_vector, solution)\n A_matrix = temp_A\n b_vector = temp_B\n solution = temp_sol\n return solution\n\n\n\nif __name__ == '__main__':\n A = [[70, 1, 0], [60, -1, 1], [40, 0, -1]]\n b = [636.7, 518.6, 307.4]\n # A = [[3, -0.1, -0.2], [0.1, 7, -0.3], [0.3, -0.2, 10]]\n # b = [7.85, -19.3, 71.4]\n # A = [[-0.2, -0.1, 3], [-0.3, 7, 0.1], [10, -0.2, 0.3]]\n # b = [7.85, -19.3, 71.4]\n tolerance = 1e-6\n\n print(gauss_jordan(A, b, tolerance))\n","sub_path":"Week 4/gauss_jordan.py","file_name":"gauss_jordan.py","file_ext":"py","file_size_in_byte":3616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"315618198","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 25 15:31:09 2017\n\n@author: gibraanrahman\n\nScript to plot tumor 170410 t-SNE data.\n\"\"\"\n\nimport os\nimport re\nimport tSNETools\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom 
sklearn.preprocessing import MinMaxScaler\nfrom bhtsne import run_bh_tsne\nfrom matplotlib.pylab import savefig\n\ndef get_nolgcl_data():\n \"\"\"Import untransformed data csv.\"\"\"\n filename_comp_nolgcl = '170410_Tumor/woPI_comp.csv'\n comp_nolgcl_data = pd.read_csv(filename_comp_nolgcl)\n return comp_nolgcl_data[comp_nolgcl_data['FSC-A'] > 50000]\n\ndef get_data():\n \"\"\"Import transformed data csv and pre-process.\"\"\"\n filename_comp = '170410_Tumor/woPI_comp_lgcl.csv' \n comp_data = pd.read_csv(filename_comp)\n \n comp_data = comp_data[comp_data['FSC-A'] > 50000]\n comp_data = comp_data.drop('Alexa Fluor 700-A_Comp', axis=1)\n comp_data = comp_data.drop('SSC-A', axis=1)\n \n comp_channels = comp_data.drop('FSC-A', axis=1)\n comp_channels_min = comp_channels.values.min()\n comp_channels_max = comp_channels.values.max()\n \n comp_FSC = comp_data['FSC-A'].values.reshape(-1, 1)\n \n min_max_scaler = MinMaxScaler((comp_channels_min, comp_channels_max))\n comp_data['FSC-A'] = min_max_scaler.fit_transform(comp_FSC)\n \n return comp_data\n\ndef run_tSNE(flow_data):\n \"\"\"Perform t-SNE dimensionality reduction.\"\"\"\n tSNE = run_bh_tsne(flow_data, initial_dims=flow_data.shape[1],\n randseed=1, max_iter=1000)\n df = pd.DataFrame(tSNE)\n \n df.to_csv('170410_Tumor/1000it_tseed1_comp.csv', index=False)\n\ndef load_tSNE():\n \"\"\"Load pre-calculated t-SNE data.\"\"\"\n return pd.read_csv('170410_Tumor/1000it_tseed1_comp.csv')\n\ndef plot_tSNE(df, flow_data, nolgcl_data):\n \"\"\"Plot tSNE maps colored by parameters.\"\"\"\n for col in flow_data.columns:\n tSNETools.heatmap(df, flow_data[col], nolgcl_data[col])\n try:\n os.mkdir('170410_Tumor/Plots')\n except FileExistsError:\n pass\n filename = '170410_Tumor/Plots/1000it_tseed1_{}'.format(col)\n chan = re.sub('_Comp', '', col)\n plt.xlabel('t-SNE X1')\n plt.ylabel('t-SNE X2')\n #plt.title('Tumor t-SNE Map Colored By {}'.format(chan))\n savefig(filename)\n plt.close()\n\n \n\n","sub_path":"170410_heatmap.py","file_name":"170410_heatmap.py","file_ext":"py","file_size_in_byte":2338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"349850520","text":"from __future__ import print_function\nimport numpy as np\nfrom scipy.interpolate import RegularGridInterpolator\nimport parameters as par\n\nclass Sigma:\n\n def __init__(self):\n\n log_mass, sigma, alpha = np.loadtxt(par.sigma_file, unpack=True)\n redshift, delta_crit = \\\n np.loadtxt(par.deltacrit_file, skiprows=1, unpack=True)\n\n self._sigma_interpolator = \\\n RegularGridInterpolator((log_mass,), sigma, bounds_error=False, \n fill_value=None)\n\n self._deltacrit_interpolator = \\\n RegularGridInterpolator((redshift,), delta_crit, bounds_error=False, \n fill_value=None)\n\n\n def sigma(self, log_mass, redshift):\n\n # sigma(M, z=0)\n sigma = self._sigma_interpolator(log_mass)\n\n # sigma(M, z)\n return sigma * \\\n self.deltacrit(redshift) / self.deltacrit(np.array([0,]))[0]\n\n def deltacrit(self, redshift):\n \n return self._deltacrit_interpolator(redshift)\n\nif __name__ == \"__main__\":\n\n s = Sigma()\n print(s.sigma(np.array([14,]), np.array([1,])))\n","sub_path":"sigma.py","file_name":"sigma.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"381001915","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nENGR 212 Fall 2016 Mini Project 5 Solution File\nDoğukan Kotan \n\nFunctions whose names start with '_' are related to the GUI.\n\"\"\"\nimport copy\nimport 
csv\nimport random\nimport ttk\nfrom Tkinter import *\nfrom tkMessageBox import showerror\nimport docclass as ml\n\n\nclass AAAIClassifer(Frame):\n def __init__(self, master):\n \"\"\"\n Constructor of the program. It contains every component of the GUI and initial values\n :param master: initial window object of tkinter\n :return:\n \"\"\"\n\n \"\"\" Variables \"\"\"\n Frame.__init__(self, master)\n self.root = master\n self.root.columnconfigure(0, weight=1)\n self.columnconfigure(0, weight=1)\n self.radio_var = IntVar()\n self.radio_var.set(1)\n self.thresholds = []\n self.classifier_dataset = []\n\n # Frames\n self.top_frame = Frame(self.root)\n self.middle_frame = Frame(self.root)\n self.bottom_frame = Frame(self.root)\n self.middle2_frame = Frame(self.bottom_frame)\n self.area_frame = Frame(self.bottom_frame)\n\n # Labels\n self.title = Label(self.top_frame, text=\"AAAI Classifier\", bg=\"brown\", fg=\"white\",\n font=(\"Comic Sans\", 13), height=2)\n self.set_threshold_label = Label(self.middle2_frame, text=\"Set Thresholds\",\n font=(\"Comic Sans\", 10))\n self.method_label = Label(self.middle2_frame, text=\"Choose Classifier\", font=(\"Comic Sans\", 10))\n self.load_data_label = Label(self.middle_frame, bg=\"white\", text=\"Waiting Dataset...\", font=(\"Comic Sans\", 10))\n # Buttons\n\n self.calculate_accuracy_button = Button(self.middle2_frame, text=\"Calculate Accuracy\", font=(\"Comic Sans\", 10),\n command=self._calculate_accuracy_button)\n self.set_threshold_button = Button(self.middle2_frame, text=\"Set\", font=(\"Comic Sans\", 10),\n command=self._set_threshold_to_listbox)\n self.remove_threshold_button = Button(self.middle2_frame, text=\"Remove Selected\", font=(\"Comic Sans\", 10),\n command=self._remove_selected_threshold)\n self.load_data_button = Button(self.middle_frame, text=\"Load Dataset\", font=(\"Comic Sans\", 10),\n command=self._load_data)\n\n # Radio\n self.radio_naive = Radiobutton(self.middle2_frame, text=\"Naive-Bayes\", font=(\"Comic Sans\", 10),\n value=1,\n variable=self.radio_var)\n self.radio_fisher = Radiobutton(self.middle2_frame, text=\"Fisher\", font=(\"Comic Sans\", 10), value=2,\n variable=self.radio_var)\n # Entry\n self.threshold_entry = Entry(self.middle2_frame, width=3)\n self.threshold_entry.insert(0, 0.3)\n\n # Combobox\n self.combobox = ttk.Combobox(self.middle2_frame, height=5, width=15)\n\n # Canvas\n self.canvas_area = Canvas(self.area_frame, bg=\"white\", width=800, height=350, scrollregion=(0, 0, 0, 0))\n\n # Listbox\n self.threshold_listbox = Listbox(self.middle2_frame, selectmode=EXTENDED)\n\n # Scrollbar\n self.hbar = Scrollbar(self.area_frame, orient=VERTICAL, command=self.canvas_area.yview)\n self._customize()\n\n def _customize(self):\n \"\"\"\n Customize grid geometry\n :return:\n \"\"\"\n # Frames\n self.top_frame.grid(row=0, sticky=W + E)\n self.top_frame.columnconfigure(0, weight=1)\n self.middle_frame.grid(row=1, pady=10)\n self.bottom_frame.grid(row=2)\n self.area_frame.grid(row=1, column=0, sticky=W + E + N + S)\n self.middle2_frame.grid(row=0)\n\n # Top Frame\n self.title.grid(sticky=W + E)\n\n # Middle Frame\n self.load_data_button.grid(row=0, column=0, columnspan=3)\n self.load_data_label.grid(row=0, column=4, columnspan=5)\n\n # Middle2 Frame\n self.method_label.grid(row=0, column=0, columnspan=2)\n self.set_threshold_label.grid(row=0, column=2, columnspan=3)\n self.radio_naive.grid(row=1, column=0, columnspan=2)\n self.threshold_entry.grid(row=1, column=2, columnspan=1)\n self.combobox.grid(row=1, column=3, 
columnspan=2)\n self.set_threshold_button.grid(row=1, column=5)\n self.threshold_listbox.grid(row=1, column=6, rowspan=5)\n self.remove_threshold_button.grid(row=1, column=7)\n self.radio_fisher.grid(row=2, column=0, columnspan=2)\n self.calculate_accuracy_button.grid(row=3, column=2)\n\n # Area Frame\n self.canvas_area.grid(row=0, column=0)\n self.canvas_area.config(yscrollcommand=self.hbar.set)\n self.hbar.grid(sticky=N + S, row=0, column=1)\n\n def _calculate_accuracy_button(self):\n \"\"\"\n Main program function.\n :return:\n \"\"\"\n if not self.combobox.get():\n showerror(\"Error\", \"You should load dataset before calculating it\")\n else:\n self._clear()\n cl = None\n if self.radio_var.get() == 1:\n cl = ml.naivebayes(ml.getwords)\n elif self.radio_var.get() == 2:\n cl = ml.fisherclassifier(ml.getwords)\n self.set_thresholds(cl)\n self.load_data_label.config(text=\"Training classifier and calculating accuracies. Loading...\", fg=\"brown\")\n self.root.update()\n for i in range(4):\n train_set, test_set = self.decompose_dataset()\n self.train_classifer(cl, train_set)\n self.predict_classifier(cl, test_set)\n output = \"\"\n output += \"Topics\\t\\t\\tClassifier Accuracy\\n\"\n output += \"-------\\t\\t\\t-------------------\\n\"\n for topic, accuracy in self.calculate_accuracy().iteritems():\n output += \"{0}\\t\\t\\t{1:.2f}%\\n\".format(topic, accuracy)\n self._write_to_canvas(output)\n self.load_data_label.config(text=\"Accuracies calculated\", fg=\"green\")\n self.root.update()\n\n def decompose_dataset(self):\n \"\"\"\n This function separates test and train set\n :return: list of tuples trains_set and test_set ([(), ...], [(), ...])\n \"\"\"\n test_set = copy.deepcopy(self.classifier_dataset)\n train_set = []\n for i in range(300):\n selected = self.select_random_data_with_remove(test_set)\n train_set.append(selected)\n return train_set, test_set\n\n def _set_threshold_to_listbox(self):\n \"\"\"\n It will set threshold to listbox,\n and also update its value\n :return:\n \"\"\"\n selected = self.combobox.get()\n if not selected:\n showerror(\"Error\", \"You should load dataset before setting threshold\")\n else:\n thresh = self.threshold_entry.get()\n if 0.0 < float(thresh) <= 1.0:\n text = \"{0}-{1}\".format(selected, thresh)\n self.thresholds.append(tuple((selected, float(thresh))))\n for index, item in enumerate(self.threshold_listbox.get(0, END)):\n if item.split(\"-\")[0] == selected:\n self.threshold_listbox.delete(index)\n self.threshold_listbox.insert(0, text)\n else:\n showerror(\"Error\", \"You should select threshold value between 0.0 and 1.0\")\n\n def set_thresholds(self, classifier):\n \"\"\"\n It will set threshold values from thresholds list of tuples\n :param classifier: classifier method\n :return:\n \"\"\"\n if self.radio_var.get() == 1:\n for threshold in self.thresholds:\n classifier.setthreshold(threshold[0], threshold[1])\n elif self.radio_var.get() == 2:\n for threshold in self.thresholds:\n classifier.setminimum(threshold[0], threshold[1])\n\n @staticmethod\n def train_classifer(classifier, train_set):\n \"\"\"\n\n :param classifier: naive or fisher classifier\n :param train_set: list of tuples\n :return:\n \"\"\"\n for item in train_set:\n classifier.train(item[1], item[0])\n\n def predict_classifier(self, classifier, test_set):\n \"\"\"\n\n :param classifier: naive or fisher classifier\n :param test_set: list of tuples\n :return:\n \"\"\"\n for item in test_set:\n if item not in self.results:\n self.results.setdefault(item[0], [0, 0])\n predicted = 
classifier.classify(item[1])\n if predicted == item[0]:\n self.results[item[0]][0] += 1\n self.results[item[0]][1] += 1\n\n def _remove_selected_threshold(self):\n \"\"\"\n It will remove selected items in listbox\n :return:\n \"\"\"\n item_index = map(int, self.threshold_listbox.curselection())\n # Delete from the end so the remaining indices stay valid\n for index in sorted(item_index, reverse=True):\n self.threshold_listbox.delete(index)\n\n @staticmethod\n def select_random_data_with_remove(dataset):\n \"\"\"\n It will select random data from given dataset\n :param dataset: list of tuples\n :return: a tuple\n \"\"\"\n return dataset.pop(random.randint(0, len(dataset) - 1))\n\n def calculate_accuracy(self):\n \"\"\"\n returns average accuracy dictionary\n :return: accuracy dictionary {cat:accuracy, ...}\n \"\"\"\n accuracy_dict = {}\n for key, value in self.results.iteritems():\n accuracy_dict[key] = value[0] * 100.0 / value[1]\n return accuracy_dict\n\n @staticmethod\n def create_data():\n \"\"\"\n Read data from Mini Project 3\n :return: list of dictionaries [{\"title\", \"abstract\"...}, ...]\n \"\"\"\n papers = []\n with open(\"AAAI-14_Accepted_Papers_corrected.txt\", \"r\") as csvfile:\n aaai_reader = csv.reader(csvfile, delimiter=',')\n first_line = True\n for row in aaai_reader:\n if first_line:\n first_line = False\n else:\n paper_dict = {}\n author_list = []\n group_list = []\n title = \"\".join(row[0])\n authors = \"\".join(row[1])\n groups = \"\".join(row[2])\n keywords = \"\".join(row[3])\n topics = \"\".join(row[4])\n abstract = \"\".join(row[5:])\n if 'and' in authors:\n author_names = authors.split('and')\n for name in author_names:\n for x in name.split(','):\n author_list.append(x.strip())\n else:\n author_names = authors.split(',')\n for name in author_names:\n author_list.append(name.strip())\n if 'and' in groups:\n group_names = groups.split('and')\n for name in group_names:\n for x in name.split(','):\n group_list.append(x.strip())\n else:\n group_names = groups.split(',')\n for name in group_names:\n group_list.append(name.strip())\n paper_dict['title'] = title\n paper_dict['authors'] = author_list\n paper_dict['groups'] = group_list\n paper_dict['keywords'] = keywords\n paper_dict['topics'] = topics\n paper_dict['abstract'] = abstract\n papers.append(paper_dict)\n return papers\n\n def _write_to_canvas(self, out):\n \"\"\"\n It will write string to canvas\n :param out: string of word\n :return:\n \"\"\"\n self.canvas_area.create_text(10, 10, anchor=\"nw\", text=out, font=(\"Comic Sans\", 10))\n self.canvas_area.config(scrollregion=(0, 0, 0, len(out) * 1.2))\n\n def _load_data(self):\n \"\"\"\n Load data button\n :return:\n \"\"\"\n self.load_data_label.config(text=\"Loading Dataset...\", fg=\"brown\")\n self.root.update()\n # self.transform_dataset()\n self.classifier_dataset = []\n with open('dataset.txt', 'r') as classifier_dataset:\n dataset_reader = csv.reader(classifier_dataset, delimiter=',')\n topics = []\n for row in dataset_reader:\n topics.append(row[0])\n paper = (row[0], row[1])\n self.classifier_dataset.append(paper)\n self.topics = sorted(list(set(topics)))\n self.combobox.config(values=self.topics)\n self.combobox.current(0)\n self.load_data_label.config(text=\"Dataset Loaded\", fg=\"Green\")\n\n def transform_dataset(self):\n \"\"\"\n Adaptor creates a dataset file\n :return:\n \"\"\"\n papers = self.create_data()\n dataset = []\n for paper in papers:\n topics = paper['topics']\n main_topic = topics.split(\":\")[0]\n if main_topic == \"\":\n main_topic = \"XYZ\"\n feature = \"\"\n for value in paper.values():\n try:\n 
transformed_value = \"\".join(value)\n except:\n feature += value + \" \"\n else:\n feature += transformed_value + \" \"\n dataset.append((main_topic, feature))\n dataset_file = open('dataset.txt', 'w')\n for paper in dataset:\n topic = paper[0]\n rest = paper[1]\n dataset_file.write('\"{0}\",\"{1}\"\\n'.format(topic, rest))\n dataset_file.close()\n\n def _clear(self):\n \"\"\"\n clear canvas and results\n :return:\n \"\"\"\n self.canvas_area.config(scrollregion=(0, 0, 0, 0))\n self.canvas_area.delete(\"all\")\n self.results = {}\n\n\nif __name__ == '__main__':\n root = Tk() # Root frame of Tkinter\n root.resizable(width=FALSE, height=FALSE) # Prevent all resize actions\n root.title('AAAI Classifier') # Set GUI Title\n root.geometry('1000x650+250+0') # Set GUI geometry\n app = AAAIClassifer(root) # Starting our app\n root.mainloop() # Sh ow GUI to user\n","sub_path":"Programming2 School Projects/MP5/mp5_solution/mp5_solution.py","file_name":"mp5_solution.py","file_ext":"py","file_size_in_byte":14513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"36479911","text":"from rest_framework import serializers\nfrom django.core.validators import RegexValidator\nimport re\nfrom django.core.validators import MinLengthValidator,MaxLengthValidator\nfrom helper.validate import validate_activation_pass\nfrom gtb import constant\nfrom .models import Activation\n\n\nclass ComputerInfoializer(serializers.Serializer):\n name = serializers.CharField(required=True, validators=[MaxLengthValidator(150)])\n windows_product_id = serializers.CharField(\n required=True,\n validators=[\n MaxLengthValidator(24),\n RegexValidator(\n regex=re.compile(constant.PATTERN_WINDOWS_PRODUCT_ID),\n code=constant.INVALID_FORMAT_CODE,\n message='includes alphabet and numbers,hyphen'\n )\n ])\n mac_address = serializers.CharField(\n required=True,\n validators=[\n MaxLengthValidator(20),\n RegexValidator(\n regex=re.compile(constant.PATTERN_MAC_ADDRESS),\n code=constant.INVALID_FORMAT_CODE,\n message='includes alphabet and numbers,hyphen'\n )\n ])\n drive_serial_number = serializers.CharField(\n required=True,\n validators=[\n MaxLengthValidator(20),\n RegexValidator(\n regex=re.compile(constant.PATTERN_DRIVE_SERIAL_NUMBER),\n code=constant.INVALID_FORMAT_CODE,\n message='includes alphabet and numbers,hyphen'\n )\n ])\n\n\nclass ActivationSerializer(serializers.Serializer):\n license_id = serializers.CharField(\n required=True,\n validators=[\n MinLengthValidator(6),\n MaxLengthValidator(6),\n RegexValidator(\n regex=re.compile(constant.PATTERN_PATTERN_ALPHANUMERIC),\n code=constant.INVALID_FORMAT_CODE,\n message='includes alphabet and numbers'\n )\n ]\n )\n application_name = serializers.CharField(required=True)\n activate_password = serializers.CharField(validators=[MaxLengthValidator(128), validate_activation_pass], required=True)\n computer_info = ComputerInfoializer(required=True)\n locale = serializers.CharField(required=True)\n\n\nclass AcitvationModelSerializer(serializers.ModelSerializer):\n license_key = serializers.StringRelatedField(source='license')\n product_name = serializers.SerializerMethodField()\n\n class Meta:\n model = Activation\n exclude = ('is_deleted',)\n\n def get_product_name(self, obj):\n return obj.license.product.product_name\n","sub_path":"apps/admin/activation/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"441110116","text":"from app.libs.db.db import Db\nfrom .share import db_writers\nfrom .sql_lite_reader import SqlLiteReader\nfrom .sql_lite_writer import SqlLiteWriter\n\n\nclass SqlLiteDb(Db):\n _writer = None\n _reader = None\n\n def __init__(self, db_name, create_db_sql=None):\n if db_name not in db_writers:\n writer = SqlLiteWriter(db_name, 200)\n db_writers[db_name] = writer\n else:\n writer = db_writers[db_name]\n self._writer = writer\n self._reader = SqlLiteReader(db_name)\n if create_db_sql:\n self._writer.execute(create_db_sql)\n pass\n\n def select(self, table, where=None, *args, **kwargs):\n self._reader.select(table, where, *args, **kwargs)\n\n def update(self, table, where=None, *args, **kwargs):\n result = None\n try:\n values = ()\n if args:\n values = tuple(args)\n if kwargs:\n values += tuple(kwargs.values())\n if values == ():\n return 0\n query = \"UPDATE %s SET \" % table\n keys = kwargs.keys()\n l = len(keys) - 1\n for i, key in enumerate(keys):\n query += \"`\" + key + \"` = ?\"\n if i < l:\n query += \",\"\n # End if i less than 1\n # End for keys\n query += \" WHERE %s\" % where\n result = self._writer.execute(query)\n except Exception as err:\n print(\"Error \", err)\n return result\n\n def insert(self, table, *args, **kwargs):\n result = None\n try:\n query = \"INSERT INTO %s \" % table\n values = []\n if kwargs:\n keys = kwargs.keys()\n values = tuple(kwargs.values())\n query += \"(\" + \",\".join([\"`%s`\"] * len(keys)) % tuple(keys) + \") VALUES (\" + \",\".join(\n [\"?\"] * len(values)) + \")\"\n elif args:\n values = args\n query += \" VALUES(\" + \",\".join([\"?\"] * len(values)) + \")\"\n # print(query)\n result = self._writer.execute(query, values)\n except Exception as err:\n print(\"Error \", err)\n return result\n\n def delete(self, table, where=None, *args):\n result = None\n try:\n query = \"DELETE FROM %s\" % table\n if where:\n query += ' WHERE %s' % where\n result = self._writer.execute(query)\n except Exception as err:\n print(\"Error \", err)\n return result\n\n def select_advanced(self, sql, *args):\n return self._reader.select_advanced(sql, *args)\n","sub_path":"app/repositories/db/sql_lite_db.py","file_name":"sql_lite_db.py","file_ext":"py","file_size_in_byte":2672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"184472472","text":"\nfrom secrets import token_bytes\nfrom typing import Tuple\n\n\ndef random_key(length: int) -> int:\n tb: bytes = token_bytes(length)\n return int.from_bytes(tb, \"big\")\n\n\ndef encrypt(orig: str) -> Tuple[int, int]:\n orig_bytes: bytes = orig.encode()\n dummy: int = random_key(len(orig_bytes))\n orig_key: int = int.from_bytes(orig_bytes, \"big\")\n encrypted: int = orig_key ^ dummy\n return dummy, encrypted\n\n\ndef decrypt(key1: int, key2: int) -> str:\n decrypted: int = key1 ^ key2\n temp: bytes = decrypted.to_bytes((decrypted.bit_length() + 7) // 8, \"big\")\n return temp.decode()\n\n\nif __name__ == \"__main__\":\n key1, key2 = encrypt(\"masooria\")\n result: str = decrypt(key1, key2)\n print(result)\n","sub_path":"0encryption.py","file_name":"0encryption.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"145885128","text":"import datetime\nimport json\nimport os\nimport os.path\nimport shutil\nimport tempfile\nimport unittest\nimport unittest.mock\nimport urllib.request\n\nfrom cate.core.ds import DATA_STORE_REGISTRY, DataAccessError, 
format_variables_info_string\nfrom cate.core.types import PolygonLike, TimeRangeLike, VarNamesLike\nfrom cate.ds.esa_cci_odp import EsaCciOdpDataStore, find_datetime_format\nfrom cate.ds.local import LocalDataStore\n\n\n@unittest.skip(reason='Because it writes a lot of files')\n# @unittest.skipUnless(condition=os.environ.get('CATE_ODP_TEST', None), reason=\"skipped unless CATE_ODP_TEST=1\")\nclass EsaCciOdpDataStoreIndexCacheTest(unittest.TestCase):\n def test_index_cache(self):\n self.data_store = EsaCciOdpDataStore(index_cache_used=True, index_cache_expiration_days=1.0e-6)\n data_sources = self.data_store.query()\n self.assertIsNotNone(data_sources)\n for data_source in data_sources:\n data_source.update_file_list()\n # data_source.sync()\n\n\ndef _create_test_data_store():\n with open(os.path.join(os.path.dirname(__file__), 'esgf-index-cache.json')) as fp:\n json_text = fp.read()\n json_dict = json.loads(json_text)\n # The EsaCciOdpDataStore created with an initial json_dict avoids fetching it from remote\n data_store = EsaCciOdpDataStore('test-odp', index_cache_json_dict=json_dict)\n DATA_STORE_REGISTRY.add_data_store(data_store)\n return data_store\n\n\nclass EsaCciOdpDataStoreTest(unittest.TestCase):\n def setUp(self):\n self.data_store = _create_test_data_store()\n\n def test_id_title_and_is_local(self):\n self.assertEqual(self.data_store.id, 'test-odp')\n self.assertEqual(self.data_store.title, 'ESA CCI Open Data Portal')\n self.assertEqual(self.data_store.is_local, False)\n\n def test_query(self):\n data_sources = self.data_store.query()\n self.assertIsNotNone(data_sources)\n self.assertEqual(len(data_sources), 61)\n\n def test_query_with_string(self):\n data_sources = self.data_store.query(query_expr='OC')\n self.assertIsNotNone(data_sources)\n self.assertEqual(len(data_sources), 20)\n\n\nclass EsaCciOdpDataSourceTest(unittest.TestCase):\n def setUp(self):\n self.data_store = _create_test_data_store()\n oc_data_sources = self.data_store.query(query_expr='OC')\n self.assertIsNotNone(oc_data_sources)\n self.assertIsNotNone(oc_data_sources[0])\n self.first_oc_data_source = oc_data_sources[0]\n self.tmp_dir = tempfile.mkdtemp()\n\n self._existing_local_data_store = DATA_STORE_REGISTRY.get_data_store('local')\n DATA_STORE_REGISTRY.add_data_store(LocalDataStore('local', self.tmp_dir))\n\n def tearDown(self):\n if self._existing_local_data_store:\n DATA_STORE_REGISTRY.add_data_store(self._existing_local_data_store)\n shutil.rmtree(self.tmp_dir, ignore_errors=True)\n\n def test_make_local_and_update(self):\n\n soilmoisture_data_sources = self.data_store.query(\n query_expr='esacci.SOILMOISTURE.day.L3S.SSMV.multi-sensor.multi-platform.COMBINED.02-1.r1')\n soilmoisture_data_source = soilmoisture_data_sources[0]\n\n reference_path = os.path.join(os.path.dirname(__file__),\n os.path.normpath('resources/datasources/local/files/'))\n\n def find_files_mock(_, time_range):\n\n def build_file_item(item_name: str, date_from: datetime, date_to: datetime, size: int):\n\n return [item_name, date_from, date_to, size,\n {'OPENDAP': os.path.join(reference_path, item_name),\n 'HTTPServer': 'file:' + urllib.request.pathname2url(os.path.join(reference_path, item_name))}]\n\n reference_files = {\n 'ESACCI-SOILMOISTURE-L3S-SSMV-COMBINED-19781114000000-fv02.2.nc': {\n 'date_from': datetime.datetime(1978, 11, 14, 0, 0),\n 'date_to': datetime.datetime(1978, 11, 14, 23, 59),\n 'size': 21511378\n },\n 'ESACCI-SOILMOISTURE-L3S-SSMV-COMBINED-19781115000000-fv02.2.nc': {\n 'date_from': datetime.datetime(1978, 11, 
15, 0, 0),\n 'date_to': datetime.datetime(1978, 11, 15, 23, 59),\n 'size': 21511378\n },\n 'ESACCI-SOILMOISTURE-L3S-SSMV-COMBINED-19781116000000-fv02.2.nc': {\n 'date_from': datetime.datetime(1978, 11, 16, 0, 0),\n 'date_to': datetime.datetime(1978, 11, 16, 23, 59),\n 'size': 21511378\n }\n }\n\n reference_files_list = []\n\n for reference_file in reference_files.items():\n file_name = reference_file[0]\n file_date_from = reference_file[1].get('date_from')\n file_date_to = reference_file[1].get('date_to')\n file_size = reference_file[1].get('size')\n if time_range:\n if file_date_from >= time_range[0] and file_date_to <= time_range[1]:\n reference_files_list.append(build_file_item(file_name,\n file_date_from,\n file_date_to,\n file_size))\n else:\n reference_files_list.append(build_file_item(file_name,\n file_date_from,\n file_date_to,\n file_size))\n return reference_files_list\n\n with unittest.mock.patch('cate.ds.esa_cci_odp.EsaCciOdpDataSource._find_files', find_files_mock):\n with unittest.mock.patch.object(EsaCciOdpDataStore, 'query', return_value=[]):\n\n new_ds_title = 'local_ds_test'\n new_ds_time_range = TimeRangeLike.convert((datetime.datetime(1978, 11, 14, 0, 0),\n datetime.datetime(1978, 11, 16, 23, 59)))\n try:\n new_ds = soilmoisture_data_source.make_local(new_ds_title, time_range=new_ds_time_range)\n except Exception:\n raise ValueError(reference_path, os.listdir(reference_path))\n self.assertIsNotNone(new_ds)\n\n self.assertEqual(new_ds.id, \"local.%s\" % new_ds_title)\n self.assertEqual(new_ds.temporal_coverage(), new_ds_time_range)\n\n new_ds_w_one_variable_title = 'local_ds_test_var'\n new_ds_w_one_variable_time_range = TimeRangeLike.convert((datetime.datetime(1978, 11, 14, 0, 0),\n datetime.datetime(1978, 11, 16, 23, 59)))\n new_ds_w_one_variable_var_names = VarNamesLike.convert(['sm'])\n\n new_ds_w_one_variable = soilmoisture_data_source.make_local(\n new_ds_w_one_variable_title,\n time_range=new_ds_w_one_variable_time_range,\n var_names=new_ds_w_one_variable_var_names\n )\n self.assertIsNotNone(new_ds_w_one_variable)\n\n self.assertEqual(new_ds_w_one_variable.id, \"local.%s\" % new_ds_w_one_variable_title)\n ds = new_ds_w_one_variable.open_dataset()\n\n new_ds_w_one_variable_var_names.extend(['lat', 'lon', 'time'])\n\n self.assertSetEqual(set(ds.variables),\n set(new_ds_w_one_variable_var_names))\n\n new_ds_w_region_title = 'from_local_to_local_region'\n new_ds_w_region_time_range = TimeRangeLike.convert((datetime.datetime(1978, 11, 14, 0, 0),\n datetime.datetime(1978, 11, 16, 23, 59)))\n new_ds_w_region_spatial_coverage = PolygonLike.convert(\"10,20,30,40\")\n\n new_ds_w_region = soilmoisture_data_source.make_local(\n new_ds_w_region_title,\n time_range=new_ds_w_region_time_range,\n region=new_ds_w_region_spatial_coverage) # type: LocalDataSource\n\n self.assertIsNotNone(new_ds_w_region)\n\n self.assertEqual(new_ds_w_region.id, \"local.%s\" % new_ds_w_region_title)\n\n self.assertEqual(new_ds_w_region.spatial_coverage(), new_ds_w_region_spatial_coverage)\n\n new_ds_w_region_title = 'from_local_to_local_region_one_var'\n new_ds_w_region_time_range = TimeRangeLike.convert((datetime.datetime(1978, 11, 14, 0, 0),\n datetime.datetime(1978, 11, 16, 23, 59)))\n new_ds_w_region_var_names = VarNamesLike.convert(['sm'])\n new_ds_w_region_spatial_coverage = PolygonLike.convert(\"10,20,30,40\")\n\n new_ds_w_region = soilmoisture_data_source.make_local(\n new_ds_w_region_title,\n time_range=new_ds_w_region_time_range,\n var_names=new_ds_w_region_var_names,\n 
region=new_ds_w_region_spatial_coverage) # type: LocalDataSource\n\n self.assertIsNotNone(new_ds_w_region)\n\n self.assertEqual(new_ds_w_region.id, \"local.%s\" % new_ds_w_region_title)\n\n self.assertEqual(new_ds_w_region.spatial_coverage(), new_ds_w_region_spatial_coverage)\n data_set = new_ds_w_region.open_dataset()\n new_ds_w_region_var_names.extend(['lat', 'lon', 'time'])\n\n self.assertSetEqual(set(data_set.variables), set(new_ds_w_region_var_names))\n\n new_ds_w_region_title = 'from_local_to_local_region_two_var_sm_uncertainty'\n new_ds_w_region_time_range = TimeRangeLike.convert((datetime.datetime(1978, 11, 14, 0, 0),\n datetime.datetime(1978, 11, 16, 23, 59)))\n new_ds_w_region_var_names = VarNamesLike.convert(['sm', 'sm_uncertainty'])\n new_ds_w_region_spatial_coverage = PolygonLike.convert(\"10,20,30,40\")\n\n new_ds_w_region = soilmoisture_data_source.make_local(\n new_ds_w_region_title,\n time_range=new_ds_w_region_time_range,\n var_names=new_ds_w_region_var_names,\n region=new_ds_w_region_spatial_coverage) # type: LocalDataSource\n\n self.assertIsNotNone(new_ds_w_region)\n\n self.assertEqual(new_ds_w_region.id, \"local.%s\" % new_ds_w_region_title)\n\n self.assertEqual(new_ds_w_region.spatial_coverage(), new_ds_w_region_spatial_coverage)\n data_set = new_ds_w_region.open_dataset()\n new_ds_w_region_var_names.extend(['lat', 'lon', 'time'])\n\n self.assertSetEqual(set(data_set.variables), set(new_ds_w_region_var_names))\n\n empty_ds_timerange = (datetime.datetime(2017, 12, 1, 0, 0), datetime.datetime(2017, 12, 31, 23, 59))\n with self.assertRaises(DataAccessError) as cm:\n soilmoisture_data_source.make_local('empty_ds', time_range=empty_ds_timerange)\n self.assertEqual('CCI Open Data Portal data source \"{}\"\\ndoes not seem to have any datasets in given '\n 'time range {}'.format(soilmoisture_data_source.id,\n TimeRangeLike.format(empty_ds_timerange)),\n str(cm.exception))\n\n new_ds_time_range = TimeRangeLike.convert((datetime.datetime(1978, 11, 14, 0, 0),\n datetime.datetime(1978, 11, 14, 23, 59)))\n\n new_ds = soilmoisture_data_source.make_local(\"title_test_copy\", time_range=new_ds_time_range)\n self.assertIsNotNone(new_ds)\n self.assertEqual(new_ds.meta_info['title'], soilmoisture_data_source.meta_info['title'])\n\n title = \"Title Test!\"\n new_ds = soilmoisture_data_source.make_local(\"title_test_set\", title, time_range=new_ds_time_range)\n self.assertIsNotNone(new_ds)\n self.assertEqual(new_ds.meta_info['title'], title)\n\n def test_data_store(self):\n self.assertIs(self.first_oc_data_source.data_store,\n self.data_store)\n\n def test_id(self):\n self.assertEqual(self.first_oc_data_source.id,\n 'esacci.OC.day.L3S.K_490.multi-sensor.multi-platform.MERGED.1-0.r2')\n\n def test_schema(self):\n self.assertEqual(self.first_oc_data_source.schema,\n None)\n\n @unittest.skip(reason='outdated info string')\n def test_info_string(self):\n self.assertIn('product_string: MERGED\\n',\n self.first_oc_data_source.info_string)\n\n def test_variables_info_string(self):\n self.assertIn('kd_490 (m-1):\\n',\n format_variables_info_string(self.first_oc_data_source.variables_info),\n self.first_oc_data_source.variables_info)\n self.assertIn('Long name: Downwelling attenuation coefficient at 490nm',\n format_variables_info_string(self.first_oc_data_source.variables_info))\n\n @unittest.skip(reason='ssl error on windows')\n def test_temporal_coverage(self):\n self.assertEqual(self.first_oc_data_source.temporal_coverage(),\n (datetime.datetime(1997, 9, 4, 0, 0), datetime.datetime(2000, 6, 
24, 0, 0)))\n\n def assert_tf(self, filename: str, expected_time_format: str):\n time_format, p1, p2 = find_datetime_format(filename)\n self.assertEqual(time_format, expected_time_format)\n\n def test_time_filename_patterns(self):\n self.assert_tf('20020730174408-ESACCI-L3U_GHRSST-SSTskin-AATSR-LT-v02.0-fv01.1.nc', '%Y%m%d%H%M%S')\n self.assert_tf('19911107054700-ESACCI-L2P_GHRSST-SSTskin-AVHRR12_G-LT-v02.0-fv01.0.nc', '%Y%m%d%H%M%S')\n self.assert_tf('ESACCI-SEAICE-L4-SICONC-SSMI-NH25kmEASE2-19920610-fv01.11.nc', '%Y%m%d')\n self.assert_tf('ESACCI-SEAICE-L4-SICONC-SSMI-SH25kmEASE2-20000101-20001231-fv01.11.nc', '%Y%m%d')\n self.assert_tf('ESACCI-SEAICE-L4-SICONC-AMSR-NH25kmEASE2-20070204-fv01.11.nc', '%Y%m%d')\n self.assert_tf('ESACCI-SEAICE-L4-SICONC-AMSR-SH25kmEASE2-20040427-fv01.11.nc', '%Y%m%d')\n self.assert_tf('19921018120000-ESACCI-L4_GHRSST-SSTdepth-OSTIA-GLOB_LT-v02.0-fv01.0.nc', '%Y%m%d%H%M%S')\n self.assert_tf('19940104120000-ESACCI-L4_GHRSST-SSTdepth-OSTIA-GLOB_LT-v02.0-fv01.1.nc', '%Y%m%d%H%M%S')\n self.assert_tf('ESACCI-OZONE-L3S-TC-MERGED-DLR_1M-20090301-fv0100.nc', '%Y%m%d')\n self.assert_tf('20070328-ESACCI-L3U_CLOUD-CLD_PRODUCTS-AVHRR_NOAA-15-fv1.0.nc', '%Y%m%d')\n self.assert_tf('20091002-ESACCI-L3U_CLOUD-CLD_PRODUCTS-AVHRR_NOAA-16-fv1.0.nc', '%Y%m%d')\n self.assert_tf('20090729-ESACCI-L3U_CLOUD-CLD_PRODUCTS-AVHRR_NOAA-18-fv1.0.nc', '%Y%m%d')\n self.assert_tf('20070410-ESACCI-L3U_CLOUD-CLD_PRODUCTS-AVHRR_NOAA-17-fv1.0.nc', '%Y%m%d')\n self.assert_tf('ESACCI-OC-L3S-K_490-MERGED-1D_DAILY_4km_SIN_PML_KD490_Lee-20000129-fv1.0.nc', '%Y%m%d')\n self.assert_tf('ESACCI-OC-L3S-K_490-MERGED-1D_DAILY_4km_GEO_PML_KD490_Lee-19980721-fv1.0.nc', '%Y%m%d')\n self.assert_tf('ESACCI-OZONE-L3-NP-MERGED-KNMI-200812-fv0002.nc', '%Y%m')\n self.assert_tf('ESACCI-OC-L3S-CHLOR_A-MERGED-1D_DAILY_4km_GEO_PML_OC4v6-19971130-fv1.0.nc', '%Y%m%d')\n self.assert_tf('ESACCI-OC-L3S-CHLOR_A-MERGED-1D_DAILY_4km_SIN_PML_OC4v6-19980625-fv1.0.nc', '%Y%m%d')\n self.assert_tf('200903-ESACCI-L3C_CLOUD-CLD_PRODUCTS-AVHRR_NOAA-15-fv1.0.nc', '%Y%m')\n self.assert_tf('ESACCI-GHG-L2-CH4-GOSAT-SRPR-20100501-fv1.nc', '%Y%m%d')\n self.assert_tf('ESACCI-GHG-L2-CH4-GOSAT-SRPR-20091201-fv1.nc', '%Y%m%d')\n self.assert_tf('ESACCI-GHG-L2-CO2-GOSAT-SRFP-20101220-fv1.nc', '%Y%m%d')\n self.assert_tf('ESACCI-GHG-L2-CH4-GOSAT-SRFP-20100109-fv1.nc', '%Y%m%d')\n self.assert_tf('ESACCI-GHG-L2-CO2-GOSAT-SRFP-20090527-fv1.nc', '%Y%m%d')\n self.assert_tf('ESACCI-GHG-L2-CH4-GOSAT-SRFP-20100714-fv1.nc', '%Y%m%d')\n self.assert_tf('20090616-ESACCI-L3U_CLOUD-CLD_PRODUCTS-MODIS_TERRA-fv1.0.nc', '%Y%m%d')\n self.assert_tf('20070717-ESACCI-L3U_CLOUD-CLD_PRODUCTS-MODIS_AQUA-fv1.0.nc', '%Y%m%d')\n self.assert_tf('ESACCI-OC-L3S-OC_PRODUCTS-MERGED-8D_DAILY_4km_GEO_PML_OC4v6_QAA-19971211-fv1.0.nc', '%Y%m%d')\n self.assert_tf('ESACCI-OC-L3S-OC_PRODUCTS-MERGED-8D_DAILY_4km_SIN_PML_OC4v6_QAA-20080921-fv1.0.nc', '%Y%m%d')\n self.assert_tf('ESACCI-OC-L3S-OC_PRODUCTS-MERGED-1M_MONTHLY_4km_SIN_PML_OC4v6_QAA-200906-fv1.0.nc', '%Y%m')\n self.assert_tf('ESACCI-OC-L3S-OC_PRODUCTS-MERGED-1M_MONTHLY_4km_GEO_PML_OC4v6_QAA-200707-fv1.0.nc', '%Y%m')\n self.assert_tf('ESACCI-OC-L3S-OC_PRODUCTS-MERGED-1Y_YEARLY_4km_GEO_PML_OC4v6_QAA-2005-fv1.0.nc', '%Y')\n self.assert_tf('ESACCI-OC-L3S-OC_PRODUCTS-MERGED-1Y_YEARLY_4km_GEO_PML_OC4v6_QAA-2003-fv1.0.nc', '%Y')\n self.assert_tf('ESACCI-OC-L3S-OC_PRODUCTS-MERGED-8D_DAILY_4km_GEO_PML_OC4v6_QAA-19970914-fv1.0.nc', '%Y%m%d')\n self.assert_tf('ESACCI-OC-L3S-IOP-MERGED-1D_DAILY_4km_GEO_PML_QAA-19970915-fv1.0.nc', 
'%Y%m%d')\n self.assert_tf('ESACCI-OC-L3S-IOP-MERGED-1D_DAILY_4km_GEO_PML_QAA-19980724-fv1.0.nc', '%Y%m%d')\n self.assert_tf('20020822103843-ESACCI-L3U_GHRSST-SSTskin-AATSR-LT-v02.0-fv01.0.nc', '%Y%m%d%H%M%S')\n self.assert_tf('ESACCI-OZONE-L3S-TC-MERGED-DLR_1M-19980301-fv0100.nc', '%Y%m%d')\n self.assert_tf('ESACCI-SOILMOISTURE-L3S-SSMV-COMBINED-19781120000000-fv02.1.nc', '%Y%m%d%H%M%S')\n self.assert_tf('ESACCI-SOILMOISTURE-L3S-SSMV-PASSIVE-19791011000000-fv02.1.nc', '%Y%m%d%H%M%S')\n self.assert_tf('ESACCI-SOILMOISTURE-L3S-SSMV-PASSIVE-19790519000000-fv02.2.nc', '%Y%m%d%H%M%S')\n self.assert_tf('ESACCI-SOILMOISTURE-L3S-SSMS-ACTIVE-19911026000000-fv02.1.nc', '%Y%m%d%H%M%S')\n self.assert_tf('ESACCI-SOILMOISTURE-L3S-SSMS-ACTIVE-19911010000000-fv02.2.nc', '%Y%m%d%H%M%S')\n self.assert_tf('ESACCI-SEALEVEL-IND-MSL-MERGED-20151104000000-fv01.nc', '%Y%m%d%H%M%S')\n self.assert_tf('ESACCI-SEALEVEL-IND-MSLAMPH-MERGED-20151104000000-fv01.nc', '%Y%m%d%H%M%S')\n self.assert_tf('ESACCI-SEALEVEL-IND-MSLTR-MERGED-20151104000000-fv01.nc', '%Y%m%d%H%M%S')\n self.assert_tf('ESACCI-OC-L3S-RRS-MERGED-1D_DAILY_4km_GEO_PML_RRS-19980418-fv1.0.nc', '%Y%m%d')\n self.assert_tf('ESACCI-OC-L3S-RRS-MERGED-1D_DAILY_4km_SIN_PML_RRS-19980925-fv1.0.nc', '%Y%m%d')\n self.assert_tf('200811-ESACCI-L3C_CLOUD-CLD_PRODUCTS-AVHRR_NOAA-18-fv1.0.nc', '%Y%m')\n self.assert_tf('200704-ESACCI-L3C_CLOUD-CLD_PRODUCTS-AVHRR_NOAA-16-fv1.0.nc', '%Y%m')\n self.assert_tf('200811-ESACCI-L3C_CLOUD-CLD_PRODUCTS-AVHRR_NOAA-17-fv1.0.nc', '%Y%m')\n self.assert_tf('200712-ESACCI-L3C_CLOUD-CLD_PRODUCTS-MODIS_TERRA-fv1.0.nc', '%Y%m')\n self.assert_tf('200902-ESACCI-L3C_CLOUD-CLD_PRODUCTS-MODIS_AQUA-fv1.0.nc', '%Y%m')\n self.assert_tf('200706-ESACCI-L3S_CLOUD-CLD_PRODUCTS-MODIS_MERGED-fv1.0.nc', '%Y%m')\n self.assert_tf('200901-ESACCI-L3S_CLOUD-CLD_PRODUCTS-AVHRR_MERGED-fv1.0.nc', '%Y%m')\n self.assert_tf('ESACCI-OC-L3S-OC_PRODUCTS-MERGED-1M_MONTHLY_4km_GEO_PML_OC4v6_QAA-200505-fv1.0.nc', '%Y%m')\n self.assert_tf('ESACCI-OC-L3S-OC_PRODUCTS-MERGED-1D_DAILY_4km_SIN_PML_OC4v6_QAA-19980720-fv1.0.nc', '%Y%m%d')\n self.assert_tf('ESACCI-OC-L3S-OC_PRODUCTS-MERGED-1D_DAILY_4km_GEO_PML_OC4v6_QAA-19990225-fv1.0.nc', '%Y%m%d')\n self.assert_tf('ESACCI-OC-L3S-OC_PRODUCTS-MERGED-8D_DAILY_4km_GEO_PML_OC4v6_QAA-19990407-fv1.0.nc', '%Y%m%d')\n self.assert_tf('ESACCI-OC-L3S-OC_PRODUCTS-MERGED-1D_DAILY_4km_GEO_PML_OC4v6_QAA-19970915-fv1.0.nc', '%Y%m%d')\n self.assert_tf('20060107-ESACCI-L4_FIRE-BA-MERIS-fv4.1.nc', '%Y%m%d')\n","sub_path":"test/ds/test_esa_cci_odp.py","file_name":"test_esa_cci_odp.py","file_ext":"py","file_size_in_byte":20235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"625379327","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nimport requests\nimport json\nimport numpy as np\nimport pymongo\nfrom bson.objectid import ObjectId\nfrom django.conf import settings\n\nhostaddress = settings.DB_ADDR\nhostport = settings.DB_PORT\ndbname = settings.DB_NAME\n\ngtwhost = settings.GTW_HOST\ngtwport = settings.GTW_PORT\n\nhrahost = settings.HRA_HOST\nhraport = settings.HRA_PORT\n\n# Development version\nclient2 = pymongo.MongoClient('mongodb://%s:%s/%s'\\\n\t%(hostaddress,hostport,dbname))\nnDB = client2[dbname]\n\nclient = pymongo.MongoClient('mongodb://%s:%s/%s'\\\n\t%(hostaddress,hostport,dbname))\ndb = client[dbname]\n\n\ndef homepage(response):\n args = get_topics(response)\n return render(response, \"main/index.html\", args)\n\n\ndef arrhytmia(response):\n if 
response.method == \"POST\":\n args = check_topic_data(response)\n if args[\"error\"] == \"true\":\n return render(response, \"main/index.html\", args)\n else:\n topic = args[\"topic\"]\n args = arrhytmia_getdata(response, args[\"topic\"])\n key = arrhytmia_get_history(response, topic)\n args[\"keys\"] = key\n return render(response, \"main/ecgdashboard.html\", args)\n # arrhytmia_process(response)\n\n\ndef arrhytmia_get_history(response, topic):\n data = response.user\n user = data.username\n conn = db[user]\n history = conn.find({\n \"analytic_type\": \"arrhytmia\",\n \"topic\": topic\n }, {\n \"analytic_type\": 0,\n \"arrhytmia\": 0,\n \"topic\": 0,\n \"created\": 0\n })\n key = {}\n historys = list(history)\n if len(historys) > 0:\n for history in historys:\n key[history[\"key\"]] = history[\"_id\"]\n return key\n\n\ndef arrhytmia_getdata(response, topic):\n data = response.user\n user = data.username\n conn = db[user]\n\n args = {}\n args[\"topic\"] = topic\n args[\"user\"] = user\n filter = {\n \"status\": {\n \"$exists\": False\n },\n \"analytic_type\": {\n \"$exists\": False\n },\n \"payload.ecg\": {\n \"$exists\": True\n },\n \"topic\": topic\n }\n count = conn.find(filter).count()\n if count > 499:\n start = conn.find(filter).sort(\"_id\", 1).limit(1)\n datas = list(start)[0]\n idstart = datas['_id']\n start = datas['time']\n end = conn.find(filter).sort(\"_id\", -1).limit(1)\n datae = list(end)[0]\n idend = datae['_id']\n end = datae['time']\n args[\"start\"] = start\n args[\"end\"] = end\n args[\"idstart\"] = idstart\n args[\"idend\"] = idend\n args[\"error\"] = \"\"\n else:\n args[\"start\"] = \"\"\n args[\"end\"] = \"\"\n args[\"idstart\"] = \"\"\n args[\"idend\"] = \"\"\n args[\"error\"] = \"true\"\n args[\"message\"] = \"There is not enough data to clasify!\"\n\n return args\n\n\ndef arrhytmia_history(response):\n if response.method == \"POST\":\n data = response.user\n username = data.username\n age = data.age\n topic = response.POST.get(\"topic\")\n key = response.POST.get(\"selectdata\")\n uid = response.POST.get(key)\n key_split = key.split(\" : \")\n start = key_split[0]\n end = key_split[1]\n args = get_hasil(username, uid, age)\n args[\"topic\"] = topic\n args[\"start\"] = start\n args[\"end\"] = end\n return render(response, \"main/details.html\", args)\n\n\ndef arrhytmia_process(response):\n if response.method == \"POST\":\n data = response.user\n username = data.username\n age = data.age\n topic = response.POST.get(\"topic\")\n start = response.POST.get(\"idstart\")\n end = response.POST.get(\"idend\")\n args = {\n \"user\": username,\n \"start\": start,\n \"end\": end,\n \"topic\": topic\n }\n conn = requests.post(\n \"http://3.1.49.16/analytic/arrhytmia\",\n json=json.dumps(args),\n headers={\n 'Content-type': 'application/json; charset=UTF-8'}\n\n )\n data = json.loads(conn.text)\n if data['status'] == \"OK\":\n args = get_hasil(username, data['id'], age)\n args[\"topic\"] = topic\n args[\"start\"] = start\n args[\"end\"] = end\n return render(response, \"main/details.html\", args)\n else:\n return render(response, \"main/404.html\")\n\n\ndef heart_rate(response):\n if response.method == \"POST\":\n args = check_topic_data(response)\n if args[\"error\"] == \"true\":\n return render(response, \"main/index.html\", args)\n else:\n args = heart_rate_process(response, args[\"topic\"])\n return render(response, \"main/heart_rate/home.html\", args)\n # payload = {\n # \"status\" : \"OK\",\n # }\n # payload = json.dumps(payload)\n # return HttpResponse(payload, 
content_type='application/json')\n\n\ndef heart_rate_process(response, topic):\n data = response.user\n username = data.username\n uid = data.id\n age = data.age\n args = {\n \"user\": username,\n \"uid\": uid,\n \"age\": age,\n \"topic\": topic\n }\n conn = requests.post(\n \"http://{}:{}/analytic/hr/dashboard\".format(hrahost,hraport),\n json=json.dumps(args),\n headers={\n 'Content-type': 'application/json; charset=UTF-8'}\n )\n data = json.loads(conn.text)\n print(data)\n print(type(data))\n args[\"created\"] = data['user']['time']['created']\n args[\"now\"] = data['user']['time']['now']\n args[\"status\"] = data['status']\n args[\"topic\"] = topic\n\n return args\n\n\ndef trend_process(response):\n data = {}\n args = {}\n datauser = response.user\n username = datauser.username\n age = datauser.age\n uid = datauser.id\n data['user'] = username\n data['age'] = age\n data['uid'] = uid\n if response.method == \"POST\":\n data['type'] = response.POST.get(\"type\")\n data['topic'] = response.POST.get(\"topic\")\n if data['type'] == \"track\":\n data['date'] = \"\"\n else:\n data['date'] = response.POST.get(\"date\")\n conn = requests.post(\n \"http://{}:{}/analytic/hr/process\".format(hrahost,hraport), \n json=json.dumps(data),\n headers={\n 'Content-type': 'application/json; charset=UTF-8'}\n )\n data = json.loads(conn.text)\n args[\"user\"] = data['user']\n args[\"status\"] = data['status']\n args[\"trend\"] = data['trend']\n args[\"topic\"] = response.POST.get(\"topic\")\n return render(response, \"main/heart_rate/home.html\", args)\n\n\ndef article_summaries(response, status):\n if status == \"slow\":\n return render(response, \"main/heart_rate/article_summaries/slow.html\")\n else:\n return render(response, \"main/heart_rate/article_summaries/fast.html\")\n\n\ndef get_topics(response):\n userdata = response.user\n uid = userdata.id\n topics_conn = nDB[\"topics\"]\n topics = topics_conn.find({\n \"user_id\": uid\n })\n payload = []\n args = {}\n for topic in topics:\n payload.append(topic['apiendpoint'])\n args['topics'] = payload\n args['error'] = \"\"\n args['error_msg'] = \"\"\n return args\n\n\ndef check_topic_data(response):\n user = str(response.user)\n analytic_type = response.POST.get(\"analytictype\")\n topic = response.POST.get(\"selecttopic\")\n\n if topic == \"0\":\n args = get_topics(response)\n args['error'] = \"true\"\n args['error_msg'] = \"Please select topic to analyze!!!\"\n print(\"topic 0\")\n else:\n args = get_topics(response)\n payload = {\n \"user\" : user,\n \"analytic_type\" : analytic_type,\n \"topic\" : topic\n }\n\n conn = requests.post(\n \"http://{}:{}/api/countdata\".format(gtwhost,gtwport),\n json=json.dumps(payload),\n headers={\n 'Content-type': 'application/json; charset=UTF-8'\n }\n )\n\n if conn.status_code:\n data = json.loads(conn.text)\n data_length = data[\"data_length\"]\n else:\n data_length = 0\n \n if data_length <= 0:\n args['error'] = \"true\"\n args['error_msg'] = \"There is no data to analyze!!!\"\n # if analytic_type == \"arrhytmia\":\n # args['error'] = \"true\"\n # args['error_msg'] = \"There is no data to analyze!!!\"\n # print(\"topic 0 arrhytmia\")\n # else:\n # args['error'] = \"true\"\n # args['error_msg'] = \"There is no data to analyze!!!\"\n # print(\"topic 0 hr\")\n args[\"topic\"] = topic\n return args\n\n\ndef get_hasil(nama, id, umur):\n connection = nDB[nama]\n query_result = connection.find_one({\"_id\": ObjectId(id)})\n # query_result = connection.find(\n # {\"analytic_type\": \"arrhytmia\"},\n # {\"analytic_type\": 0, 
\"_id\": 0, \"processed\": 0}\n # )\n\n # query_result = list(query_result)[0]['arrhytmia']\n data = query_result[\"arrhytmia\"]['data']\n hasil = query_result[\"arrhytmia\"]['hasil']\n\n umur = umur\n created = query_result[\"created\"]\n created = created.strftime(\"%a, %d %b %Y %H:%M:%S\")\n\n ecg = data[\"ecg\"]\n ts = data[\"timeseries\"]\n ecg_ts = []\n # ts + ecg\n for count, i in enumerate(ecg):\n ecg_ts.append([ts[count], i])\n\n filtered = data[\"filtered\"]\n filtered_ts = []\n # ts + filtered\n for count, i in enumerate(filtered):\n filtered_ts.append([ts[count], i])\n\n # hasil\n result = hasil[\"hasil\"]\n PVC = []\n PAB = []\n RBB = []\n LBB = []\n APC = []\n VEB = []\n for key in result.keys():\n if len(result[key]) > 0:\n tmp = result[key]\n for x in tmp:\n if key == \"PVC\":\n PVC.append([ts[x[0]], ts[x[1]]])\n elif key == \"PAB\":\n PAB.append([ts[x[0]], ts[x[1]]])\n elif key == \"RBB\":\n RBB.append([ts[x[0]], ts[x[1]]])\n elif key == \"LBB\":\n LBB.append([ts[x[0]], ts[x[1]]])\n elif key == \"APC\":\n APC.append([ts[x[0]], ts[x[1]]])\n elif key == \"VEB\":\n VEB.append([ts[x[0]], ts[x[1]]])\n result = {\n \"APC\": APC,\n \"LBB\": LBB,\n \"PAB\": PAB,\n \"PVC\": PVC,\n \"RBB\": RBB,\n \"VEB\": VEB\n }\n\n # get value ts by rpeaks index\n rpeaks = hasil[\"rpeaks\"]\n rpeaks = np.array(rpeaks)\n ts_tmp = np.array(ts)\n rpeaks = ts_tmp[rpeaks]\n rpeaks = rpeaks.tolist()\n\n hr = hasil[\"heart_rate\"]\n hr_template = hasil[\"hr_template\"]\n\n tmp = {\n \"status\": \"OK\",\n \"result\": {\n \"nama\": nama,\n \"umur\": umur,\n \"data\": ecg_ts,\n \"hasil\": result,\n \"filtered\": filtered_ts,\n \"hr_template\": hr_template,\n \"rpeaks\": rpeaks,\n \"hr\": hr\n }\n }\n\n args = {}\n\n args['data'] = ecg_ts\n args['filtered'] = filtered_ts\n args['hasil'] = result\n args['hr'] = hr\n args['hr_template'] = hr_template\n args['rpeaks'] = rpeaks\n args['nama'] = nama\n args['umur'] = umur\n args['created'] = created\n\n return args\n\n\ndef get_data_ecg(user):\n data = False\n connection = db[user]\n print(\"Connected to collection : \", user)\n\n id = connection.find({\"payload.ecg\": {\"$exists\": True}}, {\"payload\": 0})\n\n id = list(id)\n if id:\n id = id[0]['_id']\n cursor = connection.aggregate(\n [\n {\"$match\": {\"_id\": ObjectId(id)}},\n {\"$project\": {\n \"raw\": '$payload.ecg',\n \"_id\": 0\n }}\n ]\n )\n data = list(cursor)\n print(\"Getting payload from collection\")\n data = data[0]['raw']\n return data\n\n\n# Dari JAY Versi Lama 2\n# def homepage(response):\n# #urlAnalytic = \"http://3.1.218.130:5000/\"\n\n# #responAnalytic = requests.request(\"GET\", urlAnalytic)\n# #responAnalytic = responAnalytic.json()\n# #if responAnalytic['status'] == \"OK\":\n# # args = {}\n# # data = []\n# # col = db[\"pasien\"]\n# # pasien = col.find({})\n# # for i in pasien:\n# # tmp = {\n# # \"nama\": i[\"nama\"],\n# # \"umur\": i[\"umur\"]\n# # }\n# # data.append(tmp)\n# # args['data'] = data\n# # return render(response, \"main/index.html\", args)\n# #else:\n# # return render(response, \"main/404.html\")\n# # args = {}\n# # data = []\n# # col = db[\"pasien\"]\n# # pasien = col.find({})\n# # for i in pasien:\n# # tmp = {\n# # \"nama\": i[\"nama\"],\n# # \"umur\": i[\"umur\"]\n# # }\n# # data.append(tmp)\n# # args['data'] = data\n# # return render(response, \"main/index.html\", args)\n# payload = response.user\n# payload = json.dumps({\n# \"id\" : payload.id\n# })\n# return HttpResponse(payload, content_type='application/json')\n\n# def analytic(response, username):\n# urlAnalytic = 
\"http://3.1.218.130:5000/requestAnalysis/{}\".format(username)\n# responAnalytic = requests.request(\"GET\", urlAnalytic)\n# responAnalytic = responAnalytic.json()\n# if responAnalytic['status'] == \"OK\":\n# args = getHasil(username)\n# return render(response, \"main/details.html\", args)\n# else:\n# return render(response, \"main/404.html\")\n\n# def getHasil(name):\n# errorMsg = \"\"\n# count = 0\n\n# col = db[\"data\"]\n# col1 = db[\"pasien\"]\n# col2 = db[\"hasil\"]\n\n# check = []\n\n\n# check.append(col.find({\"nama\":name}).count())\n# check.append(col1.find({\"nama\":name}).count())\n# check.append(col2.find({\"nama\":name}).count())\n# if check.count(1) < 3:\n# for count,i in enumerate(check):\n# if i == 0:\n# if count == 0:\n# errorMsg = errorMsg + \" Data in Data Collection Not Found.\"\n# elif count == 1:\n# errorMsg = errorMsg + \" Data in Pasien Collection Not Found.\"\n# else:\n# errorMsg = errorMsg + \" Data in Hasil Collection Not Found.\"\n# tmp = {\n# \"status\":\"ERROR\",\n# \"message\":errorMsg\n# }\n\n# else:\n# data = col.find_one({\"nama\":name})\n# pasien = col1.find_one({\"nama\":name})\n# hasil = col2.find_one({\"nama\":name})\n\n# nama = name\n# umur = pasien[\"umur\"]\n# ecg = data[\"data\"]\n# ts = data[\"timeseries\"]\n# ecg_ts = []\n# # ts + ecg\n# for count,i in enumerate(ecg):\n# ecg_ts.append([ts[count],i])\n\n# filtered = data[\"filtered\"]\n# filtered_ts = []\n# # ts + filtered\n# for count,i in enumerate(filtered):\n# filtered_ts.append([ts[count],i])\n\n# # hasil\n# result = hasil[\"hasil\"]\n# PVC = []\n# PAB = []\n# RBB = []\n# LBB = []\n# APC = []\n# VEB = []\n# for key in result.keys():\n# if len(result[key]) > 0 :\n# tmp = result[key]\n# for x in tmp:\n# if key == \"PVC\":\n# PVC.append([ts[x[0]],ts[x[1]]])\n# elif key == \"PAB\":\n# PAB.append([ts[x[0]],ts[x[1]]])\n# elif key == \"RBB\":\n# RBB.append([ts[x[0]],ts[x[1]]])\n# elif key == \"LBB\":\n# LBB.append([ts[x[0]],ts[x[1]]])\n# elif key == \"APC\":\n# APC.append([ts[x[0]],ts[x[1]]])\n# elif key == \"VEB\":\n# VEB.append([ts[x[0]],ts[x[1]]])\n# result = {\n# \"APC\": APC,\n# \"LBB\": LBB,\n# \"PAB\": PAB,\n# \"PVC\": PVC,\n# \"RBB\": RBB,\n# \"VEB\": VEB\n# }\n\n# #get value ts by rpeaks index\n# rpeaks = hasil[\"rpeaks\"]\n# rpeaks = np.array(rpeaks)\n# ts_tmp = np.array(ts)\n# rpeaks = ts_tmp[rpeaks]\n# rpeaks = rpeaks.tolist()\n\n# hr = hasil[\"heart_rate\"]\n# hr_template = hasil[\"hr_template\"]\n\n# tmp = {\n# \"status\":\"OK\",\n# \"result\":{\n# \"nama\":nama,\n# \"umur\":umur,\n# \"data\":ecg_ts,\n# \"hasil\":result,\n# \"filtered\":filtered_ts,\n# \"hr_template\":hr_template,\n# \"rpeaks\": rpeaks,\n# \"hr\":hr\n# }\n# }\n# args = {}\n\n# args['data'] = ecg_ts\n# args['filtered'] = filtered_ts\n# args['hasil'] = result\n# args['hr'] = hr\n# args['hr_template'] = hr_template\n# args['rpeaks'] = rpeaks\n# args['nama'] = nama\n# args['umur'] = umur\n\n# return args\n# END dari JAY Versi Lama 2\n\n\n# DARI JAY Versi lama\n# def homepage(response):\n# urlStorage = \"http://127.0.0.1:5001/getAllName\"\n# urlAnalytic = \"http://127.0.0.1:5000/\"\n# responAnalytic = requests.request(\"GET\", urlAnalytic)\n# responAnalytic = responAnalytic.json()\n# if responAnalytic['status'] == \"OK\":\n# args = {}\n# responStorage = requests.request(\"GET\", urlStorage)\n# respon = responStorage.json()\n# args['data'] = respon\n# return render(response,\"dataanalytics/index.html\",args)\n# else:\n# return render(response,\"dataanalytics/404.html\")\n\n# def analytic(response, username):\n# urlAnalytic 
= \"http://127.0.0.1:5000/requestAnalysis/{}\".format(username)\n# responAnalytic = requests.request(\"GET\", urlAnalytic)\n# responAnalytic = responAnalytic.json()\n# if responAnalytic['status'] == \"OK\":\n# args = getHasil(username)\n# return render(response,\"dataanalytics/details.html\",args)\n# else:\n# return render(response,\"dataanalytics/404.html\")\n\n\n# def getHasil(username):\n# args = {}\n# url = \"http://127.0.0.1:5001/getOneData/{}\".format(username)\n# respon = requests.request(\"GET\", url)\n# result = respon.json()\n\n# args['data'] = result['result']['data']\n# args['filtered'] = result['result']['filtered']\n# args['hasil'] = result['result']['hasil']\n# print(args['hasil'])\n# args['hr'] = result['result']['hr']\n# args['hr_template'] = result['result']['hr_template']\n# args['rpeaks'] = result['result']['rpeaks']\n# args['nama'] = result['result']['nama']\n# args['umur'] = result['result']['umur']\n\n# return args\n# #END DARI JAY\n","sub_path":"dataanalytics/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":18758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"385464723","text":"#!/usr/bin/env python3\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom database_setup import Base, User, Electronics, Device\n\nengine = create_engine('sqlite:///electronics.db')\nBase.metadata.bind = engine\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\n\n# Laptop Devices\nelectronic1 = Electronics(name=\"laptops\", id=1, user_id=12345)\nsession.add(electronic1)\nsession.commit()\n\ndevice1 = Device(user_id=12345, name=\"MacBook Pro\", id=1,\n description=\"MacBook Pro made by Apple\",\n price=\"$1299.99\", year=\"2020\", brand=\"Apple\",\n electronics=electronic1)\nsession.add(device1)\nsession.commit()\n\ndevice2 = Device(user_id=12345, name=\"MacBook Air\", id=2,\n description=\"MacBook Air made by Apple\",\n price=\"$1099.99\", year=\"2020\", brand=\"Apple\",\n electronics=electronic1)\nsession.add(device2)\nsession.commit()\n\ndevice3 = Device(user_id=12345, name=\"XPS 13\", id=3,\n description=\"XPS 13 made by Dell\",\n price=\"$999.99\", year=\"2020\", brand=\"Dell\",\n electronics=electronic1)\nsession.add(device3)\nsession.commit()\n\ndevice4 = Device(user_id=12345, name=\"XPS 15\", id=4,\n description=\"XPS 15 made by Dell\",\n price=\"$1199.99\", year=\"2020\", brand=\"Dell\",\n electronics=electronic1)\nsession.add(device4)\nsession.commit()\n\ndevice5 = Device(user_id=12345, name=\"ZBook 15\", id=5,\n description=\"ZBook 15 made by HP\",\n price=\"$1299.99\", year=\"2020\", brand=\"HP\",\n electronics=electronic1)\nsession.add(device5)\nsession.commit()\n\ndevice6 = Device(user_id=12345, name=\"Pavilion x360\", id=6,\n description=\"Pavilion x360 made by HP\",\n price=\"$599.99\", year=\"2020\", brand=\"HP\",\n electronics=electronic1)\nsession.add(device6)\nsession.commit()\n\ndevice7 = Device(user_id=12345, name=\"Yoga 730\", id=7,\n description=\"Yoga 730 made by Lenovo\",\n price=\"$829.99\", year=\"2020\", brand=\"Lenovo\",\n electronics=electronic1)\nsession.add(device7)\nsession.commit()\n\ndevice8 = Device(user_id=12345, name=\"Nitro 5\", id=8,\n description=\"Nitro 5 made by Acer\",\n price=\"$749.99\", year=\"2020\", brand=\"Acer\",\n electronics=electronic1)\nsession.add(device8)\nsession.commit()\n\ndevice9 = Device(user_id=12345, name=\"Notebook 7\", id=9,\n description=\"Notebook 7 made by Samsung\",\n price=\"$799.99\", year=\"2020\", 
brand=\"Samsung\",\n electronics=electronic1)\nsession.add(device9)\nsession.commit()\n\ndevice10 = Device(user_id=12345, name=\"Surface Book 2\", id=10,\n description=\"Surface Book 2 made by Microsoft\",\n price=\"$1999.99\", year=\"2020\", brand=\"Microsoft\",\n electronics=electronic1)\nsession.add(device10)\nsession.commit()\n\n# Tablet Devices\nelectronic2 = Electronics(name=\"tablets\", id=2, user_id=12345)\nsession.add(electronic2)\nsession.commit()\n\ndevice1 = Device(user_id=12345, name=\"iPad Pro\", id=11,\n description=\"iPad Pro made by Apple\",\n price=\"$799.99\", year=\"2020\", brand=\"Apple\",\n electronics=electronic2)\nsession.add(device1)\nsession.commit()\n\ndevice2 = Device(user_id=12345, name=\"iPad Air\", id=12,\n description=\"iPad Air made by Apple\",\n price=\"$499.99\", year=\"2020\", brand=\"Apple\",\n electronics=electronic2)\nsession.add(device2)\nsession.commit()\n\ndevice3 = Device(user_id=12345, name=\"iPad\", id=13,\n description=\"iPad made by Apple\",\n price=\"$329.99\", year=\"2020\", brand=\"Apple\",\n electronics=electronic2)\nsession.add(device3)\nsession.commit()\n\ndevice4 = Device(user_id=12345, name=\"iPad mini\", id=14,\n description=\"iPad mini made by Apple\",\n price=\"$399.99\", year=\"2020\", brand=\"Apple\",\n electronics=electronic2)\nsession.add(device4)\nsession.commit()\n\ndevice5 = Device(user_id=12345, name=\"Surface Pro 7\", id=15,\n description=\"Surface Pro 7 made by Microsoft\",\n price=\"$699.99\", year=\"2020\", brand=\"Microsoft\",\n electronics=electronic2)\nsession.add(device5)\nsession.commit()\n\ndevice6 = Device(user_id=12345, name=\"Surface Go\", id=16,\n description=\"Surface Go made by Microsoft\",\n price=\"$549.99\", year=\"2020\", brand=\"Microsoft\",\n electronics=electronic2)\nsession.add(device6)\nsession.commit()\n\ndevice7 = Device(user_id=12345, name=\"Surface Pro X\", id=17,\n description=\"Surface Pro X made by Microsoft\",\n price=\"$1599.99\", year=\"2020\", brand=\"Microsoft\",\n electronics=electronic2)\nsession.add(device7)\nsession.commit()\n\ndevice8 = Device(user_id=12345, name=\"Galaxy Tab A\", id=18,\n description=\"Galaxy Tab A made by Samsung\",\n price=\"$289.99\", year=\"2020\", brand=\"Samsung\",\n electronics=electronic2)\nsession.add(device8)\nsession.commit()\n\ndevice9 = Device(user_id=12345, name=\"Galaxy Tab S6\", id=19,\n description=\"Galaxy Tab S6 made by Samsung\",\n price=\"$549.99\", year=\"2020\", brand=\"Samsung\",\n electronics=electronic2)\nsession.add(device9)\nsession.commit()\n\ndevice10 = Device(user_id=12345, name=\"Kindle\", id=20,\n description=\"Kindle made by Amazon\",\n price=\"$129.99\", year=\"2020\", brand=\"Amazon\",\n electronics=electronic2)\nsession.add(device10)\nsession.commit()\n\n# Phone Devices\nelectronic3 = Electronics(name=\"phones\", id=3, user_id=12345)\nsession.add(electronic3)\nsession.commit()\n\ndevice1 = Device(user_id=12345, name=\"iPhone 11\", id=21,\n description=\"iPhone 11 made by Apple\",\n price=\"$699.99\", year=\"2020\", brand=\"Apple\",\n electronics=electronic3)\nsession.add(device1)\nsession.commit()\n\ndevice2 = Device(user_id=12345, name=\"iPhone 11 Pro\", id=22,\n description=\"iPhone 11 Pro made by Apple\",\n price=\"$999.99\", year=\"2020\", brand=\"Apple\",\n electronics=electronic3)\nsession.add(device2)\nsession.commit()\n\ndevice3 = Device(user_id=12345, name=\"iPhone 11 Pro Max\", id=23,\n description=\"iPhone 11 Pro Max made by Apple\",\n price=\"$1099.99\", year=\"2020\", brand=\"Apple\",\n 
electronics=electronic3)\nsession.add(device3)\nsession.commit()\n\n
device4 = Device(user_id=12345, name=\"Pixel 4\", id=24,\n                 description=\"Pixel 4 made by Google\",\n                 price=\"$699.99\", year=\"2020\", brand=\"Google\",\n                 electronics=electronic3)\nsession.add(device4)\nsession.commit()\n\n
device5 = Device(user_id=12345, name=\"Pixel 4 XL\", id=25,\n                 description=\"Pixel 4 XL made by Google\",\n                 price=\"$749.99\", year=\"2020\", brand=\"Google\",\n                 electronics=electronic3)\nsession.add(device5)\nsession.commit()\n\n
device6 = Device(user_id=12345, name=\"Galaxy Note10\", id=26,\n                 description=\"Galaxy Note10 made by Samsung\",\n                 price=\"$699.99\", year=\"2020\", brand=\"Samsung\",\n                 electronics=electronic3)\nsession.add(device6)\nsession.commit()\n\n
device7 = Device(user_id=12345, name=\"Galaxy S10\", id=27,\n                 description=\"Galaxy S10 made by Samsung\",\n                 price=\"$599.99\", year=\"2020\", brand=\"Samsung\",\n                 electronics=electronic3)\nsession.add(device7)\nsession.commit()\n\n
# Video Game Console Devices\nelectronic4 = Electronics(name=\"video-game-consoles\", id=4, user_id=12345)\nsession.add(electronic4)\nsession.commit()\n\n
device1 = Device(user_id=12345, name=\"Xbox One S\", id=28,\n                 description=\"Xbox One S made by Microsoft\",\n                 price=\"$199.99\", year=\"2020\", brand=\"Microsoft\",\n                 electronics=electronic4)\nsession.add(device1)\nsession.commit()\n\n
device2 = Device(user_id=12345, name=\"Xbox One X\", id=29,\n                 description=\"Xbox One X made by Microsoft\",\n                 price=\"$299.99\", year=\"2020\", brand=\"Microsoft\",\n                 electronics=electronic4)\nsession.add(device2)\nsession.commit()\n\n
device3 = Device(user_id=12345, name=\"PS4\", id=30,\n                 description=\"PS4 made by Sony\",\n                 price=\"$299.99\", year=\"2020\", brand=\"Sony\",\n                 electronics=electronic4)\nsession.add(device3)\nsession.commit()\n\n
device4 = Device(user_id=12345, name=\"PS4 Pro\", id=31,\n                 description=\"PS4 Pro made by Sony\",\n                 price=\"$399.99\", year=\"2020\", brand=\"Sony\",\n                 electronics=electronic4)\nsession.add(device4)\nsession.commit()\n\n
device5 = Device(user_id=12345, name=\"Switch\", id=32,\n                 description=\"Switch made by Nintendo\",\n                 price=\"$299.99\", year=\"2020\", brand=\"Nintendo\",\n                 electronics=electronic4)\nsession.add(device5)\nsession.commit()\n\n
print(\"Added electronic devices!\")\n","sub_path":"catalog/lotsoftech.py","file_name":"lotsoftech.py","file_ext":"py","file_size_in_byte":9143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"189482931","text":"#!/usr/bin/env python\n# Usage: \n\n
#make csv with labeled rows and columns\nimport sys\nimport pandas as pd\n\n
# stringtie stuff\nbase = sys.argv[1]\ndf = pd.read_csv(base + \"/fastq/samples.csv\")\n\n
d ={}\n\n
#something we haven't done \nfor _, sample, sex, stage in df.itertuples():\n    #make a new dataframe with all my fpkm, \n    # i want meaningful column and row names -- \n    d[sex + \"_\" + stage] = pd.read_table(base + \"/results/stringtie/\" + sample + \"/t_data.ctab\",\n                                  index_col = \"t_name\")[\"FPKM\"]\n    \ndf = pd.DataFrame( d )\ndf.to_csv(sys.stdout)\n\n","sub_path":"day5/00-make_csv.py","file_name":"00-make_csv.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"347773398","text":"# Quiz 3\n# \n# \n# Instructions: Given a time interval in seconds, compute the seconds that \n# remain until the interval becomes an exact number of minutes. This program must work for 5 attempts.\n\n \ndef get_segundos(S): \n    segundos = S % 60\n    # Final modulo so an input that is already a whole minute yields 0 remaining seconds, not 60\n    return (60 - segundos) % 60\n \nif __name__ == '__main__':\n    for x in range(5):\n        segundosT = int(input(\"Introduzca la cantidad de segundos: \"))\n        pSegundos = get_segundos(segundosT)\n        print(\"faltan {0} segundos para completar el siguiente minuto\".format(pSegundos)) \n        input()","sub_path":"laboratorios/quiz3/quiz3.py","file_name":"quiz3.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"457387043","text":"# -*- coding: utf-8 -*-\nimport settings\nimport os\n\n
from bottle import static_file, request\nfrom utils import *\nfrom bottle import template\n\n
import bottle\n\n
app = application = bottle.Bottle()\n\n\n
# Artist routes\n\n
@app.route('/getartist/<artist_id>')\ndef get_artist(artist_id):\n    data, error = query_get_artist(artist_id)\n    return template('editartist.tpl', artist=data[0], error=error)\n\n\n
@app.route('/getartists/empty')\ndef get_artists_empty():\n    return template(\n        'searchartist.tpl'\n    )\n\n\n
@app.route('/getartists')\ndef get_artists():\n    firstname = request.query.firstname\n    surname = request.query.surname\n    yearfrom = request.query.yearfrom\n    yearto = request.query.yearto\n    type = request.query.type\n    (data, error) = query_get_artists(\n        firstname, surname, yearfrom, yearto, type)\n    if error:\n        return template('searchartist.tpl', error=error)\n    return template(\n        'show_artists.tpl',\n        header=data[0],\n        data=data[1:],\n        error=None\n    )\n\n\n
@app.route('/insertartist/empty')\ndef insert_artist_empty():\n    return template(\n        'insertartist.tpl',\n        error=None,\n    )\n\n\n
@app.route('/insertartist')\ndef insert_artist():\n    artist_id = request.query.artist_id\n    name = request.query.name\n    surname = request.query.surname\n    birth_year = request.query.birth_year\n    data, error = query_insert_artist(artist_id, name, surname, birth_year)\n    return template(\n        'insertartist.tpl',\n        error=error,\n    )\n\n\n
@app.route('/editartist/<artist_id>')\ndef edit_artist(artist_id):\n    name = request.query.name\n    surname = request.query.surname\n    birth_year = request.query.birth_year\n    data, error = query_edit_artist(artist_id, name, surname, birth_year)\n    return template(\n        'editartist.tpl',\n        artist=data[0] if not error else (\n            artist_id, name, surname, birth_year),\n        error=error\n    )\n\n\n
@app.route('/getsongs/empty')\ndef get_songs_empty():\n    return template('searchsong.tpl')\n\n\n
@app.route('/getsongs')\ndef get_songs():\n    song_title = request.query.songtitle\n    production_year = request.query.productionyear\n    company = request.query.company\n    table = query_get_songs(song_title, production_year, company)\n    return template(\n        'showsongs.tpl',\n        header=table[0],\n        data=table[1:],\n        error=None\n    )\n\n\n
@app.route('/insertsong/empty')\ndef insert_song_empty():\n    cd, singer, composer, songwriter = query_song_parameters()\n    return template(\n        'insertsong.tpl',\n        cd=cd,\n        singer=singer,\n        composer=composer,\n        songwriter=songwriter,\n        error=None,\n    )\n\n\n
@app.route('/insertsong')\ndef insert_song():\n    success = False\n    title = request.query.title\n    production_year = request.query.productionyear\n    cd = request.query.cd\n    singer = request.query.singer\n    composer = request.query.composer\n    song_writer = request.query.songwriter\n    error = query_insert_song(\n        title, production_year, cd, singer, composer, song_writer\n    )\n    cd, singer, composer, songwriter = query_song_parameters()\n    if not error:\n        success = True\n    return template(\n        
'insertsong.tpl',\n        cd=cd,\n        singer=singer,\n        composer=composer,\n        songwriter=songwriter,\n        error=error,\n        success=success\n    )\n\n\n
@app.route('/:path')\ndef callback(path):\n    return static_file(path, 'views')\n\n\n
@app.route('/')\ndef index():\n    return template('home.tpl', name='aa')\n\n
if __name__ == '__main__':\n    bottle.run(\n        app=app, host='localhost', port=settings.web_port, reloader=True,\n        debug=True\n    )\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"406285563","text":"import Core.matrix as matrix\nimport Core.nn as network\nfrom random import randint as random\nnodeDistribution = [2,10,1]\nprint(\"OBJECTIVE - Learn XOR Gate Logic\")\n#TRAINING DATA (XOR truth table; 0.01/1.00 stand in for logical 0/1)\ninputs = [[0.01,0.01],\n          [0.01,1.00],\n          [1.00,0.01],\n          [1.00,1.00]\n          ]\ntargets = [[0.01],\n           [1.00],\n           [1.00],\n           [0.01]\n           ]\n#END OF TRAINING DATA\n\n
nn = network.NeuralNetwork(nodeDistribution,0.2)\naccuracy = 0\nfor epoch in range(500000):#500,000\n    if epoch % 200 ==0:\n        perc = ((accuracy/(200*4))*100) #Display: 4 random checks per epoch over the last 200 epochs\n        print('epoch: '+str(epoch)+\" accuracy \"+str(round(perc,1))+\"%\")\n        accuracy = 0\n\n    for Tcase in range(len(inputs)): #Train\n        nn.train(inputs[Tcase],targets[Tcase])\n\n    for k in range(len(inputs)): #Evaluate on randomly drawn cases\n        Tcase = random(0,(len(inputs)-1))\n        result = nn.query(inputs[Tcase])\n        # Compare the prediction against the target of the same sampled case\n        if result[0] < 0.5 and targets[Tcase][0] < 0.5:\n            accuracy+=1\n        if result[0] >= 0.5 and targets[Tcase][0] >= 0.5:\n            accuracy+=1\n\n
print(\"DONE TRAINING\")\nfor i in range(len(inputs)):\n    temp = nn.query(inputs[i])\n    print(round(temp[0],1))\n","sub_path":"Testing/XOR.py","file_name":"XOR.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"28167402","text":"import os\nimport maya.cmds as cmds\nfrom PySide import QtCore\nimport dev.python.lib.ui as ui\nreload(ui)\n\n
# This file's path\nuiCodePath = os.path.realpath(__file__)\n\n
# UI path\ndevDirectory = uiCodePath.rpartition('maya')[0]\nuiFile = os.path.join(devDirectory, 'ui', 'rigCreator.ui')\n\n
listForm, listBase = ui.loadUI(uiFile)\nmayaWindow = ui.getMayaWindow()\n\n\n
class RigModuleUI(listForm, listBase):\n\n    def __init__(self, parent=mayaWindow):\n        self.windowName = 'rigCreator'\n\n        # Using an env variable makes the path more generic\n        self.settings_path = os.path.join(os.getenv('HOME'), \"settingsFile.ini\")\n\n        if cmds.window(self.windowName, exists=True):\n            cmds.deleteUI(self.windowName)\n\n        super(RigModuleUI, self).__init__(parent)\n\n        self.setupUi(self)\n\n\n    def create(self):\n        # Restore window's previous geometry from file\n        if os.path.exists(self.settings_path):\n            settings_obj = QtCore.QSettings(self.settings_path, QtCore.QSettings.IniFormat)\n            self.restoreGeometry(settings_obj.value(\"windowGeometry\"))\n\n\n    def closeEvent(self, event):\n        # Save window's geometry\n        settings_obj = QtCore.QSettings(self.settings_path, QtCore.QSettings.IniFormat)\n        settings_obj.setValue(\"windowGeometry\", self.saveGeometry())\n\n\n
def run():\n    rigui = RigModuleUI()\n    rigui.create()\n    rigui.show()\n\n
\"\"\"\nimport sys\nimport os.path\n\n
# code path\n# ---------------\npath = os.path.join(\"D:\", os.path.sep,\n                    \"all_works\",\n                    \"MAYA_DEV\",\n                    \"EHM_tools\",\n                    \"MAYA\",\n                    \"codes\")\n\n
if path not in sys.path:\n    sys.path.append(path)\n\n
from python import *\nimport python.rig.ui.rigModule as 
rigModule\nreload(rigModule)\nrigModule.run()\n\n\"\"\"","sub_path":"dev/maya/python/rig/ui/delete/rigModule.py","file_name":"rigModule.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"460485194","text":"import os\nimport time\nimport argparse\nimport numpy as np\nimport tensorflow as tf\nimport uabRepoPaths\nimport uabUtilreader\nimport util_functions\nimport uabCrossValMaker\nimport bohaoCustom.uabPreprocClasses as bPreproc\nimport uabPreprocClasses\nimport uab_collectionFunctions\nimport uab_DataHandlerFunctions\nfrom bohaoCustom import uabDataReader\nfrom bohaoCustom import uabMakeNetwork\nfrom bohaoCustom import uabMakeNetwork_UNet\n\nRUN_ID = 0\nBATCH_SIZE = 5\nLEARNING_RATE = 1e-5\nINPUT_SIZE = 572\nTILE_SIZE = 5000\nEPOCHS = 40\nNUM_CLASS = 2\nN_TRAIN = 8000\nN_VALID = 1000\nGPU = 1\nDECAY_STEP = 20\nDECAY_RATE = 0.1\nMODEL_NAME = 'inria_loo_mtl_retrain_finetune_{}_{}'\nSFN = 32\nLEAVE_CITY = 0\nPRE_TRAINED_DIR = r'/hdd6/Models/Inria_Domain_LOO/UnetCrop_inria_aug_leave_0_0_PS(572, 572)_BS5_' \\\n r'EP100_LR0.0001_DS60_DR0.1_SFN32'\n\n\nclass UnetPredictRetrain(uabMakeNetwork_UNet.UnetModelPredict):\n def __init__(self, inputs, trainable, input_size, model_name='', dropout_rate=None,\n learn_rate=1e-4, decay_step=60, decay_rate=0.1, epochs=100,\n batch_size=5, start_filter_num=32):\n uabMakeNetwork.Network.__init__(self, inputs, trainable, dropout_rate,\n learn_rate, decay_step, decay_rate, epochs, batch_size)\n self.name = 'UnetPredictRetrain'\n self.model_name = self.get_unique_name(model_name)\n self.sfn = start_filter_num\n self.learning_rate = None\n self.valid_cross_entropy = tf.placeholder(tf.float32, [])\n self.valid_iou = tf.placeholder(tf.float32, [])\n self.valid_images = tf.placeholder(tf.uint8, shape=[None, input_size[0],\n input_size[1] * 3, 3], name='validation_images')\n self.update_ops = None\n self.config = None\n\n def create_graph(self, x_name, class_num, start_filter_num=32):\n self.class_num = class_num\n sfn = self.sfn\n\n # downsample\n conv1, pool1 = self.conv_conv_pool(self.inputs[x_name], [sfn, sfn], self.trainable, name='conv1',\n padding='valid', dropout=self.dropout_rate)\n conv2, pool2 = self.conv_conv_pool(pool1, [sfn*2, sfn*2], self.trainable, name='conv2',\n padding='valid', dropout=self.dropout_rate)\n conv3, pool3 = self.conv_conv_pool(pool2, [sfn*4, sfn*4], self.trainable, name='conv3',\n padding='valid', dropout=self.dropout_rate)\n conv4, pool4 = self.conv_conv_pool(pool3, [sfn*8, sfn*8], self.trainable, name='conv4',\n padding='valid', dropout=self.dropout_rate)\n self.encoding = self.conv_conv_pool(pool4, [sfn*16, sfn*16], self.trainable, name='conv5', pool=False,\n padding='valid', dropout=self.dropout_rate)\n\n # upsample\n up6 = self.crop_upsample_concat(self.encoding, conv4, 8, name='6')\n conv6 = self.conv_conv_pool(up6, [sfn*8, sfn*8], self.trainable, name='up6', pool=False,\n padding='valid', dropout=self.dropout_rate)\n up7 = self.crop_upsample_concat(conv6, conv3, 32, name='7')\n conv7 = self.conv_conv_pool(up7, [sfn*4, sfn*4], self.trainable, name='up7', pool=False,\n padding='valid', dropout=self.dropout_rate)\n up8 = self.crop_upsample_concat(conv7, conv2, 80, name='8')\n conv8 = self.conv_conv_pool(up8, [sfn*2, sfn*2], self.trainable, name='up8', pool=False,\n padding='valid', dropout=self.dropout_rate)\n up9 = self.crop_upsample_concat(conv8, conv1, 176, name='9')\n conv9 = self.conv_conv_pool(up9, [sfn, sfn], self.trainable, 
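# decoder tail: back down to sfn channels before the final 1x1 classification layer\n                                    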
name='up9', pool=False,\n padding='valid', dropout=self.dropout_rate)\n\n self.pred = tf.layers.conv2d(conv9, class_num, (1, 1), name='final', activation=None, padding='same')\n self.output = tf.nn.softmax(self.pred)\n\n def make_optimizer(self, train_var_filter):\n with tf.control_dependencies(self.update_ops):\n if train_var_filter is None:\n hard_optm = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss,\n global_step=self.global_step)\n soft_optm = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss,\n global_step=None)\n self.optimizer = [hard_optm, soft_optm]\n\n def make_loss(self, y_name, loss_type='xent', **kwargs):\n with tf.variable_scope('loss'):\n pred_flat = tf.reshape(self.pred, [-1, self.class_num])\n _, w, h, _ = self.inputs[y_name].get_shape().as_list()\n y = tf.image.resize_image_with_crop_or_pad(self.inputs[y_name], w-self.get_overlap(), h-self.get_overlap())\n y_flat = tf.reshape(tf.squeeze(y, axis=[3]), [-1, ])\n indices = tf.squeeze(tf.where(tf.less_equal(y_flat, self.class_num - 1)), 1)\n gt = tf.gather(y_flat, indices)\n prediction = tf.gather(pred_flat, indices)\n\n pred = tf.argmax(prediction, axis=-1, output_type=tf.int32)\n intersect = tf.cast(tf.reduce_sum(gt * pred), tf.float32)\n union = tf.cast(tf.reduce_sum(gt), tf.float32) + tf.cast(tf.reduce_sum(pred), tf.float32) \\\n - tf.cast(tf.reduce_sum(gt * pred), tf.float32)\n self.loss_iou = tf.convert_to_tensor([intersect, union])\n self.loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=prediction, labels=gt))\n\n def make_update_ops(self, x_name, y_name):\n tf.add_to_collection('inputs', self.inputs[x_name])\n tf.add_to_collection('inputs', self.inputs[y_name])\n tf.add_to_collection('outputs', self.pred)\n self.update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n\n def make_summary(self, hist=False):\n if hist:\n tf.summary.histogram('Predicted Prob', tf.argmax(tf.nn.softmax(self.pred), 1))\n tf.summary.scalar('Cross Entropy', self.loss)\n tf.summary.scalar('learning rate', self.learning_rate)\n self.summary = tf.summary.merge_all()\n\n def train_config(self, x_name, y_name, n_train, n_valid, patch_size, ckdir, loss_type='xent',\n train_var_filter=None, hist=False, par_dir=None, **kwargs):\n self.make_loss(y_name, loss_type, **kwargs)\n self.make_learning_rate(n_train)\n self.make_update_ops(x_name, y_name)\n self.make_optimizer(train_var_filter)\n self.make_ckdir(ckdir, patch_size, par_dir)\n self.make_summary(hist)\n self.config = tf.ConfigProto(allow_soft_placement=True)\n self.n_train = n_train\n self.n_valid = n_valid\n\n def train(self, x_name, y_name, y_name2, n_train, sess, summary_writer, n_valid=1000,\n train_reader=None, train_reader_building=None, valid_reader=None,\n image_summary=None, verb_step=100, save_epoch=5,\n img_mean=np.array((0, 0, 0), dtype=np.float32),\n continue_dir=None, valid_iou=False):\n # define summary operations\n valid_cross_entropy_summary_op = tf.summary.scalar('xent_validation', self.valid_cross_entropy)\n valid_iou_summary_op = tf.summary.scalar('iou_validation', self.valid_iou)\n valid_image_summary_op = tf.summary.image('Validation_images_summary', self.valid_images,\n max_outputs=10)\n\n if continue_dir is not None and os.path.exists(continue_dir):\n self.load(continue_dir, sess)\n gs = sess.run(self.global_step)\n start_epoch = int(np.ceil(gs/n_train*self.bs))\n start_step = gs - int(start_epoch*n_train/self.bs)\n else:\n start_epoch = 0\n start_step = 0\n\n cross_entropy_valid_min = np.inf\n iou_valid_max = 0\n for 
epoch in range(start_epoch, self.epochs):\n start_time = time.time()\n for step in range(start_step, n_train, self.bs):\n X_batch, y_batch = train_reader.readerAction(sess)\n _, self.global_step_value = sess.run([self.optimizer[0], self.global_step],\n feed_dict={self.inputs[x_name]:X_batch,\n self.inputs[y_name]:y_batch,\n self.trainable: True})\n X_batch_retrain, y_batch_retrain = train_reader_building.readerAction(sess)\n _, self.global_step_value = sess.run([self.optimizer[1], self.global_step],\n feed_dict={self.inputs[x_name]: X_batch_retrain,\n self.inputs[y_name]: y_batch_retrain,\n self.trainable: True})\n if self.global_step_value % verb_step == 0:\n step_cross_entropy, step_summary = \\\n sess.run([self.loss, self.summary],\n feed_dict={self.inputs[x_name]: X_batch, self.inputs[y_name]: y_batch,\n self.trainable: False})\n summary_writer.add_summary(step_summary, self.global_step_value)\n print('Epoch {:d} step {:d}\\tcross entropy = {:.3f}'.\n format(epoch, self.global_step_value, step_cross_entropy))\n # validation\n cross_entropy_valid_mean = []\n iou_valid_mean = np.zeros(2)\n X_batch_val, y_batch_val, pred_valid = None, None, None\n for step in range(0, n_valid, self.bs):\n X_batch_val, y_batch_val = valid_reader.readerAction(sess)\n pred_valid, cross_entropy_valid, iou_valid = sess.run([self.pred, self.loss, self.loss_iou],\n feed_dict={self.inputs[x_name]: X_batch_val,\n self.inputs[y_name]: y_batch_val,\n self.trainable: False})\n cross_entropy_valid_mean.append(cross_entropy_valid)\n iou_valid_mean += iou_valid\n cross_entropy_valid_mean = np.mean(cross_entropy_valid_mean)\n iou_valid_mean = iou_valid_mean[0] / iou_valid_mean[1]\n duration = time.time() - start_time\n if valid_iou:\n print('Validation IoU: {:.3f}, duration: {:.3f}'.format(iou_valid_mean, duration))\n else:\n print('Validation cross entropy: {:.3f}, duration: {:.3f}'.format(cross_entropy_valid_mean,\n duration))\n valid_cross_entropy_summary = sess.run(valid_cross_entropy_summary_op,\n feed_dict={self.valid_cross_entropy: cross_entropy_valid_mean})\n valid_iou_summary = sess.run(valid_iou_summary_op,\n feed_dict={self.valid_iou: iou_valid_mean})\n summary_writer.add_summary(valid_cross_entropy_summary, self.global_step_value)\n summary_writer.add_summary(valid_iou_summary, self.global_step_value)\n if valid_iou:\n if iou_valid_mean > iou_valid_max:\n iou_valid_max = iou_valid_mean\n saver = tf.train.Saver(var_list=tf.global_variables(), max_to_keep=1)\n saver.save(sess, '{}/best_model.ckpt'.format(self.ckdir))\n\n else:\n if cross_entropy_valid_mean < cross_entropy_valid_min:\n cross_entropy_valid_min = cross_entropy_valid_mean\n saver = tf.train.Saver(var_list=tf.global_variables(), max_to_keep=1)\n saver.save(sess, '{}/best_model.ckpt'.format(self.ckdir))\n\n if image_summary is not None:\n valid_image_summary = sess.run(valid_image_summary_op,\n feed_dict={self.valid_images:\n image_summary(X_batch_val[:,:,:,:3], y_batch_val, pred_valid,\n img_mean)})\n summary_writer.add_summary(valid_image_summary, self.global_step_value)\n\n if epoch % save_epoch == 0:\n saver = tf.train.Saver(var_list=tf.global_variables(), max_to_keep=1)\n saver.save(sess, '{}/model_{}.ckpt'.format(self.ckdir, epoch), global_step=self.global_step)\n\n\ndef read_flag():\n parser = argparse.ArgumentParser()\n parser.add_argument('--batch-size', default=BATCH_SIZE, type=int, help='batch size (10)')\n parser.add_argument('--learning-rate', type=float, default=LEARNING_RATE, help='learning rate (1e-3)')\n 
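# NOTE: these CLI defaults mirror the module-level constants defined at the top of this script\n    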
parser.add_argument('--input-size', default=INPUT_SIZE, type=int, help='input size 224')\n parser.add_argument('--tile-size', default=TILE_SIZE, type=int, help='tile size 5000')\n parser.add_argument('--epochs', default=EPOCHS, type=int, help='# epochs (1)')\n parser.add_argument('--num-classes', type=int, default=NUM_CLASS, help='# classes (including background)')\n parser.add_argument('--n-train', type=int, default=N_TRAIN, help='# samples per epoch')\n parser.add_argument('--n-valid', type=int, default=N_VALID, help='# patches to valid')\n parser.add_argument('--GPU', type=str, default=GPU, help=\"GPU used for computation.\")\n parser.add_argument('--decay-step', type=float, default=DECAY_STEP, help='Learning rate decay step in number of epochs.')\n parser.add_argument('--decay-rate', type=float, default=DECAY_RATE, help='Learning rate decay rate')\n parser.add_argument('--model-name', type=str, default=MODEL_NAME, help='Model name')\n parser.add_argument('--run-id', type=str, default=RUN_ID, help='id of this run')\n parser.add_argument('--sfn', type=int, default=SFN, help='filter number of the first layer')\n parser.add_argument('--leave-city', type=int, default=LEAVE_CITY, help='city id to leave-out in training')\n\n flags = parser.parse_args()\n flags.input_size = (flags.input_size, flags.input_size)\n flags.tile_size = (flags.tile_size, flags.tile_size)\n flags.model_name = flags.model_name.format(flags.leave_city, flags.run_id)\n return flags\n\n\ndef main(flags):\n # make network\n # define place holder\n X = tf.placeholder(tf.float32, shape=[None, flags.input_size[0], flags.input_size[1], 3], name='X')\n y = tf.placeholder(tf.int32, shape=[None, flags.input_size[0], flags.input_size[1], 1], name='y')\n y2 = tf.placeholder(tf.float32, shape=[None, 1], name='y2')\n mode = tf.placeholder(tf.bool, name='mode')\n model = UnetPredictRetrain({'X':X, 'Y':y},\n trainable=mode,\n model_name=flags.model_name,\n input_size=flags.input_size,\n batch_size=flags.batch_size,\n learn_rate=flags.learning_rate,\n decay_step=flags.decay_step,\n decay_rate=flags.decay_rate,\n epochs=flags.epochs,\n start_filter_num=flags.sfn)\n model.create_graph('X', class_num=flags.num_classes)\n\n # create collection\n # the original file is in /ei-edl01/data/uab_datasets/inria\n blCol = uab_collectionFunctions.uabCollection('inria')\n opDetObj = bPreproc.uabOperTileDivide(255) # inria GT has value 0 and 255, we map it back to 0 and 1\n # [3] is the channel id of GT\n rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif', 'Map GT to (0, 1)', [3], opDetObj)\n rescObj.run(blCol)\n img_mean = blCol.getChannelMeans([0, 1, 2]) # get mean of rgb info\n\n # extract patches\n extrObj = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 4],\n cSize=flags.input_size,\n numPixOverlap=int(model.get_overlap()),\n extSave=['jpg', 'jpg', 'jpg', 'png'],\n isTrain=True,\n gtInd=3,\n pad=model.get_overlap()/2)\n patchDir = extrObj.run(blCol)\n\n # make data reader\n # use uabCrossValMaker to get fileLists for training and validation\n idx_city, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt', 'city')\n idx_tile, _ = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt', 'force_tile')\n idx = [j * 10 + i for i, j in zip(idx_city, idx_tile)]\n # use first city for validation\n filter_train = []\n filter_valid = []\n for i in range(5):\n for j in range(1, 37):\n if i != flags.leave_city and j > 5:\n filter_train.append(j * 10 + i)\n elif i == flags.leave_city and j <= 5:\n filter_valid.append(j * 
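# composite key = tile_id * 10 + city_id, matching the idx list built above\n                    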
10 + i)\n # use first city for validation\n file_list_train = uabCrossValMaker.make_file_list_by_key(idx, file_list, filter_train)\n file_list_valid = uabCrossValMaker.make_file_list_by_key(idx, file_list, filter_valid)\n\n dataReader_train = uabDataReader.ImageLabelReader([3], [0, 1, 2], patchDir, file_list_train, flags.input_size,\n flags.batch_size, dataAug='flip,rotate',\n block_mean=np.append([0], img_mean), batch_code=0)\n dataReader_valid = uabDataReader.ImageLabelReader([3], [0, 1, 2], patchDir, file_list_valid, flags.input_size,\n flags.batch_size, dataAug='flip,rotate',\n block_mean=np.append([0], img_mean), batch_code=0)\n\n blCol = uab_collectionFunctions.uabCollection('inria_unet_retrain')\n img_mean = blCol.getChannelMeans([1, 2, 3]) # get mean of rgb info\n\n # extract patches\n extrObj = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 3],\n cSize=flags.input_size,\n numPixOverlap=int(model.get_overlap()),\n extSave=['png', 'jpg', 'jpg', 'jpg'],\n isTrain=True,\n gtInd=3,\n pad=model.get_overlap() / 2)\n patchDir = extrObj.run(blCol)\n\n # make data reader\n # use uabCrossValMaker to get fileLists for training and validation\n idx_city, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt', 'city')\n idx_tile, _ = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt', 'force_tile')\n idx = [j * 10 + i for i, j in zip(idx_city, idx_tile)]\n # use first city for validation\n filter_train = []\n filter_valid = []\n for i in range(5):\n for j in range(1, 37):\n if i != flags.leave_city and j > 5:\n filter_train.append(j * 10 + i)\n elif i == flags.leave_city and j > 5:\n filter_valid.append(j * 10 + i)\n # use first city for validation\n file_list_retrain = uabCrossValMaker.make_file_list_by_key(idx, file_list, filter_valid)\n # no augmentation needed for validation\n dataReader_retrain = uabDataReader.ImageLabelReader([0], [1, 2, 3], patchDir, file_list_retrain, flags.input_size,\n flags.batch_size, dataAug='flip,rotate',\n block_mean=np.append([0], img_mean), batch_code=0)\n\n # train\n start_time = time.time()\n\n model.train_config('X', 'Y', flags.n_train, flags.n_valid, flags.input_size, uabRepoPaths.modelPath,\n loss_type='xent', par_dir='Inria_Domain_LOO')\n model.run(train_reader=dataReader_train,\n train_reader_building=dataReader_retrain,\n valid_reader=dataReader_valid,\n pretrained_model_dir=PRE_TRAINED_DIR,\n isTrain=True,\n img_mean=img_mean,\n verb_step=100, # print a message every 100 step(sample)\n save_epoch=5, # save the model every 5 epochs\n gpu=GPU,\n tile_size=flags.tile_size,\n patch_size=flags.input_size)\n\n duration = time.time() - start_time\n print('duration {:.2f} hours'.format(duration/60/60))\n\n\nif __name__ == '__main__':\n flags = read_flag()\n main(flags)\n","sub_path":"]tasks/2018.06.28.mtl_semi_unsupervised/train_inria_unet_retrain.py","file_name":"train_inria_unet_retrain.py","file_ext":"py","file_size_in_byte":21224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"623832970","text":"#Python 3.6.5\n\n#Importação da biblioteca sys e inicialização da variável de soma\nimport sys\nsum = 0\n\n#Laço indo de 1 até o maior inteiro do sistema\nfor i in range(1,sys.maxsize):\n #Calculo da função do somatório\n sum += (1/i)\n #Truncamento da soma para 3 dígitos\n truncated = float('%.3f'%(sum))\n #Calculo do erro absoluto e erro relativo\n absolute_error = abs(sum - truncated)\n relative_error = absolute_error / sum\n #Se o erro absoluto for diferente de zero ocorreu perda de 
informação no truncamento\n if absolute_error != 0:\n print (\"Soma exata nao bate em i = \",i)\n print (\"Erro absoluto = \", absolute_error)\n print (\"Erro relativo = \", relative_error)\n #Quebra do laço\n break\n","sub_path":"CN/TC2/TC2 - parte 1/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"524383495","text":"### Returns 1 whole image and annotations instead of indididual objects ###\n\n\nfrom pycocotools import coco\nimport numpy as np\nimport cv2\nimport math\nimport tensorflow as tf\n\nclass dataset:\n\n def __init__(self):\n \n self.dataType = 'val2017'\n \n # initialize COCO api for instance annotations\n annFile='./COCO/annotations/instances_{}.json'.format(self.dataType)\n self.imageDir = './COCO/images/'\n\n self.coco_handle=coco.COCO(annFile)\n\n # human-readable COCO categories\n cats = self.coco_handle.loadCats(self.coco_handle.getCatIds())\n nms=[cat['name'] for cat in cats]\n\n # get all images containing given categories (nms)\n self.catIds = self.coco_handle.getCatIds(catNms=nms)\n self.imgIds = self.coco_handle.getImgIds()\n self.totalImages = len(self.imgIds)\n self.numImages = 0 #number of processed images\n \n print(len(self.imgIds), \"total images in\", self.dataType, \"set.\")\n\n def nextImage(self): #return next image\n \n if self.numImages >= self.totalImages:\n print(\"No more images!\")\n return None, None, None\n \n #Retrieve image location\n img = self.coco_handle.loadImgs(self.imgIds[self.numImages])[0] #image descriptor\n image_location = self.imageDir+self.dataType+'/'+img['file_name']\n #Retrieve annotations\n annIds = self.coco_handle.getAnnIds(imgIds=self.imgIds[self.numImages],\n catIds=self.catIds,\n iscrowd=None)\n anns = self.coco_handle.loadAnns(annIds) #annotation data\n image, labels, boxes = parseAnnotation(image_location, anns)\n \n image = image.astype(np.float32)\n image = np.divide(image, 255.0)\n \n self.numImages = self.numImages + 1\n print(\"{} images left...\".format(self.totalImages - self.numImages))\n \n return image, labels, boxes, img['file_name']\n\ndef parseAnnotation(file, annotations):\n\n image = cv2.imread(file) #actual image\n #cv2.imshow(\"image\", image)\n #cv2.waitKey(1)\n if image is None: exit(\"No image!\")\n \n boxes = list()\n labels = list()\n for ann in annotations: #get bounding boxes\n boxes.append(ann['bbox'])\n labels.append(labeled(ann['category_id']))\n \n return image, labels, boxes\n \n\ndef labeled(id): #normalize labels to fit within 80\n if id == 81: return 12\n elif id == 82: return 26\n elif id == 84: return 30\n elif id == 85: return 45\n elif id == 86: return 66\n elif id == 87: return 68\n elif id == 88: return 69\n elif id == 89: return 71\n elif id == 90: return 29\n else: return id\n","sub_path":"COCO/COCOlite.py","file_name":"COCOlite.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"509096064","text":"# snippet-comment:[These are tags for the AWS doc team's sample catalog. 
Do not remove.]\n# snippet-sourcedescription:[put_bucket_acl.py demonstrates how to set the access control list for an Amazon S3 bucket.]\n# snippet-service:[s3]\n# snippet-keyword:[Amazon S3]\n# snippet-keyword:[Python]\n# snippet-sourcesyntax:[python]\n# snippet-sourcetype:[full-example]\n# snippet-sourcedate:[2019-03-07]\n# snippet-sourceauthor:[AWS]\n\n
# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# This file is licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License. A copy of the\n# License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# This file is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS\n# OF ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n\n
import copy\nimport logging\nimport boto3\nfrom botocore.exceptions import ClientError\n\n\n
def get_bucket_acl(bucket_name):\n    \"\"\"Retrieve the access control list of an Amazon S3 bucket\n\n    :param bucket_name: string\n    :return: Dictionary defining the bucket's access control list consisting\n    of owner and grants. If error, return None.\n    \"\"\"\n\n    # Retrieve the bucket ACL\n    s3 = boto3.client('s3')\n    try:\n        response = s3.get_bucket_acl(Bucket=bucket_name)\n    except ClientError as e:\n        # AllAccessDisabled error == bucket not found\n        logging.error(e)\n        return None\n\n    # Return both the Owner and Grants keys\n    # The Owner and Grants settings together form the Access Control Policy.\n    # The Grants alone form the Access Control List.\n    return {'Owner': response['Owner'], 'Grants': response['Grants']}\n\n\n
def put_bucket_acl(bucket_name, acl):\n    \"\"\"Set the access control list of an Amazon S3 bucket\n\n    :param bucket_name: string\n    :param acl: Dictionary defining the ACL consisting of grants and permissions\n    :return: True if ACL was set, otherwise False\n    \"\"\"\n\n    # Set the ACL\n    s3 = boto3.client('s3')\n    try:\n        s3.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=acl)\n    except ClientError as e:\n        # AccessDenied error == bucket prohibits public access\n        # AllAccessDisabled error == bucket not found\n        # AmbiguousGrantByEmailAddress == email address is associated with\n        #   multiple AWS accounts\n        logging.error(e)\n        return False\n    return True\n\n\n
def main():\n    \"\"\"Exercise put_bucket_acl()\"\"\"\n\n    # Assign these values before running the program\n    test_bucket_name = 'BUCKET_NAME'\n    new_grantee_canonical_user_id = 'AWS_USER_ID'\n    # new_grantee_email = 'EMAIL_ADDRESS'  # Set AWS User ID or email, but not both\n    new_grantee_permission = 'READ'  # Or 'FULL_CONTROL', etc.\n\n    # Set up logging\n    logging.basicConfig(level=logging.DEBUG,\n                        format='%(levelname)s: %(asctime)s: %(message)s')\n\n    # Get the bucket's current ACL\n    acl = get_bucket_acl(test_bucket_name)\n    if acl is None:\n        exit(-1)\n\n    # Add a new grant to the current ACL\n    new_grant = {\n        'Grantee': {\n            'ID': new_grantee_canonical_user_id,\n            'Type': 'CanonicalUser',\n            #'EmailAddress': new_grantee_email,  # Set ID or Email\n            #'Type': 'AmazonCustomerByEmail',\n        },\n        'Permission': new_grantee_permission,\n    }\n    # If we don't want to modify the original ACL variable, then we\n    # must do a deepcopy\n    modified_acl = copy.deepcopy(acl)\n    modified_acl['Grants'].append(new_grant)\n\n    # Put the updated bucket ACL\n    if put_bucket_acl(test_bucket_name, modified_acl):\n        logging.info(f'The ACL was set for 
{test_bucket_name}')\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"python/example_code/s3/put_bucket_acl.py","file_name":"put_bucket_acl.py","file_ext":"py","file_size_in_byte":3857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"447754754","text":"import turtle\r\n#Bruno Omar Jiménez Mancilla\r\n#Program that takes the base and height of two rectangles, computes their areas and perimeters, and draws them using the turtle module\r\n\r\n
def main():\r\n    b1 = float(input(\"Ingresa la base de el primer rectangulo: \"))\r\n    a1 = float(input(\"Ingresa la altura de el primer rectangulo: \"))\r\n    b2 = float(input(\"Ingresa la base de el segundo rectangulo: \"))\r\n    a2 = float(input(\"Ingresa la altura de el segundo rectangulo: \"))\r\n    area1 = CalculaArea1(b1,a1)\r\n    perimetro1 = CalculaPerimetro1(b1,a1)\r\n    area2 = CalculaArea2(b2,a2)\r\n    perimetro2 = CalculaPerimetro2(b2,a2)\r\n    mayor = CompararAreas(area1,area2,b1,a1,b2,a2)\r\n    print(\"El perimetro del primer rectagnulo es: \",perimetro1)\r\n    print(\"El area del primer rectagnulo es: \",area1)\r\n    print(\"El perimetro del segundo rectagnulo es: \", perimetro2)\r\n    print(\"El area del segundo rectagnulo es: \", area2)\r\n    print(mayor)\r\n\r\n    turtle.exitonclick()\r\n\r\n
def DibujarRectangulos1M(b1,a1,b2,a2):\r\n    turtle.color(\"red\")\r\n    turtle.forward(b1)\r\n    turtle.left(90)\r\n    turtle.forward(a1)\r\n    turtle.left(90)\r\n    turtle.forward(b1)\r\n    turtle.left(90)\r\n    turtle.forward(a1)\r\n    turtle.color(\"green\")\r\n    turtle.forward(b2)\r\n    turtle.left(90)\r\n    turtle.forward(a2)\r\n    turtle.left(90)\r\n    turtle.forward(b2)\r\n    turtle.left(90)\r\n    turtle.forward(a2)\r\n\r\n
def DibujarRectangulos2M(b1,a1,b2,a2):\r\n    turtle.color(\"green\")\r\n    turtle.forward(b1)\r\n    turtle.left(90)\r\n    turtle.forward(a1)\r\n    turtle.left(90)\r\n    turtle.forward(b1)\r\n    turtle.left(90)\r\n    turtle.forward(a1)\r\n    turtle.color(\"red\")\r\n    turtle.forward(b2)\r\n    turtle.left(90)\r\n    turtle.forward(a2)\r\n    turtle.left(90)\r\n    turtle.forward(b2)\r\n    turtle.left(90)\r\n    turtle.forward(a2)\r\n\r\n
def DibujarRectangulosIguales(b1,a1):\r\n    turtle.color(\"green\")\r\n    turtle.forward(b1)\r\n    turtle.left(90)\r\n    turtle.forward(a1)\r\n    turtle.left(90)\r\n    turtle.forward(b1)\r\n    turtle.left(90)\r\n    turtle.forward(a1)\r\n\r\n\r\n
def CompararAreas(area1,area2,b1,a1,b2,a2):\r\n    if area1 > area2:\r\n        DibujarRectangulos1M(b1,a1,b2,a2)\r\n        return \"El area del primer rectangulo es mayor\"\r\n    elif area1 == area2:\r\n        DibujarRectangulosIguales(b1,a1)\r\n        return \"Las areas son iguales\"\r\n    else:\r\n        DibujarRectangulos2M(b1,a1,b2,a2)\r\n        return \"El area del segundo rectangulo es mayor\"\r\n
def CalculaPerimetro1(base,altura):\r\n    perimetro = (2*base)+(2*altura)\r\n    return perimetro\r\n\r\n
def CalculaArea1(base,altura):\r\n    area = base*altura\r\n    return area\r\n\r\n
def CalculaPerimetro2(base,altura):\r\n    perimetro = (2*base)+(2*altura)\r\n    return perimetro\r\n\r\n
def CalculaArea2(base,altura):\r\n    area = base*altura\r\n    return area\r\n\r\n\r\n
main()","sub_path":"Rectangulos.py","file_name":"Rectangulos.py","file_ext":"py","file_size_in_byte":2790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"320669503","text":"# Framework: MegEngine 1.3.1\n# Network: ResNet-50\n# Dataset: custom lemon image set loaded through MyDataset\n\n
# Import libraries\nimport megengine as mge\nimport megengine.module as M\nimport megengine.functional as F\nfrom megengine.data.dataset import CIFAR10\nfrom megengine import optimizer\nfrom megengine.optimizer 
import MultiStepLR\nfrom megengine.autodiff import GradManager\nfrom megengine.jit import trace\nfrom megengine.data import DataLoader\nfrom megengine.data.sampler import RandomSampler, SequentialSampler\nfrom megengine.data.transform import Normalize, Compose, ToMode, RandomCrop, RandomHorizontalFlip\nimport numpy as np\nimport time as t\nfrom MyDataset import MyDataset\n\n# 设置超参数\nbatch_size=32\nnum_classes=10\nepochs=250\ndrop_rate=0.5\n\n# 从dataset导入CIFAR-100\ntrain_dataset=MyDataset(root_dir=r'E:/lemon_datasets/train_images',\n names_file=r'E:/lemon_datasets/train_images.csv',\n random_rotation=True,\n random_crop=True\n )\ntest_dataset=MyDataset(root_dir=r'E:/lemon_datasets/test_images',\n names_file=r'E:/lemon_datasets/test_images.csv',\n random_rotation=False,\n random_crop=False\n )\n\n\nclass ConvBN(M.Module):\n def __init__(self,\n in_channels: int,\n out_channels: int,\n kernel_size,\n stride,\n padding\n ):\n super().__init__()\n self.conv=M.Conv2d(in_channels=in_channels,out_channels=out_channels,kernel_size=kernel_size,\n stride=stride,padding=padding,bias=False)\n self.bn=M.BatchNorm2d(out_channels)\n\n def forward(self,x):\n x=self.conv(x)\n x=self.bn(x)\n return x\n\nclass ResidualBlock(M.Module):\n def __init__(self,\n in_channels:int,\n out_channels:int,\n kernel_size,\n stride,\n padding:int=0):\n super().__init__()\n #self.stride=stride\n self.conv1=ConvBN(in_channels=in_channels,out_channels=out_channels,kernel_size=(1,1),\n stride=stride,padding=padding)\n self.relu1 = M.ReLU()\n self.conv2=ConvBN(in_channels=out_channels,out_channels=out_channels,kernel_size=kernel_size,\n stride=(1,1),padding=1)\n self.relu2 = M.ReLU()\n self.conv3=ConvBN(in_channels=out_channels,out_channels=4*out_channels,kernel_size=(1,1),\n stride=(1,1),padding=padding)\n if stride==(1,1):\n self.identity=M.Identity()\n else:\n self.identity=ConvBN(in_channels=in_channels,out_channels=4*out_channels,kernel_size=(1,1),\n stride=stride,padding=padding)\n self.relu3 = M.ReLU()\n\n def forward(self,x):\n identity=self.identity(x)\n x=self.conv1(x)\n x=self.relu1(x)\n x=self.conv2(x)\n x=self.relu2(x)\n x=self.conv3(x)\n x=x+identity\n x=self.relu3(x)\n return x\n\n\nclass ResNet50(M.Module):\n def __init__(self):\n super().__init__()\n self.conv1=ConvBN(in_channels=3,out_channels=64,kernel_size=(7,7),\n stride=(2,2),padding=(3,3))\n self.relu=M.ReLU()\n self.maxpool=M.MaxPool2d(kernel_size=(3,3),stride=(2,2),padding=(1,1))\n def make_block(in_channels,out_channels,block_nums):\n block = []\n for i in range(block_nums):\n block.append(ResidualBlock(in_channels=in_channels,out_channels=out_channels,kernel_size=(3,3),\n stride=(2,2) if i==0 else (1,1),padding=0))\n in_channels=4*out_channels\n return M.Sequential(*block)\n self.block1=make_block(in_channels=64,out_channels=64,block_nums=3)\n self.dropout1 = M.Dropout(drop_prob=drop_rate)\n self.block2=make_block(in_channels=256, out_channels=128, block_nums=4)\n self.dropout2 = M.Dropout(drop_prob=drop_rate)\n self.block3=make_block(in_channels=512, out_channels=256, block_nums=6)\n self.dropout3 = M.Dropout(drop_prob=drop_rate)\n self.block4=make_block(in_channels=1024, out_channels=512, block_nums=3)\n self.dropout4 = M.Dropout(drop_prob=drop_rate)\n self.avgpool = M.AvgPool2d(kernel_size=(10,10))\n\n def make_fc(in_features, out_features):\n fc = []\n fc.append(M.Linear(in_features=in_features, out_features=out_features))\n fc.append(M.BatchNorm1d(out_features))\n fc.append(M.Dropout(drop_prob=drop_rate))\n fc.append(M.ReLU())\n return 
M.Sequential(*fc)\n\n self.fc1 = make_fc(in_features=2048, out_features=512)\n self.fc2 = make_fc(in_features=512, out_features=128)\n self.fc3 = M.Linear(in_features=128, out_features=num_classes)\n self.bn = M.BatchNorm1d(num_classes)\n self.softmax = M.Softmax()\n\n def forward(self,x):\n x = self.conv1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n x = self.block1(x)\n x = self.dropout1(x)\n x = self.block2(x)\n x = self.dropout2(x)\n x = self.block3(x)\n x = self.dropout3(x)\n x = self.block4(x)\n x = self.dropout4(x)\n x = self.avgpool(x)\n x = x.reshape(x.shape[0], -1)\n x = self.fc1(x)\n x = self.fc2(x)\n x = self.fc3(x)\n x = self.bn(x)\n x = self.softmax(x)\n return x\n\n\n# 实例化网络并打印结构\nresnet50 = ResNet50()\nprint(resnet50)\n\n# 创建Dataloader用于训练\nprint('\\n'+\"----------start training----------\"+'\\n')\nsampler_train=RandomSampler(dataset=train_dataset, batch_size=batch_size, drop_last=False)\ntrain_mean,train_std = train_dataset.get_mean_std()\ntransform_train = Compose([\n RandomHorizontalFlip(),\n RandomCrop(640, padding_size=4),\n Normalize(mean=train_mean, std=train_std),\n ToMode(\"CHW\")\n])\ntrain_dataloader = DataLoader(dataset=train_dataset, sampler=sampler_train, transform=transform_train)\n\n\n@trace\ndef train_func(data, label, *, net, gm):\n net.train()\n with gm:\n pred = net(data)\n loss = F.loss.cross_entropy(pred=pred, label=label)\n gm.backward(loss)\n return pred,loss\n\n\noptimizer = optimizer.SGD(params=resnet50.parameters(), lr=0.05, momentum=0.9, weight_decay=1e-4)\nschedule = MultiStepLR(optimizer=optimizer, milestones=[50,100,175], gamma=0.5)\ngm = GradManager().attach(resnet50.parameters())\n\n\n# 如果想用动态计算图模式,please set trace.enabled = False\n# trace.enabled = False\n\nfor epoch in range(epochs):\n start=t.time()\n total_loss=0\n correct=0\n total=0\n for batch_data,batch_label in train_dataloader:\n batch_label = np.array(batch_label).astype(np.int32)\n optimizer.clear_grad()\n pred,loss = train_func(mge.tensor(batch_data),mge.tensor(batch_label), net=resnet50, gm=gm)\n optimizer.step()\n total_loss += loss.numpy().item()\n correct += (pred.numpy().argmax(axis=1) == batch_label).sum().item()\n total += batch_label.shape[0]\n schedule.step()\n print(\"epoch{}: lr={:.8f}, loss={:.6f}, training accuracy={:.2f}%, time={:.2f}s\".format(epoch,schedule.get_lr()[0],total_loss/len(train_dataloader),correct*100.0/total,t.time()-start))\nprint('\\n'+\"----------end training----------\"+'\\n')\n\n\n# 模型保存\nmge.save(resnet50.state_dict(),'resnet_v1.mge')\n\n\n# 模型测试\nprint('\\n'+\"----------start testing----------\"+'\\n')\nsampler_test=SequentialSampler(dataset=test_dataset,batch_size=batch_size)\ntest_mean,test_std=test_dataset.get_mean_std()\ntransform_test=Compose([\n Normalize(mean=test_mean, std=test_std),\n ToMode(\"CHW\")\n])\ntest_dataloader = DataLoader(dataset=test_dataset,sampler=sampler_test,transform=transform_test)\n\n\n@trace\ndef eval_func(data, label, *, net):\n net.eval()\n pred = net(data)\n loss = F.loss.cross_entropy(pred=pred, label=label)\n return pred,loss\n\n\ncorrect = 0\ntotal = 0\nfor data, label in test_dataloader:\n label = np.array(label).astype(np.int32)\n pred, _ = eval_func(mge.tensor(data), mge.tensor(label), net=resnet50)\n correct += (pred.numpy().argmax(axis=1) == label).sum().item()\n total += label.shape[0]\n\nprint(\"correct={}, total={}, testing accuracy={:.2f}%\".format(correct,total,correct*100.0/total))\nprint('\\n'+\"----------end 
testing----------\")","sub_path":"Lemon-Megengine/leom_resnet.py","file_name":"leom_resnet.py","file_ext":"py","file_size_in_byte":8444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"358736478","text":"from __future__ import print_function\n\nimport datetime\nimport json\nimport os.path\nimport pickle\nfrom datetime import *\n\nfrom dateutil.relativedelta import *\nfrom dateutil.rrule import *\nfrom google.auth.transport.requests import Request\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom googleapiclient.discovery import build\n\nDATE_FORMAT = '%d/%m/%Y'\nΤΙΜΕΖΟΝΕ = \"Europe/Athens\"\nSCOPES = ['https://www.googleapis.com/auth/calendar']\n\nservice = None\nlab_calendar = None\ntheory_calendar = None\n\n\ndef api_service():\n global service\n\n creds = None\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('calendar', 'v3', credentials=creds)\n get_calendars()\n\n\ndef get_calendars():\n global theory_calendar\n global lab_calendar\n\n calendar_list = service.calendarList().list(pageToken=None).execute()\n for count, calendar_list_entry in enumerate(calendar_list['items']):\n print(str(count + 1) + \")\", calendar_list_entry['summary'])\n\n theory_choice = int(input(\"Choose calendar for theory: \")) - 1\n theory_calendar = calendar_list['items'][theory_choice]['id']\n\n lab_choice = int(input(\"Choose calendar for lab: \")) - 1\n lab_calendar = calendar_list['items'][lab_choice]['id']\n\n\ndef get_data_from_json(filename):\n with open(filename, \"r\", encoding='utf8') as f:\n data = json.load(f)\n\n return data\n\n\ndef get_dates():\n holidays_file = input(\"Enter the holidays json: \")\n holidays = get_data_from_json(holidays_file)\n\n courses_file = input(\"Enter the courses json: \")\n courses = get_data_from_json(courses_file)\n\n return (holidays, courses)\n\n\ndef get_duration():\n start_date = input(\"When should the first event take place: \")\n end_date = input(\"When should the last event take place: \")\n start_date = datetime.strptime(start_date, DATE_FORMAT)\n end_date = datetime.strptime(end_date, DATE_FORMAT)\n return (start_date, end_date)\n\n\ndef process_holidays(holidays):\n days = set()\n for entry in holidays:\n if entry['end_date'] == '':\n days.add(datetime.strptime(entry['start_date'], DATE_FORMAT))\n else:\n start_date = datetime.strptime(entry['start_date'], DATE_FORMAT)\n end_date = datetime.strptime(entry['end_date'], DATE_FORMAT)\n for dt in rrule(DAILY, dtstart=start_date, until=end_date):\n days.add(dt)\n return days\n\n\ndef process_courses(courses):\n timetable = []\n for weekday in [\"monday\", \"tuesday\", \"wednessday\", \"thursday\", \"friday\"]:\n timetable.append(\n [entry for entry in courses if entry[\"weekday\"] == weekday])\n return timetable\n\n\ndef add_course(day, course):\n start_date = day.replace(hour=int(course[\"start_time\"][0:2]),\n minute=int(course[\"start_time\"][3:]))\n end_date = day.replace(hour=int(course[\"end_time\"][0:2]),\n 
minute=int(course[\"end_time\"][3:]))\n event = {\n 'summary': course['name'],\n 'location': course[\"location\"],\n 'description': course['description'],\n 'start': {\n 'dateTime': start_date.isoformat(),\n 'timeZone': ΤΙΜΕΖΟΝΕ,\n },\n 'end': {\n 'dateTime': end_date.isoformat(),\n 'timeZone': ΤΙΜΕΖΟΝΕ,\n }\n }\n event = service.events().insert(\n calendarId=lab_calendar if\n (course['type'] == \"lab\") else theory_calendar,\n body=event).execute()\n\n\ndef add_courses(timetable, holidays, start_date, end_date):\n for day in rrule(DAILY, dtstart=start_date, until=end_date):\n if day in holidays or day.weekday() >= 5:\n continue\n else:\n for course in timetable[day.weekday()]:\n add_course(day, course)\n\n\ndef main():\n api_service()\n (start_date, end_date) = get_duration()\n (holidays, courses) = get_dates()\n holidays = process_holidays(holidays)\n timetable = process_courses(courses)\n add_courses(timetable, holidays, start_date, end_date)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"timetable_to_calendar.py","file_name":"timetable_to_calendar.py","file_ext":"py","file_size_in_byte":4667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"188103201","text":"# This python script require vpython to be installed (see vpython.org)\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom vpython import *\n\n# This subroutine visualizes a 3D structure using vpython\n# The vpython library must be available\n\n# Visualize the bare structure with white arrows for the transition dipoles\ndef visual(x,mu,N,scale):\n for i in range(N):\n arrow(pos=vector(x[i,0],x[i,1],x[i,2]),axis=vector(scale*mu[i,0],scale*mu[i,1],scale*mu[i,2]),round=True)\n\n# Visualize a selected exciton state with white arrows showing the phase and magnitude of the wave function. The molecules are shown as small spheres.\ndef visual_exciton(x,mu,c,index,N,scale):\n for i in range(N):\n # Show molecule\n sphere(pos=vector(x[i,0],x[i,1],x[i,2]),radius=0.1)\n # Show associated transition dipole\n arrow(pos=vector(x[i,0],x[i,1],x[i,2]),axis=vector(c[i,index]*scale*mu[i,0],c[i,index]*scale*mu[i,1],c[i,index]*scale*mu[i,2]),round=True)\n\n# Visualize a selected exciton state with white arrows showing the phase and magnitude of the wave function. 
The molecules are shown as small spheres.\ndef visual_exciton_color(x,mu,c,index,N,scale):\n cmax=np.amax(np.abs(c[:,index]))\n for i in range(N):\n # Show molecule\n sphere(pos=vector(x[i,0],x[i,1],x[i,2]),radius=0.1)\n # Show associated transition dipole\n green=np.abs(1-c[i,index]*c[i,index]/(cmax**2))\n red=green\n blue=green \n if c[i,index]<0:\n red=1\n if c[i,index]>0:\n blue=1\n col=vector(red,green,blue)\n print(i,col)\n arrow(pos=vector(x[i,0],x[i,1],x[i,2]),axis=vector(scale*mu[i,0],scale*mu[i,1],scale*mu[i,2]),color=col,round=True)\n\n","sub_path":"Structure/Structure.py","file_name":"Structure.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"126542352","text":"\"\"\"\nCopyright 2020 Teng Huang\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\n\nfrom janos import *\nimport pandas as pd\nimport numpy as np\nimport sys\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.preprocessing import StandardScaler\nfrom datetime import datetime\nimport time\nfrom sklearn.neural_network import MLPRegressor\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error\n\npd.options.mode.chained_assignment = None\n\n\"\"\"\nload data\n\"\"\"\n# This is the data frame for training the predictive models.\nhistorical_student_data = pd.read_csv(\"./data/college_student_enroll-s1-1.csv\")\n\n# This is information of applicants, whose financial aid is to be determined.\n# We will use these numbers (SAT, GPA) later in the objective function.\napplications = pd.read_csv(\"./data/college_applications6000.csv\")\n\n\"\"\"\nset the constant in the model\n\"\"\"\nscholarships = [0, 2.5] # lower and upper bound if the scholarship\nn_simulations = 5 # to have meaningful mean and standard deviation;\nstudent_sizes = [50, 100, 500, 1000] # we measure these predictions' RMSE\n# interview_sizes = [5, 10, 15, 20, 25]\nLAYERS = 3\nnodes_per_layer = 10\n\"\"\"\npretrained model\n\"\"\"\n# Assign X and y\nX = historical_student_data[[\"SAT\", \"GPA\", \"merit\"]]\ny = historical_student_data[[\"enroll\"]]\n\n# Before training the model, standardize SAT and GPA.\n# For convenience, we do not standardize merit.\nscaler_sat = StandardScaler().fit(X[[\"SAT\"]])\nscaler_gpa = StandardScaler().fit(X[[\"GPA\"]])\nX['SAT_scaled'] = scaler_sat.transform(X[['SAT']])\nX['GPA_scaled'] = scaler_gpa.transform(X[['GPA']])\n\n# Also, standardize the SAT and GPA in the application 
data\napplications[\"SAT_scaled\"] = scaler_sat.transform(applications[[\"SAT\"]])\napplications[\"GPA_scaled\"] = scaler_gpa.transform(applications[[\"GPA\"]])\n\n\"\"\"\nPrepare the output file\n\"\"\"\nnow = datetime.now()\ndate_time = now.strftime(\"%H-%M-%S-%Y%m%d\")\nfilename = \"20200501_neural_network_\" + date_time + \".txt\"\noutput = open(filename, \"w\")\noutput.write(\"PM\\t\\tstudent_size\\t\\tn_layers\\t\\titeration\\t\\tjanos_time\\t\\tgurobi_time\\t\\tobj_val\\n\")\noutput.close()\n\nfor student_size in student_sizes:\n n_applications = student_size\n BUDGET = int(0.2 * n_applications)\n hidden_layer_sizes = []\n for n_layers in range(LAYERS):\n\n hidden_layer_sizes.append(nodes_per_layer)\n\n my_logistic_regression = MLPRegressor(\n hidden_layer_sizes=hidden_layer_sizes, random_state=0) ### TODO: how to link training and optimization!\n my_logistic_regression.fit(X[[\"SAT_scaled\", \"GPA_scaled\", \"merit\"]], y)\n\n for iter in range(n_simulations):\n random_sample = applications.sample(student_size, random_state=iter)\n random_sample = random_sample.reset_index()\n\n m = JModel()\n\n # Define regular variables\n assign_scholarship = m.add_regular_variables([n_applications], \"assign_scholarship\")\n for app_index in range(n_applications):\n assign_scholarship[app_index].setContinuousDomain(lower_bound=scholarships[0],\n upper_bound=scholarships[1])\n assign_scholarship[app_index].setObjectiveCoefficient(0)\n\n # Define predicted variables\n # First, we need to create structures of predictive models. In this case, we associate such a structure with an existing / pretrained logistic regression model.\n logistic_regression_model = OptimizationPredictiveModel(m, pretrained_model=my_logistic_regression,\n feature_names=[\"SAT_scaled\", \"GPA_scaled\", \"merit\"])\n\n # Now, we could define the predicted decision variables and associate them with the predicted model structure.\n enroll_probabilities = m.add_predicted_variables([n_applications], \"enroll_probs\")\n for app_index in range(n_applications):\n enroll_probabilities[app_index].setObjectiveCoefficient(1)\n mapping_of_vars = {\"merit\": assign_scholarship[app_index],\n \"SAT_scaled\": random_sample[\"SAT_scaled\"][app_index],\n \"GPA_scaled\": random_sample[\"GPA_scaled\"][app_index]}\n enroll_probabilities[app_index].setPM(logistic_regression_model, mapping_of_vars)\n\n # Construct constraints\n # \\sum_i x_i <= BUDGET\n scholarship_deployed = Expression()\n\n for app_index in range(n_applications):\n scholarship_deployed.add_term(assign_scholarship[app_index], 1)\n\n m.add_constraint(scholarship_deployed, \"less_equal\", BUDGET)\n # m.add_gurobi_param_settings(\"MIPGap\", 0.01)\n\n # solve the model\n m.add_gurobi_param_settings('TimeLimit', 1800)\n m.add_gurobi_param_settings('DUALREDUCTIONS', 0)\n m.add_gurobi_param_settings('MIPGap', 0.001)\n m.add_gurobi_param_settings('Threads', 1)\n m.set_output_flag(0)\n m.solve()\n\n \"\"\"\n write output\n borrowed from https://www.gurobi.com/documentation/8.1/examples/workforce1_py.html\n \"\"\"\n status = m.gurobi_model.status\n\n if status == GRB.Status.UNBOUNDED:\n print('The model cannot be solved because it is unbounded')\n sys.exit(0)\n elif status == GRB.Status.OPTIMAL:\n output = open(filename, \"a\")\n output.write(\"NN\\t\\t\" + str(student_size) + \"\\t\\t\" + str(n_layers) + \"\\t\\t\" + str(iter) +\n \"\\t\\t\" + str(m.get_time()) + \"\\t\\t\" + str(m.gurobi_model.runtime) +\n \"\\t\\t\" + str(m.gurobi_model.objval) + \"\\n\")\n output.close()\n\n elif status 
if status == GRB.Status.UNBOUNDED:\n print('The model cannot be solved because it is unbounded')\n sys.exit(0)\n elif status == GRB.Status.OPTIMAL:\n output = open(filename, \"a\")\n output.write(\"NN\\t\\t\" + str(student_size) + \"\\t\\t\" + str(n_layers) + \"\\t\\t\" + str(iter) +\n \"\\t\\t\" + str(m.get_time()) + \"\\t\\t\" + str(m.gurobi_model.runtime) +\n \"\\t\\t\" + str(m.gurobi_model.objval) + \"\\n\")\n output.close()\n\n elif status != GRB.Status.INF_OR_UNBD and status != GRB.Status.INFEASIBLE:\n print('Optimization was stopped with status %d' % status)\n else:\n # if none of the above, compute an IIS (irreducible inconsistent subsystem)\n print('The model is infeasible; computing IIS')\n m.gurobi_model.computeIIS()\n m.gurobi_model.write(\"ip_model_inf.ilp\")\n if m.gurobi_model.IISMinimal:\n print('IIS is minimal\\n')\n else:\n print('IIS is not minimal\\n')\n print('\\nThe following constraint(s) cannot be satisfied:')\n for c in m.gurobi_model.getConstrs():\n if c.IISConstr:\n print('%s' % c.constrName)\n","sub_path":"scripts/evaluate_neural_network_20200430.py","file_name":"evaluate_neural_network_20200430.py","file_ext":"py","file_size_in_byte":7613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"363226136","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 10 23:04:28 2020\n\n@author: LiJinling\n\"\"\"\n\n# Parse an F5 BIG-IP configuration file and collect pool members and virtual servers into data frames.\nimport re\nimport csv\nimport pandas as pd\n\nf = open('esb-bigip.conf')\n\nout = open('bigip.csv','w', newline='')\n#csv_write = csv.writer(out,dialect='excel')\n\ndf_members = pd.DataFrame(columns=('IP_Member','Name_Pool ','Name_Monitor'))\ndf_virtual = pd.DataFrame(columns=('Name_Virture','Name_Pool ','Destination','Protocol','Profiles'))\n\n\n\n#csv_write.writerow(['IP_Member','Name_Pool ','Name_Monitor'])\n\n\nwith open('bigip.conf','r') as f:\n content = list(f)\n length=len(content)\n i=0 \n while i