diff --git "a/3997.jsonl" "b/3997.jsonl"
new file mode 100644
--- /dev/null
+++ "b/3997.jsonl"
@@ -0,0 +1,654 @@
+{"seq_id":"239525647","text":"__author__ = 'alexandre'\nimport time\nimport RPi.GPIO as io\nio.setmode(io.BOARD)\nio.setup(7, io.IN)\n\ndef motion_detection():\n try:\n while True:\n time.sleep(2)\n if (io.input (7) == 1):\n print(\"Motion\")\n else:\n print(\"No motion\")\n except KeyboardInterrupt:\n io.cleanup()\n exit()\n\nmotion_detection()","sub_path":"basic/cir_motion_sensor.py","file_name":"cir_motion_sensor.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"480741535","text":"# Setting\nimport os\nimport re\nimport numpy as np\nimport pandas as pd\nfrom konlpy.tag import Twitter\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.cluster import KMeans\n\n\n\ndef load_data(file_path):\n # 데이터 로드 및 간단한 전처리\n path = \"C:/Users/YY/Desktop/TB/Week05/NLP/\"\n file = pd.read_csv(os.path.join(path, file_path), encoding='utf-8', index_col=0)\n\n # 필요 없는 칼럼 삭제 및 이름 변경\n file.drop(['from', 'Date'], axis=1, inplace=True)\n file.rename(columns={'x':'contents'}, inplace=True)\n print(\"loading done\")\n\n return file\n\n\n\ndef make_stopwords(file):\n # stopwords 준비\n lines = []\n\n f = open(os.path.join(path, file), 'r')\n while True:\n line = f.readline()\n if not line:\n break\n lines.append(line)\n f.close()\n\n stopwords = set(re.sub('\\n', '', word) for word in lines)\n print(list(stopwords)[0:10])\n print(\"making stopwords done\")\n\n return stopwords\n\n\n\ndef remove_id():\n # 트위터 아이디를 제거해준다.\n pattern = re.compile('.@+[A-Za-z0-9\\_]*:*')\n tweets = [re.sub(pattern, ' ', sentence) for sentence in list(file['contents'])]\n print(\"removing id done\")\n\n return tweets\n\n\n\nclass TweetTokenizer:\n # 트윗을 토큰화함.\n def __init__(self):\n self.twitter = Twitter()\n self.stopwords = stopwords\n\n def nominalize(tweets, start, end):\n nouns = []\n for tweet in tweets[start:end]:\n nouns.append(' '.join([noun for noun in twitter.nouns(str(tweet)) if not noun in stopwords]))\n # print(len(nouns))\n # document = ' '.join(nouns)\n print(\"tokenizing done\")\n\n return nouns\n\n\n\ndef embedding_clustering():\n vect = CountVectorizer(min_df=0.001, encoding='utf-8', max_features=50, ngram_range=(1, 1))\n bow = vect.fit_transform(nouns)\n print(\"사전 길이: \", len(vect.vocabulary_))\n\n X = bow.toarray()\n print(\"X shape: \", X.shape)\n vect.get_feature_names()\n\n dict = {'문재인':0, '남북정상회담':1, '지방선거':2, '자유한국당':3, '안철수':4, '더불어민주당':5,\n '미투':6, '바른미래당':7, '보수':8, '서울시장':9, '진보':10, '박원순':11, '김문수':12}\n\n Y = np.array(file['Keyword'].map(dict)).astype(int).reshape(-1, 1)\n\n kmeans = KMeans(n_clusters=13)\n kmeans.fit(X)\n pred = kmeans.predict(X).reshape(-1, 1)\n\n result = np.concatenate([pred, Y], axis=1)\n\n print(pd.Series(pred.reshape(-1, )).value_counts())\n print(pd.Series(Y.reshape(-1, )).value_counts())\n\n return result\n\n\n\ndef main():\n file = load_data('tweet.csv')\n stopwords = make_stopwords('korean_stopwords.txt')\n twitter = Twitter()\n tweets = remove_id()\n nouns = TweetTokenizer.nominalize(tweets, 0, 118570)\n result = embedding_clustering()\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Machine_Learning/Assignments/TB/Week05_nlp.py","file_name":"Week05_nlp.py","file_ext":"py","file_size_in_byte":2907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"271874183","text":"from linkedlistnode import linkedlist\n\ndef return_kth_to_last(ll,k):\n p1= ll.head\n p2= ll.head\n #we dont know the length of ll\n for i in range(k):\n if p1 is None:\n return None\n p1=p1.next\n while p1.next != None:\n p1=p1.next\n p2=p2.next\n\n return p2 \n","sub_path":"linkedlist/returnk2last.py","file_name":"returnk2last.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"335394749","text":"##################### HEAD #######################\n\nfrom time import sleep\nfrom random import randint\nfrom output import *\n\n#################### CONFIG ######################\n\ndifficulties = {\"1\":(100, 8), \"2\":(1000, 10), \"3\":(10000, 12),\\\n\"4\":(100000, 14), \"5\":(1000000, 16), \"6\":(10000000, 18)} \n#key denotes difficulty and tuple denotes max_num and max_tries\ndifficulty = \"2\"\nsimulation_games = 100\n#number of tries to simulate \nsimulation_delay = 0.05\n#simulation delay in seconds\nsimulation_threshold = 1000\n#bypasses print screens and delays above\n\n################### FUNCTIONS ####################\n\n\ndef get_difficulty(difficulty):\n\n output(\"difficulty\", difficulty)\n sleep(0.25)\n new_difficulty = raw_input(\"> \")\n\n if new_difficulty in difficulties:\n difficulty = new_difficulty\n\n output(\"difficulty\", difficulty)\n sleep(1.0)\n raw_input(\"Press any key to continue. \")\n\n return difficulty\n\n\ndef game(max_num, max_tries, simulation_games = None):\n\n if simulation_games != None:\n output(\"simulation\")\n sleep(1.0)\n raw_input(\"Press any key to continue... \")\n simulation_successes = 0\n games = simulation_games\n else: \n output(\"intro2\")\n games = 1 \n\n for i in range(games):\n\n rnum = randint(1,max_num)\n num = 0 \n tries = 0\n\n if simulation_games != None:\n simulation_min = 1\n simulation_max = max_num\n\n while tries <= max_tries and num != rnum:\n\n if simulation_games != None:\n num = (simulation_min + simulation_max) // 2\n else:\n while True:\n num = (raw_input(\"Guess a number between 1 and %d > \" % max_num))\n try:\n num = int(num)\n except:\n output(\"try again\")\n continue\n else:\n break\n\n if len(str(num)) < 1 or num <= 0 or num >= max_num:\n output(\"try again\")\n elif num < rnum:\n tries += 1\n if simulation_games != None: simulation_min = num\n else: output(\"higher\", tries)\n elif num > rnum:\n tries += 1\n if simulation_games != None: simulation_max = num\n else: output(\"lower\", tries)\n\n if num == rnum:\n if simulation_games != None: simulation_successes += 1\n if simulation_games == None or simulation_games <= simulation_threshold: output(\"win\", tries-1, rnum)\n else:\n if simulation_games == None or simulation_games <= simulation_threshold: output(\"lose\", tries-1, rnum)\n\n if simulation_games != None and simulation_games <= simulation_threshold: sleep(simulation_delay)\n\n if simulation_games != None: \n output(\"success rate\", float(simulation_successes)/float(simulation_games) * 100)\n\n sleep(1.0)\n raw_input(\"Press any key to continue. \")\n\n\n##################### MAIN #######################\n\noutput(\"intro\")\nsleep(1.0)\nraw_input(\"Press any key to continue... \")\n\nwhile True:\n output(\"menu\")\n sleep(0.25)\n selection = raw_input(\"> \")\n\n if selection == \"2\": \n difficulty = get_difficulty(difficulty)\n elif selection == \"3\":\n max_num, max_tries = difficulties[difficulty]\n game(max_num, max_tries, simulation_games)\n elif selection == \"4\":\n break \n else:\n max_num, max_tries = difficulties[difficulty]\n game(max_num, max_tries)\n\noutput(\"quit\")\nsleep(1.0)\n","sub_path":"guess.py","file_name":"guess.py","file_ext":"py","file_size_in_byte":3597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"282072765","text":"from .document import Document\nfrom .remote import _quote_id\nfrom .view import View\n\n\nclass DesignDocument(Document):\n _allowed_keys = [\n \"language\",\n \"options\",\n \"filters\",\n \"lists\",\n \"rewrites\",\n \"shows\",\n \"updates\",\n \"validate_doc_update\",\n \"views\",\n ]\n\n @property\n def endpoint(self):\n return f\"{self._database.endpoint}/_design/{_quote_id(self.id)}\"\n\n def __setitem__(self, key, value):\n if key in self._allowed_keys:\n super().__setitem__(key, value)\n else:\n raise KeyError(f\"The key '{key}' is not allowed in an design document.\")\n\n def view(self, view):\n return View(self._database, self.id, view)\n\n async def create_view(\n self, view, map_function, reduce_function=None, exists_ok=False\n ):\n if \"views\" not in self:\n self[\"views\"] = {}\n\n if view in self[\"views\"] and not exists_ok:\n raise KeyError(\n f\"The view '{view}' does already exist in the design document {self.id}\"\n )\n\n self[\"views\"][view] = {\"map\": map_function}\n if reduce_function is not None:\n self[\"views\"][view][\"reduce\"] = reduce_function\n self[\"language\"] = \"javascript\"\n\n await self.save()\n\n return self.view(view)\n","sub_path":"aiocouch/design_document.py","file_name":"design_document.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"537699464","text":"from django.db import models\nimport datetime\n\n\nclass Attendance(models.Model):\n employee_number = models.IntegerField()\n department = models.CharField(max_length=16)\n clock_in_time = models.DateTimeField()\n clock_out_time = models.DateTimeField(null=True, blank=True)\n shift = models.CharField(max_length=16)\n\n @staticmethod\n def get_manhours_during(start, stop=None, department='all', shift='all'):\n if stop is None:\n stop = datetime.datetime.now()\n if shift == 'all':\n this_shift = Attendance.objects.all()\n else:\n this_shift = Attendance.objects.filter(shift=shift)\n if department == 'all' or department == 'Plant':\n in_department = this_shift\n else:\n in_department = this_shift.filter(department=department)\n were_clocked_in = in_department.filter(clock_in_time__lt=start).exclude(clock_out_time__lt=start)\n clocked_in_after_start = in_department.filter(clock_in_time__gte=start)\n clocked_in_during = clocked_in_after_start.filter(clock_in_time__lt=stop)\n clocked_out_after_start = in_department.filter(clock_out_time__gte=start)\n clocked_out_during = clocked_out_after_start.filter(clock_out_time__lt=stop)\n all_relevent = were_clocked_in | clocked_in_during | clocked_out_during\n manhours = 0\n for employee in all_relevent:\n begin = max(employee.clock_in_time, start)\n if employee.clock_out_time == None:\n end = stop\n else:\n end = min(employee.clock_out_time, stop)\n manhours += ((end - begin).total_seconds())/3600\n return manhours\n\n\n @staticmethod\n def get_active_at(active_time=None, department='all', shift='all'):\n if active_time is None:\n active_time = datetime.datetime.now()\n if shift == 'all':\n this_shift = Attendance.objects.all()\n else:\n this_shift = Attendance.objects.filter(shift=shift)\n if department == 'all' or department == 'Plant':\n in_department = this_shift\n else:\n in_department = this_shift.filter(department=department)\n have_clocked_in = in_department.filter(clock_in_time__lt=active_time)\n not_clocked_out_yet = have_clocked_in.filter(clock_out_time__gt=active_time)\n never_clocked_out = have_clocked_in.filter(clock_out_time=None)\n not_clocked_out = not_clocked_out_yet | never_clocked_out\n return not_clocked_out.count()\n\n def is_ot(self, time_in_question=None):\n if time_in_question is None:\n time_in_question = datetime.datetime.now().time()\n if self.shift == 0:\n if not self.clock_out_time and time_in_question > datetime.time(14, 30):\n return True\n else:\n return False\n else:\n if not self.clock_out_time and time_in_question > datetime.time(22, 30):\n return True\n else:\n return False\n\n\nclass Complete(models.Model):\n serial_number = models.CharField(max_length=10)\n completed = models.DateTimeField()\n\n @staticmethod\n def claims_by_time(time_in_question, hour=None):\n day = time_in_question.date()\n return Complete.objects.filter(completed__gt=datetime.datetime.combine(day,\n datetime.time(0))).filter(completed__lt=time_in_question).count()\n","sub_path":"hpv/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"60869092","text":"# This sample tests the case where super().__new__(cls) is called\n# and there is an inferred return type based on the cls type.\n\nfrom typing import Literal, NamedTuple\n\nFooBase = NamedTuple(\"FooBase\", [(\"x\", int)])\n\n\nclass Foo(FooBase):\n def __new__(cls):\n obj = super().__new__(cls, x=1)\n t1: Literal[\"Self@Foo\"] = reveal_type(obj)\n return obj\n\n\nf = Foo()\nt2: Literal[\"Foo\"] = reveal_type(f)\n","sub_path":"packages/pyright-internal/src/tests/samples/super6.py","file_name":"super6.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"498746878","text":"#Universidad del Valle de Guatemala\n#Raul Alejandro Monzon Solis 17014\n#Bases de Datos\n#Laboratorio 12\n\nimport Funciones\n\n \n#Menu para Interaccion\nprint (\"Raul Alejandro Monzon Solis 17014\\nBases de Datos\\nLaboratorio 12\\nIngresar valores enteros durante use el programa\")\nopcion = 1\nwhile opcion != 6: \n print(\"Seleccione el ejercicio que deseee:\\nSi desea realizar el ejercicio a escriba 1\\nSi desea realizar el ejercicio b escriba 2\\nSi desea realizar el ejercicio c escriba 3\\nSi desea realizar el ejercicio d escriba 4\\nSi desea realizar el ejercicio e escriba 5\\nIngrese 6 para salir\")\n opcion = input(\"INGRESAR NUMERO\")\n try:\n opcion = int(opcion)\n if opcion == 1:\n precio = input(\"Precio: \")\n Funciones.ejercicioA(precio)\n elif opcion == 2:\n velocidad = input(\"speed: \")\n ram = input(\"RAM: \")\n hd = input(\"HD: \")\n Funciones.ejercicioB(velocidad,ram,hd)\n elif opcion == 3:\n presupuesto = input(\"Presupuesto maximo: \")\n speed = input(\"Speed minima: \")\n color = raw_input(\"Desea imprimir a color? (S/N) (Ingresar sin comillas la letra): \")\n Funciones.ejercicioC(presupuesto, speed, color)\n elif opcion == 4:\n Model = input(\"Model: \")\n Speed = input(\"Speed: \")\n RAM = input(\"RAM: \")\n HD = input(\"HD: \")\n Precio = input(\"Precio: \")\n Funciones.ejercicioD(Model, Speed, RAM, HD, Precio)\n elif opcion == 5:\n Precio = input(\"Precio: \")\n Funciones.ejercicioE(Precio)\n else: \n print(\"INGRESE UNA DE LAS OPCIONES (NUMEROS)\")\n except ValueError:\n print(\"INGRESE UNA DE LAS OPCIONES (NUMEROS\")\nprint(\"Ha finalizado el programa correctamente :)\")\n","sub_path":"lab012-17014-Parte1.py","file_name":"lab012-17014-Parte1.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"232250941","text":"# -*- coding: utf-8 -*-\n\n# Como funciona as burlações de votações online. Parte 2\n# Material abaixo absorvido do curso Pythonicos, para estudos e conheicmento\n\nimport requests \n\ndef proxy():\n\turl = \"http://gimmeproxy.com/api/getProxy?protocol=http\"\n\tr = requests.get(url).json()\n\treturn {r['protocol']:r['curl']}\n\n\nurl = \"http://www.ferendum.com/pt/votarpost2.php\"\n\nproxies = proxy()\t\n\n# Edit and Resend, Request Body (pega o que foi enviado)\n# Substitui & por , e = por :\n# Ficando na estrutura do dicionário\ndata = {\"record1\":\"\",\"record2\":\"\",\"pregunta_ID\":\"45561\",\"sec_digit\":\"91791\",\"config_anonimo\":\"1\",\"config_aut_req\":\"0\",\"titulo\":\"Votaria+em+Jair+messias+bolsonaro\"}\n\ntry:\n\tr = requests.post(url,data=data,proxies=proxies)\n\tprint(r.status_code)\n\tif \"Obrigado por participar desta enquete\" in r.content.decode():\n\t\tprint(\"Voto Realizado com Sucesso!\")\nexcept:\n\tprint(\"Error!\")","sub_path":"pythonicos/Proxy Votacoes Online/proxy_votacoes3.py","file_name":"proxy_votacoes3.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"541840227","text":"\"\"\"\nThis file may not be shared/redistributed without permission. Please read copyright notice in the git repo. If this file contains other copyright notices disregard this text.\n\"\"\"\nimport numpy as np\nfrom irlc.ex06.ilqr_agent import ILQRAgent\nfrom irlc import train\nfrom irlc import savepdf\nimport matplotlib.pyplot as plt\nfrom irlc.ex04.model_cartpole import GymSinCosCartpoleEnvironment\nfrom irlc import VideoMonitor\n\ndef cartpole_experiment(N=12, use_linesearch=True, figex=\"\", animate=True):\n np.random.seed(2)\n Tmax = .9\n dt = Tmax/N\n\n env = GymSinCosCartpoleEnvironment(dt=dt, Tmax=Tmax, supersample_trajectory=True)\n agent = ILQRAgent(env, env.discrete_model, N=N, ilqr_iterations=200, use_linesearch=use_linesearch)\n if animate:\n env =VideoMonitor(env)\n stats, trajectories = train(env, agent, num_episodes=1, return_trajectory=True)\n\n agent.use_ubar = True\n stats2, trajectories2 = train(env, agent, num_episodes=1, return_trajectory=True)\n env.close()\n\n xb = agent.xbar\n tb = np.arange(N+1)*dt\n plt.figure(figsize=(8,6))\n F = 3\n plt.plot(trajectories[0].time, trajectories[0].state[:,F], 'k-', label='Closed-loop $\\\\pi$')\n plt.plot(trajectories2[0].time, trajectories2[0].state[:,F], '-', label='Open-loop $\\\\bar{u}_k$')\n\n plt.plot(tb, xb[:,F], '.-', label=\"iLQR rediction $\\\\bar{x}_k$\")\n plt.xlabel(\"Time/seconds\")\n plt.ylabel(\"$\\cos(\\\\theta)$\")\n plt.title(f\"Pendulum environment $T={N}$\")\n\n plt.grid()\n plt.legend()\n ev = \"pendulum\"\n savepdf(f\"irlc_cartpole_theta_N{N}_{use_linesearch}{figex}\")\n plt.show()\n\ndef plt_cartpole():\n cartpole_experiment(N=50, use_linesearch=True, animate=True)\n\nif __name__ == '__main__':\n plt_cartpole()\n","sub_path":"irlc/ex06/ilqr_cartpole_agent.py","file_name":"ilqr_cartpole_agent.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"148500294","text":"import responses\nfrom payconiq import Transaction\n\nfrom .testcase import PayconiqTestCase\n\n\nclass TransactionTestCase(PayconiqTestCase):\n\n REMOTE_ID = '5e14137fe51905040b202c04'\n\n @responses.activate\n def test_resource_start(self):\n responses.add(\n responses.POST,\n Transaction.get_base_url(),\n json={\n 'transactionId': self.REMOTE_ID\n },\n status=200\n )\n\n remote_id = Transaction.start(\n amount=1,\n webhook_url=''\n )\n self.assertEqual(\n remote_id,\n self.REMOTE_ID\n )\n","sub_path":"lunchbreak/payconiq/tests/test_transaction.py","file_name":"test_transaction.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"473946954","text":"from datetime import date\nfrom datetime import datetime\nfrom datetime import timedelta\nimport urllib.request\nimport json\nimport pytz\nimport hockey_scraper\n\ntoday = date.today()\nnow = datetime.now()\nest = pytz.timezone(\"America/New_York\")\npst = pytz.timezone(\"America/Los_Angeles\")\nloc_dt = est.localize(now)\npacific = loc_dt.astimezone(pst)\nstringnow = pacific.strftime(\"%Y-%m-%d\")\nformattednow = datetime.strptime(stringnow, \"%Y-%m-%d\")\nltrVenues = [\n \"Honda Center\",\n \"Gila River Arena\",\n \"Jobing.com Arena\",\n \"TD Garden\",\n \"KeyBank Center\",\n \"First Niagara Center\",\n \"Nationwide Arena\",\n \"Rogers Place\",\n \"Bridgestone Arena\",\n \"Prudential Center\",\n \"NYCB Live/Nassau Coliseum\",\n \"Barclays Center\",\n \"Canadian Tire Centre\",\n \"PPG Paints Arena\",\n \"CONSOL Energy Center\",\n \"SAP Center at San Jose\",\n \"Enterprise Center\",\n \"Scottrade Center\",\n \"Amalie Arena\",\n \"Tampa Bay Times Forum\"]\nrtlVenues = [\n \"Scotiabank Saddledome\",\n \"PNC Arena\",\n \"United Center\",\n \"Pepsi Center\",\n \"American Airlines Center\",\n \"Little Caesars Arena\",\n \"Joe Louis Arena\",\n \"Rexall Place\",\n \"BB&T Center\",\n \"STAPLES Center\", \"Staples Center\",\n \"Xcel Energy Center\",\n \"Centre Bell\",\n \"Nassau Coliseum\",\n \"Madison Square Garden\",\n \"Wells Fargo Center\",\n \"Scotiabank Arena\",\n \"Air Canada Centre\",\n \"Rogers Arena\",\n \"T-Mobile Arena\",\n \"Bell MTS Place\",\n \"MTS Centre\"]\nhomeLtRVenues = [\"Navy-Marine Corps Memorial Stadium\"]\nhomeRtLVenues = [\"Capital One Arena\",\"Verizon Center\",\"Nationals Park\"]\nhomeVenues = [\"Navy-Marine Corps Memorial Stadium\",\"Capital One Arena\",\"Verizon Center\",\"Nationals Park\"]\n\ndef getGame(theJSON, i):\n gamefeed = \"\"\n gamePk = \"\"\n gameslice = slice(4,6)\n gamedate = datetime.strptime(theJSON[\"dates\"][i][\"date\"], \"%Y-%m-%d\")\n if gamedate == formattednow:\n gamePk = str(theJSON[\"dates\"][i][\"games\"][0][\"gamePk\"])\n if gamePk[gameslice] == \"02\":\n gamefeed = \"https://statsapi.web.nhl.com\" + theJSON[\"dates\"][i][\"games\"][0][\"link\"]\n return [gamefeed, int(gamePk)]\n else:\n pass\n\n# def getOldGame(theJSON, i):\n# gamefeed = \"\"\n# gamePk = \"\"\n# gameslice = slice(4,6)\n# gamePk = str(theJSON[\"dates\"][i][\"games\"][0][\"gamePk\"])\n# if gamePk[gameslice] == \"02\":\n# gamefeed = \"https://statsapi.web.nhl.com\" + theJSON[\"dates\"][i][\"games\"][0][\"link\"]\n# return [gamefeed, int(gamePk)]\n\ndef parseGame(data, gameID):\n theJSON = json.loads(data)\n minutesslice = slice(0,2)\n secondsslice = slice(3,5)\n # print(theJSON)\n if theJSON[\"gameData\"][\"status\"][\"detailedState\"] == \"Final\": # Check to make sure the game is finished\n # Next Steps:\n # check to see if ovechkin played\n if \"ID8471214\" in theJSON[\"gameData\"][\"players\"]:\n print(\"Ovechkin on roster\")\n if (theJSON[\"gameData\"][\"venue\"][\"name\"] in homeVenues):\n if not theJSON[\"liveData\"][\"boxscore\"][\"teams\"][\"home\"][\"players\"][\"ID8471214\"][\"stats\"]:\n print(\"Ovechkin rostered but did not play\")\n else:\n plays = dataScrape(gameID, \"home\", theJSON)\n # print(plays[1],plays[4])\n stringtoi = theJSON[\"liveData\"][\"boxscore\"][\"teams\"][\"home\"][\"players\"][\"ID8471214\"][\"stats\"][\"skaterStats\"][\"timeOnIce\"]\n toisplit = stringtoi.split(\":\")\n minutes = int(toisplit[0])\n seconds = int(toisplit[1])\n toi = (minutes * 60) + seconds\n plays.append(toi)\n\n evstringtoi = 
theJSON[\"liveData\"][\"boxscore\"][\"teams\"][\"home\"][\"players\"][\"ID8471214\"][\"stats\"][\"skaterStats\"][\"evenTimeOnIce\"]\n evtoisplit = evstringtoi.split(\":\")\n evminutes = int(evtoisplit[0])\n evseconds = int(evtoisplit[1])\n evtoi = (evminutes * 60) + evseconds\n plays.append(evtoi)\n\n ppstringtoi = theJSON[\"liveData\"][\"boxscore\"][\"teams\"][\"home\"][\"players\"][\"ID8471214\"][\"stats\"][\"skaterStats\"][\"powerPlayTimeOnIce\"]\n pptoisplit = ppstringtoi.split(\":\")\n ppminutes = int(pptoisplit[0])\n ppseconds = int(pptoisplit[1])\n pptoi = (ppminutes * 60) + ppseconds\n plays.append(pptoi)\n return plays\n else:\n if not theJSON[\"liveData\"][\"boxscore\"][\"teams\"][\"away\"][\"players\"][\"ID8471214\"][\"stats\"]:\n print(\"Ovechkin rostered but did not play\")\n else:\n plays = dataScrape(gameID, \"away\", theJSON)\n # print(plays[1],plays[4])\n stringtoi = theJSON[\"liveData\"][\"boxscore\"][\"teams\"][\"away\"][\"players\"][\"ID8471214\"][\"stats\"][\"skaterStats\"][\"timeOnIce\"]\n toisplit = stringtoi.split(\":\")\n minutes = int(toisplit[0])\n seconds = int(toisplit[1])\n toi = (minutes * 60) + seconds\n plays.append(toi)\n\n evstringtoi = theJSON[\"liveData\"][\"boxscore\"][\"teams\"][\"away\"][\"players\"][\"ID8471214\"][\"stats\"][\"skaterStats\"][\"evenTimeOnIce\"]\n evtoisplit = evstringtoi.split(\":\")\n evminutes = int(evtoisplit[0])\n evseconds = int(evtoisplit[1])\n evtoi = (evminutes * 60) + evseconds\n plays.append(evtoi)\n\n ppstringtoi = theJSON[\"liveData\"][\"boxscore\"][\"teams\"][\"away\"][\"players\"][\"ID8471214\"][\"stats\"][\"skaterStats\"][\"powerPlayTimeOnIce\"]\n pptoisplit = ppstringtoi.split(\":\")\n ppminutes = int(pptoisplit[0])\n ppseconds = int(pptoisplit[1])\n pptoi = (ppminutes * 60) + ppseconds\n plays.append(pptoi)\n return plays\n else:\n print(\"Ovechkin not on roster\")\n else:\n print(\"Game not finished!\")\n\ndef dataScrape(gameID, venue, gamefeed):\n if venue == \"home\":\n print(\"Ovechkin played at home\")\n elif venue == \"away\":\n print(\"Ovechkin played on the road\")\n else:\n print(\"venue error\")\n\n shotsgoals = []\n fenwickgoals = []\n # corsigoals = []\n shots = []\n fenwick = []\n # corsi = []\n evshotsgoals = []\n evfenwickgoals = []\n # evcorsigoals = []\n evshots = []\n evfenwick = []\n # evcorsi = []\n ppshotsgoals = []\n ppfenwickgoals = []\n # ppcorsigoals = []\n ppshots = []\n ppfenwick = []\n # ppcorsi = []\n # print(gamefeed)\n scrape = hockey_scraper.scrape_games([gameID], False, data_format=\"Pandas\")\n bigData = scrape[\"pbp\"]\n eventlist = [\"GOAL\",\"SHOT\",\"MISS\"]\n shotdata = bigData.loc[bigData[\"Event\"].isin(eventlist)]\n roshotdata = shotdata.loc[shotdata[\"Period\"] <= 4]\n ovishotdata = roshotdata.loc[roshotdata[\"p1_ID\"] == 8471214]\n # print(ovishotdata)\n for i, j in ovishotdata.iterrows():\n coordinates = convertCoord(gamefeed,venue,j[\"Period\"],j[\"xC\"],j[\"yC\"])\n # print(i, j[\"Description\"], newcoord)\n strength = j[\"Strength\"]\n strsplit = strength.split(\"x\")\n if venue == \"home\":\n if int(strsplit[0]) >= 5 and int(strsplit[1]) >= 5:\n if j[\"Event\"] == \"GOAL\":\n shotsgoals.append(1)\n fenwickgoals.append(1)\n # corsigoals.append(1)\n shots.append(coordinates)\n fenwick.append(coordinates)\n # corsi.append(coordinates)\n evshotsgoals.append(1)\n evfenwickgoals.append(1)\n # evcorsigoals.append(1)\n evshots.append(coordinates)\n evfenwick.append(coordinates)\n # evcorsi.append(coordinates)\n elif j[\"Event\"] == \"SHOT\":\n shotsgoals.append(0)\n 
fenwickgoals.append(0)\n # corsigoals.append(0)\n shots.append(coordinates)\n fenwick.append(coordinates)\n # corsi.append(coordinates)\n evshotsgoals.append(0)\n evfenwickgoals.append(0)\n # evcorsigoals.append(0)\n evshots.append(coordinates)\n evfenwick.append(coordinates)\n # evcorsi.append(coordinates)\n elif j[\"Event\"] == \"MISS\":\n fenwickgoals.append(0)\n # corsigoals.append(0)\n fenwick.append(coordinates)\n # corsi.append(coordinates)\n evfenwickgoals.append(0)\n # evcorsigoals.append(0)\n evfenwick.append(coordinates)\n # evcorsi.append(coordinates)\n elif int(strsplit[1]) == int(strsplit[0]):\n if j[\"Event\"] == \"GOAL\":\n shotsgoals.append(1)\n fenwickgoals.append(1)\n # corsigoals.append(1)\n shots.append(coordinates)\n fenwick.append(coordinates)\n # corsi.append(coordinates)\n evshotsgoals.append(1)\n evfenwickgoals.append(1)\n # evcorsigoals.append(1)\n evshots.append(coordinates)\n evfenwick.append(coordinates)\n # evcorsi.append(coordinates)\n elif j[\"Event\"] == \"SHOT\":\n shotsgoals.append(0)\n fenwickgoals.append(0)\n # corsigoals.append(0)\n shots.append(coordinates)\n fenwick.append(coordinates)\n # corsi.append(coordinates)\n evshotsgoals.append(0)\n evfenwickgoals.append(0)\n # evcorsigoals.append(0)\n evshots.append(coordinates)\n evfenwick.append(coordinates)\n # evcorsi.append(coordinates)\n elif j[\"Event\"] == \"MISS\":\n fenwickgoals.append(0)\n # corsigoals.append(0)\n fenwick.append(coordinates)\n # corsi.append(coordinates)\n evfenwickgoals.append(0)\n # evcorsigoals.append(0)\n evfenwick.append(coordinates)\n # evcorsi.append(coordinates)\n elif int(strsplit[1]) < int(strsplit[0]):\n if j[\"Event\"] == \"GOAL\":\n shotsgoals.append(1)\n fenwickgoals.append(1)\n # corsigoals.append(1)\n shots.append(coordinates)\n fenwick.append(coordinates)\n # corsi.append(coordinates)\n ppshotsgoals.append(1)\n ppfenwickgoals.append(1)\n # ppcorsigoals.append(1)\n ppshots.append(coordinates)\n ppfenwick.append(coordinates)\n # ppcorsi.append(coordinates)\n elif j[\"Event\"] == \"SHOT\":\n shotsgoals.append(0)\n fenwickgoals.append(0)\n # corsigoals.append(0)\n shots.append(coordinates)\n fenwick.append(coordinates)\n # corsi.append(coordinates)\n ppshotsgoals.append(0)\n ppfenwickgoals.append(0)\n # ppcorsigoals.append(0)\n ppshots.append(coordinates)\n ppfenwick.append(coordinates)\n # ppcorsi.append(coordinates)\n elif j[\"Event\"] == \"MISS\":\n fenwickgoals.append(0)\n # corsigoals.append(0)\n fenwick.append(coordinates)\n # corsi.append(coordinates)\n ppfenwickgoals.append(0)\n # ppcorsigoals.append(0)\n ppfenwick.append(coordinates)\n # ppcorsi.append(coordinates)\n else:\n pass\n elif venue == \"away\":\n if int(strsplit[0]) >= 5 and int(strsplit[1]) >= 5:\n if j[\"Event\"] == \"GOAL\":\n shotsgoals.append(1)\n fenwickgoals.append(1)\n # corsigoals.append(1)\n shots.append(coordinates)\n fenwick.append(coordinates)\n # corsi.append(coordinates)\n evshotsgoals.append(1)\n evfenwickgoals.append(1)\n # evcorsigoals.append(1)\n evshots.append(coordinates)\n evfenwick.append(coordinates)\n # evcorsi.append(coordinates)\n elif j[\"Event\"] == \"SHOT\":\n shotsgoals.append(0)\n fenwickgoals.append(0)\n # corsigoals.append(0)\n shots.append(coordinates)\n fenwick.append(coordinates)\n # corsi.append(coordinates)\n evshotsgoals.append(0)\n evfenwickgoals.append(0)\n # evcorsigoals.append(0)\n evshots.append(coordinates)\n evfenwick.append(coordinates)\n # evcorsi.append(coordinates)\n elif j[\"Event\"] == \"MISS\":\n fenwickgoals.append(0)\n # 
corsigoals.append(0)\n fenwick.append(coordinates)\n # corsi.append(coordinates)\n evfenwickgoals.append(0)\n # evcorsigoals.append(0)\n evfenwick.append(coordinates)\n # evcorsi.append(coordinates)\n elif int(strsplit[0]) == int(strsplit[1]):\n if j[\"Event\"] == \"GOAL\":\n shotsgoals.append(1)\n fenwickgoals.append(1)\n # corsigoals.append(1)\n shots.append(coordinates)\n fenwick.append(coordinates)\n # corsi.append(coordinates)\n evshotsgoals.append(1)\n evfenwickgoals.append(1)\n # evcorsigoals.append(1)\n evshots.append(coordinates)\n evfenwick.append(coordinates)\n # evcorsi.append(coordinates)\n elif j[\"Event\"] == \"SHOT\":\n shotsgoals.append(0)\n fenwickgoals.append(0)\n # corsigoals.append(0)\n shots.append(coordinates)\n fenwick.append(coordinates)\n # corsi.append(coordinates)\n evshotsgoals.append(0)\n evfenwickgoals.append(0)\n # evcorsigoals.append(0)\n evshots.append(coordinates)\n evfenwick.append(coordinates)\n # evcorsi.append(coordinates)\n elif j[\"Event\"] == \"MISS\":\n fenwickgoals.append(0)\n # corsigoals.append(0)\n fenwick.append(coordinates)\n # corsi.append(coordinates)\n evfenwickgoals.append(0)\n # evcorsigoals.append(0)\n evfenwick.append(coordinates)\n # evcorsi.append(coordinates)\n elif int(strsplit[0]) < int(strsplit[1]):\n if j[\"Event\"] == \"GOAL\":\n shotsgoals.append(1)\n fenwickgoals.append(1)\n # corsigoals.append(1)\n shots.append(coordinates)\n fenwick.append(coordinates)\n # corsi.append(coordinates)\n ppshotsgoals.append(1)\n ppfenwickgoals.append(1)\n # ppcorsigoals.append(1)\n ppshots.append(coordinates)\n ppfenwick.append(coordinates)\n # ppcorsi.append(coordinates)\n elif j[\"Event\"] == \"SHOT\":\n shotsgoals.append(0)\n fenwickgoals.append(0)\n # corsigoals.append(0)\n shots.append(coordinates)\n fenwick.append(coordinates)\n # corsi.append(coordinates)\n ppshotsgoals.append(0)\n ppfenwickgoals.append(0)\n # ppcorsigoals.append(0)\n ppshots.append(coordinates)\n ppfenwick.append(coordinates)\n # ppcorsi.append(coordinates)\n elif j[\"Event\"] == \"MISS\":\n fenwickgoals.append(0)\n # corsigoals.append(0)\n fenwick.append(coordinates)\n # corsi.append(coordinates)\n ppfenwickgoals.append(0)\n # ppcorsigoals.append(0)\n ppfenwick.append(coordinates)\n # ppcorsi.append(coordinates)\n else:\n pass\n avgx = 0\n totalx = 0\n for i in range(0, len(fenwick)):\n totalx = totalx + fenwick[i][0]\n if len(fenwick) > 0:\n avgx = totalx / len(fenwick)\n if avgx > 100:\n # print(\"-------------------- INVERT HERE --------------------\")\n for fen in range(0, len(fenwick)):\n fenwick[fen][0] = -fenwick[fen][0] + 178\n fenwick[fen][1] = -fenwick[fen][1]\n print(shots)\n print(fenwick)\n print(evshots)\n print(evfenwick)\n print(ppshots)\n print(ppfenwick)\n # print(\"---------------- INVERSION COMPLETE ----------------\")\n return [shotsgoals, fenwickgoals, shots, fenwick, evshotsgoals, evfenwickgoals, evshots, evfenwick, ppshotsgoals, ppfenwickgoals, ppshots, ppfenwick]\n\ndef constructJSON(plays,gameno):\n # print(plays)\n gamedata = {}\n if plays != None:\n gamedata[\"Game\"] = \"G\" + str(gameno)\n gamedata[\"Goals\"] = {}\n gamedata[\"Goals\"][\"Shots\"] = plays[0]\n gamedata[\"Goals\"][\"Fenwick\"] = plays[1]\n # gamedata[\"Goals\"][\"Corsi\"] = plays[2]\n gamedata[\"TOI\"] = plays[12]\n gamedata[\"Shots\"] = plays[2]\n gamedata[\"Fenwick\"] = plays[3]\n # gamedata[\"Corsi\"] = plays[5]\n gamedata[\"Even Strength\"] = {}\n gamedata[\"Even Strength\"][\"Goals\"] = {}\n gamedata[\"Even Strength\"][\"Goals\"][\"Shots\"] = plays[4]\n gamedata[\"Even 
Strength\"][\"Goals\"][\"Fenwick\"] = plays[5]\n # gamedata[\"Even Strength\"][\"Goals\"][\"Corsi\"] = plays[8]\n gamedata[\"Even Strength\"][\"TOI\"] = plays[13]\n gamedata[\"Even Strength\"][\"Shots\"] = plays[6]\n gamedata[\"Even Strength\"][\"Fenwick\"] = plays[7]\n # gamedata[\"Even Strength\"][\"Corsi\"] = plays[11]\n gamedata[\"Power Play\"] = {}\n gamedata[\"Power Play\"][\"Goals\"] = {}\n gamedata[\"Power Play\"][\"Goals\"][\"Shots\"] = plays[8]\n gamedata[\"Power Play\"][\"Goals\"][\"Fenwick\"] = plays[9]\n # gamedata[\"Power Play\"][\"Goals\"][\"Corsi\"] = plays[14]\n gamedata[\"Power Play\"][\"TOI\"] = plays[14]\n gamedata[\"Power Play\"][\"Shots\"] = plays[10]\n gamedata[\"Power Play\"][\"Fenwick\"] = plays[11]\n # gamedata[\"Power Play\"][\"Corsi\"] = plays[17]\n return gamedata\n\ndef convertCoord(gamefeed, venue, period, x, y):\n if venue == \"home\":\n if gamefeed[\"gameData\"][\"venue\"][\"name\"] in homeLtRVenues:\n if period == 1 or period == 3:\n # print(\"Caps shooting left to right at home\")\n newX = -x + 89\n newY = y\n return [newX, newY]\n elif period == 2 or period == 4:\n # print(\"Caps shooting right to left at home\")\n newX = x + 89\n newY = -y\n return [newX, newY]\n else:\n pass\n elif gamefeed[\"gameData\"][\"venue\"][\"name\"] in homeRtLVenues:\n if period == 1 or period == 3:\n # print(\"Caps shooting right to left at home\")\n newX = x + 89\n newY = -y\n return [newX, newY]\n elif period == 2 or period == 4:\n # print(\"Caps shooting left to right at home\")\n newX = -x + 89\n newY = y\n return [newX, newY]\n else:\n pass\n else:\n print(\"venue error\")\n elif venue == \"away\":\n if gamefeed[\"gameData\"][\"venue\"][\"name\"] in ltrVenues:\n if period == 1 or period == 3:\n # print(\"Caps shooting right to left on the road\")\n newX = x + 89\n newY = -y\n return [newX, newY]\n elif period == 2 or period == 4:\n # print(\"Caps shooting left to right on the road\")\n newX = -x + 89\n newY = y\n return [newX, newY]\n else:\n pass\n elif gamefeed[\"gameData\"][\"venue\"][\"name\"] in rtlVenues:\n if period == 1 or period == 3:\n # print(\"Caps shooting left to right on the road\")\n newX = -x + 89\n newY = y\n return [newX, newY]\n elif period == 2 or period == 4:\n # print(\"Caps shooting right to left on the road\")\n newX = x + 89\n newY = -y\n return [newX, newY]\n else:\n pass\n else:\n print(\"venue error\")\n else:\n print(\"venue error\")\n\ndef main():\n if (today.month >= 8):\n currentseason = today.year\n else:\n currentseason = today.year - 1\n\n with open('advdata.json') as json_file:\n finalJSON = json.load(json_file)\n print(finalJSON)\n scheduleUrl = str(\"https://statsapi.web.nhl.com/api/v1/schedule?teamId=15&startDate=\" + str(currentseason) + \"-09-01&endDate=\" + str(currentseason + 1) + \"-05-01\")\n print(scheduleUrl)\n webUrl = urllib.request.urlopen(scheduleUrl)\n print (\"result code: \" + str(webUrl.getcode()))\n if (webUrl.getcode() == 200):\n data = webUrl.read()\n newgameJSON = json.loads(data)\n for i,d in enumerate(newgameJSON[\"dates\"]):\n gameJSON = getGame(newgameJSON, i)\n # print(gameJSON[0])\n if gameJSON != None:\n gameUrl = urllib.request.urlopen(gameJSON[0])\n print (\"result code: \" + str(gameUrl.getcode()))\n if (gameUrl.getcode() == 200):\n gamedata = gameUrl.read()\n shotinfo = parseGame(gamedata, gameJSON[1])\n if shotinfo != []:\n exportdict = constructJSON(shotinfo,gameJSON[1])\n print(exportdict)\n finalJSON.append(exportdict)\n else:\n print(\"Received error, cannot parse results\")\n else:\n 
print(\"Received error, cannot parse results\")\n with open('advdata.json', 'w+') as outfile:\n json.dump(finalJSON, outfile)\n\nif __name__ == \"__main__\":\n main()","sub_path":"PyTodaysData.py","file_name":"PyTodaysData.py","file_ext":"py","file_size_in_byte":23453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"213542859","text":"import pygame\nimport os\n\n# Special code to center window\nos.environ['SDL_VIDEO_CENTERED'] = '1'\n\n\nclass Snake(object):\n def __init__(self):\n self.snake = [(World.GRID_W/2-1, World.GRID_H/2), (World.GRID_W/2, World.GRID_H/2)]\n\n def draw(self, screen):\n block_size = World.BLOCK_SIZE\n block_color = World.WHITE\n for x, y in self.snake:\n block_pos = (x * World.BLOCK_W, y * World.BLOCK_H)\n rect = pygame.Rect(block_pos, block_size)\n pygame.draw.rect(screen, block_color, rect)\n\n\nclass World(object):\n FRAMES_PER_SECOND = 30\n RESOLUTION = WIDTH, HEGIHT = 800, 600\n GRID_SIZE = GRID_W, GRID_H = (40, 40)\n BLOCK_SIZE = BLOCK_W, BLOCK_H = (WIDTH / GRID_W, HEGIHT / GRID_H)\n WINDOWS_TITLE = 'Snake v1.0 by raz'\n BLACK = (0, 0, 0)\n WHITE = (255, 255, 255)\n\n def __init__(self):\n self.player = Snake()\n\n def draw(self, screen):\n screen.fill(self.BLACK)\n self.player.draw(screen)\n pygame.display.flip()\n\n\ndef main():\n # Initial Setup\n pygame.init()\n screen_surface = pygame.display.set_mode(World.RESOLUTION)\n pygame.display.set_caption(World.WINDOWS_TITLE)\n\n world = World()\n done = False\n clock = pygame.time.Clock()\n while not done:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n world.draw(screen_surface)\n # Delays until next frame\n clock.tick(World.FRAMES_PER_SECOND)\n pygame.quit()\n\nmain()","sub_path":"snake/snake02.py","file_name":"snake02.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"290972520","text":"#!/usr/bin/env python3\nimport random\n\n\ndef read_roster():\n with open('roster.txt', 'r') as f:\n line = f.readline()\n count = 0\n for line in f:\n x = line.split()\n if len(x) == 3:\n if 'e' in x[0]+x[1] or 'E' in x[0]+x[1]:\n print(x[0] + ' '+ x[1])\n count += 1\n else:\n if 'e' in x[0] or 'E' in x[0]:\n print(x[0])\n count += 1\n\n\n print(\"Total number of names that contain letter 'e' : {}\".format(count))\n\n\ndef main():\n \n read_roster()\n\nif __name__ == '__main__':\n main()\n","sub_path":"D06ex03.py","file_name":"D06ex03.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"201674344","text":"# coding=utf-8\nfrom src.testcase.GN_Y201H.WidgetOperation import *\n\n\nclass GNY201HNormalTimer2(WidgetOperation):\n @case_run(False)\n def run(self):\n self.case_module = u\"普通定时(#246)\" # 用例所属模块\n self.case_title = u'在线状态,4组开与4组关按自定义方式执行的定时执行状态检查' # 用例名称\n self.zentao_id = \"2064\" # 禅道ID\n\n # 用例动作\n def case(self):\n self.choose_home_device(conf[\"MAC\"][\"HW\"][0])\n\n self.delete_normal_timer()\n\n self.delete_delay_timer()\n\n self.set_power(\"power_off\")\n\n self.widget_click(self.page[\"control_device_page\"][\"normal_timer\"],\n self.page[\"normal_timer_page\"][\"title\"])\n\n now = time.strftime(\"%H:%M\")\n\n time_1 = [\"point\", \"09:00\"]\n start_time_1, set_time_1, cycle1 = self.create_normal_timer(now, time_on=time_1, loop=[u\"周一\", u\"周三\", u\"周五\"])\n\n time_2 = [\"point\", \"10:00\"]\n start_time_2, set_time_2, cycle2 = self.create_normal_timer(now, time_off=time_2, loop=[u\"周一\", u\"周三\", u\"周五\"])\n\n time_3 = [\"point\", \"11:00\"]\n start_time_3, set_time_3, cycle3 = self.create_normal_timer(now, time_on=time_3, loop=[u\"周一\", u\"周三\", u\"周五\"])\n\n time_4 = [\"point\", \"12:00\"]\n start_time_4, set_time_4, cycle4 = self.create_normal_timer(now, time_off=time_4, loop=[u\"周一\", u\"周三\", u\"周五\"])\n\n time_5 = [\"point\", \"13:00\"]\n start_time_5, set_time_5, cycle5 = self.create_normal_timer(now, time_on=time_5, loop=[u\"周一\", u\"周三\", u\"周五\"])\n\n time_6 = [\"point\", \"14:00\"]\n start_time_6, set_time_6, cycle6 = self.create_normal_timer(now, time_off=time_6, loop=[u\"周一\", u\"周三\", u\"周五\"])\n\n time_7 = [\"point\", \"15:00\"]\n start_time_7, set_time_7, cycle7 = self.create_normal_timer(now, time_on=time_7, loop=[u\"周一\", u\"周三\", u\"周五\"])\n\n time_8 = [\"point\", \"16:00\"]\n start_time_8, set_time_8, cycle8 = self.create_normal_timer(now, time_off=time_8, loop=[u\"周一\", u\"周三\", u\"周五\"])\n\n self.widget_click(self.page[\"normal_timer_page\"][\"to_return\"],\n self.page[\"control_device_page\"][\"title\"])\n\n self.check_timer(start_time_1, set_time_1, u\"电源已开启\", cycle1)\n self.check_timer(start_time_2, set_time_2, u\"电源已关闭\", cycle2)\n self.check_timer(start_time_3, set_time_3, u\"电源已开启\", cycle3)\n self.check_timer(start_time_4, set_time_4, u\"电源已关闭\", cycle4)\n self.check_timer(start_time_5, set_time_5, u\"电源已开启\", cycle5)\n self.check_timer(start_time_6, set_time_6, u\"电源已关闭\", cycle6)\n self.check_timer(start_time_7, set_time_7, u\"电源已开启\", cycle7)\n self.check_timer(start_time_8, set_time_8, u\"电源已关闭\", cycle8)\n","sub_path":"src/testcase/GN_Y201H/case/GN_Y201H_NORMAL_TIMER/GN_Y201H_NORMAL_TIMER_002.py","file_name":"GN_Y201H_NORMAL_TIMER_002.py","file_ext":"py","file_size_in_byte":2887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"575455459","text":"'''\nAuthor: Viveque Ramji\nPurpose: Module to clean camera data and provide an open direction to move in\n\n'''\nimport numpy as np\nfrom scipy import sparse\nimport matplotlib.pyplot as plt\n\nimport adaptive_grid_sizing as ags\nimport voronoi\nimport sparse_interpolation as si\nimport obstacle_avoid as oa\n\nimport time\nimport logging\n\nclass Navigation:\n\t\"\"\"\n Object to use depth images to find gap to move in to.\n \"\"\"\n\n\tdef __init__(self, debug=False):\n\t\t\"\"\"\n\t Intitalize Navigation module\n\t \"\"\"\n\t\tself.debug = debug\n\n\n\tdef reconstructFrame(self, depth, perc_samples=0.005, min_sigma=0.5, min_h=10, algorithm_type='voronoi'):\n\t\t\"\"\"\n\t Givena partial depth image, will return an interpolated version filling\n\t all missing data.\n\t \"\"\"\n\n\n\t\tif algorithm_type == 'voronoi':\n\t\t\tsamples, measured_vector = si.createSamples(depth, perc_samples)\n\t\t\tif len(samples) <= 1:\n\t\t\t\treturn None\n\t\t\tfilled = voronoi.getVoronoi(depth.shape, samples, measured_vector)\n\t\telif algorithm_type == 'rbf':\n\t\t\tsamples, measured_vector = si.createSamples(depth, perc_samples)\n\t\t\tif len(samples) <= 1:\n\t\t\t\treturn None\n\t\t\tfilled = si.interpolateDepthImage(depth.shape,samples, measured_vector)\n\t\telif algorithm_type == 'ags_only':\n\t\t\tfilled = depth\n\n\t\tadapted = ags.depthCompletion(filled, min_sigma, min_h)\n\n\t\tif self.debug:\n\t\t\tsamples, measured_vector = si.createSamples(depth, perc_samples)\n\t\t\tsample_img = np.zeros((depth.shape)).flatten()\n\t\t\tsample_img[samples] = depth.flatten()[samples]\n\t\t\tsample_img = sample_img.reshape(depth.shape)\n\n\t\t\tself.plot(depth, sample_img, filled, adapted)\n\n\t\treturn adapted\n\tdef obstacleAvoid(self, depth, max_dist=1.2,barrier_h=.5):\n\t\t\"\"\"\n\t Given a depth image and a threshold value, will find the largest gap\n\t that can be used, returning the fraction along the images width where\n\t this is and the degrees rotation from the center. 
\n\t \"\"\"\n\t\tpos = oa.findLargestGap(depth, max_dist, barrier_h,DEBUG=self.debug)\n\t\treturn pos\n\n\tdef plot(self, depth, sample_img, filled, ags, cmap='viridis', b=True):\n\t\t\"\"\"\n\t Will plot the rgb image, original depth, interpolated depth and the\n\t position of where the algorithm recommends to move.\n\t \"\"\"\n\t\tplt.subplot(2, 2, 1)\n\t\tplt.title('Depth')\n\t\tplt.imshow(depth)\n\t\tplt.xticks(visible=False)\n\t\tplt.yticks(visible=False)\n\n\t\tplt.subplot(2, 2, 2)\n\t\tplt.imshow(sample_img, cmap=cmap)\n\t\tplt.title('Samples')\n\t\tplt.xticks(visible=False)\n\t\tplt.yticks(visible=False)\n\n\t\tplt.subplot(2, 2, 3)\n\t\tplt.imshow(filled, cmap=cmap)\n\t\tplt.title('RBF, Voronoi, or None')\n\t\tplt.xticks(visible=False)\n\t\tplt.yticks(visible=False)\n\n\t\tplt.subplot(2, 2, 4)\n\t\tplt.imshow(ags, cmap=cmap)\n\t\tplt.title('AGS')\n\t\tplt.xticks(visible=False)\n\t\tplt.yticks(visible=False)\n\n\n\t\tplt.subplots_adjust(bottom=0.1, right=0.8, top=0.9)\n\t\tcax = plt.axes([0.85, 0.1, 0.075, 0.8])\n\t\tplt.colorbar(cax=cax)\n\n\t\tplt.show(block=~b)\n\t\tif b:\n\t\t\ttime.sleep(b)\n\t\t\tplt.close()\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Application example with visualization.\n \"\"\"\n depth = np.random.rand(10, 5)\n depth = np.hstack((depth*4, depth*0.9))\n depth[0, 5] = np.nan\n depth[0, 6] = np.nan\n depth[depth>4.0] = 0.0\n\n nav = Navigation(True)\n adapted = nav.reconstructFrame(depth, .1, .5, 10)\n frac, pos = nav.obstacleAvoid(adapted, 1.3)\n","sub_path":"main/navigation/nav.py","file_name":"nav.py","file_ext":"py","file_size_in_byte":3269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"77644713","text":"from django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect, HttpRequest, HttpResponse, JsonResponse\nfrom django.shortcuts import render, redirect\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\n\nfrom accounts.models import Account\nfrom compute import views as compute_views\nfrom files.forms import DocumentForm\nfrom files.models import Document#, SummaryStats\nfrom files.views import *\n\nfrom forms import CreateForm\nimport json\nimport urllib2\n\ndef home_page(request):\n return redirect('home')\n\ndef list_panes(request):\n \"\"\"\n Entry handler for user's home page!\n \"\"\"\n # redirect to login page if not signed in\n if not request.user.is_authenticated():\n return redirect('top')\n\n if request.method == 'POST':\n form = DocumentForm(request.POST, request.FILES)\n if form.is_valid():\n owner = Account.objects.get(id=request.user.id).username\n Document.upload_file(\n request.user.id,\n request.FILES['csv_file'].name,\n request.FILES['csv_file'],\n owner\n )\n # Redirect to the document list after POST\n return HttpResponseRedirect(reverse('app.views.list_panes'))\n\n elif request.method == 'GET':\n return render_to_response(\n 'home.html',\n get_panes_data(request.user.id),\n context_instance=RequestContext(request)\n )\n\ndef get_panes_data(acct_id):\n \"\"\"\n Fetches relevant data needed for template rendering on GET request\n :return: dict\n \"\"\"\n documents = Document.select_user_files(acct_id)\n shared_files = Document.select_shared_files(acct_id)\n user_name = Account.objects.get(id=acct_id).username\n upload = DocumentForm()\n form = CreateForm()\n panes = compute_views.handle_get_panes(acct_id)\n return {\n 'user_name': user_name,\n 'documents': documents,\n 'shared_documents': shared_files,\n 'form': form,\n 'panes': panes,\n 'upload': upload\n }\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"274483350","text":"from mock import Mock, call\nfrom sprinter.testtools import FormulaTest\n\nsource_config = \"\"\"\n\"\"\"\n\ntarget_config = \"\"\"\n[simple_example]\nformula = sprinter.formulas.egg\negg = jedi\n\n[simple_multiple_eggs]\nformula = sprinter.formulas.egg\neggs = jedi, epc=0.5\n pelican\n\n[simple_multiple_and_single_eggs]\nformula = sprinter.formulas.egg\negg = sprinter\neggs = jedi, epc=0.5\n pelican\n\n[sprinter]\nformula = sprinter.formulas.egg\negg = http://github.com/toumorokoshi/sprinter/tarball/master\n\"\"\"\n\n\nclass TestEggFormula(FormulaTest):\n \"\"\" Tests for the egg formula \"\"\"\n\n def setup(self):\n super(TestEggFormula, self).setup(source_config=source_config,\n target_config=target_config)\n\n def test_simple_example(self):\n \"\"\" The egg formula should install a single egg \"\"\"\n self.environment.install_feature(\"simple_example\")\n self.lib.call.assert_called_with(\"pip install jedi\")\n\n def test_simple_multiple_eggs(self):\n \"\"\" The egg formula should install multiple eggs \"\"\"\n self.environment.install_feature(\"simple_multiple_eggs\")\n self.lib.call.assert_any_call(\"pip install jedi\")\n self.lib.call.assert_any_call(\"pip install epc=0.5\")\n self.lib.call.assert_any_call(\"pip install pelican\")\n\n def test_simple_multiple_and_single_eggs(self):\n \"\"\" The egg formula should install single and multiple eggs \"\"\"\n self.environment.install_feature(\"simple_multiple_and_single_eggs\")\n self.lib.call.assert_any_call(\"pip install jedi\")\n self.lib.call.assert_any_call(\"pip install epc=0.5\")\n self.lib.call.assert_any_call(\"pip install pelican\")\n self.lib.call.assert_any_call(\"pip install sprinter\")\n\n def test_sprinter(self):\n \"\"\" The sprinter egg formula should install sprinter from a remote protocol \"\"\"\n self.environment.install_feature(\"sprinter\")\n self.lib.call.assert_called_with(\"pip install http://github.com/toumorokoshi/sprinter/tarball/master\")\n","sub_path":"sprinter/formulas/egg_tests.py","file_name":"egg_tests.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"347448175","text":"\"\"\"DB re-creation\n\nRevision ID: 4eb4bae04a02\nRevises: \nCreate Date: 2018-11-15 14:13:50.929199\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '4eb4bae04a02'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('music',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('title', sa.String(length=150), nullable=False),\n sa.Column('source', sa.String(length=150), nullable=False),\n sa.Column('duration', sa.Integer(), nullable=False),\n sa.Column('loop', sa.Boolean(), nullable=False),\n sa.Column('vote', sa.Integer(), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_music_title'), 'music', ['title'], unique=False)\n op.create_table('scene',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=32), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_scene_name'), 'scene', ['name'], unique=False)\n op.create_table('style',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=32), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_style_name'), 'style', ['name'], unique=False)\n op.create_table('user',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('login', sa.String(length=64), nullable=False),\n sa.Column('password', sa.String(length=128), nullable=False),\n sa.Column('email', sa.String(length=256), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_user_login'), 'user', ['login'], unique=False)\n op.create_table('music_scene',\n sa.Column('scene_id', sa.Integer(), nullable=False),\n sa.Column('music_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['music_id'], ['music.id'], ),\n sa.ForeignKeyConstraint(['scene_id'], ['scene.id'], ),\n sa.PrimaryKeyConstraint('scene_id', 'music_id')\n )\n op.create_table('music_style',\n sa.Column('style_id', sa.Integer(), nullable=False),\n sa.Column('music_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['music_id'], ['music.id'], ),\n sa.ForeignKeyConstraint(['style_id'], ['style.id'], ),\n sa.PrimaryKeyConstraint('style_id', 'music_id')\n )\n op.create_table('playlist',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('title', sa.String(length=32), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('playlist_music',\n sa.Column('playlist_id', sa.Integer(), nullable=False),\n sa.Column('music_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['music_id'], ['music.id'], ),\n sa.ForeignKeyConstraint(['playlist_id'], ['playlist.id'], ),\n sa.PrimaryKeyConstraint('playlist_id', 'music_id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('playlist_music')\n op.drop_table('playlist')\n op.drop_table('music_style')\n op.drop_table('music_scene')\n op.drop_index(op.f('ix_user_login'), table_name='user')\n op.drop_table('user')\n op.drop_index(op.f('ix_style_name'), table_name='style')\n op.drop_table('style')\n op.drop_index(op.f('ix_scene_name'), table_name='scene')\n op.drop_table('scene')\n op.drop_index(op.f('ix_music_title'), table_name='music')\n op.drop_table('music')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/4eb4bae04a02_db_re_creation.py","file_name":"4eb4bae04a02_db_re_creation.py","file_ext":"py","file_size_in_byte":3673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"563514035","text":"from networks.model_builder import ModelBuilder\nfrom networks.prepare_real_data import RealPreparer\nimport matplotlib.pyplot as plt\n\nmodel_name = 'rnn_gru_lstm_dence_ru_ru'\npath = 'models/' + model_name\n\ndata_builder, model_builder = RealPreparer(), ModelBuilder()\nx, y = data_builder.Run()\nmodel = model_builder.Build(x, y, test_part=0.1)\nmodel_builder.Save(path)\nmodel = model_builder.Load(path)\n\npredict = model.predict(x)\n\n\npredict = [predict[0][0]]*62 + [p[0] for p in predict]\n\nplt.plot(data_builder.y_plot, c='r')\nplt.plot(data_builder.x_plot)\nplt.plot(predict, c='b')\nplt.savefig(path + f'/{model_name}.png')\nplt.show()\n\n\n","sub_path":"networks/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"367637235","text":"'画图'\nimport tworoundattack.utils as utils\nimport matplotlib.pyplot as plt\n\n\n#画有和没有aes操作时间直方图\ndef drawhistogram():\n aestime = utils.readtime()\n aesntime = utils.readntime()\n for i in range(128):\n plt.subplot(16, 8, 1 + i)\n if i < 64:\n plt.hist(aestime[0][i], 100)\n else:\n plt.hist(aesntime[0][i - 64], 100)\n plt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"aesattackpy/build/lib/tworoundattack/drawhistogram.py","file_name":"drawhistogram.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"78362397","text":"from ert.enkf.plot import EnsembleDataFetcher, ObservationDataFetcher, RefcaseDataFetcher, BlockObservationDataFetcher, EnsembleGenKWFetcher, EnsembleGenDataFetcher, ObservationGenDataFetcher\nfrom ert.enkf.plot.ensemble_block_data_fetcher import EnsembleBlockDataFetcher\nfrom ert_gui.tools.plot.data import PlotData, ObservationPlotData, EnsemblePlotData, RefcasePlotData, HistogramPlotDataFactory, ReportStepLessHistogramPlotDataFactory\nfrom ert_gui.models import ErtConnector\nfrom ert_gui.models.mixins import ModelMixin\n\n\nclass PlotDataFetcher(ErtConnector, ModelMixin):\n\n def getPlotDataForKeyAndCases(self, key, cases):\n observation_data_fetcher = ObservationDataFetcher(self.ert())\n block_observation_data_fetcher = BlockObservationDataFetcher(self.ert())\n gen_kw_fetcher = EnsembleGenKWFetcher(self.ert())\n gen_data_fetcher = EnsembleGenDataFetcher(self.ert())\n\n if self.isBlockObservationKey(key):\n return self.fetchBlockObservationData(block_observation_data_fetcher, key, cases)\n\n elif self.isSummaryKey(key):\n return self.fetchSummaryData(observation_data_fetcher, key, cases)\n\n elif self.isGenKWKey(key):\n return self.fetchGenKWData(gen_kw_fetcher, key, cases)\n\n elif self.isGenDataKey(key):\n return self.fetchGenData(gen_data_fetcher, key, cases)\n\n else:\n raise NotImplementedError(\"Key %s not supported.\" % key)\n\n\n def isSummaryKey(self, key):\n ensemble_data_fetcher = EnsembleBlockDataFetcher(self.ert())\n return ensemble_data_fetcher.supportsKey(key)\n\n\n def isBlockObservationKey(self, key):\n block_observation_data_fetcher = BlockObservationDataFetcher(self.ert())\n return block_observation_data_fetcher.supportsKey(key)\n\n\n def isGenKWKey(self, key):\n gen_kw_fetcher = EnsembleGenKWFetcher(self.ert())\n return gen_kw_fetcher.supportsKey(key)\n\n\n def isGenDataKey(self, key):\n obs_gen_data_fetcher = ObservationGenDataFetcher(self.ert())\n return obs_gen_data_fetcher.supportsKey(key)\n\n\n def fetchGenData(self, gen_data_fetcher, key, cases):\n plot_data = PlotData(key)\n\n ensemble_data = ObservationGenDataFetcher(self.ert()).fetchData(key, cases)\n\n if len(ensemble_data) > 0:\n observation_plot_data = ObservationPlotData(key)\n\n observation_plot_data.setObservationData(ensemble_data[\"x\"], ensemble_data[\"y\"], ensemble_data[\"std\"], ensemble_data[\"continuous\"])\n observation_plot_data.updateBoundaries(ensemble_data[\"min_x\"], ensemble_data[\"max_x\"], ensemble_data[\"min_y\"], ensemble_data[\"max_y\"])\n plot_data.setObservationData(observation_plot_data)\n\n for case in cases:\n ensemble_data = gen_data_fetcher.fetchData(key, case)\n\n if len(ensemble_data) > 0:\n ensemble_plot_data = EnsemblePlotData(key, case)\n ensemble_plot_data.setEnsembleData(ensemble_data[\"x\"], ensemble_data[\"y\"], ensemble_data[\"min_y_values\"], ensemble_data[\"max_y_values\"])\n ensemble_plot_data.updateBoundaries(ensemble_data[\"min_x\"], ensemble_data[\"max_x\"], ensemble_data[\"min_y\"], ensemble_data[\"max_y\"])\n plot_data.addEnsembleData(ensemble_plot_data)\n\n return plot_data\n\n\n def fetchGenKWData(self, gen_kw_fetcher, key, cases):\n plot_data = PlotData(key)\n\n histogram_factory = ReportStepLessHistogramPlotDataFactory(key)\n\n for case in cases:\n ensemble_data = gen_kw_fetcher.fetchData(key, case)\n\n plot_data.setShouldUseLogScale(ensemble_data[\"use_log_scale\"])\n\n ensemble_plot_data = EnsemblePlotData(key, case)\n ensemble_plot_data.setEnsembleData(ensemble_data[\"x\"], ensemble_data[\"y\"], [], [])\n 
ensemble_plot_data.updateBoundaries(ensemble_data[\"min_x\"], ensemble_data[\"max_x\"], ensemble_data[\"min_y\"], ensemble_data[\"max_y\"])\n\n plot_data.addEnsembleData(ensemble_plot_data)\n\n histogram_factory.addEnsembleData(case, ensemble_data[\"x\"], ensemble_data[\"y\"], ensemble_data[\"min_y\"], ensemble_data[\"max_y\"])\n\n plot_data.setHistogramFactory(histogram_factory)\n\n return plot_data\n\n\n def fetchBlockObservationData(self, block_observation_data_fetcher, key, cases):\n plot_data = PlotData(key)\n\n data = block_observation_data_fetcher.fetchData(key)\n block_observation_plot_data = ObservationPlotData(key)\n selected_report_step_index = 0\n\n if len(data) > 0:\n data = data[selected_report_step_index]\n block_observation_plot_data.setObservationData(data[\"x\"], data[\"y\"], data[\"std\"], False)\n block_observation_plot_data.updateBoundaries(data[\"min_x\"], data[\"max_x\"], data[\"min_y\"], data[\"max_y\"])\n\n plot_data.setObservationData(block_observation_plot_data)\n\n for case in cases:\n ensemble_data = EnsembleBlockDataFetcher(self.ert()).fetchData(key, case)\n\n if len(ensemble_data) > 0:\n ensemble_data = ensemble_data[selected_report_step_index]\n ensemble_plot_data = EnsemblePlotData(key, case)\n ensemble_plot_data.setEnsembleData(ensemble_data[\"x\"], ensemble_data[\"y\"], ensemble_data[\"min_x_values\"], ensemble_data[\"max_x_values\"])\n ensemble_plot_data.updateBoundaries(ensemble_data[\"min_x\"], ensemble_data[\"max_x\"], ensemble_data[\"min_y\"], ensemble_data[\"max_y\"])\n plot_data.addEnsembleData(ensemble_plot_data)\n\n return plot_data\n\n\n def fetchSummaryData(self, observation_data_fetcher, key, cases):\n plot_data = PlotData(key)\n\n histogram_factory = HistogramPlotDataFactory(key)\n\n observation_data = observation_data_fetcher.fetchData(key)\n observation_plot_data = ObservationPlotData(key)\n observation_plot_data.setObservationData(observation_data[\"x\"], observation_data[\"y\"], observation_data[\"std\"], observation_data[\"continuous\"])\n observation_plot_data.updateBoundaries(observation_data[\"min_x\"], observation_data[\"max_x\"], observation_data[\"min_y\"], observation_data[\"max_y\"])\n plot_data.setObservationData(observation_plot_data)\n\n histogram_factory.setObservations(observation_data[\"x\"], observation_data[\"y\"], observation_data[\"std\"], observation_data[\"min_y\"], observation_data[\"max_y\"])\n\n\n\n refcase_data = RefcaseDataFetcher(self.ert()).fetchData(key)\n refcase_plot_data = RefcasePlotData(key)\n refcase_plot_data.setRefcaseData(refcase_data[\"x\"], refcase_data[\"y\"])\n refcase_plot_data.updateBoundaries(refcase_data[\"min_x\"], refcase_data[\"max_x\"], refcase_data[\"min_y\"], refcase_data[\"max_y\"])\n plot_data.setRefcaseData(refcase_plot_data)\n\n histogram_factory.setRefcase(refcase_data[\"x\"], refcase_data[\"y\"], refcase_data[\"min_y\"], refcase_data[\"max_y\"])\n\n for case in cases:\n ensemble_data = EnsembleDataFetcher(self.ert()).fetchData(key, case)\n\n ensemble_plot_data = EnsemblePlotData(key, case)\n ensemble_plot_data.setEnsembleData(ensemble_data[\"x\"], ensemble_data[\"y\"], ensemble_data[\"min_y_values\"], ensemble_data[\"max_y_values\"])\n ensemble_plot_data.updateBoundaries(ensemble_data[\"min_x\"], ensemble_data[\"max_x\"], ensemble_data[\"min_y\"], ensemble_data[\"max_y\"])\n plot_data.addEnsembleData(ensemble_plot_data)\n\n histogram_factory.addEnsembleData(case, ensemble_data[\"x\"], ensemble_data[\"y\"], ensemble_data[\"min_y\"], ensemble_data[\"max_y\"])\n\n 
plot_data.setHistogramFactory(histogram_factory)\n\n return plot_data\n\n","sub_path":"devel/python/python/ert_gui/tools/plot/data/plot_data_fetcher.py","file_name":"plot_data_fetcher.py","file_ext":"py","file_size_in_byte":7705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"639219275","text":"# -*- coding: utf-8 -*-\n\nfrom flask import render_template\nimport mysql.connector\n\ndef select(_SQL, cursor, conn, params=None):\n try:\n cursor.execute(_SQL, params)\n result = cursor.fetchall()\n except mysql.connector.Error as e:\n cursor.close()\n conn.close()\n err_output = \"Невозможно выполнить запрос к базе данных.\" + \" \" + str(e.errno) + \" \" + e.msg\n return None, render_template('err_output.html', err_output=err_output, nav_buttons=True, back='back')\n \n return result, None\n","sub_path":"includes/select.py","file_name":"select.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"103609438","text":"#!/usr/bin/python\n\"\"\"Syslog Replay Tool.\n\nAuthor: Nicholas Albright\nThis is a quick and dirty tool that will replay messages found in a text file to a remote syslog server.\nGeneric use example:\n MSSP Walks into a customer environment to do monitoring post security event.\n Customer had no central logging prior to the security event.\n On Box logging is still in place.\n Running this tool on existing syslog messages -> forward to SIEM will give us historial logs.\n* NOTE: TimeStamp's need to be 'generated time' not ingested time.\n\"\"\"\nimport sys\nimport time\nimport socket\nimport logging\nimport argparse\n\nlogging.basicConfig(level=logging.INFO, format='[%(asctime)s] %(message)s', datefmt='%Y-%m-%d %H:%M:%S')\nlog = logging.getLogger(__name__)\n\n\ndef sendlog(filename, server, port, rate):\n \"\"\"Send our Syslog Data to remote server.\"\"\"\n remote = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n row_count = 0\n rate_count = 0\n try:\n with open(filename, 'r') as fh:\n for line in fh:\n row_count += 1\n rate_count += 1\n if rate_count >= rate:\n time.sleep(1)\n rate_count = 0\n remote.sendto(line.rstrip(), (server, int(port)))\n except Exception as err:\n log.exception(err)\n\n return row_count\n\n\ndef optionparse():\n \"\"\"Parse Options.\"\"\"\n opts = argparse.ArgumentParser(description='Nicholas\\' Syslog Replay Tool')\n opts.add_argument('filename', help='File continaing syslog formatted messages')\n opts.add_argument('-p', '--port', help='Syslog port (UDP), Default: 514', default=514)\n opts.add_argument('-r', '--rate', help='Rate Per Second (or events per second), Default: 300', default=300)\n opts.add_argument('-s', '--server', help='Server to send messages to.')\n parsed_args = opts.parse_args()\n if not parsed_args.server:\n opts.print_help()\n sys.exit()\n return parsed_args\n\n\nif __name__ == '__main__':\n args = optionparse()\n start = time.time()\n row_count = sendlog(args.filename, args.server, args.port, int(args.rate))\n end = time.time()\n log.info('Addressed %d messages in %s seconds' % (row_count, str(end - start)))\n","sub_path":"syslog_replay.py","file_name":"syslog_replay.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"606160272","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\n\n# python std lib\nimport copy\nimport logging\nimport os\nimport re\n\n# phabfive imports\nfrom phabfive.exceptions import PhabfiveConfigException, PhabfiveRemoteException\n\n\n# 3rd party imports\nimport anyconfig\nimport appdirs\nfrom phabricator import Phabricator, APIError\n\n\nlogging.basicConfig(level=logging.INFO)\nlog = logging.getLogger(__name__)\nlogging.getLogger(\"anyconfig\").setLevel(logging.ERROR)\n\n\nCONFIGURABLES = [\"PHABFIVE_DEBUG\", \"PHAB_TOKEN\", \"PHAB_URL\"]\nDEFAULTS = {\"PHABFIVE_DEBUG\": False, \"PHAB_TOKEN\": \"\", \"PHAB_URL\": \"\"}\nREQUIRED = [\"PHAB_TOKEN\", \"PHAB_URL\"]\nVALIDATORS = {\n \"PHAB_URL\": \"^http(s)?://[a-zA-Z0-9._-]+/api/$\",\n \"PHAB_TOKEN\": \"^[a-zA-Z0-9-]{32}$\",\n}\nVALID_EXAMPLES = {\"PHAB_URL\": \"example: http://127.0.0.1/api/\"}\nCONFIG_EXAMPLES = {\n \"PHAB_TOKEN\": \"example: export PHAB_TOKEN=cli-RANDOMRANDOMRANDOMRANDOMRAND\",\n \"PHAB_URL\": \"example: echo PHAB_URL: https://dynamist.phacility.com/api/ >> ~/.config/phabfive.yaml\",\n}\n\n\nclass Phabfive(object):\n def __init__(self):\n\n # Get super-early debugging by `export PHABFIVE_DEBUG=1`\n if \"PHABFIVE_DEBUG\" in os.environ:\n log.setLevel(logging.DEBUG)\n log.info(\n \"Loglevel is: {}\".format(logging.getLevelName(log.getEffectiveLevel()))\n )\n\n self.conf = self.load_config()\n\n maxlen = 8 + len(max(dict(self.conf).keys(), key=len))\n for k, v in dict(self.conf).items():\n log.debug(\"{} {} {}\".format(k, \".\" * (maxlen - len(k)), v))\n\n # check for required configurables\n for k, v in dict(self.conf).items():\n if k in REQUIRED and not v:\n error = \"{} is not configured\".format(k)\n example = CONFIG_EXAMPLES.get(k)\n if example:\n error += \", \" + example\n raise PhabfiveConfigException(error)\n\n # check validity of configurables\n for k, v in VALIDATORS.items():\n if not re.match(VALIDATORS[k], self.conf[k]):\n error = \"{} is malformed\".format(k)\n example = VALID_EXAMPLES.get(k)\n if example:\n error += \", \" + example\n raise PhabfiveConfigException(error)\n self.phab = Phabricator(\n host=self.conf.get(\"PHAB_URL\"), token=self.conf.get(\"PHAB_TOKEN\")\n )\n\n self.verify_connection()\n\n def verify_connection(self):\n \"\"\"\n \"\"\"\n try:\n result = self.phab.user.whoami()\n except APIError as e:\n raise PhabfiveRemoteException(e.message)\n\n def load_config(self):\n \"\"\"\n Load configuration from configuration files and environment variables.\n\n Search order, latest has presedence:\n\n 1. hard coded defaults\n 2. /etc/phabfive.yaml\n 3. /etc/phabfive.d/*.yaml\n 4. ~/.config/phabfive.yaml\n 5. ~/.config/phabfive.d/*.yaml\n 6. 
environment variables\n \"\"\"\n environ = os.environ.copy()\n\n log.debug(\"Loading configuration defaults\")\n conf = copy.deepcopy(DEFAULTS)\n\n os.environ[\"XDG_CONFIG_DIRS\"] = \"/etc\"\n\n site_conf_file = os.path.join(appdirs.site_config_dir(\"phabfive\") + \".yaml\")\n log.debug(\"Loading configuration file: {}\".format(site_conf_file))\n anyconfig.merge(\n conf,\n {\n k: v\n for k, v in dict(\n anyconfig.load(site_conf_file, ignore_missing=True)\n ).items()\n if k in CONFIGURABLES\n },\n )\n\n site_conf_dir = os.path.join(\n appdirs.site_config_dir(\"phabfive\") + \".d\", \"*.yaml\"\n )\n log.debug(\"Loading configuration files: {}\".format(site_conf_dir))\n anyconfig.merge(\n conf,\n {\n k: v\n for k, v in dict(anyconfig.load(site_conf_dir)).items()\n if k in CONFIGURABLES\n },\n )\n\n user_conf_file = os.path.join(appdirs.user_config_dir(\"phabfive\")) + \".yaml\"\n log.debug(\"Loading configuration file: {}\".format(user_conf_file))\n anyconfig.merge(\n conf,\n {\n k: v\n for k, v in dict(\n anyconfig.load(user_conf_file, ignore_missing=True)\n ).items()\n if k in CONFIGURABLES\n },\n )\n\n user_conf_dir = os.path.join(\n appdirs.user_config_dir(\"phabfive\") + \".d\", \"*.yaml\"\n )\n log.debug(\"Loading configuration files: {}\".format(user_conf_dir))\n anyconfig.merge(\n conf,\n {\n k: v\n for k, v in dict(anyconfig.load(user_conf_dir)).items()\n if k in CONFIGURABLES\n },\n )\n\n log.debug(\"Loading configuration from environment\")\n anyconfig.merge(conf, {k: v for k, v in environ.items() if k in CONFIGURABLES})\n\n return conf\n","sub_path":"phabfive/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":5050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"405948679","text":"# from python\n\n# Django modules\nfrom django.shortcuts import render_to_response\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.core.paginator import Paginator, InvalidPage, EmptyPage\nfrom django.template import RequestContext\nfrom django.db import transaction\nfrom django.db.models import get_model\nfrom django.views.decorators.csrf import csrf_protect\n\n# Our modules\nfrom infra.forms.audit_log import SearchAuditLogForm\nfrom infra.models import AuditLog\nfrom bin.constants import PAGINATE_BY\nfrom infra.custom.build_query_filter import build_query_filter\nimport infra.custom.custom_json as custom_json\n\n# Decorators to ensure that user is logged in and\n# is a staff member\n@login_required\n@staff_member_required\n@csrf_protect\n# Have to fix all exception before enabling commit manually.\n# When turned on, will hide any exception behind its TransactionManagementError\n# so remark off when debugging...\n@transaction.commit_manually\ndef show_audit_log(request, app_label, object_name, object_id):\n \"\"\"\n Display audit log for a app.model and id.\n Allows further search by username, fieldname\n and changed date.\n\n Uses template infra/show_audit_log.html\n\n \"\"\"\n # Initialise Error Messages\n error_messages = []\n\n # Model and Object Id is mandatory\n app_model = get_model(app_label, object_name)\n if app_model is None:\n error_messages.append(_(\"App.Model %(app_label)s.%(object_name)s passed in does not exist in this Application\") % \n {'app_label': app_label, 'object_name': object_name}) \n # Get the model instance description (its unicode())\n try:\n model_instance = app_model.objects.get(pk=object_id)\n instance_desc = unicode(model_instance)\n except app_model.DoesNotExist:\n # Deleted instance?\n instance_desc = \" with Primary Key \" + str(object_id) \n\n # Always query audit header for this model and id\n queryset_criteria = ['and', ['iexact', 'audit_header__model_name', app_label + '.' 
+ object_name],\n ['exact', 'audit_header__primary_key_id', object_id]]\n\n # Post happens when user performs a search\n if request.method == 'POST':\n\n # Decrement the number of levels we have to go back (so that we\n # will return to the Browse page\n back_levels = int(request.POST['back_levels']) - 1\n\n search_form = SearchAuditLogForm(request.POST)\n # User pressed search button \n if request.POST.has_key('search'):\n if search_form.is_valid():\n # Perform Search\n if search_form.cleaned_data['changed_by']:\n queryset_criteria.append(['exact', 'audit_header__changed_by', search_form.cleaned_data['changed_by']])\n if search_form.cleaned_data['changed_on']:\n queryset_criteria.append(['gte', 'audit_header__changed_on', search_form.cleaned_data['changed_on']])\n if search_form.cleaned_data['field_name']:\n queryset_criteria.append(['exact', 'field_name', search_form.cleaned_data['field_name']])\n else:\n # else user is doing paging\n # We need to restore previously save search criteria\n queryset_criteria = custom_json.loads(request.POST.get('queryset_criteria'))\n else:\n # GET Method (1st time this page is called)\n search_form = SearchAuditLogForm()\n back_levels = -1\n\n # Perform query based on search criteria\n audit_logs = AuditLog.objects.filter(build_query_filter(queryset_criteria)).order_by('audit_header__changed_on', 'id')\n\n # Pass tasks found to the paginator\n paginator = Paginator(audit_logs, PAGINATE_BY)\n # Page requested, default to page 1 when not requested\n try:\n page = int(request.POST.get('page', '1'))\n audit_log_page = paginator.page(page)\n except (InvalidPage, EmptyPage):\n # Display last page when invalid or empty page\n audit_log_page = paginator.page(paginator.num_page)\n\n # Even reads need to be committed\n transaction.commit()\n # Display page to user. We have to commit again, possibly because render to response will \n # dirty trx buffer again\n with transaction.commit_on_success(): return render_to_response('infra/show_audit_log.html', {'search_form': search_form, \n 'audit_log_page' : audit_log_page, 'queryset_criteria': custom_json.dumps(queryset_criteria),\n 'object_desc': app_model._meta.verbose_name, 'instance_desc': instance_desc, 'back_levels': back_levels,\n 'error_messages': error_messages, 'media': search_form.media,\n }, context_instance=RequestContext(request))\n","sub_path":"infra/views/show_audit_log.py","file_name":"show_audit_log.py","file_ext":"py","file_size_in_byte":4787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"612001103","text":"# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-\n### BEGIN LICENSE\n# Copyright (C) 2020 \n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n### END LICENSE\n\nimport optparse, sys\nfrom locale import gettext as _\n\nimport gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk # pylint: disable=E0611\n\nfrom remarkable import RemarkableWindow\n\nfrom remarkable_lib import set_up_logging, get_version\n\ndef parse_options():\n \"\"\"Support for command line options\"\"\"\n parser = optparse.OptionParser(version=\"%%prog %s\" % get_version())\n parser.add_option(\n \"-v\", \"--verbose\", action=\"count\", dest=\"verbose\",\n help=_(\"Show debug messages (-vv debugs remarkable_lib also)\"))\n (options, args) = parser.parse_args()\n\n set_up_logging(options)\n\ndef main():\n 'constructor for your class instances'\n parse_options()\n\n # Run the application. \n window = RemarkableWindow.RemarkableWindow()\n\n window.show_all()\n window.check_settings() # Load settings after app displayed to fix bugs!\n Gtk.main()\n","sub_path":"remarkable/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"24588015","text":"\"\"\"\r\nThis is the adaptation of Ion module of Numworks.\r\nPlease don't use keyboard and this module at the same time.\r\n\"\"\"\r\n### v All keys of Numworks v ###\r\nKEY_LEFT = 0\r\nKEY_UP = 1\r\nKEY_DOWN = 2\r\nKEY_RIGHT = 3\r\nKEY_OK = 4\r\nKEY_BACK = 5\r\nKEY_HOME = 6\r\nKEY_ONOFF = 7\r\nKEY_SHIFT = 12\r\nKEY_ALPHA = 13\r\nKEY_XNT = 14\r\nKEY_VAR = 15\r\nKEY_TOOLBOX = 16\r\nKEY_BACKSPACE = 17\r\nKEY_EXP = 18\r\nKEY_LN = 19\r\nKEY_LOG = 20\r\nKEY_IMAGINARY = 21\r\nKEY_COMMA = 22\r\nKEY_POWER = 23\r\nKEY_SINE = 24\r\nKEY_COSINE = 25\r\nKEY_TANGENT = 26\r\nKEY_PI = 27\r\nKEY_SQRT = 28\r\nKEY_SQUARE = 29\r\nKEY_SEVEN = 30\r\nKEY_EIGHT = 31\r\nKEY_NINE = 32\r\nKEY_LEFTPARENTHESIS = 33\r\nKEY_RIGHTPARENTHESIS = 34\r\nKEY_FOUR = 36\r\nKEY_FIVE = 37\r\nKEY_SIX = 38\r\nKEY_MULTIPLICATION = 39\r\nKEY_DIVISION = 40\r\nKEY_ONE = 42\r\nKEY_TWO = 43\r\nKEY_THREE = 44\r\nKEY_PLUS = 45\r\nKEY_MINUS = 46\r\nKEY_ZERO = 48\r\nKEY_DOT = 49\r\nKEY_EE = 50\r\nKEY_ANS = 51\r\nKEY_EXE = 52\r\nKEYS = [\r\n \"left\", \"up\", \"down\", \"right\", \"return\", \"del\", \"home\", \"end\", None, None, \r\n None, None, \"shift\", \"ctrl\", \":\", \";\", \"\\\"\", \"backspace\", \"[\", \"]\", \r\n \"{\", \"}\", \", \", \"^\", \"s\", \"c\", \"t\", \"p\", \"<\", \"²\", \r\n \"7\", \"8\", \"9\", \"(\", \")\", None, \"4\", \"5\", \"6\", \"*\", \r\n \"/\", None, \"1\", \"2\", \"3\", \"+\", \"-\", None, \"0\", \".\", \r\n \"insert\", \"@\", \"enter\"\r\n]\r\n### ^ All keys of Numworks ^ ###\r\n\r\nfrom keyboard import is_pressed\r\n\r\ndef keydown(key):\r\n if key < 0 or key > 52 or KEYS[key] == None: return False\r\n else: return is_pressed(KEYS[key])\r\n\r\ndef get_keys(): return KEYS\r\n","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"383737730","text":"import numpy as np\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.animation as animation\r\n#matplotlib.use('Agg')\r\nimport time\r\nimport datetime\r\nimport math\r\nimport random\r\nimport copy\r\nimport pandas as pd\r\n#from pandas.tools import plotting # 高度なプロットを行うツールのインポート\r\nfrom scipy.cluster.hierarchy import linkage, dendrogram, fcluster\r\nfrom sklearn.cluster import KMeans # K-means クラスタリングをおこなう\r\nimport xlrd\r\n\r\nplt.style.use('ggplot')\r\nfig = plt.figure(figsize=(5, 5))\r\nax = fig.add_subplot(111)\r\n\r\nvn = 6 #vehicle number\r\n#cp = 50 #collection place+1(include depot)\r\n#cp_list = [i for i in range(1,cp)]\r\n#MAP_SIZE = 10\r\n\r\n'''\r\ncp_xy = np.random.rand(cp, 2) * MAP_SIZE #行列を生成。rand(行, 列)\r\nk = 0\r\nfor j in range(vn):\r\n print('!!!',j)\r\n if j != 0:\r\n k += math.ceil(cp/vn)-1\r\n p = 10*MAP_SIZE*np.random.rand()*np.random.choice([-1,1])\r\n q = 10*MAP_SIZE*np.random.rand()*np.random.choice([-1,1])\r\n for i in range(math.ceil(cp/vn)-1):\r\n print(i+k)\r\n cp_xy[i+k][0] += p\r\n cp_xy[i+k][1] += q\r\n\r\ncp_xy[0][0] = 0.\r\ncp_xy[0][1] = 0.\r\n\r\n\r\n#print(cp_xy)\r\n'''\r\n\r\n#エクセルデータから配送先の位置データを持ってくる-------------------------\r\nbook = xlrd.open_workbook('city_position2.xlsx')\r\nsheet = book.sheet_by_name('Sheet1')\r\ndef get_list(sheet):\r\n return [sheet.row_values(row) for row in range(sheet.nrows)]\r\ncp_xy = get_list(sheet)\r\ncp_xy = np.array(cp_xy) #リストをnumpyに変換\r\n\r\nx = cp_xy[:, 0] #すべての行の0列目\r\ny = cp_xy[:, 1] #すべての行の1列目\r\n\r\ncp = sheet.nrows #都市数\r\n\r\nprint(cp)\r\nprint(x)\r\nprint(y)\r\n#-------------------------------------------------------------------\r\n\r\nd_matrix = np.sqrt((x[:, np.newaxis] - x[np.newaxis, :]) ** 2 +\r\n (y[:, np.newaxis] - y[np.newaxis, :]) ** 2) #distance matrix\r\n\r\norder = [[] for i in range(vn)] #各車両の順番情報が入っている。2番目の車両の1番目に訪れる番号はtotal_order[1][0]\r\n\r\n\r\n\r\n#print(cp_list)\r\n#print(cp_list[0:3])\r\n\r\n\r\ndef separate_solution(cp_list, vn, order):\r\n #初期順番を作製する.各車両に一つずつ数字を入れていく。\r\n #random.shuffle(cp_list)\r\n k = 0\r\n while k != len(cp_list):\r\n for i in range(vn):\r\n if k == len(cp_list):\r\n break\r\n order[i].append(cp_list[k])\r\n k += 1\r\n #print('randam_order = ',t_order)\r\n for i in range(vn): #depotを加える\r\n order[i].insert(0, 0)\r\n\r\n\r\ndef calculate_total_distance(order, d_matrix):\r\n \"\"\"Calculate total distance traveled for given visit order\"\"\"\r\n total_distance = 0\r\n for i in range(vn):\r\n idx_from = np.array(order[i]) #訪問順\r\n idx_to = np.array(order[i][1:] + [order[i][0]]) #訪問順をひとつずらしたリスト\r\n distance_arr = d_matrix[idx_from, idx_to] #idx_toのk番目が行、idx_fromのk番目が列を指定する行列\r\n total_distance += np.sum(distance_arr)\r\n\r\n return total_distance\r\n\r\ndef calculate_each_distance(i, order, d_matrix):\r\n \"\"\"Calculate each distance traveled for given visit order\"\"\"\r\n idx_from = np.array(order[i]) #訪問順\r\n idx_to = np.array(order[i][1:] + [order[i][0]]) #訪問順をひとつずらしたリスト\r\n #print(idx_from)\r\n #print(idx_to)\r\n distance_arr = d_matrix[idx_from, idx_to] #idx_toのk番目が行、idx_fromのk番目が列を指定する行列\r\n\r\n return np.sum(distance_arr)\r\n\r\n#配送先を移してクラスタ的に改善\r\ndef k_means_method(order, d_matrix, vn):\r\n #d_matrix_del = np.delete(d_matrix, [0, 0], 1)\r\n d_matrix_pd = pd.DataFrame(d_matrix)\r\n kmeans_model = KMeans(n_clusters=vn, random_state=10).fit(d_matrix_pd.iloc[:, 1:])\r\n labels = kmeans_model.labels_\r\n\r\n print(labels)\r\n\r\n for i in range(cp):\r\n order[labels[i]].append(i)\r\n\r\n for i in range(vn): 
#depotを加える\r\n order[i].insert(0, 0)\r\n\r\n return order\r\n\r\n\r\ndef visualize_visit_order(order, cp_xy, vn):\r\n \"\"\"Visualize traveling path for given visit order\"\"\"\r\n for i in range(vn):\r\n route = np.array(order[i] + [order[i][0]]) # add point of departure\r\n x_arr = cp_xy[:, 0][route] #順序に並び替え\r\n y_arr = cp_xy[:, 1][route] #順序に並び替え\r\n\r\n plt.plot(x_arr, y_arr, 'o-') #入力の順番に注意\r\n\r\n plt.show()\r\n #plt.savefig('VRP_myself'+str(datetime.datetime.now())+'.png')\r\n\r\n\r\n\r\n\r\n#以下、局所探索法、特に2-opt法-------------------------------------------------------------\r\n\r\ndef calculate_2opt_total_distance(order, distance_matrix):\r\n \"\"\"Calculate total distance traveled for given visit order\"\"\"\r\n idx_from = np.array(order) #訪問順\r\n idx_to = np.array(order[1:] + [order[0]]) #訪問順をひとつずらしたリスト\r\n distance_arr = distance_matrix[idx_from, idx_to] #idx_toのk番目が行、idx_fromのk番目が列を指定する行列\r\n\r\n return np.sum(distance_arr)\r\n\r\n#移動距離の差分を計算する\r\ndef calculate_2opt_exchange_cost(visit_order, i, j, distance_matrix):\r\n \"\"\"Calculate the difference of cost by applying given 2-opt exchange\"\"\"\r\n n_cities = len(visit_order)\r\n a, b = visit_order[i], visit_order[(i + 1) % n_cities] #最大のcity番号だけ特殊。それ以外は隣のcity番号。\r\n c, d = visit_order[j], visit_order[(j + 1) % n_cities]\r\n\r\n cost_before = distance_matrix[a, b] + distance_matrix[c, d]\r\n cost_after = distance_matrix[a, c] + distance_matrix[b, d]\r\n return cost_after - cost_before\r\n\r\n#交換後の訪問順序を計算\r\ndef apply_2opt_exchange(visit_order, i, j):\r\n \"\"\"Apply 2-opt exhanging on visit order\"\"\"\r\n\r\n tmp = visit_order[i + 1: j + 1]\r\n tmp.reverse()\r\n visit_order[i + 1: j + 1] = tmp\r\n\r\n return visit_order\r\n\r\n#近傍探索の実装。現状の訪問経路の、各2パスを入れ替える操作を全組み合わせで実施します。\r\n#全通りを計算しておき、もっとも総移動距離を減らせる交換を実際に適用します。これ以上改善できなければNoneを返すことにします。\r\ndef improve_with_2opt(visit_order, distance_matrix):\r\n \"\"\"Check all 2-opt neighbors and improve the visit order\"\"\"\r\n n_cities = len(visit_order)\r\n cost_diff_best = 0.0\r\n i_best, j_best = None, None\r\n\r\n #すべての組み合わせの中で一番ベスト(一番効果のある)な入れ替えを決定するforloop\r\n for i in range(0, n_cities - 2):\r\n for j in range(i + 2, n_cities):\r\n if i == 0 and j == n_cities - 1: #最初と最後の順番は入れ替えない??\r\n continue #条件を満たしたら以下をスキップ(条件を満たしてないなら実行)\r\n\r\n cost_diff = calculate_2opt_exchange_cost(\r\n visit_order, i, j, distance_matrix)\r\n #costの差分がより小さいならそれをbestにする\r\n if cost_diff < cost_diff_best:\r\n cost_diff_best = cost_diff\r\n i_best, j_best = i, j\r\n #costの差分が0より小さいなら(少しでも改善されるなら)2点の順番を入れ替える\r\n if cost_diff_best < 0.0:\r\n visit_order_new = apply_2opt_exchange(visit_order, i_best, j_best)\r\n return visit_order_new\r\n else:\r\n return None\r\n\r\n#改善ができる限り、上の近傍探索を繰り返す\r\ndef local_search(visit_order, distance_matrix, improve_func): #improve_func = improve_with_2opt\r\n \"\"\"Main procedure of local search\"\"\"\r\n cost_total = calculate_2opt_total_distance(visit_order, distance_matrix)\r\n\r\n while True:\r\n improved = improve_func(visit_order, distance_matrix)\r\n if not improved: #改善できないならbreak #わからない\r\n break\r\n\r\n visit_order = improved\r\n\r\n return visit_order\r\n\r\n\r\ndef opt_improve(vn, improve_order, d_matrix, improve_with_2opt):\r\n opt_improve_order = [[] for i in range(vn)]\r\n for i in range(vn):\r\n opt_improve_order[i] = local_search(improve_order[i], d_matrix, improve_with_2opt)\r\n\r\n return 
opt_improve_order\r\n\r\n#ここまで改善法-----------------------------------------------------------------------------------------\r\n\r\n\r\n#ある程度改善する--------------------------------------------------------------------\r\n#total_distance = calculate_total_distance(order, d_matrix) #改善前の総距離\r\n\r\n#1.クラスタ的に配送先を決める\r\nimprove_order = k_means_method(order, d_matrix,vn)\r\nprint('order = ' ,improve_order)\r\nt = calculate_total_distance(improve_order, d_matrix)\r\n#print(total_distance, '-->', t)\r\n\r\n\r\n#2.各車両に対して2optで改善\r\nopt_improve_order = opt_improve(vn, improve_order, d_matrix, improve_with_2opt)\r\nt_opt = calculate_total_distance(opt_improve_order, d_matrix)\r\nprint(t, '-->', t_opt)\r\norder = opt_improve_order\r\n\r\nvisualize_visit_order(order, cp_xy, vn)\r\n\r\n'''\r\n#以下で、以上で求めたルートを基に配送(収集)する--------------------------------------\r\n\r\nfor i in range(cp): #配送先のプロット\r\n ax.scatter(x[i], y[i], c='red')\r\nax.scatter(x[0], y[0], marker=',', s=200, c='b')\r\n\r\nstate = [0 for i in range(vn)] #各車両の居場所を収納する。\r\nnext_state = []\r\nfor i in range(vn):\r\n if len(order[i]) > 1:\r\n next_state.append(order[i][1])\r\n else:\r\n next_state.append(0)\r\n\r\nims =[]\r\nfor i in range(max([len(order[i]) for i in range(vn)])):\r\n\r\n for j in range(vn):\r\n if len(order[j]) > i+1:\r\n state[j] = order[j][i]\r\n next_state[j] = order[j][i+1]\r\n else:\r\n state[j] = next_state[j]\r\n next_state[j] = 0\r\n\r\n #print('state = ',state,'next_state = ', next_state)\r\n\r\n im1=[0 for i in range(vn)]\r\n im2=[0 for i in range(vn)]\r\n each_vn = []\r\n tot_vn = []\r\n for k in range(vn):\r\n im1[k] = ax.plot([x[state[k]], x[next_state[k]]], [y[state[k]], y[next_state[k]]], 'r' '-',lw=7)\r\n each_vn += im1[k]\r\n\r\n route = np.array(order[k] + [order[k][0]]) # add point of departure\r\n x_arr = cp_xy[:, 0][route] #順序に並び替え\r\n y_arr = cp_xy[:, 1][route] #順序に並び替え\r\n im2[k] = ax.plot(x_arr, y_arr, 'k', '--')\r\n tot_vn += im2[k]\r\n\r\n ims.append(each_vn + tot_vn)\r\n\r\n\r\nani = animation.ArtistAnimation(fig, ims, interval=500, repeat_delay=1500)\r\n#ani.save('')\r\nplt.show()\r\n'''\r\n","sub_path":"VRP/VRP_myself4_cluster_k-mean_from_excel.py","file_name":"VRP_myself4_cluster_k-mean_from_excel.py","file_ext":"py","file_size_in_byte":10549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"499544703","text":"# ../scripts/included/gg_welcome_msg/gg_welcome_msg.py\r\n\r\n'''\r\n$Rev$\r\n$LastChangedBy$\r\n$LastChangedDate$\r\n'''\r\n\r\n# =============================================================================\r\n# >> IMPORTS\r\n# =============================================================================\r\n# Python Imports\r\nfrom __future__ import with_statement\r\nimport time\r\n\r\n# Eventscripts Imports\r\nimport es\r\nfrom cmdlib import registerSayCommand\r\nfrom cmdlib import unregisterSayCommand\r\nimport popuplib\r\n\r\n# GunGame Imports\r\nfrom gungame51.core.addons.shortcuts import AddonInfo\r\nfrom gungame51.core import get_game_dir\r\nfrom gungame51.core import gungame_info\r\nfrom gungame51.core.addons.shortcuts import get_loaded_addon_list\r\n\r\n\r\n# =============================================================================\r\n# >> ADDON REGISTRATION/INFORMATION\r\n# =============================================================================\r\ninfo = AddonInfo()\r\ninfo.name = 'gg_welcome_msg'\r\ninfo.title = 'GG Welcome Message'\r\ninfo.author = 'GG Dev Team'\r\ninfo.version = \"5.1.%s\" % \"$Rev$\".split('$Rev: ')[1].split()[0]\r\n\r\n\r\n# =============================================================================\r\n# >> GLOBAL VARIABLES\r\n# =============================================================================\r\n# Store the title of the menu\r\ntitle = 'GunGame%s -- Welcome Message' % gungame_info('version')\r\ngg_welcome_msg_timeout = es.ServerVar('gg_welcome_msg_timeout')\r\n\r\n# Create an empty list for detecting if a player just joined the server\r\nmessageQueue = []\r\n\r\n\r\n# =============================================================================\r\n# >> LOAD & UNLOAD\r\n# =============================================================================\r\ndef load():\r\n # Register !welcome\r\n registerSayCommand('!welcome', welcome, 'Displays a !welcome menu.')\r\n\r\n # Build the main gg_welcome popup\r\n buildPopups()\r\n\r\n\r\ndef unload():\r\n # Unregister !welcome\r\n unregisterSayCommand('!welcome')\r\n\r\n # Clean up existing popups\r\n if popuplib.exists('gg_welcome'):\r\n popuplib.delete('gg_welcome')\r\n if popuplib.exists('gg_welcome_include'):\r\n popuplib.delete('gg_welcome_include')\r\n if popuplib.exists('gg_welcome_custom'):\r\n popuplib.delete('gg_welcome_custom')\r\n\r\n\r\n# =============================================================================\r\n# >> GAME EVENTS\r\n# =============================================================================\r\ndef player_activate(event_var):\r\n userid = event_var['userid']\r\n\r\n # If the user is already in the que to receive the welcome message, stop\r\n # here\r\n if userid in messageQueue:\r\n return\r\n\r\n # Add the user to the welcome message queue\r\n messageQueue.append(userid)\r\n\r\n\r\ndef player_team(event_var):\r\n userid = event_var['userid']\r\n\r\n # If the user is in the queue\r\n if userid in messageQueue:\r\n # Send them the welcome message\r\n welcome(userid, '')\r\n # Remove them from the queue\r\n messageQueue.remove(userid)\r\n\r\n\r\n# =============================================================================\r\n# >> CUSTOM/HELPER FUNCTIONS\r\n# =============================================================================\r\ndef buildPopups():\r\n # Get the custom text for the popup\r\n with get_game_dir('cfg/gungame51/' +\r\n 'included_addon_configs/gg_welcome_msg.txt').open() as customFile:\r\n customText = 
customFile.readlines()\r\n\r\n # Remove unnecessary characters\r\n customText = [x.strip() for x in customText]\r\n # Ignore commented lines\r\n customText = filter(lambda x: x[:2] != '//', customText)\r\n\r\n # Create a new popuplib instance\r\n menu = popuplib.create('gg_welcome')\r\n menu.addline(title)\r\n menu.addline('-' * 30)\r\n\r\n # For each line of custom text\r\n for line in customText:\r\n # If there is nothing on the line, make it a single space to show up\r\n # on the menu\r\n if not line:\r\n line = ' '\r\n\r\n # Replace variables in the line\r\n line = line.replace('$server', str(es.ServerVar('hostname')))\r\n line = line.replace('$date', time.strftime('%d/%m/%Y'))\r\n line = line.replace('$time', time.strftime('%H:%M:%S'))\r\n\r\n # Add the line to the menu\r\n menu.addline(line)\r\n\r\n # Create the rest of the menu\r\n menu.addline('-' * 30)\r\n menu.addline('->1. Included Addons')\r\n menu.select(1, welcome_handler)\r\n menu.addline('->2. Custom Addons')\r\n menu.select(2, welcome_handler)\r\n menu.addline('-' * 30)\r\n menu.addline('0. Cancel')\r\n\r\n # Set the timeout for the menu\r\n menu.timeout('send', int(gg_welcome_msg_timeout))\r\n menu.timeout('view', int(gg_welcome_msg_timeout))\r\n\r\n\r\ndef welcome(userid, args):\r\n # Do not send to bots or non-existent players\r\n if es.getplayersteamid(userid) == 'BOT' or not es.exists('userid', userid):\r\n return\r\n\r\n # If the user has the popup open, remove it\r\n popuplib.unsendname('gg_welcome', userid)\r\n # Send the popup\r\n popuplib.send('gg_welcome', userid)\r\n\r\n\r\ndef welcome_handler(userid, choice, popupname):\r\n # If they selected to see the included addons list\r\n if choice == 1:\r\n # If the menu exists, delete it\r\n if popuplib.exists('gg_welcome_include'):\r\n popuplib.delete('gg_welcome_include')\r\n # Create an easylist instance\r\n menu = popuplib.easylist('gg_welcome_include',\r\n get_loaded_addon_list('included'))\r\n elif choice == 2:\r\n # If the menu exists, delete it\r\n if popuplib.exists('gg_welcome_custom'):\r\n popuplib.delete('gg_welcome_custom')\r\n # Create an easylist instance\r\n menu = popuplib.easylist('gg_welcome_custom',\r\n get_loaded_addon_list('custom'))\r\n\r\n # Set the menu's title\r\n menu.settitle(title)\r\n # When the menu is closed, go back to the welcome message\r\n menu.submenu(0, 'gg_welcome')\r\n # Set the timeout for the menu\r\n menu.timeout('send', int(gg_welcome_msg_timeout))\r\n menu.timeout('view', int(gg_welcome_msg_timeout))\r\n # Send the popup\r\n menu.send(userid)\r\n","sub_path":"cstrike/addons/eventscripts/gungame51/scripts/included/gg_welcome_msg/gg_welcome_msg.py","file_name":"gg_welcome_msg.py","file_ext":"py","file_size_in_byte":6193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"179017616","text":"\n\nchromo_set = set()\n\nwith open (\"p8/blast/pred34inyeast.txt\", \"r\") as fh:\n for line in fh:\n if \"sp|\" in line:\n get_line = line.rstrip().split(\"_YEAST\")\n chromo_set.add(get_line[0].split(\"|\")[2])\n\nwith open (\"p8/overlapsets_uniref.txt\", \"w\") as wh:\n with open (\"p8/experiments.txt\", \"r\") as fh2:\n for line in fh2: \n expset = set(line.strip().split(\" \"))\n overlen = len(chromo_set.intersection(expset))\n wh.write(\"----------------\" + \"\\n\")\n wh.write(\"Overlapping genes:\" + \"\\n\")\n wh.write(\" \".join(chromo_set.intersection(expset)) +\"\\n\" )\n wh.write(\"Length of overlap:\" + \"\\n\" + str(overlen) + \"\\n\" + \"\\n\")\n wh.write(\"Full set:\" + \"\\n\")\n wh.write(\" \".join(expset) + \"\\n\" + \"\\n\")\n","sub_path":"p8/scripts (copy)/parse_blast.py","file_name":"parse_blast.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"277844991","text":"import numpy as np\r\nimport cv2\r\nimport math\r\ndef waveleteTransform(img):\r\n # 将图像像素类型转换成浮点型\r\n image = img.astype(float)\r\n height, width = image.shape[:2]\r\n result = np.zeros((height, width, 3), float)\r\n\r\n # 水平方向第一次处理\r\n width2 = int(width / 2)\r\n for i in range(height):\r\n\r\n for j in range(0, width - 1, 2):\r\n # 分为奇序列和偶序列进行处理\r\n j1 = (int)(j + 1)\r\n j2 = (int)(j / 2)\r\n # 向下取整\r\n width3 = width2 + j2\r\n # 采用提升方案\r\n # xc=(xo + xe)/2\r\n # xd=(x0 - xd)/2\r\n # [xc xd]\r\n result[i, j2] = ((image[i, j] + image[i, j1]) / 2)\r\n result[i, width3] = ((image[i, j] - image[i, j1]) / 2)\r\n\r\n\r\n # copy array\r\n image = np.copy(result)\r\n result=np.zeros((height, width, 3), float)\r\n # 垂直方向第一次处理\r\n height2 = int(height / 2)\r\n for i in range(0, height - 1, 2):\r\n for j in range(0, width):\r\n i1 = (int)(i + 1)\r\n i2 = (int)(i / 2)\r\n height3 = height2 + i2\r\n\r\n result[i2, j] = (image[i, j] + image[i1, j]) / 2\r\n result[height3, j] = (image[i, j] - image[i1, j]) / 2\r\n\r\n image = np.copy(result).astype(np.uint8)\r\n HH = np.copy(image[height2+1:,width2+1:])\r\n LH = np.copy(image[height2+1:,:width2+1])\r\n HL = np.copy(image[:height2+1,width2+1:])\r\n LL = np.copy(image[:height2+1,:width2+1])\r\n return [image,HH,LH,HL,LL]\r\n\r\n\r\ndef denoise(img):\r\n #采用软阈值法进行去噪\r\n image = img.astype(float)\r\n #sigma = abs(np.median(image))/0.6745\r\n #threshold = math.sqrt(sigma*(2*math.log(len(image))))\r\n #image[(abs(image) threshold] -= threshold\r\n #image[image < (-threshold)] += threshold\r\n image[(abs(image) < 256)] = 0.0\r\n image = image.astype(np.uint8)\r\n return image\r\n\r\ndef inverseWaveleteTransform(img):\r\n image = img.astype(float)\r\n nr, nc = image.shape[:2]\r\n result = np.zeros((nr, nc, 3), float)\r\n nr2 = nr / 2\r\n\r\n for i in range(0, nr - 1, 2):\r\n for j in range(0, nc):\r\n\r\n i1 = (int) (i + 1)\r\n i2 = (int) (i / 2)\r\n nr3 = (int) (nr2 + i2)\r\n\r\n result[i, j] = ((image[i2, j] / 2) + (image[nr3, j] / 2)) * 2\r\n result[i1, j] = ((image[i2, j] / 2) - (image[nr3, j] / 2)) * 2\r\n\r\n # //copy array\r\n image = np.copy(result)\r\n\r\n # // Horizontal processing:\r\n nc2 = nc / 2\r\n for i in range(0, nr):\r\n for j in range(0, nc - 1, 2):\r\n\r\n j1 = (int) (j + 1)\r\n j2 = (int) (j / 2)\r\n nc3 = (int) (j2 + nc2)\r\n result[i, j] = ((image[i, j2] / 2) + (image[i, nc3] / 2)) * 2\r\n result[i, j1] = ((image[i, j2] / 2) - (image[i, nc3] / 2)) * 2\r\n\r\n resultimg = result.astype(np.uint8)\r\n return resultimg\r\n\r\n\r\nif __name__ == '__main__':\r\n # loadImage & copy image\r\n image = cv2.imread(\"./image/image_noise.jpg\")\r\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n cv2.imwrite('./image/gray_image_noise.jpg', image)\r\n height, width = image.shape[:2]\r\n image2, HH, LH, HL, LL = waveleteTransform(image)\r\n cv2.imwrite('./image/DWT.jpg', image2)\r\n HH_d = denoise(HH)\r\n HL_d = denoise(HL)\r\n LH_d = denoise(LH)\r\n image3 = np.copy(image2)\r\n #图3是去噪后的分解图\r\n\r\n image3[int(height/2)+1:,int(width/2)+1:] = HH_d;\r\n image3[int(height/2)+1:,:int(width/2)+1] = LH_d;\r\n image3[:int(height/2)+1,int(width/2)+1:] = HL_d;\r\n cv2.imwrite('./image/DWT_denoise.jpg', image3)\r\n image4 = inverseWaveleteTransform(image2)\r\n #cv2.imwrite('./image/IDWT.jpg', image4)\r\n #图像4是重构后的原图\r\n image5 = inverseWaveleteTransform(image3)\r\n cv2.imwrite('./image/IDWT_denoise.jpg', image5)\r\n 
#图像5是重构后的去噪图像\r\n","sub_path":"DWT_denoising.py","file_name":"DWT_denoising.py","file_ext":"py","file_size_in_byte":3912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"82599203","text":"sites = [\"pythonworld.ru\",\"telegram.org\",\"wikipedia.org\"]\r\n\r\nname = input(\"Введите что вы хотите дабавить \\n\")\r\nsites.append(name)\r\n\r\n\r\nname_1 = input(\"Введите что вы хотите удалить *В чифрах* \\n\") \r\nsites.remove(name_1) \r\n\r\nsites_copy = sites[:]\r\n\r\nsites_copy.reverse()\r\nsites.clear()\r\n\r\nprint(sites)\r\nprint(sites_copy)\r\n\r\nwhile True: \r\n\tprint(sites_copy)","sub_path":"HW_8/DZ_1.py","file_name":"DZ_1.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"254174658","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@Author : Joshua\n@Time : 2019/2/14 12:01\n@File : lightlda2model.py\n@Desc : \n\"\"\"\n\n\nimport re\nimport sys\nimport gzip\nimport svmlight\nimport numpy as np\nimport pickle\n\ndef nlexicon(data):\n vmax = 0\n for datum in data:\n if datum[0] > vmax:\n vmax = datum[0]\n return vmax + 1\n\ndef match(r, s):\n m = re.search(r, s)\n if m:\n return m.group(1)\n else:\n sys.stderr.write('match(): invalid content.\\n')\n sys.exit(0)\n\ndef read_param(file):\n with open(file, 'r') as fh:\n content = fh.read()\n alpha0 = float(match(r'alpha\\s*=\\s*([0-9.+\\-]+)', content))\n beta0 = float(match(r'alpha\\s*=\\s*([0-9.+\\-]+)', content))\n topics = int(match(r'topics\\s*=\\s*([0-9]+)', content))\n iters = int(match(r'iters\\s*=\\s*([0-9]+)', content))\n return alpha0, beta0, topics, iters\n\ndef lightlda2beta(dir, topics, beta0):\n data = svmlight.loadex(dir + '/' + 'server_0_table_0.model')\n nlex = nlexicon(data)\n matrix = np.zeros((nlex, topics)) + beta0\n for w, doc in data:\n L = len(doc.id)\n for j in range(L):\n k = doc.id[j]\n c = doc.cnt[j]\n matrix[w][k] += c\n s = np.sum(matrix, axis=0)\n return np.dot(matrix, np.diag(1.0/s))\n\ndef lightlda2gamma(dir, topics, alpha0):\n data = svmlight.loadex(dir + '/' + 'doc_topic.0')\n gamma = []\n for n, doc in data:\n v = np.zeros(topics) + alpha0\n L = len(doc.id)\n for j in range(L):\n k = doc.id[j]\n c = doc.cnt[j]\n v[k] += c\n gamma.append(v)\n return np.array(gamma)\n\ndef lightlda2model(dir):\n model = {}\n alpha0, beta0, topics, iters = read_param(dir + '/' + 'param')\n # print(alpha0,beta0,topics,iters)\n model['alpha'] = alpha0\n model['beta'] = lightlda2beta(dir, topics, beta0)\n model['gamma'] = lightlda2gamma(dir, topics, alpha0)\n return model\n\ndef usage ():\n print('usage: lightlda2model dir')\n sys.exit(0)\n\ndef main ():\n if len(sys.argv) < 2:\n usage()\n dir = sys.argv[1]\n print('reading model..')\n model = lightlda2model(dir)\n print('saving model..')\n with gzip.open(dir + '/' + 'model', 'wb') as gf:\n pickle.dump(model, gf, 2)\n print('done.')\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"ba_lda/lightlda2model.py","file_name":"lightlda2model.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"43360728","text":"from collections import *\nimport sys \nimport string\nfrom math import inf\n\nN = 1000\n\ninput=sys.stdin.readline\n\n\n# \"\". join(strings) \n \ndef ri():\n return int(input())\n \ndef rl():\n return list(map(int, input().split()))\n \nn = ri()\npp = []\nvv = []\nfor i in range(n):\n\tprice, vitamins = input().split()\n\tpp.append(int(price))\n\tvv.append(vitamins)\n\n#dp = [juice i][maskVitamins] = min price to pay if we can chose within i-1 first juices to have the vitamins in the mask\n\n#this is the same as the dp in the other solution, except that we regroup all the dps into one big matrix\ndp = [[inf] * 8 for i in range((N + 5))]\n\n#base case:\n\n#create a bitmask from the juice\njuice_mask = 0\nif \"C\" in vv[0]:\n\tjuice_mask += 1 \nif \"B\" in vv[0]:\n\tjuice_mask += 2 \t\nif \"A\" in vv[0]:\n\tjuice_mask += 4\nfor mask in range(8):\n\tif mask & juice_mask == mask:\n\t\tdp[0][mask] = pp[0]\n\n\n#transitions:\nfor i in range(n - 1):\n\n\t#create a bitmask from the juice\n\tjuice_mask = 0\n\tif \"C\" in vv[i + 1]:\n\t\tjuice_mask += 1 \n\tif \"B\" in vv[i + 1]:\n\t\tjuice_mask += 2 \n\tif \"A\" in vv[i + 1]:\n\t\tjuice_mask += 4\n\n\tfor mask in range(8):\n\t\t#if we want, we dont take the juice at rank i + 1\n\t\tdp[i + 1][mask] = min(dp[i][mask], dp[i + 1][mask])\n\n\t\t#we can also chose to only take the juice at i + 1:\n\t\tif mask & juice_mask == mask:\n\t\t\tdp[i + 1][mask] = min(pp[i+1], dp[i + 1][mask])\n\n\t\t#or we take a mix\n\t\tdp[i + 1][mask | juice_mask] = min(dp[i][mask] + pp[i + 1], dp[i + 1][mask | juice_mask])\n\nans = dp[n - 1][7]\n\nif ans == inf:\n\tprint(-1)\nelse:\n\tprint(ans)\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"DP_mashup_list_Colin_Galen/Problem_C/C_bitmask.py","file_name":"C_bitmask.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"396058306","text":"\n#Description\n#Move a ball around with UP/DOWN/LEFT/RIGHT continuously\n#Part of the pygame series at https://github.com/tomwh2010/PythonPhysics\n#Public domain by tomwh2010@hotmail.com\n\nimport pygame, sys\nfrom pygame.locals import *\n\n\n#color of the ball\nSHAPE_COLOR=pygame.Color(\"red\")\n\n#style=0 => filled, style=1 => thin line, style=4 => thick line\nFILLSTYLE=0\n\n#Frames pr second\nFPS=40\n\n#window size\nWIDTH=800\nHEIGHT=500\n\n#initialize the pygame environment\npygame.init()\n\n# set up the window with size and caption\nscreen=pygame.display.set_mode((WIDTH, HEIGHT))\npygame.display.set_caption('Move a ball')\n\n# you have to call this at the start if you want to use this module.\npygame.font.init()\n\n#choose font for later use\nmyfont=pygame.font.SysFont('Times New Roman', 24)\n\n#create text buffer\nstrBuffer=\"Move cursor with arrow keys\"\n\n#render buffer as picture\ntextsurface=myfont.render(strBuffer, 1, pygame.Color(\"black\"))\n\n# creates a clock\nclock=pygame.time.Clock()\n\n#initial location of the ball; center\nmyball=[WIDTH//2, HEIGHT//2]\n\nwhile True:\n #limit updates to FPS\n clock.tick(FPS)\n\n #get events from the event queue\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n\n #by using this construct instead of event we will get\n #continous flow + at 45 degree angles if we want to to\n keys_pressed = pygame.key.get_pressed()\n\n if keys_pressed[K_LEFT]:\n myball[0]-=5\n\n if keys_pressed[K_RIGHT]:\n myball[0]+=5\n\n if keys_pressed[K_UP]:\n myball[1]-=5\n\n if keys_pressed[K_DOWN]:\n myball[1]+=5\n\n #draw background color to blank the screen\n screen.fill(pygame.Color(\"gray69\"))\n\n #paint picture to screen at location 130,180\n screen.blit(textsurface,(10, 10))\n\n #circle(screen, color, coords(x,y), radius, fillstyle\n pygame.draw.circle(screen, SHAPE_COLOR, myball, 10, FILLSTYLE)\n\n #update display\n pygame.display.flip()\n","sub_path":"Python/movetheball_continuous.py","file_name":"movetheball_continuous.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"151017911","text":"def neighbours(cell):\n position = cell.split(\",\")\n for x in range(-1, 2):\n for y in range(-1, 2):\n if not (x == y == 0):\n yield \"%i,%i\" % (int(position[0]) + x, int(position[1]) + y)\n\nclass GameOfLife:\n #Board tracks every relevant cell, AKA all live cells + every dead cell with live neighbours\n board = {}\n\n def __init__(self, seed=()):\n self.board = {}\n for cell in seed:\n self.revive(cell)\n \n\n #Make dictionary of cells\n #{'x,y':Bool}\n #At every iteration:\n #Check each live and dead cell in the dic\n #If the cell lives, add its neighbours to the dic\n #if it dies, and it has no neighbours, remove from dic\n\n\n #Count the number of neighbouring cells that are alive\n #next to the given cell\n def neighbour_count(self, cell):\n count = 0\n for neighbour in neighbours(cell):\n if self.is_alive(neighbour):\n count += 1\n return count\n\n #Very self descriptive\n def is_alive(self, cell):\n if cell in self.board:\n return self.board[cell]\n return False\n\n #Set the life status of a cell to alive\n ##Also adds neighbouring cells to the board\n def revive(self, cell):\n self.board[cell] = True\n for neighbour in neighbours(cell):\n if neighbour not in self.board:\n self.board[neighbour] = False\n\n #Set the life status of a cell to dead\n ##Also removes the cell from the board if it has no live neighbours\n def kill(self, cell):\n if self.neighbour_count(cell) == 0:\n del self.board[cell]\n else:\n self.board[cell] = False\n\n #go forwards one turn\n def step(self):\n next_step = GameOfLife()\n next_step.board = self.board.copy()\n\n for cell, alive in self.board.items():\n count = self.neighbour_count(cell)\n if alive and ((count > 3) or (count < 2)):\n next_step.kill(cell)\n elif not alive and (count == 3):\n next_step.revive(cell)\n self.board = next_step.board\n\n #Print to terminal\n #80 x 23\n #-40 <===> 39\n #-11 <===> 11\n def render(self):\n for y in range(11, -12, -1):\n line = \"\"\n for x in range(-40, 40):\n if self.is_alive(\"%i,%i\" % (x, y)):\n line += \"x\"\n else:\n line += \" \"\n print(line)\n","sub_path":"dict/life.py","file_name":"life.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"414946563","text":"import tensorflow as tf\nimport numpy as np\nimport os\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nfrom anaconda_navigator.utils.encoding import write\ntf.app.flags.DEFINE_string('data_dir', '.', \"\"\"the default data dirs\"\"\")\n\nFLAGS=tf.app.flags.FLAGS\nmnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)\n\nIMAGE_SIZE = 28\nNUM_CHANNELS = 1\nBATCH_SIZE=32\nnum_epochs=1\n\ntrain_data = mnist.train.images\ntrain_labels = np.asarray(mnist.train.labels, dtype=np.int32)\neval_data = mnist.test.images\neval_labels = np.asarray(mnist.test.labels, dtype=np.int32)\ntrain_size = train_labels.shape[0]\ncheckPointPath=\"C:\\\\tmp\\\\mnistckp\"\npbPath=\"C:\\\\tmp\\\\mnistckp\\\\model.pb\"\n\n\ndef inference(input, l2_regularizer=None):\n\n input_layer = tf.reshape(input, [-1, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS], name=\"inputLayer\")\n tf.summary.image(\"inputImageSummary\", input_layer)\n conv1 = tf.layers.conv2d(\n inputs=input_layer,\n filters=32,\n kernel_size=[5, 5],\n padding=\"same\",\n kernel_regularizer=l2_regularizer,\n activation=tf.nn.relu,\n name=\"convww\")\n pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)\n conv2 = tf.layers.conv2d(\n inputs=pool1,\n filters=64,\n kernel_size=[5, 5],\n padding=\"same\",\n kernel_regularizer=l2_regularizer,\n activation=tf.nn.relu)\n pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)\n pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])\n\n dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)\n dropout = tf.layers.dropout(inputs=dense, rate=0.4)\n logits = tf.layers.dense(inputs=dropout, units=10)\n return logits\n\n\ndef train():\n\n train_data_node = tf.placeholder(tf.float32, shape=(None, IMAGE_SIZE*IMAGE_SIZE* NUM_CHANNELS),name=\"inputdataName\")\n train_labels_node = tf.placeholder(tf.int64, shape=(None,10))\n regularizer = tf.contrib.layers.l2_regularizer(scale=0.0)\n logits = inference(train_data_node, regularizer)\n trainableVars=tf.trainable_variables()\n for var in trainableVars:\n if var.name.startswith('convww'):\n mean = tf.reduce_mean(var)\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar(var.name+'_mean', mean)\n tf.summary.scalar(var.name+'_stddev', stddev)\n tf.summary.histogram(var.name+'_histogram', var)\n print(var.name)\n prediction = tf.nn.softmax(logits,name=\"predictionName\")\n l2_loss = tf.losses.get_regularization_loss()\n entropyloss=tf.nn.softmax_cross_entropy_with_logits(labels=train_labels_node, logits=logits)\n loss = tf.reduce_mean([l2_loss+entropyloss])\n tf.summary.scalar('lossVal', loss)\n learning_rate=0.01\n optimizer = tf.train.MomentumOptimizer(learning_rate,momentum=0.9).minimize(loss)\n labels = tf.argmax(train_labels_node, 1)\n top_k_op = tf.nn.in_top_k(logits, labels, 1)\n accuracy = tf.reduce_mean(tf.cast(top_k_op, \"float\"), name=\"accuracy\")\n tf.summary.scalar('accuracyVal', accuracy)\n merged = tf.summary.merge_all()\n\n saver = tf.train.Saver()\n initAll=tf.global_variables_initializer()\n\n graph_def = tf.get_default_graph().as_graph_def()\n with tf.gfile.GFile(pbPath, 'wb') as f:\n f.write(graph_def.SerializeToString())\n\n with tf.Session() as sess:\n sess.run(initAll)\n sumwriter = tf.summary.FileWriter(checkPointPath, sess.graph)\n for step in range(int(num_epochs * train_size) // BATCH_SIZE):\n offset = (step * BATCH_SIZE) % (train_size - BATCH_SIZE)\n batch_data = train_data[offset:(offset + BATCH_SIZE), ...]\n batch_labels = 
train_labels[offset:(offset + BATCH_SIZE)]\n feed_dict = {train_data_node: batch_data,\n train_labels_node: batch_labels}\n _, lossVal, accuracyVal=sess.run([optimizer, loss, accuracy], feed_dict=feed_dict)\n print('Iter %d, lossVal %.3f, accuracyVal %.3f' % (step, lossVal, accuracyVal))\n\n if step % 10 == 0:\n summaryVal=sess.run(merged,feed_dict=feed_dict)\n sumwriter.add_summary(summaryVal, step)\n\n sumwriter.close()\n saver.save(sess=sess,save_path=os.path.join(checkPointPath,\"model\"))\n\n\nif __name__ == '__main__':\n train()","sub_path":"book/chapter4/tensorboard/mnisttensorboard.py","file_name":"mnisttensorboard.py","file_ext":"py","file_size_in_byte":4353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"633884276","text":"def quick_sort(arr):\n left = []\n right = []\n if len(arr) <= 1:\n return arr\n ref = arr[0]\n ref_count = 0\n for ele in arr:\n if ele < ref:\n left.append(ele)\n elif ele > ref:\n right.append(ele)\n else:\n ref_count += 1\n left = quick_sort(left)\n right = quick_sort(right)\n return right + [ref] * ref_count + left\n\nN = int(input())\nA = list(map(int, input().split()))\n\nA = [x for x in A if A.count(x) == 1]\nA = quick_sort(A)\n\nans = 0\nif len(A) == 0:\n print(ans)\n exit()\nfor i in range(N):\n flag = True\n for j in range(i+1, N):\n if A[i] % A[j] == 0:\n flag = False\n break\n if flag: ans += 1\nprint(ans)","sub_path":"atcoder/abc/170/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"109665559","text":"# Definition for singly-linked list.\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution(object):\n \"\"\"\n输入一个链表的头节点,从尾到头反过来返回每个节点的值(用数组返回)。\n输入:head = [1,3,2]\n输出:[2,3,1]\n限制:\n0 <= 链表长度 <= 10000\n链接:https://leetcode-cn.com/problems/cong-wei-dao-tou-da-yin-lian-biao-lcof\n \"\"\"\n def reversePrint(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: List[int]\n \"\"\"\n rec = []\n while head:\n rec[0:0] = [head.val]\n head = head.next\n return rec\n\n\ndef create(nums):\n aux = p = ListNode(-1)\n for x in nums:\n p.next = ListNode(x)\n p = p.next\n return aux.next\n\n\ndef main():\n head = [1, 3, 2]\n test = Solution()\n ret = test.reversePrint(create(head))\n print(ret)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"剑指offer-面试题06. 从尾到头打印链表.py","file_name":"剑指offer-面试题06. 从尾到头打印链表.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"262439263","text":"from collections import defaultdict\n\nfrom scrapy.xlib.pydispatch import dispatcher\nfrom scrapy import signals\n\n\ndef gen_table_name(item_type):\n table_name = item_type.lower()\n if table_name.endswith(\"item\") and len(table_name) > 4:\n table_name = table_name[:-4]\n return table_name\n\n\nclass ScraperWikiPipeline(object):\n \"\"\"\n A pipeline for saving to the Scraperwiki datastore\n\n If the scraper returns different kind of items they are stored in\n different tables\n \"\"\"\n def __init__(self):\n self.buff = 20\n self.data = defaultdict(list)\n self.counter = 0\n dispatcher.connect(self.spider_closed, signals.spider_closed)\n\n def process_item(self, item, spider):\n item_type = item.__class__.__name__\n self.data[item_type].append(dict(item))\n if len(self.data[item_type]) >= self.buff:\n self.write_data(spider, item_type)\n return item\n\n def spider_closed(self, spider):\n for item_type in self.data:\n if self.data[item_type]:\n self.write_data(spider, item_type)\n\n def write_data(self, spider, item_type):\n import scraperwiki\n\n table_name = gen_table_name(item_type)\n unique_keys = spider.settings.get(\n 'SW_UNIQUE_KEYS', {item_type: ['id']}\n )\n scraperwiki.sqlite.save(\n table_name=table_name,\n unique_keys=unique_keys[item_type],\n data=self.data[item_type]\n )\n self.data[item_type] = []\n","sub_path":"scrapyrwiki/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"66971501","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ### Problem Statement\n# \n# \n# Given an integer array, find and return all the subsets of the array.\n# The order of subsets in the output array is not important. However the order of elements in a particular subset should remain the same as in the input array.\n# \n# *Note: An empty set will be represented by an empty list*\n# \n# **Example 1**\n# \n# ```\n# arr = [9]\n# \n# output = [[]\n# [9]]\n# ```\n# \n# **Example 2**\n# \n# ```\n# arr = [9, 12, 15]\n# \n# output = [[],\n# [15],\n# [12],\n# [12, 15],\n# [9],\n# [9, 15],\n# [9, 12],\n# [9, 12, 15]]\n# ```\n\n# In[4]:\n\n\ndef subsets(arr):\n \"\"\"\n :param: arr - input integer array\n Return - list of lists (two dimensional array) where each list represents a subset\n TODO: complete this method to return subsets of an array\n \"\"\"\n return subsets_index(arr,0)\n\ndef subsets_index(arr,index):\n if index >= len(arr):\n return [[]]\n \n temp = subsets_index(arr,index+1)\n out = list()\n for ele in temp:\n out.append(ele)\n \n for ele in temp:\n cur = list()\n cur.append(arr[index])\n cur.extend(ele)\n out.append(cur)\n return out\n\n\n# \n\n# In[5]:\n\n\ndef test_function(test_case):\n arr = test_case[0]\n solution = test_case[1]\n \n output = subsets(arr)\n \n output.sort()\n solution.sort()\n \n if output == solution:\n print(\"Pass\")\n else:\n print(\"Fail\") \n\n\n# In[6]:\n\n\narr = [9]\nsolution = [[], [9]]\n\ntest_case = [arr, solution]\ntest_function(test_case)\n\n\n# In[7]:\n\n\narr = [5, 7]\nsolution = [[], [7], [5], [5, 7]]\ntest_case = [arr, solution]\ntest_function(test_case)\n\n\n# In[8]:\n\n\narr = [9, 12, 15]\nsolution = [[], [15], [12], [12, 15], [9], [9, 15], [9, 12], [9, 12, 15]]\n\ntest_case = [arr, solution]\ntest_function(test_case)\n\n\n# In[9]:\n\n\narr = [9, 8, 9, 8]\nsolution = [[],\n[8],\n[9],\n[9, 8],\n[8],\n[8, 8],\n[8, 9],\n[8, 9, 8],\n[9],\n[9, 8],\n[9, 9],\n[9, 9, 8],\n[9, 8],\n[9, 8, 8],\n[9, 8, 9],\n[9, 8, 9, 8]]\n\ntest_case = [arr, solution]\ntest_function(test_case)\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Recursion/Return-Subsets.py","file_name":"Return-Subsets.py","file_ext":"py","file_size_in_byte":2246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"108966100","text":"#O(n*2^n) \n#def powerSet(array):\n# subsets = [[]]\n# for ele in array:\n# for i in range(len(subsets)):\n# currentSubset = subsets[i]\n# subsets.append(currentSubset+[ele])\n# return susbsets\n#O(n*2^n)time | O(n*2^n) space\ndef powerSet(array, idx = None):\n if idx is None:\n idx = len(array) - 1\n elif idx < 0:\n return [[]]\n ele = array[idx]\n subsets = powerSet(array,idx -1)\n for i in range(len(subsets)):\n currentSubset = subsets[i]\n subsets.append(currentSubset+[ele])\n return subsets\n \npowerSet([1,2,3,4])\n \n","sub_path":"powerSet.py","file_name":"powerSet.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"191868693","text":"\nfrom weakref import WeakKeyDictionary\n\n\nclass StatefulProperty(object):\n\n def __init__(self, initial_condition = None):\n self.default = initial_condition\n self.data = WeakKeyDictionary()\n\n def __get__(self, instance, owner):\n if instance is None:\n return self\n\n try:\n return self.data[instance]\n except KeyError:\n val = StatePerSimulation(self.default)\n self.data[instance] = val\n return val\n\n def __set__(self, instance, value):\n raise AttributeError(\"Property is read-only. \"\n \"Did you mean to access via a simultation?\")\n\n\n\nclass StatePerSimulation(object):\n\n def __init__(self, initial_condition = None):\n self.default = initial_condition\n self._sim_data = WeakKeyDictionary()\n\n def __getitem__(self, key):\n from .controlsystem import ControlSystemSimulation\n assert isinstance(key, ControlSystemSimulation)\n try:\n return self._sim_data[key]\n except KeyError:\n if isinstance(self.default, dict) and len(self.default) == 0:\n # Create a new empty dictionary and remember it\n result = dict()\n self._sim_data[key] = result\n return result\n else:\n return self.default\n\n\n def __setitem__(self, key, value):\n from .controlsystem import ControlSystemSimulation\n assert isinstance(key, ControlSystemSimulation)\n self._sim_data[key] = value\n\n\n","sub_path":"skfuzzy/control/state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"11054538","text":"import os\r\nimport sys\r\nimport git\r\nimport subprocess\r\nfrom apscheduler import events\r\n\r\n\r\nclass ContinuousDeployment:\r\n def __init__(self, scheduler):\r\n self.scheduler = scheduler\r\n self.jobs_running = 0\r\n # self.scheduler.add_listener(self.job_started, mask=events.EVENT_JOB_SUBMITTED)\r\n # self.scheduler.add_listener(self.job_ended, mask=events.EVENT_JOB_EXECUTED)\r\n # self.scheduler.add_listener(self.job_ended, mask=events.EVENT_JOB_ERROR)\r\n # self.scheduler.add_job(self.check_for_updates, \"cron\", minute=\"*\")\r\n #\r\n self.repo = git.Repo(\".\")\r\n print(self.repo.active_branch.commit)\r\n\r\n def job_started(self, _):\r\n self.jobs_running += 1\r\n\r\n def job_ended(self, _):\r\n self.jobs_running -= 1\r\n\r\n def check_for_updates(self):\r\n if self.jobs_running != 1:\r\n return\r\n if not self.up_to_date():\r\n print(\"Not up to date. Pulling...\")\r\n self.repo.remotes.origin.pull()\r\n restart_program()\r\n else:\r\n print(\"Up to date.\")\r\n\r\n def up_to_date(self):\r\n status = self.repo.remotes.origin.fetch()\r\n for branch in status:\r\n if str(branch.ref) == \"origin/master\":\r\n return branch.commit == self.repo.active_branch.commit\r\n return False\r\n\r\n\r\ndef restart_program():\r\n python = sys.executable\r\n os.execl(python, python, *sys.argv)\r\n\r\n\r\nfrom apscheduler.schedulers.blocking import BlockingScheduler\r\nscheduler = BlockingScheduler()\r\n\r\n\r\nimport time\r\n\r\nprint(\"HELLO AGAIN!!\")\r\nx = ContinuousDeployment(scheduler)\r\n\r\n\r\n@scheduler.scheduled_job(\"cron\", second=\"*/5\")\r\ndef test_job():\r\n x.check_for_updates()\r\n\r\n\r\nscheduler.start()\r\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"490690323","text":"import re\nimport unittest\nimport os.path\nimport grokcore.view\n\nfrom pkg_resources import resource_listdir\n\nfrom zope.app.wsgi.testlayer import BrowserLayer, http\nfrom zope.testing import doctest, renormalizing\n\n\nFunctionalLayer = BrowserLayer(grokcore.view)\n\nchecker = renormalizing.RENormalizing([\n # Accommodate to exception wrapping in newer versions of mechanize\n (re.compile(r'httperror_seek_wrapper:', re.M), 'HTTPError:'),\n ])\n\ndef suiteFromPackage(name):\n files = resource_listdir(__name__, name)\n suite = unittest.TestSuite()\n getRootFolder = FunctionalLayer.getRootFolder\n globs = dict(http=http,\n getRootFolder=getRootFolder)\n optionflags = (doctest.ELLIPSIS+\n doctest.NORMALIZE_WHITESPACE+\n doctest.REPORT_NDIFF)\n\n for filename in files:\n if filename == '__init__.py':\n continue\n\n test = None\n if filename.endswith('.py'):\n dottedname = 'grokcore.view.ftests.%s.%s' % (name, filename[:-3])\n test = doctest.DocTestSuite(\n dottedname,\n checker=checker,\n extraglobs=globs,\n optionflags=optionflags)\n test.layer = FunctionalLayer\n elif filename.endswith('.txt'):\n test = doctest.DocFileSuite(\n os.path.join(name, filename),\n optionflags=optionflags,\n globs=globs)\n test.layer = FunctionalLayer\n if test is not None:\n suite.addTest(test)\n return suite\n\ndef test_suite():\n suite = unittest.TestSuite()\n for name in [\n 'contentprovider',\n 'directoryresource',\n 'static',\n 'url',\n 'view',\n ]:\n suite.addTest(suiteFromPackage(name))\n return suite\n","sub_path":"buildout-cache--/eggs/grokcore.view-2.8-py2.7.egg/grokcore/view/ftests/test_functional.py","file_name":"test_functional.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"141983749","text":"import pygame\nfrom mymath import *\n\nWHITE = (255, 255, 255)\nclass Triangle3D:\n def __init__(self, p1, p2, p3, width, height, focal_length, camera):\n self.points = [0, 0, 0]\n self.points[0] = TranslateTo2D(p1, width, height, focal_length, camera)\n self.points[1] = TranslateTo2D(p2, width, height, focal_length, camera)\n self.points[2] = TranslateTo2D(p3, width, height, focal_length, camera)\n # print(self.points)\n def draw3D(self, graphic):\n p = self.points\n pygame.draw.line(graphic, WHITE, p[0], p[1])\n pygame.draw.line(graphic, WHITE, p[1], p[2])\n pygame.draw.line(graphic, WHITE, p[2], p[0])\n\nclass Square3D:\n def __init__(self, p1, p2, p3, p4, width, height, focal_length, camera):\n self.points = [0, 0, 0, 0]\n # print(self.points)\n \n self.points[0] = TranslateTo2D(p1, width, height, focal_length, camera)\n self.points[1] = TranslateTo2D(p2, width, height, focal_length, camera)\n self.points[2] = TranslateTo2D(p3, width, height, focal_length, camera)\n self.points[3] = TranslateTo2D(p4, width, height, focal_length, camera)\n\n def draw3D(self, graphic):\n p = self.points\n pygame.draw.line(graphic, WHITE, p[0], p[1])\n pygame.draw.line(graphic, WHITE, p[1], p[2])\n pygame.draw.line(graphic, WHITE, p[2], p[3])\n pygame.draw.line(graphic, WHITE, p[3], p[0])\n\nclass mesh:\n def __init__(self, string):\n self.points = vertexLoad(string)\n self.faces = faceLoad(string)\n\n def meshDraw(self, window, width, height, focal_length, angle, camera):\n m = self\n\n for i in m.faces:\n if (len(i) == 3):\n # Поиск точек\n p1 = m.points[i[0]-1]\n p2 = m.points[i[1]-1]\n p3 = m.points[i[2]-1]\n \n # Умножение на матрицу поворота \n p1 = rotateX(angle[0], p1)\n p2 = rotateX(angle[0], p2)\n p3 = rotateX(angle[0], p3)\n\n p1 = rotateY(angle[1], p1)\n p2 = rotateY(angle[1], p2)\n p3 = rotateY(angle[1], p3)\n \n p1 = rotateZ(angle[2], p1)\n p2 = rotateZ(angle[2], p2)\n p3 = rotateZ(angle[2], p3)\n\n v1 = [p2[0] - p1[0], p2[1] - p1[1], p2[2] - p1[2]]\n v2 = [p3[0] - p1[0], p3[1] - p1[1], p3[2] - p1[2]]\n n = crossProd(v1, v2)\n if (n[2]*camera[2] + n[1]*camera[1] + n[0]*camera[0] < 0):\n\n tr = Triangle3D(p1, p2, p3, width, height, focal_length, camera)\n tr.draw3D(window)\n \n elif(len(i) == 4):\n p1 = m.points[i[0]-1]\n p2 = m.points[i[1]-1]\n p3 = m.points[i[2]-1]\n p4 = m.points[i[3]-1]\n \n p1 = rotateX(angle[0], p1)\n p2 = rotateX(angle[0], p2)\n p3 = rotateX(angle[0], p3)\n p4 = rotateX(angle[0], p4) \n\n p1 = rotateY(angle[1], p1)\n p2 = rotateY(angle[1], p2)\n p3 = rotateY(angle[1], p3)\n p4 = rotateY(angle[1], p4)\n \n p1 = rotateZ(angle[2], p1)\n p2 = rotateZ(angle[2], p2)\n p3 = rotateZ(angle[2], p3)\n p4 = rotateZ(angle[2], p4)\n\n v1 = [p2[0] - p1[0], p2[1] - p1[1], p2[2] - p1[2]]\n v2 = [p3[0] - p1[0], p3[1] - p1[1], p3[2] - p1[2]]\n n = crossProd(v1, v2)\n if (n[2]*camera[2] + n[1]*camera[1] + n[0]*camera[0]< 0):\n sq = Square3D(p1, p2, p3, p4, width, height, focal_length, camera)\n sq.draw3D(window) \n\ndef vertexLoad(string):\n vertices = []\n f = open(string)\n for x in f:\n if(x[0] == 'v' and x[1] != 'n' and x[1] != 't'):\n a = x.split()\n a.remove('v')\n for i in range(len(a)):\n a[i] = float(a[i])\n vertices.append(a)\n return vertices\n\ndef faceLoad(string):\n f = open(string)\n faces = []\n for x in f:\n if (x[0] == 'f'):\n a = x.split()\n a.remove('f')\n for i in range(len(a)):\n a[i] = int((a[i][:a[i].index('/')]))\n faces.append(a)\n return 
faces\n","sub_path":"main/mesh.py","file_name":"mesh.py","file_ext":"py","file_size_in_byte":4450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"540607621","text":"\"\"\"\nThis is where the implementation of the plugin code goes.\nThe GD_1-class is imported from both run_plugin.py and run_debug.py\n\"\"\"\n\nimport json\nimport sys\nimport logging\nfrom webgme_bindings import PluginBase\n\n# Setting up a logger\nlogger = logging.getLogger('GD_2')\nlogger.setLevel(logging.INFO)\nhandler = logging.StreamHandler(sys.stdout) # By default it logs to stderr..\nhandler.setLevel(logging.INFO)\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\nlibNode = None\n\ndef search_in_library(self, core, target): #meta_type(string): {Device, Country, Vendor, Region} | target(string) is the node's name we're searching\n childrens = core.load_children(self.META[\"Countries\"]) #loading all children from library\n for child in childrens:\n node = core.load_by_path(self.root_node, child['nodePath']) #loading the node by path\n get_child(self, core, node, target)\n\n\ndef get_child(self, core, node, target):\n global libNode\n # logger.info(\" Device: '{0}'\".format(core.get_attribute(node,'name'))) #logging the found child's name\n children = core.load_children(node) #getting all the children of a node\n for child in children:\n if core.get_attribute(child, 'name') == target: #if we found the searched country's node\n # logger.info(\"Found node: '{0}'\".format(core.get_attribute(child, 'name')))\n libNode = child\n break #return it\n else: #else\n get_child(self, core, child, target) #search in the child's children\n\n\nclass GD_2(PluginBase):\n def main(self):\n global libNode\n core = self.core\n root_node = self.root_node\n active_node = self.active_node\n\n with open(\"./imports/costumRegions.json\") as f: #loading regions json file into data list\n regions = json.load(f)\n\n with open(\"./imports/countries2.json\") as f: #loading countries json file into data list\n countries = json.load(f)\n\n name = core.get_attribute(active_node, 'name') #get the name of active_node\n logger.info('ActiveNode at \"{0}\" has name {1}'.format(core.get_path(active_node), name)) #logging the active_node's name\n\n #getting the meta of target file which we want to create\n region_meta = self.META[\"CustomRegion\"]\n\n i = 1 #temporary variable that helps to push the new child off from the old child so they does not stack\n for region in regions:\n child1 = core.create_child(active_node, region_meta) #creating a child node of the active node\n position_item = core.get_registry(active_node,'position') #getting position of the active node\n position_item['y'] = position_item['y'] + 50 * i #changing the position variable\n position_item['x'] += 400\n core.set_registry(child1, 'position', position_item) #changing the child's position\n core.set_attribute(child1, 'name', region['name']) #changing the child's name\n i+=1 #update the temporary variable\n\n for country in countries:\n if country[\"region\"] == core.get_attribute(child1, 'name'): #check if the selected country is in it's region\n search_in_library(self,core,country[\"country\"])\n core.add_member(child1, \"MemberCountries\", libNode)\n\n # creating a commit for the update\n commit_info = self.util.save(root_node, self.commit_hash, 'test', 'Python plugin created regions with countries')\n 
logger.info('commited:{0}'.format(commit_info))\n","sub_path":"src/plugins/GD_2/GD_2/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"524643802","text":"\"\"\"Function to get the config id\"\"\"\nimport argparse\n\n\ndef argument_parser() -> str:\n \"\"\"Function to get the config id\"\"\"\n parser = argparse.ArgumentParser(\n description=\"Argument parser to obtain the name of the config file\"\n )\n parser.add_argument(\n \"--config_id\",\n default=\"sample_config\",\n help=\"config id to use\",\n )\n args = parser.parse_args()\n assert isinstance(args.config_id, str)\n return args.config_id\n","sub_path":"src/utils/argument_parser.py","file_name":"argument_parser.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"237898984","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('realtime', '0010_radarconvertparams'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='radarsnapshot',\n name='save_datetime',\n field=models.DateTimeField(default=datetime.datetime(2017, 7, 7, 17, 19, 51, 255485), verbose_name=b'salvataggio', auto_now=True),\n preserve_default=False,\n ),\n ]\n","sub_path":"torinometeo/realtime/migrations/0011_radarsnapshot_save_datetime.py","file_name":"0011_radarsnapshot_save_datetime.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"195244041","text":"import tensorflow as tf\nimport numpy as np\n\nclass Optimizer:\n\n\tdef __init__(self,lr):\n\t\tself.lr= lr\n\t\tself.opt = None\n\n\tdef get_optimizer(self):\n\t\treturn self.opt\n\n\tdef minimize(self,inp):\n\t\treturn self.get_optimizer().minimize(inp)\n\n\tdef init_variables(self,session):\n\t\tsession.run(tf.variables_initializer(self.get_optimizer().variables()))\n\nclass MomentumOpt(Optimizer):\n\n\tdef __init__(self,lr=10e-4,momentum=0.9,decay=0.99,nesterov=True):\n\t\tOptimizer.__init__(self,lr)\n\t\tself.momentum = momentum\n\t\tself.nesterov = nesterov\n\t\tself.opt = tf.train.MomentumOptimizer(learning_rate=self.lr,\n\t\t\tmomentum=self.momentum,\n\t\t\tuse_nesterov=self.nesterov,\n\t\t)\n\nclass RMSPropOpt(Optimizer):\n\n\tdef __init__(self,lr=10e-4,decay=0.9,momentum=0.0,centered=False):\n\t\tOptimizer.__init__(self,lr)\n\t\tself.decay = decay\n\t\tself.momentum = momentum\n\t\tself.centered = centered\n\t\tself.opt = tf.train.RMSPropOptimizer(learning_rate=self.lr,\n\t\t\tdecay=self.decay,\n\t\t\tmomentum=self.momentum,\n\t\t\tcentered=self.centered,\n\t\t)\n\nclass AdamOpt(Optimizer):\n\n\tdef __init__(self,lr=10e-4,beta1=0.9,beta2=0.999):\n\t\tOptimizer.__init__(self,lr)\n\t\tself.beta1=beta1\n\t\tself.beta2 = beta2\n\t\tself.opt = tf.train.AdamOptimizer(learning_rate=self.lr,\n\t\t\tbeta1=self.beta1,\n\t\t\tbeta2=self.beta2,\n\t\t)\n\nclass GradientDOpt(Optimizer):\n\n\tdef __init__(self,lr=10e-4):\n\t\tOptimizer.__init__(self,lr)\n\t\tself.opt = tf.train.GradientDescentOptimizer(learning_rate=self.lr)","sub_path":"wolFikaM/tf_opt.py","file_name":"tf_opt.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"440797422","text":"#!/usr/bin/env python3\n# -*- coding : utf-8 -*-\n\nimport os\nCONFIG_FILE = 'config.txt'\nCUR_DIR = os.path.abspath(os.path.dirname(__file__))\nCF_PATH = os.path.join(CUR_DIR, CONFIG_FILE)\n\n#You need a baidu translate and \n#config.txt -> \n#APPID = xxxxxxx\n#SECRET_KEY = xxxxxx\n\ndef load_config(fn=CF_PATH):\n config = {}\n \n with open(fn, 'r', encoding='utf-8') as f:\n for line in f:\n #allow some notes in config file.\n ignore = line.find('#')\n if ignore >= 0: line = line[:ignore]\n \n kv = line.split('=', 1)\n if len(kv) != 2: continue\n k, v = kv[0].strip(), kv[1].strip()\n config[k] = v\n \n return config\n \nif __name__ == '__main__':\n config = load_config()\n print(config['APPID'], config['SECRET_KEY'])","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"36234478","text":"import matplotlib\nmatplotlib.use(\"TkAgg\")\n\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg\nfrom matplotlib.figure import Figure\n\nimport tkinter as tk\nfrom tkinter import ttk\nfrom tkinter import *\n\nLARGE_FONT=(\"Verdana\",12)\n\n\nclass MyApp(tk.Tk):\n\n def __init__(self,*args,**kwargs):\n tk.Tk.__init__(self,*args , **kwargs)\n tk.Tk.wm_title(self,'TEST')\n #tk.Tk.iconbitmap(self, default=\"MGava.bmp\")\n container = tk.Frame(self)\n container.pack(side=\"top\",fill=\"both\",expand=True)\n container.grid_rowconfigure(0,weight=1) # minsize, priority\n container.grid_columnconfigure(0,weight=1)\n\n self.frames = {}\n for F in (StartPage, PageOne, PageTwo , PageThree): \n frame = F(container,self)\n self.frames[F] = frame\n frame.grid(row=0, column=0, sticky=\"nsew\") # sticky expand on all direction\n\n self.show_frame(StartPage)\n\n def show_frame(self, cont):\n frame = self.frames[cont]\n frame.tkraise()\n \n\n\nclass StartPage(tk.Frame):\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n label = tk.Label(self, text=\"Start Page\", font=LARGE_FONT)\n label.pack(pady=10,padx=10)\n button1 = ttk.Button(self, text=\"Visit Page1\",\n command=lambda: controller.show_frame(PageOne))\n button1.pack()\n button2 = ttk.Button(self, text=\"Visit Page2\",\n command=lambda: controller.show_frame(PageTwo))\n button2.pack()\n button3 = ttk.Button(self, text=\"Visit Page3\",\n command=lambda: controller.show_frame(PageThree))\n button3.pack()\n\n \nclass PageOne(tk.Frame):\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n label = tk.Label(self, text=\"Page One\", font=LARGE_FONT)\n label.pack(pady=10,padx=10)\n \n User=StringVar()\n User.set(\"user\")\n user = ttk.Entry(self, textvariable=User)\n user.pack()\n \n button = ttk.Button(self, text=\"Back\",\n command=lambda: controller.show_frame(StartPage))\n button.pack()\n button2 = ttk.Button(self, text=\"Visit Page2\",\n command=lambda: controller.show_frame(PageTwo))\n button2.pack()\n\nclass PageTwo(tk.Frame):\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n label = tk.Label(self, text=\"Page Two\", font=LARGE_FONT)\n label.pack(pady=10,padx=10)\n button = ttk.Button(self, text=\"Back\",\n command=lambda: controller.show_frame(StartPage))\n button.pack()\n button1 = ttk.Button(self, text=\"Visit Page1\",\n command=lambda: controller.show_frame(PageOne))\n button1.pack()\n\nclass PageThree(tk.Frame):\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n label = tk.Label(self, text=\"Page THree\", font=LARGE_FONT)\n label.pack(pady=10,padx=10)\n button = ttk.Button(self, text=\"Back\",\n command=lambda: controller.show_frame(StartPage))\n button.pack()\n\n f = Figure(figsize=(5,5), dpi=100)\n a = f.add_subplot(111)\n a.plot([1,2,3,4,5,6],[3,4,7,1,5,7])\n\n canvas = FigureCanvasTkAgg(f,self)\n canvas.show()\n canvas.get_tk_widget().pack(side=tk.TOP,fill=tk.BOTH, expand = True)\n\n toolbar= NavigationToolbar2TkAgg(canvas,self)\n toolbar.update()\n canvas._tkcanvas.pack(side=tk.TOP,fill=tk.BOTH, expand = True)\n\n \n\napp = MyApp()\napp.mainloop()\n","sub_path":"Python3/suunto_xml/tkinter_app.py","file_name":"tkinter_app.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"65328666","text":"# '''\n# This deep learning model comes from network resources. We refer to it Mask-r-cnn,\n# and the citation information shows below:\n#\n# @misc{matterport_maskrcnn_2017,\n# title={Mask R-CNN for object detection and instance segmentation on Keras and TensorFlow},\n# author={Waleed Abdulla},\n# year={2017},\n# publisher={Github},\n# journal={GitHub repository},\n# howpublished={\\url{https://github.com/matterport/Mask_RCNN}},\n# }\n# @article{ward2020scalable,\n# title={Scalable learning for bridging the species gap in image-based plant phenotyping},\n# author={Ward, Daniel and Moghadam, Peyman},\n# journal={Computer Vision and Image Understanding},\n# pages={103009},\n# year={2020},\n# publisher={Elsevier}\n# }\n#\n# So before running the code, you must download the model from link, get the initial weights from train.py.\n#\n# This python file focus on predicting dataset and getting output images using trained model.\n#\n# '''\n\n\nimport os\nfrom glob import glob\nimport argparse\nimport task3_config_cvppp as config_cvppp\nfrom mrcnn import model, visualize\nimport numpy as np\nimport cv2 as cv\nfrom skimage import io\nfrom matplotlib import pyplot as plt\nimport pylab\n\n\n# Converts a mask to RGB Format\ndef mask_to_rgb(mask):\n\n colours = visualize.random_colors(mask.shape[2])\n rgb_mask = np.zeros((mask.shape[0], mask.shape[1], 3))\n\n for i in range(mask.shape[2]):\n for c in range(3):\n rgb_mask[:, :, c] = np.where(mask[:, :, i] != 0, int(colours[i][c] * 255), rgb_mask[:, :, c])\n\n return rgb_mask\n\n\n### 333\ndef load_image(im_path):\n\n image = cv.imread(im_path, 1)\n image = cv.cvtColor(image, cv.COLOR_BGR2RGB)\n # Check for alpha channel\n if not image.shape[2] <= 3:\n image = image[:, :, :3]\n\n return image\n\n\n# gain arguments from keyboard\ndef arguments():\n parser = argparse.ArgumentParser(description='Performs inference using a Mask RCNN Model')\n parser.add_argument('--dataPattern', type=str, required=True,\n help=\"A glob file path pattern in quotations. e.g. 
'path/*_rgb.png'\")\n parser.add_argument('--outputPath', type=str, required=True,\n help='Directory to save all outputs')\n parser.add_argument('--weightsPath', type=str, required=True,\n help='Path to model weights (.h5)')\n\n return parser.parse_args()\n\n\n# The main prediction function\ndef predict_segmentations():\n\n args = arguments()\n\n image_pattern = args.dataPattern\n\n print(\"Image Pattern:\", image_pattern)\n\n # Create output dir\n assert not os.path.isdir(args.outputPath), \"output dir already exists, please try again\"\n os.mkdir(args.outputPath)\n\n # Init config\n configuration = config_cvppp.InferenceConfig()\n\n # Init model\n inference_model = model.MaskRCNN(mode=\"inference\",\n config=configuration,\n model_dir=args.outputPath)\n\n inference_model.load_weights(args.weightsPath, by_name=True)\n\n # Predict Images\n with open(os.path.join(args.outputPath, 'leafCounts.csv'), 'a') as count_file:\n count_file.write(\"Image, Count\\n\")\n for im_path in glob(image_pattern):\n out_path = os.path.join(args.outputPath, os.path.basename(im_path))\n\n print(\"Saving prediction for\", im_path, \"at\", out_path)\n\n try:\n image = load_image(im_path)\n except:\n print(\"Bad File for prediction:\", im_path)\n continue\n\n # blur images\n # image = cv.GaussianBlur(image, (101, 101), 92, 0)\n\n # predict images\n results = inference_model.detect([image])\n # import matplotlib.pyplot as plt\n # plt.imshow(results)\n\n # convert images to RGB format\n rgb_mask = mask_to_rgb(results[0]['masks'])\n\n # store images\n # cv.imwrite(out_path, rgb_mask.astype(np.uint8), cv.IMWRITE_PNG_COMPRESSION)\n io.imsave(out_path, rgb_mask.astype(np.uint8))\n io.imshow(rgb_mask.astype(np.uint8))\n # view.show()\n plt.show()\n # sore result of leaf-counting\n\n count_file.write(os.path.basename(im_path) + \", \" + str(results[0]['masks'].shape[2]) + \"\\n\")\n\nif __name__ == '__main__':\n predict_segmentations()","sub_path":"9517_Group_Submision/Codes/task3_inference.py","file_name":"task3_inference.py","file_ext":"py","file_size_in_byte":4350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"212861158","text":"def evaluate(expression):\n OPERATORS = {\n '+': lambda y, x : x + y,\n '-': lambda y, x : x - y,\n '*': lambda y, x : x * y,\n '/': lambda y, x : int(float(x) / y)\n }\n \n stack = []\n \n for token in expression:\n if token in OPERATORS:\n stack.append(OPERATORS[token](stack.pop(), stack.pop()))\n else:\n stack.append(int(token))\n \n return stack[-1]\n \n","sub_path":"Python/150_evaluate_reverse_polish_notation.py","file_name":"150_evaluate_reverse_polish_notation.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"245043952","text":"from django.urls import path, re_path, include\nfrom .views import add_to_cart, order_summary, delete_from_cart\n\napp_name = 'shopping_cart'\n\nurlpatterns = [\n re_path(r\"^add-to-cart/(?P[-\\w]+)/$\", add_to_cart, name=\"add_to_cart\"),\n path('order-summary/', order_summary, name=\"order-summary\"),\n re_path(r'^item/delete/(?P[-\\w]+)/$', delete_from_cart, name='delete_item'),\n]","sub_path":"src/shopping_cart/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"8978613","text":"from urllib.parse import urlparse, urljoin\n\nfrom .core import *\nfrom .vparsers import *\nfrom .utils import attributeerror_wrapper\n\n\nclass OsiedlePlatiniumParser(MultipleRequestsGeneratorMixin, MultipleWebpageParser): \n url = \"http://www.osiedleplatinum.pl/katalog-mieszkan\"\n method = \"GET\"\n var_params = [ dict(page=i) for i in range(1, 15) ]\n headers = {\n \"Host\": \"www.osiedleplatinum.pl\",\n \"User-Agent\": \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:57.0) Gecko/20100101 Firefox/57.0\",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\",\n \"Accept-Language\": \"en-US,en;q=0.5\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Referer\": \"http://www.osiedleplatinum.pl/katalog-mieszkan\",\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n \"Upgrade-Insecure-Requests\": \"1\"\n }\n\n schema = [\n DataUnit(label=\"Plan\", parser=LinkParser(DOMElementExtractor(\"a\")), id=\"plan\"),\n DataUnit(label=\"Typ\", parser=DOMTextExtractor(), id=\"_type\"),\n DataUnit(label=\"Number\", parser=DOMTextExtractor(), id=\"number\"),\n DataUnit(label=\"Pokoje\", parser=IntParser(DOMTextExtractor()), id=\"rooms\"),\n DataUnit(label=\"Metraż\", parser=AreaParser(DOMTextExtractor()), id=\"area\"),\n DataUnit(label=\"Piętro\", parser=IntParser(DOMTextExtractor()), id=\"floor\"),\n DataUnit(label=\"Cena m^2\", parser=PriceParser(DOMTextExtractor()), id=\"price_m2\"),\n DataUnit(label=\"Cena\", parser=PriceParser(DOMTextExtractor()), id=\"price\"),\n DataUnit(label=\"Szczegóły\", parser=NoneParser(), id=\"details_none\")\n ]\n \n @attributeerror_wrapper(return_value=[])\n def find_records(self, soup):\n return soup.find(\"table\", {\"class\": \"my-table\"})\\\n .find(\"tbody\").find_all(\"tr\")\n\n def split_record(self, record):\n return record.find_all(\"td\")\n \n def modify_record(self, record, soup=None):\n record[\"plan\"] = urljoin(self.url, record[\"plan\"])\n record[\"status\"] = self.deduce_status(record[\"price\"])\n record[\"fid\"] = self.create_fid(record)\n return record\n\n def deduce_status(self, price):\n if price is None:\n return StatusParser.SOLD\n return StatusParser.AVAILABLE\n\n def create_fid(self, record):\n fid_form = \"{floor}/{number}\"\n return fid_form.format(**record)","sub_path":"parsers/osiedleplatinium.py","file_name":"osiedleplatinium.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"13266530","text":"from health_check.backends import BaseHealthCheckBackend\nfrom health_check.exceptions import (\n ServiceReturnedUnexpectedResult, ServiceUnavailable\n)\n\nfrom sso.utils import sso_api_client\n\n\nclass SingleSignOnBackend(BaseHealthCheckBackend):\n\n message_bad_status = 'SSO proxy returned {0.status_code} status code'\n\n def check_status(self):\n try:\n response = sso_api_client.ping()\n except Exception as error:\n raise ServiceUnavailable('(SSO proxy) ' + str(error))\n else:\n if response.status_code != 200:\n raise ServiceReturnedUnexpectedResult(\n self.message_bad_status.format(response)\n )\n return True\n","sub_path":"healthcheck/backends.py","file_name":"backends.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"173091434","text":"from ichnaea.api.exceptions import (\n RegionNotFoundV0,\n RegionNotFoundV0JS,\n)\nfrom ichnaea.api.locate.tests.base import BaseLocateTest\nfrom ichnaea.tests.base import AppTestCase\n\n\nclass RegionBase(BaseLocateTest, AppTestCase):\n\n apikey_metrics = False\n default_apikey = None\n metric_type = 'region'\n track_connection_events = True\n\n\nclass CommonRegionTests(object):\n\n def test_geoip(self):\n res = self._call(ip=self.test_ip)\n self.check_response(res, 'ok')\n self.check_db_calls(rw=0, ro=0)\n self.check_stats(counter=[\n ('request', [self.metric_path, 'method:post', 'status:200']),\n ], timer=[\n ('request', [self.metric_path, 'method:post']),\n ])\n\n def test_geoip_miss(self):\n res = self._call(ip='127.0.0.1', status=404)\n self.check_response(res, 'not_found')\n self.check_db_calls(rw=0, ro=0)\n self.check_stats(counter=[\n ('request', [self.metric_path, 'method:post', 'status:404']),\n ], timer=[\n ('request', [self.metric_path, 'method:post']),\n ])\n\n def test_get(self):\n res = self._call(ip=self.test_ip, method='get', status=200)\n self.check_response(res, 'ok')\n self.check_stats(counter=[\n ('request', [self.metric_path, 'method:get', 'status:200']),\n ], timer=[\n ('request', [self.metric_path, 'method:get']),\n ])\n\n def test_options(self):\n res = self._call(method='options', status=200)\n self.assertEqual(res.headers['Access-Control-Allow-Origin'], '*')\n self.assertEqual(res.headers['Access-Control-Max-Age'], '2592000')\n\n def test_unsupported_methods(self):\n self._call(method='delete', status=405)\n self._call(method='patch', status=405)\n self._call(method='put', status=405)\n\n def test_cache(self):\n res = self._call(ip=self.test_ip, method='get', status=200)\n cache = res.cache_control\n self.assertFalse(cache.public)\n self.assertTrue(cache.private)\n self.assertTrue(cache.proxy_revalidate)\n self.assertEqual(cache.max_age, 60)\n self.assertEqual(cache.s_max_age, 0)\n\n def test_api_key(self):\n res = self._call(ip=self.test_ip, api_key='test')\n self.check_response(res, 'ok')\n self.check_db_calls(rw=0, ro=0)\n # we don't log any additional API-key specific metrics\n self.check_stats(total=2)\n\n\nclass TestJSONView(CommonRegionTests, RegionBase):\n\n url = '/country.json'\n metric_path = 'path:country.json'\n not_found = RegionNotFoundV0\n\n @property\n def ip_response(self):\n return {\n 'country_code': 'GB',\n 'country_name': 'United Kingdom',\n }\n\n def check_response(self, response, status):\n self.assertEqual(response.content_type, 'application/json')\n self.assertEqual(response.charset, 'UTF-8')\n self.assertEqual(response.headers['Access-Control-Allow-Origin'], '*')\n self.assertEqual(response.headers['Access-Control-Max-Age'], '2592000')\n if status == 'ok':\n self.assertEqual(response.json, self.ip_response)\n elif status == 'not_found':\n self.assertEqual(response.json, self.not_found.json_body())\n\n\nclass TestJSView(CommonRegionTests, RegionBase):\n\n url = '/country.js'\n metric_path = 'path:country.js'\n not_found = RegionNotFoundV0JS\n\n @property\n def ip_response(self):\n return \"\"\"\\\nfunction geoip_country_code() { return 'GB'; }\nfunction geoip_country_name() { return 'United Kingdom'; }\n\"\"\"\n\n def check_response(self, response, status):\n self.assertEqual(response.content_type, 'text/javascript')\n self.assertEqual(response.charset, 'UTF-8')\n self.assertEqual(response.headers['Access-Control-Allow-Origin'], '*')\n self.assertEqual(response.headers['Access-Control-Max-Age'], 
'2592000')\n if status == 'ok':\n self.assertEqual(response.text, self.ip_response)\n elif status == 'not_found':\n self.assertEqual(response.text, self.not_found().text)\n","sub_path":"ichnaea/api/locate/tests/test_region_v0.py","file_name":"test_region_v0.py","file_ext":"py","file_size_in_byte":4115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"502178683","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2018/11/26 14:53\n# @Author : qkwu\n# @File : leetcode10RegularExpressionMatching.py\n\n# 简单递归 超时\n# class Solution(object):\n# def isMatch(self, s, p):\n# if not p: return not s\n# first_match = bool(s) and p[0] in [s[0],'.']\n#\n# if len(p) >= 2 and p[1] == '*':\n# return self.isMatch(s, p[2:]) or (first_match and self.isMatch(s[1:], p))\n#\n# return first_match and self.isMatch(s[1:], p[1:])\n\n# DP\nclass Solution(object):\n def isMatch(self, s, p):\n # The DP table and the string s and p use the same indexes i and j, but\n # table[i][j] means the match status between p[:i] and s[:j], i.e.\n # table[0][0] means the match status of two empty strings, and\n # table[1][1] means the match status of p[0] and s[0]. Therefore, when\n # refering to the i-th and the j-th characters of p and s for updating\n # table[i][j], we use p[i - 1] and s[j - 1].\n m, n = len(s) + 1, len(p) + 1\n matches = [[False] * n for _ in range(m)]\n\n # Match empty string with empty pattern\n matches[0][0] = True\n\n # Match empty string with .*\n for i, element in enumerate(p[1:], 2):\n matches[0][i] = matches[0][i - 2] and element == '*'\n\n for i, ss in enumerate(s, 1):\n for j, pp in enumerate(p, 1):\n if pp != '*':\n # The previous character has matched and the current one\n # has to be matched. Two possible matches: the same or .\n matches[i][j] = matches[i - 1][j - 1] and \\\n (ss == pp or pp == '.')\n else:\n # Horizontal look up [j - 2].\n # Not use the character before *.\n matches[i][j] |= matches[i][j - 2]\n\n # Vertical look up [i - 1].\n # Use at least one character before *.\n # p a b *\n # s 1 0 0 0\n # a 0 1 0 1\n # b 0 0 1 1\n # b 0 0 0 ?\n # 因为j是从1开始enu,所以用p[j-2]实际上只是前一位,所以下式是为了尽量匹配更多的?*(如bbbbb 匹配b*)\n if ss == p[j - 2] or p[j - 2] == '.':\n matches[i][j] |= matches[i - 1][j]\n\n return matches[-1][-1]\ns = \"bbbacbcbcbbbbabbbab\"\np = \"b*c*c*.*.*.*ab*c\"\n# Output: true\n\nsl = Solution()\nprint(sl.isMatch(s, p))\n\n# '.' Matches any single character.\n# '*' Matches zero or more of the preceding element.\n# s = \"aa\"\n# p = \"a*\"\n# Output: true\n#\n#\n# s = \"aab\"\n# p = \"c*a*b\"\n# Output: true\n#\n# s = \"mississippi\"\n# p = \"mis*is*p*.\"\n# Output: false","sub_path":"leetcode10RegularExpressionMatching.py","file_name":"leetcode10RegularExpressionMatching.py","file_ext":"py","file_size_in_byte":2799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"600653635","text":"# imports #\nimport json\nimport xml.etree.ElementTree as et\nimport os\nfrom strippers import strip_space\n\n# Directory stuff #\ndir_path = os.path.dirname(os.path.realpath(__file__))\ndir_path = dir_path.replace('/Documentation/Max Documentation', '/Current Test Version/FrameLib')\nref_dir = f'{dir_path}/docs/refpages'\nobj_lookup = f'{dir_path}/interfaces/FrameLib-obj-dlookup.json'\n\n# A class to parse the XML files and build a JSON file from it #\nclass ParseAndBuild():\n def __init__(self):\n self.tree = 0\n self.root = 0\n self.digest = 'none'\n self.d_master_dict = dict({})\n \n # Assign values to construct the json #\n def build_json_file(self):\n self.d_inner_data = dict({\n 'digest' : self.digest\n })\n self.d_outer_data = dict({self.object_name:self.d_inner_data})\n self.d_master_dict.update(self.d_outer_data)\n\n # Extract the info from the refpages #\n def extract_from_refpage(self, x):\n self.tree = et.parse(x)\n self.root = self.tree.getroot() #c74object\n\n # Find Information # \n self.object_name = self.root.get('name') #finds the name so you don't have to do regex\n\n for child in self.root:\n if child.tag == 'digest':\n self.digest = child.text\n\n # Strip whitespace #\n self.digest = strip_space(self.digest)\n\n # Call the build function #\n self.build_json_file()\n\n#----------- THE GUTS ----------- #\ndef main():\n worker = ParseAndBuild()\n for filename in os.listdir(ref_dir):\n if filename != '.DS_Store':\n if filename != '_c74_ref_modules.xml':\n current_category = filename\n source_file_name = f'{ref_dir}/{filename}'\n\n for filename in os.listdir(source_file_name):\n if filename != '.DS_Store':\n source_file = f'{ref_dir}/{current_category}/{filename}'\n worker.extract_from_refpage(source_file)\n\n with open(obj_lookup, 'w') as fp:\n json.dump(worker.d_master_dict, fp, indent=4)\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n","sub_path":"Documentation/Max Documentation/parse_to_dlookup.py","file_name":"parse_to_dlookup.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"404816243","text":"# tower of hanoi problem implementation\n# for problem statement search it\n\ndef toh(n,frm, to, aux):\n '''\n toh == tower of hanoi, n= number of disks (numbered 1,2,...n),\n 'frm' is source disk, 'to' is destination disk and 'aux' is auxillary disk\n '''\n # if only 1 disk, transfer to destination\n if n==1:\n print(\"Move disk {} from {} to {}\\n\".format(n,frm,to))\n else:\n # move n-1 disks from source to auxillary using destination\n toh(n-1,frm, aux, to)\n # move last disk from source to destination\n print(\"Move disk {} from {} to {}\\n\".format(n,frm, to))\n # move n-1 disks back from auxillary to destination using source\n toh(n-1, aux, to, frm)\n\n\nif __name__ == '__main__':\n n = int(input(\"enter number of disks \"))\n print(\"We are using A, B and C tower, where A initially contains all the disks\\n and need to be transferred to C\\n\")\n toh(n, 'A', 'C', 'B')\n print(\"Completed..\")","sub_path":"towerOfHanoi.py","file_name":"towerOfHanoi.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"148894043","text":"\"\"\"\nSprites with texture transformations\n\nArtwork from http://kenney.nl\n\nIf Python and Arcade are installed, this example can be run from the command line with:\npython -m arcade.examples.sprite_texture_transform\n\"\"\"\n\nimport arcade\nfrom arcade import Matrix3x3\nimport math\nimport os\n\nSCREEN_WIDTH = 800\nSCREEN_HEIGHT = 600\nSHIP_SPEED = 5\nASPECT = SCREEN_HEIGHT / SCREEN_WIDTH\nSCREEN_TITLE = \"Texture transformations\"\n\n\nclass MyGame(arcade.Window):\n \"\"\" Main application class. \"\"\"\n\n def __init__(self, width, height, title):\n \"\"\"\n Initializer\n \"\"\"\n super().__init__(width, height, title)\n\n # Set the working directory (where we expect to find files) to the same\n # directory this .py file is in. You can leave this out of your own\n # code, but it is needed to easily run the examples using \"python -m\"\n # as mentioned at the top of this program.\n file_path = os.path.dirname(os.path.abspath(__file__))\n os.chdir(file_path)\n\n self.ship = None\n self.camera_x = 0\n self.t = 0\n self.stars = None\n self.xy_square = None\n\n def setup(self):\n \"\"\" Setup \"\"\"\n self.ship = arcade.Sprite(\":resources:images/space_shooter/playerShip1_orange.png\", 0.5)\n self.ship.center_x = SCREEN_WIDTH / 2\n self.ship.center_y = SCREEN_HEIGHT / 2\n self.ship.angle = 270\n self.stars = arcade.load_texture(\":resources:images/backgrounds/stars.png\")\n self.xy_square = arcade.load_texture(\":resources:images/test_textures/xy_square.png\")\n\n # Set the background color\n arcade.set_background_color(arcade.color.BLACK)\n\n def on_update(self, delta_time: float):\n \"\"\" Update \"\"\"\n self.ship.update()\n self.camera_x += 2\n self.t += delta_time * 60\n\n def on_draw(self):\n \"\"\"\n Render the screen.\n \"\"\"\n\n # This command has to happen before we start drawing\n arcade.start_render()\n\n for z in [300, 200, 150, 100]:\n opacity = int(math.exp(-z / 1000) * 255)\n angle = z\n scale = 150 / z\n translate = scale / 500\n self.stars.draw_transformed(\n 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, opacity,\n Matrix3x3().rotate(angle).scale(scale * ASPECT, scale).translate(-self.camera_x * translate, 0))\n self.ship.draw()\n\n for i, pair in enumerate([\n ['identity', Matrix3x3()],\n ['rotate(30)', Matrix3x3().rotate(30)],\n ['scale(0.8, 0.5)', Matrix3x3().scale(0.8, 0.5)],\n ['translate(0.3, 0.1)', Matrix3x3().translate(0.3, 0.1)],\n ['rotate(10).\\nscale(0.33, 0.33)', Matrix3x3().rotate(10).scale(0.7, 0.7)],\n ['scale(-1, 1)', Matrix3x3().scale(-1, 1)],\n ['shear(0.3, 0.1)', Matrix3x3().shear(0.3, 0.1)],\n [f'rotate({int(self.t) % 360})', Matrix3x3().rotate(self.t)],\n ]):\n x = 80 + 180 * (i % 4)\n y = 420 - (i // 4) * 320\n arcade.draw_text(pair[0], x, y - 20 - pair[0].count('\\n') * 10, arcade.color.WHITE, 10)\n self.xy_square.draw_transformed(x, y, 100, 100, 0, 255, pair[1])\n\n\ndef main():\n \"\"\" Main method \"\"\"\n window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)\n window.setup()\n arcade.run()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"arcade/examples/texture_transform.py","file_name":"texture_transform.py","file_ext":"py","file_size_in_byte":3380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"291100772","text":"from tkinter import *\n\n\ndef doNothing():\n print(\"ok ok I won't...\")\n\nroot = Tk()\n\nroot.option_add('*tearOff',False) # hide the dotted line below the window\n\n# Tkinter puts menus at the top by default\nmenu = Menu(root)\nroot.config(menu=menu)\n\nsubMenu = Menu(menu)\n# Adds a drop down when \"File\" is clicked\nmenu.add_cascade(label=\"File\", menu=subMenu)\n\nsubMenu.add_command(label=\"New Project...\", command=doNothing)\nsubMenu.add_command(label=\"New...\", command=doNothing)\nsubMenu.add_separator()\nsubMenu.add_command(label=\"Exit\", command=doNothing)\n\neditMenu = Menu(menu)\nmenu.add_cascade(label=\"Edit\", menu=editMenu)\neditMenu.add_command(label=\"Redo\", command=doNothing)\n\nroot.mainloop()","sub_path":"6-menu/menu_template.py","file_name":"menu_template.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"654130196","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('building', '0012_sponsor'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='sponsor',\n name='condition_des',\n field=models.TextField(verbose_name=b'\\xe6\\x9d\\xa1\\xe4\\xbb\\xb6\\xe8\\xaf\\xb4\\xe6\\x98\\x8e', blank=True),\n ),\n migrations.AddField(\n model_name='sponsor',\n name='income',\n field=models.IntegerField(default=0, verbose_name=b'\\xe6\\xaf\\x8f\\xe5\\xa4\\xa9\\xe6\\x94\\xb6\\xe5\\x85\\xa5\\xe8\\xbd\\xaf\\xe5\\xa6\\xb9\\xe5\\xb8\\x81'),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='sponsor',\n name='income_des',\n field=models.TextField(verbose_name=b'\\xe6\\x94\\xb6\\xe5\\x85\\xa5\\xe8\\xaf\\xb4\\xe6\\x98\\x8e', blank=True),\n ),\n ]\n","sub_path":"apps/building/migrations/0013_auto_20151023_1631.py","file_name":"0013_auto_20151023_1631.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"623018423","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.7-x86_64/egg/airflow/contrib/operators/redis_publish_operator.py\n# Compiled at: 2019-09-11 03:47:34\n# Size of source mod 2**32: 2191 bytes\nfrom airflow.utils.decorators import apply_defaults\nfrom airflow.contrib.hooks.redis_hook import RedisHook\nfrom airflow.models import BaseOperator\n\nclass RedisPublishOperator(BaseOperator):\n __doc__ = '\\n Publish a message to Redis.\\n\\n :param channel: redis channel to which the message is published (templated)\\n :type channel: str\\n :param message: the message to publish (templated)\\n :type message: str\\n :param redis_conn_id: redis connection to use\\n :type redis_conn_id: str\\n '\n template_fields = ('channel', 'message')\n\n @apply_defaults\n def __init__(self, channel, message, redis_conn_id='redis_default', *args, **kwargs):\n (super(RedisPublishOperator, self).__init__)(*args, **kwargs)\n self.redis_conn_id = redis_conn_id\n self.channel = channel\n self.message = message\n\n def execute(self, context):\n \"\"\"\n Publish the message to Redis channel\n\n :param context: the context object\n :type context: dict\n \"\"\"\n redis_hook = RedisHook(redis_conn_id=(self.redis_conn_id))\n self.log.info('Sending messsage %s to Redis on channel %s', self.message, self.channel)\n result = redis_hook.get_conn().publish(channel=(self.channel), message=(self.message))\n self.log.info('Result of publishing %s', result)","sub_path":"pycfiles/apache_airflow_arup-1.10.5-py3.6/redis_publish_operator.cpython-36.py","file_name":"redis_publish_operator.cpython-36.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"434184823","text":"import os,time\nfrom django.http import HttpResponse\nfrom django.template import loader\nfrom django.shortcuts import get_object_or_404, render\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.urls import reverse\nimport googlemaps\nfrom datetime import datetime,timedelta\nfrom time import mktime\nfrom math import ceil\n\n# setting the timezone for proper dates\nos.environ['TZ'] = 'Europe/Amsterdam'\ntime.tzset()\n\n# Setting up the Google Maps client with the key stored in the environment variables\ngmaps = googlemaps.Client(key=os.environ['GMAPS_KEY'])\n\n# Setting up the logger for logging\nimport logging\nlogger = logging.getLogger('qlinkplanner')\n\n\n# View for the index. Just a simple form.\ndef index(request):\n\treturn render(request,'planner/index.html')\n\t\n# View for the car modality\ndef car(request):\n\t# Form validation\n\tif(request.POST.get(\"from\",\"\") == \"\" or request.POST.get(\"to\",\"\") == \"\"):\n\t\tlogger.info(\"Empty from or to\")\n\t\treturn\n\t# Get current time\n\tnow = datetime.now()\n\t# Fetch data from Google\n\tcar_directions = gmaps.directions(request.POST.get(\"from\", \"\"),request.POST.get(\"to\", \"\"),mode=\"driving\",departure_time=now,language=\"nl_NL\")\n\t# Calculae arrival time\n\tarrival = now + timedelta(seconds = car_directions[0][\"legs\"][0][\"duration\"][\"value\"])\n\t# Log request\n\tlogger.info(\"Car\")\n\t# Render the car modality\n\treturn render(request,'planner/trips/car.html',{'directions':car_directions,'arrival':arrival.strftime(\"%H:%M\")})\n\t\ndef parkride(request):\n\t# Form validation\n\tif(request.POST.get(\"from\",\"\") == \"\" or request.POST.get(\"to\",\"\") == \"\"):\n\t\tlogger.info(\"Empty from or to\")\n\t\treturn\n\t\t\n\t# Get current time\n\tnow = datetime.now()\n\t\n\t# Set place-id and name for the park and rides\n\tif(request.POST.get(\"via\",\"\") == \"prhgk\"):\n\t\tparkride = \"ChIJvdszPSczyEcRhdw9i5NkIqA\"\n\t\tprname = \"Park+Ride Hoogkerk\"\n\telif(request.POST.get(\"via\",\"\") == \"prhrn\"):\n\t\tparkride = \"ChIJf4OcMgstyEcRO1iKAgXkFPQ\"\n\t\tprname = \"Park+Ride Haren\"\n\telif(request.POST.get(\"via\",\"\") == \"prreit\"):\n\t\tparkride = \"ChIJX1933-DMyUcRFxqjP6DI8ow\"\n\t\tprname = \"Park+Ride Reitdiep\"\n\telif(request.POST.get(\"via\",\"\") == \"prkar\"):\n\t\tparkride = \"ChIJ61-7iZrSyUcRp09q_RrMv_U\"\n\t\tprname = \"Park+Ride Kardinge\"\n\telif(request.POST.get(\"via\",\"\") == \"prhs\"):\n\t\tparkride = \"ChIJPSpLYU3NyUcR2XAniPw6Yuk\"\n\t\tprname = \"Park+Ride Hoofdstation\"\n\telif(request.POST.get(\"via\",\"\") == \"prp3\"):\n\t\tparkride = \"ChIJ54zXTkstyEcR8dO-LV1HdxI\"\n\t\tprname = \"Park+Ride P3 (Europapark/Boumaboulevard)\"\n\n\t# first car then ride\n\tif(request.POST.get(\"direction\",\"\") == \"heen\"):\n\t\t# Get car directions\n\t\tcar_directions = gmaps.directions(request.POST.get(\"from\", \"\"),\"place_id:\"+parkride,mode=\"driving\",departure_time=now,language=\"nl_NL\")\n\t\t# Calculate arrival at park+ride and add 5 minutes to that\n\t\tarrival_at_park_ride = now + timedelta(seconds = car_directions[0][\"legs\"][0][\"duration\"][\"value\"])\n\t\tarrival_at_park_ride_parked = arrival_at_park_ride + timedelta(minutes = 5)\n\t\t# Get transit directions\n\t\ttransit_directions = gmaps.directions(\"place_id:\"+parkride,request.POST.get(\"to\", \"\"),mode=\"transit\",departure_time=arrival_at_park_ride_parked,language=\"nl_NL\")\n\t\t# log the request\n\t\tlogger.info(\"Park and Ride heen\")\n\t\t# Get arrival time and 
transform it into a timestamp\n\t\tarrival = transit_directions[0][\"legs\"][0][\"arrival_time\"][\"text\"]\n\t\tarrival_timestamp = datetime.strptime(now.strftime(\"%Y-%m-%d\")+\" \"+transit_directions[0][\"legs\"][0][\"arrival_time\"][\"text\"],\"%Y-%m-%d %H:%M\")\n\t\t# Convert it into an UNIX timestamp to calculate duration time\n\t\tarrival_timestamp = mktime(arrival_timestamp.timetuple())\n\t\tnow_unix = mktime(now.timetuple())\n\t\tduration = ceil((arrival_timestamp-now_unix)/60)\n\t\t# Render results\n\t\treturn render(request,'planner/trips/parkrideheen.html',{'car_directions':car_directions,'transit_directions':transit_directions,'arrival':arrival,'prname':prname,'duration':duration})\n\t\n\t# First ride then car (return trip)\n\telif(request.POST.get(\"direction\",\"\") == \"terug\"):\n\t\t# Get transit directions\n\t\ttransit_directions = gmaps.directions(request.POST.get(\"from\", \"\"),\"place_id:\"+parkride,mode=\"transit\",departure_time=now,language=\"nl_NL\")\n\t\t# Calculate the arrival time at the park+ride and add 5 minutes to that\n\t\tarrival_at_park_ride = datetime.strptime(now.strftime(\"%Y-%m-%d\")+\" \"+transit_directions[0][\"legs\"][0][\"arrival_time\"][\"text\"],\"%Y-%m-%d %H:%M\")\n\t\tarrival_at_park_ride_parked = arrival_at_park_ride + timedelta(minutes = 5)\n\t\t# Get the car directions\n\t\tcar_directions = gmaps.directions(\"place_id:\"+parkride,request.POST.get(\"to\", \"\"),mode=\"driving\",departure_time=arrival_at_park_ride_parked,language=\"nl_NL\")\n\t\t# Calculate the duration based on the durations + 5 minutes\n\t\tduration = ceil((transit_directions[0][\"legs\"][0][\"duration\"][\"value\"] + (5*60) + car_directions[0][\"legs\"][0][\"duration\"][\"value\"])/60)\n\t\t# Calculate the final arrival time\n\t\tarrival = arrival_at_park_ride_parked + timedelta(seconds = car_directions[0][\"legs\"][0][\"duration\"][\"value\"])\n\t\tlogger.info(\"Park and Ride terug\")\n\t\t# Extract the departure time\n\t\tdeparture = transit_directions[0][\"legs\"][0][\"departure_time\"][\"text\"]\n\t\t# Render the results\n\t\treturn render(request,'planner/trips/parkrideterug.html',{'car_directions':car_directions,'transit_directions':transit_directions,'arrival':arrival.strftime(\"%H:%M\"),'prname':prname,'duration':duration,'departure':departure})\n\t\t\n\t\n\t\ndef transit(request):\n\t# Form validation\n\tif(request.POST.get(\"from\",\"\") == \"\" or request.POST.get(\"to\",\"\") == \"\"):\n\t\tlogger.info(\"Empty from or to\")\n\t\treturn\n\t# Get current date\n\tnow = datetime.now()\n\t#Fetch directions from Google\n\ttransit_directions = gmaps.directions(request.POST.get(\"from\", \"\"),request.POST.get(\"to\", \"\"),mode=\"transit\",departure_time=now,language=\"nl_NL\")\n\t# Extract arrival time\n\tarrival = transit_directions[0][\"legs\"][0][\"arrival_time\"][\"text\"]\n\t# Extract departure time\n\tdeparture = transit_directions[0][\"legs\"][0][\"departure_time\"][\"text\"]\n\t# Log request\n\tlogger.info(\"Transit\")\n\t# Render page\n\treturn render(request,'planner/trips/transit.html',{'directions':transit_directions,'arrival':arrival,'departure':departure})","sub_path":"planner/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"231654686","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 25 17:11:09 2018\n\n@author: yuanjihuang\n\"\"\"\nimport csv\nimport Adafruit_ADS1x15\nimport Adafruit_ADXL345\n\n\nclass Sensor:\n def __init__(self, max_len=20):\n self.adc = Adafruit_ADS1x15.ADS1115(address=0x48)\n self.acel = Adafruit_ADXL345.ADXL345(address=0x53)\n self.cur_time = 0\n self.time = [0]\n self.muscle = []\n self.acc = []\n self.max_len = max_len\n\n def check_len(self):\n if len(self.time) > self.max_len:\n self.time.pop(0)\n if len(self.muscle) > self.max_len:\n self.muscle.pop(0)\n if len(self.acc) > self.max_len:\n self.acc.pop(0)\n\n def read(self):\n # add time\n self.cur_time += 1\n self.time.append(self.cur_time)\n # add muscle\n a = self.adc.read_adc(0, gain=(2 / 3))\n self.muscle.append(a)\n # add acc\n x, y, z = self.acel.read()\n self.acc.append((x, y, z))\n # check length\n self.check_len()\n\n def save_csv(self, name):\n # print(self.cur_time)\n with open(name, 'w') as output:\n writer = csv.writer(output, delimiter=',', lineterminator='\\n')\n for i in range(len(self.muscle)):\n writer.writerow([self.time[i], self.muscle[i], self.acc[i][0],\\\n self.acc[i][1], self.acc[i][2]])\n","sub_path":"sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"499973017","text":"import read\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score, confusion_matrix, precision_score, recall_score, f1_score\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import classification_report\n\nprint(\"Reading data ...\")\nx_all, y_all = read.read(LOAD_DATA=False)\nx_train, x_test, y_train, y_test = train_test_split(x_all, y_all, test_size=0.3, random_state=42)\nprint(x_train.shape, y_train.shape)\nprint(x_test.shape, y_test.shape)\n\nmodels = [RandomForestClassifier(),\n RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',\n max_depth=None, max_features='sqrt', max_leaf_nodes=None,\n min_impurity_decrease=0.0, min_impurity_split=None,\n min_samples_leaf=1, min_samples_split=2,\n min_weight_fraction_leaf=0.0, n_estimators=1000, n_jobs=1,\n oob_score=False, random_state=None, verbose=0,\n warm_start=False)]\n\nfor model in models:\n print(\"Fitting RF ...\")\n model.fit(x_train, y_train)\n\n print(\"Evaluating ...\")\n y_pred = model.predict(x_test)\n\n print(\"Accuracy is %f.\" % accuracy_score(y_test, y_pred))\n print(confusion_matrix(y_test, y_pred))\n print(\"Precision score is %f.\" % precision_score(y_test, y_pred))\n print(\"Recall score is %f.\" % recall_score(y_test, y_pred))\n print(\"F1 score is %f.\" % f1_score(y_test, y_pred))\n print(\"-----------------------------------\")\n","sub_path":"model/train/train_randomForenst.py","file_name":"train_randomForenst.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"103343009","text":"from tkinter import *\r\nimport pandas as pd\r\nimport pickle\r\nfrom tkinter import messagebox\r\nfrom tkinter import font\r\n\r\n# loading dumped datafiles(pickles from managing_inventory.py)\r\ndata = pickle.load(open('pcat.p', 'rb'))\r\nlast3 = pickle.load(open('last3mon.p', 'rb'))\r\nlis_inv = pickle.load(open(\"lis_inv.p\", \"rb\"))\r\ninv_mon_name = pickle.load(open(\"inv_mon_name.p\", \"rb\"))\r\npred = pickle.load(open(\"predsales.p\", \"rb\"))\r\noff = pickle.load(open('pcat.p', 'rb'))\r\noff_prod = pickle.load(open(\"off_prod.p\", \"rb\"))\r\nfur_prod = pickle.load(open(\"fur_prod.p\", \"rb\"))\r\ntec_prod = pickle.load(open(\"tec_prod.p\", \"rb\"))\r\n\r\n# window configuration\r\nroot = Tk()\r\nroot.geometry(\"900x600\")\r\nroot.configure(background='RoyalBlue3')\r\nroot.title(\"DM-Inventory Management\")\r\nroot.resizable(0,0)\r\nMonth = list(range(1,13))\r\nYear = [2014,2015,2016,2017]\r\nCategory = ['Office Supplies','Furniture','Technology']\r\nBMW = ['Best', 'Moderate', 'Worst']\r\noffsub = ['Binders', 'Storage', 'Appliances', 'Paper', 'Art', 'Envelopes', 'Fasteners', 'Labels', 'Supplies']\r\nfursub = ['Chairs', 'Bookcases', 'Furnishings', 'Tables']\r\ntecsub = ['Copiers', 'Accessories', 'Machines', 'Phones']\r\nsub = [offsub, fursub, tecsub]\r\n\r\nrate = StringVar(root)\r\nrate.set('a')\r\nrateDrop = OptionMenu(root, rate, *BMW)\r\n\r\nsubname = StringVar(root)\r\nsub_sel = ['a']\r\nsubname.set('a')\r\nscnameDropDown = OptionMenu(root, subname, *sub_sel)\r\n\r\np = StringVar(root)\r\nproduct_list = [0]\r\npDropDown = OptionMenu(root, p, *product_list)\r\n\r\n\r\ndef gsummary(*ar):\r\n v = p.get()\r\n print(v)\r\n h1=list(last3[:370]['d_product'])\r\n h2=list(last3[371:1500]['d_product'])\r\n h3=list(last3[1500:]['d_product'])\r\n val = 0\r\n z = 0\r\n \r\n if v in h1:\r\n for j in range(len(last3)):\r\n if last3['d_product'][j] == v:\r\n z=j\r\n ltq= last3['d_quantity'][z] \r\n print(\"Quantity sold in last 3 months: \", ltq)\r\n val = lis_inv[inv_mon_name[-1]][v]\r\n print(\"Remaining quantity in inventory\", val)\r\n print('Best Product')\r\n pre = pred[v]\r\n print(\"Predicted sales:\", pre)\r\n listi.delete(0, END)\r\n listi.insert(END, ' Best Product')\r\n listi.itemconfig(END, foreground=\"green\")\r\n listi.insert(END, 'Name of the product :')\r\n listi.insert(END, v)\r\n listi.insert(END, 'Quantity sold in last 3 months : {}'.format(ltq))\r\n listi.insert(END, 'Remaining quantity in inventory : {}'.format(val))\r\n listi.insert(END, 'Predicted sales : {}'.format(pre))\r\n if(val < 15):\r\n messagebox.showinfo(\"Add inventory alert\", \"This product quantity is running low. 
Increase the inventory for this product!!!\")\r\n \r\n \r\n elif v in h2:\r\n for j in range(len(last3)):\r\n if last3['d_product'][j] == v:\r\n z=j\r\n ltq= last3['d_quantity'][z] \r\n print(\"Quantity sold in last 3 months: \", ltq)\r\n val = lis_inv[inv_mon_name[-1]][v]\r\n print(\"Remaining quantity in inventory\", val)\r\n print('Moderate Product')\r\n pre = pred[v]\r\n print(\"Predicted sales:\", pre)\r\n listi.delete(0, END)\r\n listi.insert(END, ' Moderate Product')\r\n listi.itemconfig(END, foreground=\"blue\")\r\n listi.insert(END, 'Name of the product :')\r\n listi.insert(END, v)\r\n listi.insert(END, 'Quantity sold in last 3 months : {}'.format(ltq))\r\n listi.insert(END, 'Remaining quantity in inventory : {}'.format(val))\r\n listi.insert(END, 'Predicted sales : {}'.format(pre))\r\n if(val < 15):\r\n messagebox.showinfo(\"Add inventory alert\", \"This product quantity is running low. Increase the inventory for this product!!!\")\r\n \r\n elif v in h3:\r\n for j in range(len(last3)):\r\n if last3['d_product'][j] == v:\r\n z=j\r\n ltq= last3['d_quantity'][z] \r\n print(\"Quantity sold in last 3 months: \", ltq)\r\n val = lis_inv[inv_mon_name[-1]][v]\r\n print(\"Remaining quantity in inventory\", val)\r\n print('Worst Product')\r\n pre = pred[v]\r\n print(\"Predicted sales:\", pre)\r\n listi.delete(0, END)\r\n listi.insert(END, ' Worst Product')\r\n listi.itemconfig(END, foreground=\"red\")\r\n listi.insert(END, 'Name of the product :')\r\n listi.insert(END, v)\r\n listi.insert(END, 'Quantity sold in last 3 months : {}'.format(ltq))\r\n listi.insert(END, 'Remaining quantity in inventory : {}'.format(val))\r\n listi.insert(END, 'Predicted sales : {}'.format(pre))\r\n if(val < 15):\r\n messagebox.showinfo(\"Add inventory alert\", \"This product quantity is running low. Increase the inventory for this product!!!\")\r\n\r\n elif v == 'Nothing to select':\r\n listi.delete(0, END)\r\n listi.insert(END, \"*Nothing to display*\")\r\n \r\n else:\r\n val = lis_inv[inv_mon_name[-1]][v]\r\n print(\"Remaining quantity in inventory\", val)\r\n print('Not Sold in last THREE MONTHS!!!')\r\n pre = pred[v]\r\n print(\"Predicted sales:\", pre)\r\n listi.delete(0, END)\r\n listi.insert(END, ' Not Sold in last THREE MONTHS!!!')\r\n listi.itemconfig(END, foreground=\"purple\")\r\n listi.insert(END, 'Name of the product :')\r\n listi.insert(END, v)\r\n listi.insert(END, 'Remaining quantity in inventory : {}'.format(val))\r\n listi.insert(END, 'Predicted sales : {}'.format(pre))\r\n if(val < 15):\r\n messagebox.showinfo(\"Add inventory alert\", \"This product quantity is running low. 
Increase the inventory for this product!!!\")\r\n \r\n \r\n# function to get product summary on selecting a product\r\ndef getpname(*val):\r\n c = cat.get()\r\n v = subname.get()\r\n r = rate.get()\r\n global p\r\n global pDropDown\r\n pDropDown.destroy()\r\n print(c, v, r)\r\n p = StringVar(root)\r\n if c == 'Office Supplies':\r\n if r == 'Best':\r\n for i in range(len(sub[0])):\r\n if v == sub[0][i]:\r\n product_list = off_prod[0][i]\r\n if len(product_list) > 0:\r\n p.set(product_list[0])\r\n else:\r\n product_list = ['Nothing to select']\r\n p.set(product_list[0])\r\n listi.delete(0, END)\r\n listi.insert(END, \"*Nothing to display*\")\r\n p.trace(\"w\", gsummary)\r\n pDropDown = OptionMenu(root, p, *product_list)\r\n pDropDown.configure(width = 30, anchor = 'w')\r\n pDropDown.place(relx = 0.21, rely = 0.41)\r\n elif r == 'Moderate':\r\n for i in range(len(sub[0])):\r\n if v == sub[0][i]:\r\n product_list = off_prod[1][i]\r\n if len(product_list)> 0:\r\n p.set(product_list[0])\r\n else:\r\n product_list = ['Nothing to select']\r\n p.set(product_list[0])\r\n listi.delete(0, END)\r\n listi.insert(END, \"*Nothing to display*\")\r\n p.trace(\"w\", gsummary)\r\n pDropDown = OptionMenu(root, p, *product_list)\r\n pDropDown.configure(width = 30, anchor = 'w')\r\n pDropDown.place(relx = 0.21, rely = 0.41)\r\n else:\r\n for i in range(len(sub[0])):\r\n if v == sub[0][i]:\r\n product_list = off_prod[2][i]\r\n if len(product_list)> 0:\r\n p.set(product_list[0])\r\n else:\r\n product_list = ['Nothing to select']\r\n p.set(product_list[0])\r\n listi.delete(0, END)\r\n listi.insert(END, \"*Nothing to display*\")\r\n p.trace(\"w\", gsummary)\r\n pDropDown = OptionMenu(root, p, *product_list)\r\n pDropDown.configure(width = 30, anchor = 'w')\r\n pDropDown.place(relx = 0.21, rely = 0.41)\r\n \r\n elif c == 'Furniture':\r\n if r == 'Best':\r\n for i in range(len(sub[1])):\r\n if v == sub[1][i]:\r\n product_list = fur_prod[0][i]\r\n if len(product_list) > 0:\r\n p.set(product_list[0])\r\n else:\r\n product_list = ['Nothing to select']\r\n p.set(product_list[0])\r\n listi.delete(0, END)\r\n listi.insert(END, \"*Nothing to display*\")\r\n p.trace(\"w\", gsummary)\r\n pDropDown = OptionMenu(root, p, *product_list)\r\n pDropDown.configure(width = 30, anchor = 'w')\r\n pDropDown.place(relx = 0.21, rely = 0.41)\r\n elif r == 'Moderate':\r\n for i in range(len(sub[1])):\r\n if v == sub[1][i]:\r\n product_list = fur_prod[1][i]\r\n if len(product_list)> 0:\r\n p.set(product_list[0])\r\n else:\r\n product_list = ['Nothing to select']\r\n p.set(product_list[0])\r\n listi.delete(0, END)\r\n listi.insert(END, \"*Nothing to display*\")\r\n p.trace(\"w\", gsummary)\r\n pDropDown = OptionMenu(root, p, *product_list)\r\n pDropDown.configure(width = 30, anchor = 'w')\r\n pDropDown.place(relx = 0.21, rely = 0.41)\r\n else:\r\n for i in range(len(sub[1])):\r\n if v == sub[1][i]:\r\n product_list = fur_prod[2][i]\r\n if len(product_list)> 0:\r\n p.set(product_list[0])\r\n else:\r\n product_list = ['Nothing to select']\r\n p.set(product_list[0])\r\n listi.delete(0, END)\r\n listi.insert(END, \"*Nothing to display*\")\r\n p.trace(\"w\", gsummary)\r\n pDropDown = OptionMenu(root, p, *product_list)\r\n pDropDown.configure(width = 30, anchor = 'w')\r\n pDropDown.place(relx = 0.21, rely = 0.41)\r\n \r\n else:\r\n if r == 'Best':\r\n for i in range(len(sub[2])):\r\n if v == sub[2][i]:\r\n product_list = tec_prod[0][i]\r\n if len(product_list) > 0:\r\n p.set(product_list[0])\r\n else:\r\n product_list = ['Nothing to select']\r\n 
p.set(product_list[0])\r\n listi.delete(0, END)\r\n listi.insert(END, \"*Nothing to display*\")\r\n p.trace(\"w\", gsummary)\r\n pDropDown = OptionMenu(root, p, *product_list)\r\n pDropDown.configure(width = 30, anchor = 'w')\r\n pDropDown.place(relx = 0.21, rely = 0.41)\r\n elif r == 'Moderate':\r\n for i in range(len(sub[2])):\r\n if v == sub[2][i]:\r\n product_list = tec_prod[1][i]\r\n if len(product_list)> 0:\r\n p.set(product_list[0])\r\n else:\r\n product_list = ['Nothing to select']\r\n p.set(product_list[0])\r\n listi.delete(0, END)\r\n listi.insert(END, \"*Nothing to display*\")\r\n p.trace(\"w\", gsummary)\r\n pDropDown = OptionMenu(root, p, *product_list)\r\n pDropDown.configure(width = 30, anchor = 'w')\r\n pDropDown.place(relx = 0.21, rely = 0.41)\r\n else:\r\n for i in range(len(sub[2])):\r\n if v == sub[2][i]:\r\n product_list = tec_prod[2][i]\r\n if len(product_list)> 0:\r\n p.set(product_list[0])\r\n else:\r\n product_list = ['Nothing to select']\r\n p.set(product_list[0])\r\n listi.delete(0, END)\r\n listi.insert(END, \"*Nothing to display*\")\r\n p.trace(\"w\", gsummary)\r\n pDropDown = OptionMenu(root, p, *product_list)\r\n pDropDown.configure(width = 30, anchor = 'w')\r\n pDropDown.place(relx = 0.21, rely = 0.41)\r\n lisdis.delete(0, END)\r\n j = 1 \r\n if product_list[0] == 'Nothing to select':\r\n lisdis.insert(END, \"*Nothing to display*\")\r\n else:\r\n for i in product_list:\r\n lisdis.insert(END, '{}. {}'.format(j, i))\r\n j +=1\r\n \r\n\r\ndef getrate(*val):\r\n print(subname.get())\r\n global rate\r\n global rateDrop\r\n rateDrop.destroy()\r\n rate = StringVar(root)\r\n rate.set(BMW[0])\r\n rate.trace(\"w\", getpname)\r\n rateDrop = OptionMenu(root, rate, *BMW)\r\n rateDrop.configure(width = 10, anchor = 'w')\r\n rateDrop.place(relx = 0.21, rely = 0.285)\r\n \r\n# function to get a list of products on selecting category \r\ndef getYM(val):\r\n global s\r\n global subname\r\n global sub_sel\r\n global scnameDropDown\r\n scnameDropDown.destroy()\r\n \r\n c = cat.get()\r\n if c == 'Office Supplies':\r\n sub_sel = sub[0]\r\n elif c == 'Furniture':\r\n sub_sel = sub[1]\r\n else:\r\n sub_sel = sub[2]\r\n print(c)\r\n subname = StringVar(root)\r\n subname.set(sub_sel[0])\r\n subname.trace(\"w\", getrate) # calls getpname() when a product is chosen from the list\r\n scnameDropDown = OptionMenu(root, subname, *sub_sel)\r\n scnameDropDown.configure(width = 10, anchor = 'w')\r\n scnameDropDown.place(relx = 0.21, rely = 0.165)\r\n\r\ncatLabel = Label(root,text='Select Category : ', background='RoyalBlue3', fg = 'white')\r\ncatLabel.place(relx = 0.05, rely = 0.05)\r\n\r\npLabel = Label(root,text='Select Sub-Category : ', background='RoyalBlue3', fg = 'white')\r\npLabel.place(relx = 0.05, rely = 0.17)\r\n\r\nproL = Label(root,text='Select Rating : ', background='RoyalBlue3', fg = 'white')\r\nproL.place(relx = 0.05, rely = 0.295)\r\n\r\nLabel(root, text = 'Select Product : ', background='RoyalBlue3', fg = 'white').place(relx = 0.05, rely = 0.415)\r\n \r\ncat = StringVar(root)\r\ncat.set(Category[0])\r\ncatDropdown = OptionMenu(root,cat,*Category, command=getYM)\r\ncatDropdown.configure(width = 13, anchor = 'w')\r\ncatDropdown.place(relx = 0.21, rely = 0.05)\r\n\r\ncatLabel = Label(root,text='Summary of selected product : ', background='RoyalBlue3', fg = 'white')\r\ncatLabel.place(relx = 0.62, rely = 0.55) \r\n\r\nLabel(root, text = 'List of products : ', background='RoyalBlue3', fg = 'white').place(relx = 0.62, rely = 0.06)\r\n\r\nlisdis = Listbox(root, width = 60, height = 
15)\r\nlisdis.place(relx = 0.545, rely = 0.1)\r\n\r\nlisti = Listbox(root, width = 45, height = 7)\r\nlisti.place(relx = 0.6, rely = 0.59)\r\n\r\n\r\nroot.mainloop()","sub_path":"Final Folder/GUI/in_gui.py","file_name":"in_gui.py","file_ext":"py","file_size_in_byte":14928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"587491843","text":"'''\r\nCreated on Sep 3, 2012\r\n \r\n@author: Daniel J. Rivers\r\n'''\r\nfrom DataAccess.DBInt import DBInt\r\nfrom Utilities import OutputUtils\r\n\r\nclass TableHandler:\r\n\r\n sep = \"~\"\r\n\r\n @staticmethod\r\n def writeRecord( l, td ):\r\n values = []\r\n where = []\r\n for i, v in enumerate( l ):\r\n values.append( ( td.columnNames[ i ][ 0 ], v ) )\r\n if i <= td.where:\r\n where.append( ( td.columnNames[ i ][ 0 ], v ) )\r\n ret = DBInt().merge( tuple( values ), tuple( where ), td )\r\n OutputUtils.debug( \"Merged Record: \" + str( values ) )\r\n return ret\r\n\r\n @staticmethod\r\n def getRecordByID( i, td ):\r\n try:\r\n return DBInt().get( ( ( \"ID\", i ), ), td )[ 0 ]\r\n except Exception as e:\r\n OutputUtils.exception( \"No row found\", e )\r\n\r\n @staticmethod\r\n def getAllRecords( td ):\r\n return DBInt().getAll( td )\r\n\r\n @staticmethod\r\n def getColumnHeaders( td ):\r\n ret = []\r\n for i in td.columnNames [:len( td.columnNames ) - 1]:\r\n ret.append( i[ 0 ] )\r\n return ret\r\n\r\n @classmethod\r\n def getTableSetup( cls, td, l ):\r\n return [ cls.getColumnHeaders( td ), cls.getValuesForTable( td, l )]\r\n\r\n @staticmethod\r\n def getValuesForTable( td, l ):\r\n records = TableHandler.getAllRecords( td )\r\n ret = []\r\n for i in records:\r\n record = i[1:len( td.columnNames )]\r\n if not l:\r\n ret.append( record )\r\n else:\r\n add = True\r\n for j in l:\r\n if j in record:\r\n add = False\r\n if add:\r\n ret.append( record )\r\n return ret\r\n\r\n @staticmethod\r\n def getData():\r\n return None\r\n","sub_path":"FileInventory/DataAccess/TableHandler.py","file_name":"TableHandler.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"137859702","text":"from sys import argv\nimport numpy as np\nfrom scipy.stats import entropy as KLdivergence\n\ndef prob_indep(counts, n):\n# counts: (matrix) contingency table\n# n: (int) sum of counts\n\tR = [sum(row) for row in counts]\n\tC = [sum([counts[i][j] for i in range(len(counts))]) for j in range(len(counts[0]))]\n\tprob = []\n\tfor i in range(len(counts)):\n\t\tprob.append([])\n\t\tfor j in range(len(counts[i])):\n\t\t\tprob[-1].append(0)\n\t\t\tprob[i][j] = R[i]*C[j]/n**2\n\treturn prob\n\ndef total(counts):\n# counts: (matrix) contingency table\n\tif len(counts)==0:\n\t\treturn 0\n\treturn sum(counts[0])+total(counts[1:])\n\ndef flatten(M):\n\tL = []\n\tfor row in M:\n\t\tL = L + row\n\treturn np.array(L)\n\ndef printv(str, flag=\"-v\"):\n\tif flag in argv:\n\t\tprint(str)\n\ndef MonteCarlo(KL, prob, n, sample_size=1E6, seed=0):\n\tnp.random.seed(seed)\n\tprintv(\"Sampling from H0...\")\n\tsample = np.random.multinomial(n, prob, int(sample_size))\n\tprintv(\"Calculating KL for samples...\")\n\tKL_sample = np.array([KLdivergence(obs, prob, 2) for obs in sample])\n\tprintv(\"Computing p-value..\")\n\tp = ((KL_sample >= KL).sum()+1)/(int(sample_size)+1)\n\treturn p\n\ndef receive_cont_table(path):\n\tM = []\n\twith open(path, \"r\") as F:\n\t\tfor line in F:\n\t\t\tM.append([int(x) for x in line.strip().split(\"\\t\")])\n\treturn M\n\n# receive input\nprintv(\"Receiving input...\")\ncounts = receive_cont_table(argv[1])\nn = total(counts)\n# put expected (H0) and observed distributions in numpy arrays\nprintv(\"Calculating H0 distribution...\")\nexp = flatten(prob_indep(counts, n))\nobs = flatten(counts)/n\n# remove counts[i][j] where row i and col j are empty\nprintv(\"Removing rows/cols without counts...\")\nmask = np.argwhere(exp>0).flatten()\nexp, obs = exp[mask], obs[mask]\n# calculate KL-divergence and assess significance\nprintv(\"Calculating KL divergence...\")\nKL = KLdivergence(obs, exp, 2)\np = MonteCarlo(KL, exp, n)\n\nprint(p)\nprintv(\"KL=\"+str(KL), flag=\"-kl\")\n","sub_path":"montecarlo.py","file_name":"montecarlo.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"112633282","text":"import urllib\nimport lxml.html as html\nimport io, json\n\nlink = \"https://tv.yandex.ru/213?grid=main&period=all-day\"\npage = html.fromstring(urllib.urlopen(link).read())\nchannels = page.cssselect('.tv-channel')\n\ndata = []\n\nfor row in channels:\n\titem = dict()\n\titem['channel'] = row.cssselect('.tv-channel-title__text')[0].text_content()\n\tstyle = row.cssselect('.b-tv-image__picture')[0].get(\"style\").replace(\"url(\", \"url(http:\")\n\titem['icon'] = \"http://\" + style[style.find(\"avatars\"):-1]\n\tpr = row.cssselect('.tv-channel-events__items')[0]\n\ttime = pr.cssselect('.tv-event__time-text')\n\tprog = pr.cssselect('.tv-event__title-inner')\n\titem['programs'] = []\n\tfor p in range(0, len(time)):\n\t \titem['programs'].append([time[p].text_content(), prog[p].text_content()])\n\tdata.append(item)\n\nwith open('data.json', 'w') as outfile:\n\tjson_string = json.dumps({'channels':data}, indent=4, ensure_ascii = False).encode('utf-8')\n\toutfile.write(json_string)\n\n","sub_path":"TV.app/www/PY/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"56695332","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.linear_model import SGDRegressor\nfrom sklearn.linear_model import ElasticNet\nfrom sklearn.preprocessing import PolynomialFeatures\nimport matplotlib.pyplot as plt\n\n\nX = 2 * np.random.rand(100, 1)\ny = 4 + 3 * X + np.random.randn(100, 1)\n\nX_b = np.c_[np.ones((100, 1)), X]\n\ntheta_best = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y);\n\nprint(\"Calculado usando numpy \")\nprint(theta_best)\n\n# using Scikit-learn\n\n\nprint(\"Calculado usando Scikit Learn Liniear Regression \")\nlin_reg = LinearRegression()\nlin_reg.fit(X, y)\nprint(lin_reg.intercept_, lin_reg.coef_)\n\n\n# previsão e plotagem\n\n\nX_new = np.array([[0], [2]])\nX_new_b = np.c_[np.ones((2, 1)), X_new]\ny_predict = X_new_b.dot(theta_best)\nplt.plot(X_new, y_predict, \"r-\") # linha em vermelho definida por dois pontos baseado já nos coeficientes calculados\nplt.plot(X, y, \"b.\")\nplt.axis([0, 2, 0, 15])\nplt.show()\n\nprint(\"Calculado usando Scikit SGDRegressor \")\nsgd_reg = SGDRegressor(max_iter=50, penalty=None, eta0=0.1)\nsgd_reg.fit(X, y)\nprint(sgd_reg.intercept_, sgd_reg.coef_)\n\n# Regressão Polinomial\nprint(\" Calculando Regressão com função polinomial \")\nm = 100\nX = 6 * np.random.rand(m, 1) -3\ny = 3 * X **2 + 8*X + 5 + np.random.randn(m, 1)\n\npoly_features = PolynomialFeatures(degree=2, include_bias=False)\nX_poly = poly_features.fit_transform(X)\nlin_reg.fit(X_poly, y)\nprint(lin_reg.intercept_, lin_reg.coef_)\n\nplt.plot(X, y, 'b.')\nplt.show()\n\n\n# Elastic Net\nprint(\"Elastic Net\")\n\nX = 2 * np.random.rand(100, 1)\ny = 4 + 3 * X + np.random.randn(100, 1)\n\nelastic_net = ElasticNet(alpha=0.1, l1_ratio=0.5)\nelastic_net.fit(X,y)\nprint(elastic_net.intercept_, elastic_net.coef_)\n\nprint(elastic_net.predict([[1.5]]))\n","sub_path":"TestesIniciais/cp04.py","file_name":"cp04.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"653222737","text":"# experiment to test the relationship between accuracy and\n# number of choices in the puzzle\n\nimport csv\n\n# Ipmort the model and the generator\nfrom model.trainer import Trainer\nfrom model.generator import Generator\n\n\n# Define experiment parameters\nBASE_SIZES = [16]\nTRAINING_SIZES = [500000]\nALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmno'\nNUM_CHOICES = [2,3,4,5,6,7,8]\n\ndef train_model(base, training_size, length=2, choice=5, epochs=2000, batch_size=1000, dimension = 100, testing_size=100):\n # initialise generator\n alphabet = ALPHABET[:base]\n g = Generator(alphabet, length, choice)\n\n # generate data\n train_data = g.generate_data(training_size)\n test_data = g.generate_data(testing_size)\n\n # initialise model\n trainer = Trainer(train_data, test_data, epochs, dimension)\n\n # run model on generated data\n model = trainer.batch_train()\n\n # evaluate the model on training and testing data\n train_acc = trainer.evaluate(model, train_data[:200])\n test_acc = trainer.evaluate(model, test_data)\n\n # return the choice and the accuracy\n return (choice, train_acc, test_acc)\n\ndef run_experiment():\n print('experimenting with base size')\n # iterate through training sizes\n for training_size in TRAINING_SIZES:\n results = []\n # iterate through all base sizes\n for base_size in BASE_SIZES:\n # iterate through all choice\n for choice in NUM_CHOICES:\n result = train_model(base=base_size, training_size = training_size, choice=choice)\n # Add result to results list\n results.append(result)\n # Save the list as a csv\n with open(\"results/num_choices_experiment/num_choices_experiment.csv\", \"w\") as f:\n writer = csv.writer(f)\n writer.writerows(results)\n\n# do not run when imported\nif __name__ == '__main__':\n run_experiment()\n","sub_path":"experiments/num_choices_experiment.py","file_name":"num_choices_experiment.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"474699862","text":"#\n# [64] Minimum Path Sum\n#\n# https://leetcode.com/problems/minimum-path-sum/description/\n#\n# algorithms\n# Medium (41.29%)\n# Total Accepted: 153.7K\n# Total Submissions: 371.7K\n# Testcase Example: '[[1,3,1],[1,5,1],[4,2,1]]'\n#\n# Given a m x n grid filled with non-negative numbers, find a path from top\n# left to bottom right which minimizes the sum of all numbers along its path.\n#\n# Note: You can only move either down or right at any point in time.\n#\n# Example:\n#\n#\n# Input:\n# [\n# [1,3,1],\n# [1,5,1],\n# [4,2,1]\n# ]\n# Output: 7\n# Explanation: Because the path 1→3→1→1→1 minimizes the sum.\n#\n\n\nclass Solution:\n def minPathSum(self, grid):\n \"\"\"\n :type grid: List[List[int]]\n :rtype: int\n \"\"\"\n if len(grid) == 0:\n return 0\n m, n = len(grid), len(grid[0])\n dp = [[0 for x in range(n)] for y in range(2)]\n for i in range(m):\n for j in range(n):\n c, d = i & 1, (i+1) & 1\n dp[c][j] = grid[i][j]\n if i >= 1 and j >= 1:\n dp[c][j] += min(dp[d][j], dp[c][j-1])\n elif i >= 1:\n dp[c][j] += dp[d][j]\n elif j >= 1:\n dp[c][j] += dp[c][j-1]\n return dp[(m-1) & 1][n-1]\n","sub_path":"64.minimum-path-sum.python3.py","file_name":"64.minimum-path-sum.python3.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"217666969","text":"import os\nimport sys\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + \"/../python\")\n\nimport pytest\nfrom unittest.mock import MagicMock, call\nimport random\nimport python.infotiv_launcher\nfrom python.infotiv_launcher import LaunchCMD\n\n\n@pytest.fixture()\ndef launcher():\n print('\\n*********Start*********')\n launcher = python.infotiv_launcher.Launcher()\n launcher.rc = MagicMock()\n yield launcher\n print('\\n**********End**********')\n\n# ---------------------------------------------------------------------------------\n# ------------------------ set_launch_position ------------------------------------\n# ---------------------------------------------------------------------------------\n\ndef test_encoders_ready_check_encoders_not_ready(launcher):\n # GIVEN\n launcher.encoders_ready = 0\n\n # WHEN\n return_value = launcher.encoder_ready_check()\n\n # THEN\n assert return_value == 0\n\n\ndef test_set_launch_position_encoders_not_ready(launcher):\n # GIVEN\n with pytest.raises(Exception) as err:\n launcher.encoders_ready = 0\n # WHEN\n launcher.set_launch_position(1)\n # THEN\n assert err.match('Encoder Not Ready')\n\n\n@pytest.mark.parametrize(\"invalid_data\", [(-1), 112])\ndef test_set_launch_position_encoders_ready_launch_position_invalid_type_of_error(launcher, invalid_data):\n # GIVEN\n with pytest.raises(Exception, match='out of bounds') as err:\n launcher.encoders_ready = 1\n # WHEN\n launcher.set_launch_position(invalid_data)\n # THEN\n assert err.type is ValueError\n\n\n@pytest.mark.parametrize(\"invalid_data\", [(-1), 112])\ndef test_set_launch_position_encoders_ready_launch_position_invalid_message(launcher, invalid_data):\n # GIVEN\n with pytest.raises(ValueError) as err:\n launcher.encoders_ready = 1\n # WHEN\n launcher.set_launch_position(invalid_data)\n # THEN\n err.match('out of bounds')\n\n\ndef test_set_launch_position_encoders_ready_launch_position_zero(launcher):\n # GIVEN\n launcher.encoders_ready = 1\n\n launcher.rc.ReadEncM2.return_value = (1, 2, 2)\n launcher.rc.ReadBuffers.return_value = (0, 0, 0x80)\n\n # WHEN\n launcher.set_launch_position(0)\n\n # THEN\n calls = [call(129, -2500, 2, 1),\n call(129, 0, 0, 0),\n call(129, 2500, 2188, 0),\n call(129, 0, 0, 0)]\n launcher.rc.SpeedDistanceM2.assert_has_calls(calls)\n assert launcher.rc.SpeedDistanceM2.call_count == 4\n\n\ndef test_set_launch_position_encoders_ready_launch_position_higher_than_zero(launcher):\n # GIVEN\n launcher.encoders_ready = 1\n\n launcher.rc.ReadEncM2.return_value = (1, 4, 2) # launch_actual = 4 , launch_increment = -4\n launcher.rc.ReadBuffers.return_value = (0, 0, 0x80)\n\n # WHEN\n launcher.set_launch_position(1)\n\n # THEN\n\n calls = [call(129, -2500, 4, 1),\n call(129, 0, 0, 0),\n call(129, 2500, 2319, 0),\n call(129, 0, 0, 0)]\n launcher.rc.SpeedDistanceM2.assert_has_calls(calls)\n assert launcher.rc.SpeedDistanceM2.call_count == 4\n\n\ndef test_set_launch_position_encoders_ready_launch_position_max(launcher):\n # GIVEN\n launcher.encoders_ready = 1\n\n launcher.rc.ReadEncM2.return_value = (1, -1.5, 2)\n launcher.rc.ReadBuffers.return_value = (0, 0, 0x80)\n\n # WHEN\n launcher.set_launch_position(111)\n\n # THEN\n\n calls = [call(129, 2500, 1.5, 1),\n call(129, 0, 0, 0),\n call(129, 2500, 16991.5, 0),\n call(129, 0, 0, 0)]\n launcher.rc.SpeedDistanceM2.assert_has_calls(calls)\n assert launcher.rc.SpeedDistanceM2.call_count == 4\n\n\ndef test_stop(launcher):\n # GIVEN\n launcher.rc.ForwardM1 = 
MagicMock(return_value=True)\n launcher.rc.ForwardM2 = MagicMock(return_value=True)\n\n # WHEN\n launcher.stop()\n\n # THEN\n launcher.rc.ForwardM1.assert_any_call(launcher.address, 0)\n launcher.rc.ForwardM2.assert_any_call(launcher.address, 0)\n launcher.rc.ForwardM1.assert_any_call(launcher.address_2,0)\n launcher.rc.ForwardM2.assert_any_call(launcher.address_2, 0)\n\n\ndef test_max_pitch_zero_increment(launcher):\n # GIVEN\n launcher.encoders_ready = 1\n\n launcher.rc.ReadEncM1.return_value = (1, 355000) # pitch increment = 0\n\n # WHEN\n launcher.max_pitch()\n\n # THEN\n\n calls = [call(launcher.address, launcher.pitch_speed_pulses, 0, 1),\n call(launcher.address, 0, 0, 0)]\n launcher.rc.SpeedDistanceM1.assert_has_calls(calls)\n assert launcher.rc.SpeedDistanceM1.call_count == 2\n\n\ndef test_max_pitch_higher_than_zero_increment(launcher):\n # GIVEN\n launcher.encoders_ready = 1\n\n launcher.rc.ReadEncM1.return_value = (1, 2) # pitch increment = 354998\n\n # WHEN\n launcher.max_pitch()\n\n # THEN\n\n calls = [call(launcher.address, launcher.pitch_speed_pulses, 354998, 1),\n call(launcher.address, 0, 0, 0)]\n launcher.rc.SpeedDistanceM1.assert_has_calls(calls)\n assert launcher.rc.SpeedDistanceM1.call_count == 2\n\n\ndef test_max_pitch_lower_than_zero_increment(launcher):\n # GIVEN\n launcher.encoders_ready = 1\n\n launcher.rc.ReadEncM1.return_value = (1, 355020) # pitch increment = -20\n\n # WHEN\n launcher.max_pitch()\n\n # THEN\n\n calls = [call(launcher.address, -launcher.pitch_speed_pulses, 20, 1),\n call(launcher.address, 0, 0, 0)]\n launcher.rc.SpeedDistanceM1.assert_has_calls(calls)\n assert launcher.rc.SpeedDistanceM1.call_count == 2\n\n\n@pytest.mark.parametrize(\"test_input, expected\", [(12345.6789, 1234.57), (10, 1), (123, 12.3), (0, 0), (-10, -1)])\ndef test_battery_voltage_decimal_value(launcher, test_input, expected):\n # GIVEN\n launcher.rc.ReadMainBatteryVoltage.return_value = (128, test_input)\n # WHEN\n return_value = launcher.battery_voltage()\n # THEN\n assert return_value == expected\n\n# ---------------------------------------------------------------------------------\n# ------------------------ set_launch_variables -----------------------------------\n# ---------------------------------------------------------------------------------\n\ndef test_set_launch_variables_valid_positions_called(launcher):\n # GIVEN\n launcher.change_pitch = MagicMock()\n launcher.change_rotation = MagicMock()\n launcher.change_lift = MagicMock()\n\n # WHEN\n pitch_position = random.randint(0, launcher.pitch_length)\n rotation_position = random.randint(0, launcher.rotation_length)\n lift_position = random.randint(0, launcher.lift_length)\n launcher.set_launch_variables(pitch_position, rotation_position, lift_position)\n\n # THEN\n launcher.change_pitch.assert_called_with(pitch_position)\n launcher.change_rotation.assert_called_with(rotation_position)\n launcher.change_lift.assert_called_with(lift_position)\n\n\n# ---------------------------------------------------------------------------------\n# ------------------------ launch_control------------------------------------------\n# ---------------------------------------------------------------------------------\ndef test_launch_control_LaunchCMD_up(launcher):\n # GIVEN\n launcher.encoders_ready = 1\n\n # WHEN\n launcher.launch_control(LaunchCMD(1))\n\n # THEN\n launcher.rc.ForwardM2.assert_called_with(launcher.address_2, launcher.launch_speed_manual)\n launcher.rc.BackwardM2.assert_not_called()\n\n\ndef 
test_launch_control_LaunchCMD_down(launcher):\n # GIVEN\n launcher.encoders_ready = 1\n\n # WHEN\n launcher.rc.BackwardM2.return_value = True\n launcher.launch_control(LaunchCMD(2))\n\n # THEN\n launcher.rc.BackwardM2.assert_called_with(launcher.address_2, launcher.launch_speed_manual)\n launcher.rc.ForwardM2.assert_not_called()\n\n\ndef test_launch_control_LaunchCMD_stop(launcher):\n # GIVEN\n launcher.encoders_ready = 1\n\n # WHEN\n launcher.rc.ForwardM2.return_value = True\n launcher.launch_control(LaunchCMD(3))\n\n # THEN\n launcher.rc.ForwardM2.assert_called_with(launcher.address_2, 0)\n launcher.rc.BackwardM2.assert_not_called()\n","sub_path":"test/infotiv_launcher/test_launch_functions.py","file_name":"test_launch_functions.py","file_ext":"py","file_size_in_byte":7998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"61031067","text":"from sierra.base_parameters import BaseParameter\nfrom datetime import datetime, timedelta\nimport numpy as np\nfrom sierra.utilities.converter import convert\nimport math\n\n\nclass Millerton_Lake_Flood_Release_Requirement(BaseParameter):\n\n should_drawdown = None\n\n def setup(self):\n super().setup()\n num_scenarios = len(self.model.scenarios.combinations)\n self.should_drawdown = np.empty(num_scenarios, np.bool)\n\n def _value(self, timestep, scenario_index):\n\n if self.model.mode == 'planning':\n return 0\n\n sid = scenario_index.global_id\n\n # Note: the following logic follows the U.S. Army Corps of Engineers 1980 Water Control Manual for Friant Dam\n\n month = self.datetime.month\n day = self.datetime.day\n month_day = '{}-{}'.format(month, day)\n\n # Get flood curve\n flood_curves = self.model.tables[\"Millerton Lake flood curve\"]\n rainflood_curve_mcm = flood_curves.at[month_day, 'rainflood']\n conditional_curve_mcm = flood_curves.at[month_day, 'conditional']\n\n # Get previous storage\n NML = self.model.nodes[\"Millerton Lake\"]\n millerton_storage_mcm = NML.volume[scenario_index.global_id]\n\n # Load base ag demand info\n WYT = self.get('San Joaquin Valley WYT' + self.month_suffix, timestep, scenario_index)\n Madera_df = self.model.tables[\"CVP Madera Canal demand\"][WYT]\n Friant_Kern_df = self.model.tables[\"CVP Friant-Kern Canal demand\"][WYT]\n\n # Set the default release of zero\n release_mcm = 0.0\n\n # 1. Conservation space release\n if millerton_storage_mcm < conditional_curve_mcm:\n pass\n\n # 2. Rainflood space release\n max_storage = NML.get_max_volume(scenario_index)\n above_85_taf_mcm = max_storage - rainflood_curve_mcm - 104.85\n if above_85_taf_mcm > 0.0 and (month >= 10 or month <= 3): # 85 TAF\n MPL = self.model.nodes[\"Mammoth Pool Reservoir\"]\n mammoth_pool_space_mcm = MPL.max_volume - MPL.volume[sid]\n rainflood_curve_mcm += min(above_85_taf_mcm, mammoth_pool_space_mcm)\n\n if millerton_storage_mcm >= rainflood_curve_mcm:\n release_mcm = millerton_storage_mcm - rainflood_curve_mcm\n\n # 3. Conditional space release\n elif 2 <= month <= 7:\n # Note: Here, we are calculating forecasts directly as able, rather than using the USACE manual diagram.\n\n # 3.1. Calculate forecasted unimpaired runoff into Millerton Lake, through July 31.\n # For now, assume perfect forecast.\n # TODO: update to use imperfect forecast?\n fnf_start = timestep.datetime\n fnf_end = datetime(timestep.year, 7, 31)\n forecasted_inflow_mcm = self.model.parameters[\"Full Natural Flow\"].dataframe[fnf_start:fnf_end].sum()\n\n # 3.2. Calculate today's and forecasted irrigation demand.\n forecast_days = 14\n ag_start = (month, day)\n if (month, day) <= (5, 31):\n ag_end = (6, 15)\n else:\n # min of +15 days (1-15 = today + 14 days)\n ag_end_date = timestep.datetime + timedelta(days=forecast_days)\n ag_end = min((ag_end_date.month, ag_end_date.day), (8, 1))\n\n # today_ag_demand = Madera_df[ag_start] + Friant_Kern_df[ag_start]\n\n # option 1: use actual forecasted demand\n # madera_forcasted_demand_mcm = Madera_df[ag_start:ag_end].sum() / 35.315 * 0.0864\n\n # option 2: use Madera canal capacity (i.e., assume we can release at capacity)\n madera_fcst_dem_mcm = self.model.nodes['CVP Madera Canal'].max_flow * forecast_days\n\n friant_kern_fcst_dem_mcm = Friant_Kern_df[ag_start:ag_end].sum() / 35.315 * 0.0864\n forecasted_ag_demand_mcm = madera_fcst_dem_mcm + friant_kern_fcst_dem_mcm\n\n # 3.3. 
Calculate total space required for flood control\n # slope from flood control chart = 1 / 1.6\n total_space_required_mcm = forecasted_inflow_mcm * 0.625 - forecasted_ag_demand_mcm\n\n # 3.4. Calculate upstream space, adjusted\n\n # 3.4.1. Get total previous storage in upstream reservoirs\n upstream_storage_space_mcm = 0.0\n for node in self.model.nodes:\n # TODO: make this more efficient\n if hasattr(node, 'volume') and node.name != 'Millerton Lake':\n upstream_storage_space_mcm += node.volume[scenario_index.global_id]\n\n # 3.4.2. Calculate adjustment to storage space\n # Note: this is approximated from the upper right of the Flood Control Diagram (Fig. A-11)\n days_since_feb1 = (timestep.datetime - datetime(timestep.year, 2, 1)).days\n adjustment_to_upstream_space_taf = 100 - 3.1623e-9 * math.exp(0.13284 * days_since_feb1)\n adjustment_to_upstream_space_mcm = adjustment_to_upstream_space_taf * 1.2335\n\n # 3.4.3. Subtract adjustment from upstream space\n adjusted_upstream_storage_space_mcm = upstream_storage_space_mcm - adjustment_to_upstream_space_mcm\n\n # 3.5. Calculate conditional reservation required\n # Note: It does not appear that this is actually used in the Flood Control Diagram\n conditional_space_required_mcm = total_space_required_mcm - adjusted_upstream_storage_space_mcm\n\n # 3.6. Compute total space available for flood control\n millerton_storage_space_mcm = NML.max_volume - millerton_storage_mcm\n total_space_available_mcm = millerton_storage_space_mcm + adjusted_upstream_storage_space_mcm\n # total_space_available_mcm = millerton_storage_space_mcm\n\n # 3.7. Finally, compute the supplemental release\n # Note that the goal is to spread the release out over time\n storage_difference_mcm = max(conditional_space_required_mcm - total_space_available_mcm, 0.0)\n\n # if storage_difference_mcm > 0.0:\n # print('{}: conditional; release: {} taf'.format(timestep.datetime, storage_difference_mcm / 1.2335))\n\n # if (month, day) <= (5, 5):\n #\n #\n # elif (month, day) <= (6, 5):\n #\n #\n # elif (month, day) <= (6, 30):\n #\n #\n # else:\n\n supplemental_release_mcm = storage_difference_mcm\n\n # 3.8. Calculate total release\n # Note that this differs from the example in the USACE manual, since we are only calculating instream\n # release here. In the manual, \"total release\" is instream release + ag. 
release\n release_mcm = max(release_mcm, supplemental_release_mcm)\n\n # This is our overall target release, without accounting for max downstream releases\n release_mcm = float(max(release_mcm, 0.0))\n\n # Assume Madera Canal can absorb some flood control capacity\n # Note that we cannot calculate Madera demand from the demand node/parameter, since that node depends on this.\n if release_mcm > 0.0:\n madera_canal_cfs = Madera_df[(month, day)]\n madera_canal_mcm = madera_canal_cfs / 35.315 * 0.0864\n madera_canal_max_mcm = self.model.nodes[\"Madera Canal.1\"].max_flow\n adjusted_release_mcm = release_mcm - (madera_canal_max_mcm - madera_canal_mcm)\n release_mcm = max(adjusted_release_mcm, 0.0)\n\n # ...reduce to limit flow to <= 8000 cfs (19.57 mcm)\n little_dry_creek_max_mcm = 19.57\n release_mcm = min(release_mcm, little_dry_creek_max_mcm)\n\n # DRAWDOWN OPERATIONS\n\n # Release slowly to a target storage of 350 TAF by Oct 31.\n\n if (7, 1) <= (month, day) <= (10, 31):\n nov1_target = 431.725 # 350 TAF\n\n # Stop this if we've hit the target\n if millerton_storage_mcm < nov1_target:\n self.should_drawdown[sid] = False\n\n # Check if New Melones filled\n if millerton_storage_mcm > nov1_target and not self.should_drawdown[sid]:\n day_before_yesterday = self.datetime + timedelta(days=-2)\n millerton_storage_df = self.model.recorders[\"Millerton Lake/storage\"].to_dataframe()\n prev_millerton_storage_mcm = millerton_storage_df.loc[day_before_yesterday].values[sid]\n if millerton_storage_mcm <= prev_millerton_storage_mcm:\n self.should_drawdown[sid] = True\n\n if self.should_drawdown[sid]:\n drawdown_release_mcm = millerton_storage_mcm - nov1_target\n # drawdown_release_mcm = millerton_storage_mcm - nov1_target\n prev_inflow_mcm = 0.0\n for node in ['Kerckhoff 1 PH', 'Kerckhoff 2 PH', 'IFR bl Kerckhoff Lake', 'Millerton Lake Inflow']:\n prev_inflow_mcm += self.model.nodes[node].prev_flow[sid]\n\n drawdown_release_mcm += prev_inflow_mcm\n\n drawdown_days = (datetime(timestep.year, 11, 1) - timestep.datetime).days\n release_mcm = drawdown_release_mcm / drawdown_days\n # release_mcm = max(release_mcm, drawdown_release_mcm)\n\n # Let's also limit ramping (for both instream flow and reservoir management reasons)\n prev_release_mcm = self.model.nodes[\"Millerton Lake Flood Release\"].prev_flow[scenario_index.global_id]\n # if (month, day) == (7, 1):\n # release_mcm = min(release_mcm, 3)\n if release_mcm > prev_release_mcm:\n release_mcm = min(release_mcm, prev_release_mcm * 1.2)\n elif release_mcm < prev_release_mcm:\n release_mcm = max(release_mcm, prev_release_mcm * 0.8)\n\n else:\n self.should_drawdown[sid] = False\n\n # if self.res_name == 'Millerton Flood Release':\n # release_mcm -= self.model.nodes['CVP Madera Canal'].max_flow\n # release_mcm = max(release_mcm, 0.0)\n\n release_cms = release_mcm / 0.0864\n\n if millerton_storage_mcm < 250:\n release_cms *= 0.5\n\n return release_cms\n\n def value(self, *args, **kwargs):\n try:\n val = self._value(*args, **kwargs)\n return convert(val, \"m^3 s^-1\", \"m^3 day^-1\", scale_in=1, scale_out=1000000.0)\n except Exception as err:\n print('\\nERROR for parameter {}'.format(self.name))\n print('File where error occurred: {}'.format(__file__))\n print(err)\n raise\n\n @classmethod\n def load(cls, model, data):\n return cls(model, 
**data)\n\n\nMillerton_Lake_Flood_Release_Requirement.register()\n","sub_path":"sierra/models/upper_san_joaquin/_parameters/Millerton_Lake_Flood_Release_Requirement.py","file_name":"Millerton_Lake_Flood_Release_Requirement.py","file_ext":"py","file_size_in_byte":10745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"185735617","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport os\nimport math\n\n\n#________________________________\n#matlab memoria\n\npath = '../results/windows/matlab/'\n#files = os.listdir(path)\n#print(files)\nfiles = ['ex15', 'cfd1', 'shallowwater', 'cfd2', 'parabolicfem', 'apache2', 'G3circuit']#, 'stoc-f']\n#files = ['ex15']\nresult_matlab_win = [0] * files.__len__()\nresult_matlab_ubuntu = [0] * files.__len__()\nj = 0\nfor name in files:\n perc = path + name + '.mat.txt'\n df = pd.read_table(perc, delim_whitespace=True, skiprows=1, header=None)\n df.columns = ['Elapsed time', 'CPU', 'Real', 'Virtual']\n #print(df)\n mat = [name, df['Virtual'].max(), df['Virtual'][0]]\n result_matlab_win[j] = mat\n\n #print(df['Virtual'].max())\n j = j + 1\n\npath = '../results/linux/matlab/'\nj = 0\nfor name in files:\n perc = path + name + '.mat.txt'\n df = pd.read_table(perc, delim_whitespace=True, skiprows=1, header=None)\n df.columns = ['Elapsed time', 'CPU', 'Real', 'Virtual']\n #print(df)\n mat = [name, df['Virtual'].max(), df['Virtual'][0]]\n result_matlab_ubuntu[j] = mat\n\n #print(df['Virtual'].max())\n j = j + 1\n\n# print(result_matlab)\n# print(result_python)\n\n#________________________________\n#python memoria\n\npath = '../results/windows/python/'\n#files = os.listdir(path)\n#print(files)\nfiles = ['ex15', 'cfd1', 'shallowwater', 'cfd2', 'parabolicfem', 'apache2', 'G3circuit']#, 'stoc-f']\n#files = ['ex15']\nresult_python_win = [0] * files.__len__()\nresult_python_ubuntu = [0] * files.__len__()\nj = 0\nfor name in files:\n perc = path + name + '.mat.txt'\n df = pd.read_table(perc, delim_whitespace=True, skiprows=1, header=None)\n df.columns = ['Elapsed time', 'CPU', 'Real', 'Virtual']\n #print(df)\n mat = [name, df['Virtual'].max(), df['Virtual'][0]]\n result_python_win[j] = mat\n\n #print(df['Virtual'].max())\n j = j + 1\n\npath = '../results/linux/python/'\nj = 0\nfor name in files:\n perc = path + name + '.mat.txt'\n df = pd.read_table(perc, delim_whitespace=True, skiprows=1, header=None)\n df.columns = ['Elapsed time', 'CPU', 'Real', 'Virtual']\n #print(df)\n mat = [name, df['Virtual'].max(), df['Virtual'][0]]\n result_python_ubuntu[j] = mat\n\n #print(df['Virtual'].max())\n j = j + 1\n\n#________________________________\n#R memoria\n\npath = '../results/windows/r/'\n#files = os.listdir(path)\n#print(files)\nfiles_r_win = ['ex15', 'cfd1', 'shallowwater', 'cfd2', 'parabolicfem']#, 'stoc-f']\n#files = ['ex15']\nresult_r_win = [0] * files_r_win.__len__()\n\nj = 0\nfor name in files_r_win:\n perc = path + name + '.mtx.txt'\n df = pd.read_table(perc, delim_whitespace=True, skiprows=1, header=None)\n df.columns = ['Elapsed time', 'CPU', 'Real', 'Virtual']\n #print(df)\n mat = [name, df['Virtual'].max(), df['Virtual'][0]]\n result_r_win[j] = mat\n\n #print(df['Virtual'].max())\n j = j + 1\n\nfiles_r_ubuntu = ['ex15', 'cfd1', 'shallowwater', 'cfd2', 'parabolicfem', 'apache2']#, 'stoc-f']\nresult_r_ubuntu = [0] * files_r_ubuntu.__len__()\npath = '../results/linux/r/'\nj = 0\nfor name in files_r_ubuntu:\n perc = path + name + '.mtx.txt'\n df = pd.read_table(perc, delim_whitespace=True, skiprows=1, header=None)\n df.columns = ['Elapsed time', 'CPU', 'Real', 'Virtual']\n #print(df)\n mat = [name, df['Virtual'].max(), df['Virtual'][0]]\n result_r_ubuntu[j] = mat\n\n #print(df['Virtual'].max())\n j = j + 1\n\n\n\n# Create bars matlab\nbarWidth = 0.20\nmem_bef_matlab_win = [0] * files.__len__()\nmem_after_matlab_win = [0] * 
files.__len__()\nmem_bef_matlab_ubuntu = [0] * files.__len__()\nmem_after_matlab_ubuntu = [0] * files.__len__()\n\nfor i in range(files.__len__()):\n mem_bef_matlab_win[i] = result_matlab_win[i][2]\n mem_after_matlab_win[i] = result_matlab_win[i][1]\n\n mem_bef_matlab_ubuntu[i] = result_matlab_ubuntu[i][2]\n mem_after_matlab_ubuntu[i] = result_matlab_ubuntu[i][1]\n\n# Create bars python\nbarWidth = 0.20\nmem_bef_python_win = [0] * files.__len__()\nmem_after_python_win = [0] * files.__len__()\nmem_bef_python_ubuntu = [0] * files.__len__()\nmem_after_python_ubuntu = [0] * files.__len__()\n\nfor i in range(files.__len__()):\n mem_bef_python_win[i] = result_python_win[i][2]\n mem_after_python_win[i] = result_python_win[i][1]\n\n mem_bef_python_ubuntu[i] = result_python_ubuntu[i][2]\n mem_after_python_ubuntu[i] = result_python_ubuntu[i][1]\n\n# Create bars r\nbarWidth = 0.15\nmem_bef_r_win = [0] * files.__len__()\nmem_after_r_win = [0] * files.__len__()\nmem_bef_r_ubuntu = [0] * files.__len__()\nmem_after_r_ubuntu = [0] * files.__len__()\n\nfor i in range(files_r_win.__len__()):\n mem_bef_r_win[i] = result_r_win[i][2]\n mem_after_r_win[i] = result_r_win[i][1]\n\nfor i in range(files_r_ubuntu.__len__()):\n mem_bef_r_ubuntu[i] = result_r_ubuntu[i][2]\n mem_after_r_ubuntu[i] = result_r_ubuntu[i][1]\n\n\n# position bars\nn = max(len(mem_bef_r_ubuntu),len(mem_after_r_ubuntu),len(mem_bef_r_win),len(mem_after_r_win))\npos = np.arange(n)\n\nbar1 = plt.bar(pos, mem_bef_r_ubuntu, width = barWidth, color = 'r', label='R before ubuntu')\nbar2 = plt.bar(pos+barWidth, mem_after_r_ubuntu, width = barWidth, color = 'b', label='R after ubuntu')\n\nbar3 = plt.bar(pos+barWidth+barWidth, mem_bef_python_ubuntu, width = barWidth, color = 'g', label='Python before ubuntu')\nbar4 = plt.bar(pos+barWidth+barWidth+barWidth, mem_after_python_ubuntu, width = barWidth, color = 'y', label='Python after ubuntu')\n\nbar5 = plt.bar(pos+barWidth * 4, mem_bef_matlab_ubuntu, width = barWidth, color = 'purple', label='Matlab before ubuntu')\nbar6 = plt.bar(pos+barWidth * 5, mem_after_matlab_ubuntu, width = barWidth, color = 'darkslategrey', label='Matlab after ubuntu')\n\nplt.legend(ncol=4,loc='upper left')\n\nplt.xticks([m + barWidth * 2 + barWidth/2 for m in range(len(mem_bef_r_ubuntu))], ['ex15', 'cfd1', 'shallow_water', 'cfd2', 'parabolic_fem', 'apache2', 'G3_circuit'])\nplt.yscale('log')\n\nfor rect in bar1 + bar2 + bar3 + bar4 + bar5 + bar6:\n height = rect.get_height()\n plt.text(rect.get_x() + rect.get_width()/2.0, height, '%d' % int(height), ha='center', va='bottom')\n'''\nfor i in range(len(pos)):\n plt.text(x = pos[i] - 0.015, y = mem_bef_matlab_win[i], s = round(mem_bef_matlab_win[i],0), size = 7)\n plt.text(x = pos[i] + barWidth - 0.015, y = mem_after_matlab_win[i], s = round(mem_after_matlab_win[i],0), size = 7)\n plt.text(x = pos[i] + barWidth + barWidth - 0.015, y = mem_bef_matlab_ubuntu[i], s = round(mem_bef_matlab_ubuntu[i],0), size = 7)\n plt.text(x = pos[i] + barWidth + barWidth + barWidth - 0.015, y = mem_after_matlab_ubuntu[i], s = round(mem_after_matlab_ubuntu[i],0), size = 7)\n'''\n'''\n plt.text(x = pos[i] - 0.015, y = mem_bef_matlab_win[i] + alfa * mem_bef_matlab_win[i], s = round(mem_bef_matlab_win[i],2), size = 7, rotation=90)\n plt.text(x = pos[i] + barWidth - 0.015, y = mem_after_matlab_win[i] + alfa * mem_after_matlab_win[i], s = round(mem_after_matlab_win[i],2), size = 7, rotation=90)\n plt.text(x = pos[i] + barWidth + barWidth - 0.015, y = mem_bef_matlab_ubuntu[i] + alfa * mem_bef_matlab_ubuntu[i], 
s = round(mem_bef_matlab_ubuntu[i],2), size = 7, rotation=90)\n plt.text(x = pos[i] + barWidth + barWidth + barWidth - 0.015, y = mem_after_matlab_ubuntu[i] + alfa * mem_after_matlab_ubuntu[i], s = round(mem_after_matlab_ubuntu[i],2), size = 7, rotation=90)\n'''\n#plt.title('Python Ubuntu vs Python Windows with UMF_PACK FALSE')\nplt.ylabel('Memory')\nplt.xlabel('Matrix Name')\n\nfig = plt.gcf()\nfig.set_size_inches(18.5, 10.5)\n\n#plt.show()\nfig.savefig('../../Grafici/' + os.path.basename(__file__) + '.png', dpi=1000)\n","sub_path":"plot/5.3_Matlab vs python vs R ubuntu memoria.py","file_name":"5.3_Matlab vs python vs R ubuntu memoria.py","file_ext":"py","file_size_in_byte":7588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"292696093","text":"from fractions import Fraction\nfrom wick.expression import AExpression\nfrom wick.wick import apply_wick\nfrom wick.convenience import one_e, two_e, E1, E2, braE1, commute\n\nH1 = one_e(\"f\", [\"occ\", \"vir\"], norder=True)\nH2 = two_e(\"I\", [\"occ\", \"vir\"], norder=True)\nH = H1 + H2\n\nbra = braE1(\"occ\", \"vir\")\nT1 = E1(\"t\", [\"occ\"], [\"vir\"])\nT2 = E2(\"t\", [\"occ\"], [\"vir\"])\nT = T1 + T2\n\nHT = commute(H, T)\nHTT = commute(HT, T)\nHTTT = commute(commute(commute(H2, T1), T1), T1)\n\nS = bra*(H + HT + Fraction('1/2')*HTT + Fraction('1/6')*HTTT)\nout = apply_wick(S)\nout.resolve()\nfinal = AExpression(Ex=out)\nprint(final)\n","sub_path":"examples/ccsd_T1.py","file_name":"ccsd_T1.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"546735396","text":"from bs4 import BeautifulSoup\r\nfrom lxml import html\r\nimport requests\r\nfrom textblob import TextBlob\r\nimport urllib, json\r\n\r\n# Global Variables\r\n# list of results\r\nresults = []\r\n# which search methods are available\r\nsearchA = True\r\nsearchB = True\r\nsearchC = True\r\nsearchD = True\r\n\r\n\r\n# Generic google search\r\n# Basic search results\r\n# unstable / unpredictable / numerous\r\ndef searchMethodA(topic):\r\n global results\r\n results = []\r\n USER_AGENT = {\r\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}\r\n response = requests.get('https://www.google.com/search?q={}&num={}&hl={}'.format(topic, 100, 'en'),\r\n headers=USER_AGENT)\r\n response.raise_for_status()\r\n soup = BeautifulSoup(response.text, 'html.parser')\r\n page = soup.find_all('div', attrs={'class': 'g'})\r\n for a in page:\r\n source = a.find('a', href=True)\r\n name = a.find('h3')\r\n info = a.find('span', attrs={'class': 'st'})\r\n if source and name:\r\n source = source['href']\r\n title = name.get_text()\r\n if info:\r\n info = info.get_text()\r\n if source != '#':\r\n results.append({topic, name, info})\r\n\r\n\r\n# Google search\r\n# Collects related titles at top of search\r\n# Sometimes works well depending on input\r\n# unstable / unreliable / inconsistent\r\ndef searchMethodB(topic):\r\n USER_AGENT = {\r\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}\r\n response = requests.get('https://www.google.com/search?q={}&num={}&hl={}'.format(topic, 100, 'en'),\r\n headers=USER_AGENT)\r\n response.raise_for_status()\r\n soup = BeautifulSoup(response.text, 'html.parser')\r\n scrape = soup.find_all('div', attrs={'class': 'kltat'})\r\n for a in scrape:\r\n results.append(a.find('span'))\r\n\r\n\r\n# Database of popular short crossword puzzle words based on year\r\n# Results available for years 1942 - 2020\r\n# First test search method\r\n# Intended as a extra side search method\r\n# stable / reliable / limited\r\n# https://www.xwordinfo.com/popular\r\ndef searchMethodC(year):\r\n global results\r\n if not (1942 <= int(year) <= 2020):\r\n print() # do nothing\r\n else:\r\n page = requests.get(\"https://www.xwordinfo.com/Popular?year=\" + str(year))\r\n soup = BeautifulSoup(page.content, 'html.parser')\r\n table = soup.table\r\n tableRows = table.find_all('tr')\r\n for row in tableRows:\r\n t = row.find_all('td')\r\n info = [i.text for i in t]\r\n results.append(info)\r\n\r\n\r\ndef organizeMethodDOutput():\r\n output_File = open(\"output.txt\", \"r\")\r\n toBeCleaned = output_File.read()\r\n output_File.close()\r\n uncleanList = list(toBeCleaned)\r\n output_File = open(\"output.txt\", \"w\")\r\n preWordCounter = 0\r\n lineCounter = 0\r\n pre = list('\"word\":\"')\r\n cur = ''\r\n for c in uncleanList:\r\n if preWordCounter >= 8 and uncleanList[lineCounter] != '\"':\r\n cur = cur + uncleanList[lineCounter]\r\n elif preWordCounter >= 8 and uncleanList[lineCounter] == '\"':\r\n results.append(cur)\r\n output_File.write(cur + '\\n')\r\n cur = ''\r\n preWordCounter = 0\r\n elif pre[preWordCounter] == uncleanList[lineCounter]:\r\n preWordCounter = preWordCounter + 1\r\n lineCounter = lineCounter + 1\r\n output_File.close()\r\n\r\n\r\n# Search a web site that finds related words\r\n# Still being worked on\r\n# Issues with flexbox\r\n# stable / effective / numerous\r\n# https://relatedwords.org/\r\ndef 
searchMethodD(topic):\r\n page = requests.get('https://relatedwords.org/api/related?term=' + topic)\r\n soup = BeautifulSoup(page.content, 'html.parser')\r\n output_File = open(\"output.txt\", \"w\")\r\n output_File.write(str(soup))\r\n output_File.close()\r\n organizeMethodDOutput()\r\n\r\n\r\n# Determines if results are sufficient\r\n# Currently only return true for testing purposes\r\n# Incomplete\r\ndef areResultsGood():\r\n return True\r\n\r\n\r\n# Prints current results\r\ndef printResults():\r\n for a in results:\r\n print(a)\r\n\r\n\r\n# Clean results array of extra unnecessary characters\r\n# Incomplete\r\n# Waiting until search methods are complete\r\ndef cleanResults():\r\n print()\r\n\r\n\r\n# Selects next search method\r\ndef search(sTopic, sType, sInput):\r\n global searchA\r\n global searchB\r\n global searchC\r\n global searchD\r\n resultsFound = False\r\n if sType == 'a' and searchA:\r\n searchMethodA(sTopic)\r\n resultsFound = areResultsGood()\r\n searchA = False\r\n elif sType == 'b' and searchB:\r\n searchMethodB(sTopic)\r\n resultsFound = areResultsGood()\r\n searchB = False\r\n elif sType == 'c' and searchC:\r\n searchMethodC(sInput)\r\n resultsFound = areResultsGood()\r\n searchC = False\r\n elif sType == 'd' and searchD:\r\n searchMethodD(sTopic)\r\n resultsFound = areResultsGood()\r\n searchD = False\r\n elif searchA:\r\n searchMethodA(sTopic)\r\n resultsFound = areResultsGood()\r\n searchA = False\r\n elif searchB:\r\n searchMethodB(sTopic)\r\n resultsFound = areResultsGood()\r\n searchB = False\r\n elif searchC:\r\n searchMethodC(sInput)\r\n resultsFound = areResultsGood()\r\n searchC = False\r\n elif searchD:\r\n searchMethodD(sTopic)\r\n resultsFound = areResultsGood()\r\n searchD = False\r\n if (not searchA and not searchB and not searchC) or resultsFound:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\n# Start of program\r\ndef programStart(searchTopic, searchType, searchInput):\r\n searchesDone = False\r\n while not searchesDone:\r\n searchesDone = search(searchTopic, searchType, searchInput)\r\n\r\n\r\n# Tests\r\n# programStart(\"pizza\", 'a', 0)\r\n# programStart(\"movies\", 'b', 0)\r\n# programStart(\"\", 'c', 2006)\r\ninput_file = open(\"input.txt\", \"r\")\r\naSearchTerm = input_file.read()\r\ninput_file.close()\r\nprogramStart(aSearchTerm, 'd', 0)\r\n# printResults()\r\n","sub_path":"bin/scraperRoughDraft.py","file_name":"scraperRoughDraft.py","file_ext":"py","file_size_in_byte":6156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"335078815","text":"list1 = ['张三', '李四', '刘老二', '王麻子', '王达成', '隔壁老王']\n# 写代码判断列表中名字为三个字的人有几个\n\n# 思路,首先得把每个名字遍历一遍\nnum1 = 0 # 存放名字为3个字的出现次数\nfor n in list1: # n是列表中的每个成员,list1中有几个成员循环几次\n sum = 0\n for a in n: # a是字符串n中的每个字符,n中有几个字符for循环几次\n sum += 1\n if sum == 3:\n num1 += 1\nprint(num1)\n# 只要知道sum出现3有几次,就是这个答案\n\n# 第一次循环n是张三\n# 第二次循环n是李四\n# 第三次循环n是刘老二\n# 第四次循环n是王麻子\n# 第五次循环n是隔壁老王\n# n = \"刘二\"\n# sum = 0\n# for a in n:\n# sum += 1\n# print(sum)\n","sub_path":"04、 python编程/day04/3-code/10-课堂练习-判断三个字的名字.py","file_name":"10-课堂练习-判断三个字的名字.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"22349005","text":"#code adapted from http://stackoverflow.com/questions/19560498/faster-way-to-remove-stop-words-in-python\nimport sys\nimport os\n\ndef stripStopWords(text, i):\n\t#print(text)\n\t#testtext = \"hi the me the bicycle maryeileen a the them hi\"\n\twith open('stop_words.txt', 'r') as myfile:\n\t\tdata=myfile.read()\n\n\tstop_words = data.split()\n\t#print(testtext)\n\t#print(stop_words)\n\n\t#text = text.decode('unicode_escape').encode('ascii','ignore')\n\t#tokenized_text = word_tokenize(text)\n\t#print(tokenized_text)\n\tclean_text = ' '.join([word for word in text.lower().split() if word not in stop_words])\n\t#print(clean_text)\n\t#print(clean_text)\n\t#path = 'noStopWords_files'\n\t#if not os.path.exists(path):\n\t#\tos.makedirs(path)\n\t#f = str(i)\n\t#clean_text.decode('utf-8')\n\t#with open(os.path.join(path, f), 'wb') as temp_file:\n\t#\ttemp_file.write(clean_text)\n\treturn clean_text\n\ndef main():\n\tpath = 'strippedHTML_files'\n\ti = 0 \n\tfor filename in os.listdir(path):\n\t\twith open(os.path.join(path, filename), 'r') as myfile:\n\t\t\t\n\t\t\tdata=myfile.read()\n\t\t\tdata = str(data)\n\t\t\tstripStopWords(data, i)\n\t\t\ti = i+1\nif __name__ == '__main__':\n\tmain()","sub_path":"removeStopWords.py","file_name":"removeStopWords.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"452247154","text":"from TestbedManagementSystem.Files.Database.configurationDB import configurationDB\r\n\r\nclass NetworkAdapterTable:\r\n def __init__(self, configuration):\r\n self.__NAstring = configuration['NAstring']\r\n self.__NAvmName= configuration['NAvmName']\r\n self.__dictVm = configuration['dictVM']\r\n \r\n\r\n def insertNetAdapt(self,configuration):\r\n conn = configurationDB.connect(self)\r\n cur = conn.cursor() \r\n insert = \"INSERT INTO NETWORK_ADAPTER (NAstring,NAvmName) VALUES ('\" + configuration['NAstring'] + \"','\" + configuration['NAstring'] + \"')\"\r\n try:\r\n cur.execute(insert)\r\n conn.commit()\r\n print('Add Network Adapter\\n')\r\n except:\r\n conn.rollback\r\n cur.close()\r\n conn.close()\r\n \r\n def selectRefeNetAdapt(self,configuration):\r\n conn = configurationDB.connect(self)\r\n cur = conn.cursor() \r\n select = \"SELECT * FROM NETWORK_ADAPTER WHERE NAstring= '\" + configuration['NAstring'] +\"', NAvmName = '\" + configuration['NAvmName']+\"'\"\r\n try:\r\n cur.execute(select) \r\n conn.commit()\r\n print('Selected Network Adapter\\n')\r\n except:\r\n conn.rollback()\r\n \r\n for row in cur:\r\n print()\r\n print (row)\r\n cur.close()\r\n conn.close()\r\n \r\n \r\n def updateNetAdapt(self,configuration):\r\n conn = configurationDB.connect(self)\r\n cur = conn.cursor()\r\n \r\n update = \"UPDATE NETORK_ADAPTER SET NAstring= $s , NAvmName= $s WHERE NAString= $s, NAvmName= $s \"\r\n try:\r\n cur.execute(update,(configuration['NAstring'], configuration['NAvmName'],configuration['NAstring'],configuration['NAvmName']))\r\n conn.commit()\r\n except:\r\n conn.rollback\r\n print('Network AdapterS Updated\\n')\r\n cur.close()\r\n conn.close()\r\n \r\n \r\n def removeNetAdapt(self,configuration):\r\n conn = configurationDB.connect(self)\r\n cur = conn.cursor()\r\n remove =\"DELETE FROM NETORK_ADAPTER WHERE NAstring= '\" + configuration['NAstring'] +\"', NAvmName = '\" + configuration['NAvmName']+\"'\"\r\n try:\r\n cur.execute(remove)\r\n conn.commit()\r\n except:\r\n conn.rollback()\r\n print('Workshop History deleted\\n')\r\n \r\n def __toDict(self):\r\n dict = {'NAstring':self.__NAstring,\r\n 'NAvmName':self.__NAvmName, \r\n 'dictVM':self.__dictVm}\r\n return dict","sub_path":"TestbedManagementSystem/Files/JesusChavez/stargate/binary/Database/NetworkAdapterTable.py","file_name":"NetworkAdapterTable.py","file_ext":"py","file_size_in_byte":2602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"467153811","text":"from django.contrib import admin\nfrom sispos.report.models import Report, ParecerOrientadorMestrado\n\n\nclass ParecerInline(admin.StackedInline):\n model = ParecerOrientadorMestrado\n extra = 1\n fieldsets = (\n ('primeiro semestre', {\n 'classes': ('collapse',),\n 'fields': ('s1_desempenho', 's1_projeto', 's1_outras_atividades')\n }\n ),\n ('segundo semestre', {\n 'classes': ('collapse',),\n 'fields': ('s2_desempenho', 's2_metodologia', 's2_abordagem', 's2_outras_atividades')\n }\n ),\n ('terceiro semestre', {\n 'classes': ('collapse',),\n 'fields': ('s3_resultados', 's3_perspectivas', 's3_resumo', 's3_outras_atividades')\n }\n )\n )\n\n\nclass ReportModelAdmin(admin.ModelAdmin):\n list_display = ['aluno_name']\n inlines = [ParecerInline]\n\n def aluno_name(self, obj):\n return obj.aluno.get_full_name()\n\n aluno_name.short_description = 'Aluno'\n\n\nclass ParecerOrientadorMestradoModelAdmin(admin.ModelAdmin):\n pass\n\nadmin.site.register(Report, ReportModelAdmin)\nadmin.site.register(ParecerOrientadorMestrado, ParecerOrientadorMestradoModelAdmin)","sub_path":"sispos/report/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"527287512","text":"from tkinter import *\nfrom functools import partial # To prevent unwanted windows\n\nimport random\n\nclass Quiz:\n def __init__(self):\n\n # Formatting variables...\n background_color = \"#8FF7A7\"\n btn_color = \"#51BBFE\"\n font_color = \"black\"\n\n self.quiz_frame = Frame(width=500, height=500,bg=background_color)\n self.quiz_frame.grid()\n\n self.starting_questoin = IntVar()\n self.starting_questoin.set(0)\n\n self.heading_frame = Frame(self.quiz_frame,bg=background_color)\n self.heading_frame.grid(row=0)\n\n # Quiz (row 0)\n self.Quiz_label = Label(self.heading_frame,text=\"MATH QUIZ\",\n font=(\"arial 20 bold\"),\n fg=font_color,bg=background_color)\n self.Quiz_label.grid(row=0)\n\n # Quiz (row 1)\n self.choicing_frame = Frame(self.quiz_frame,bg=background_color)\n self.choicing_frame.grid(row=1)\n\n self.amount_error_label = Label(self.choicing_frame, font=\"arial 10 italic\",\n text=\"\", bg=background_color)\n self.amount_error_label.grid(row=0)\n\n self.cho_num_label = Label(self.choicing_frame,text=\"How many question would you like\",\n font=(\"arial 10 bold\"),\n fg=font_color,bg=background_color)\n self.cho_num_label.grid(row=1)\n\n self.cho_num_entry = Entry(self.choicing_frame,\n font=\"arial 10 bold\", width=5)\n self.cho_num_entry.grid(row=1,column=1)\n\n self.low_num_label = Label(self.choicing_frame,text=\"low number amout\",\n font=\"arial 10 bold\",\n fg=font_color,bg=background_color)\n self.low_num_label.grid(row=2)\n\n self.low_num_entry = Entry(self.choicing_frame,\n font=\"arial 10 bold\", width=5)\n self.low_num_entry.grid(row=2,column=1)\n\n self.high_num_label = Label(self.choicing_frame,text=\"high number amout\",\n font=\"arial 10 bold\",\n fg=font_color,bg=background_color)\n self.high_num_label.grid(row=3)\n\n self.high_num_entry = Entry(self.choicing_frame,\n font=\"arial 10 bold\", width=5)\n self.high_num_entry.grid(row=3,column=1)\n\n self.question_amount_btn = Button(self.choicing_frame,text=\"Enter\",bg=btn_color,\n font=\"arial 14 bold\",command=self.check_question)\n self.question_amount_btn.grid(row=4,)\n\n self.cho_btn__frame = Frame(self.quiz_frame, width=300, bg=background_color)\n self.cho_btn__frame.grid(row=2)\n\n self.addition_btn = Button(self.cho_btn__frame,text=\"Addition\", font=\"arial 20 bold\", fg=font_color,\n bg=btn_color,padx=35,command=self.addition)\n self.addition_btn.grid(row=1)\n\n self.addition_label = Label(self.cho_btn__frame,\n text=\"this is a place holder\",\n font=\"arial 10 bold\", fg=font_color,bg=\"#F7FE72\")\n self.addition_label.grid(row=1,column=1)\n\n self.division_btn = Button(self.cho_btn__frame,\n text=\"Division\", font=\"arial 20 bold\", fg=font_color,\n bg=btn_color,padx=36,command=self.division)\n self.division_btn.grid(row=2)\n\n self.division_label = Label(self.cho_btn__frame,\n text=\"this is a place holder\",\n font=\"arial 10 bold\", fg=font_color,bg=\"#F7FE72\")\n self.division_label.grid(row=2,column=1)\n\n self.multiplication_btn = Button(self.cho_btn__frame,\n text=\"Multiplication\", font=\"arial 20 bold\", fg=font_color,\n bg=btn_color,command=self.multiplication)\n self.multiplication_btn.grid(row=3)\n\n self.multiplication = Label(self.cho_btn__frame,\n text=\"this is a place holder \",\n font=\"arial 10 bold\", fg=font_color,bg=\"#F7FE72\")\n self.multiplication.grid(row=3,column=1)\n\n self.help_frame = Frame(self.quiz_frame)\n self.help_frame.grid(row=3)\n\n self.addition_btn.config(state=DISABLED)\n self.division_btn.config(state=DISABLED)\n 
self.multiplication_btn.config(state=DISABLED)\n\n # Help Button (row 2)\n self.help_button = Button(self.help_frame,\n text=\"Help\", font=\"arial 14 bold\", fg=\"black\",\n bg=\"green\", command=self.help)\n self.help_button.grid(row=2)\n\n def help(self):\n print(\"you need help\")\n get_help = Help(self)\n get_help.help_text.configure(text=\"You have to pick a top and you will be getting tested on in\")\n\n def multiplication(self):\n get_multiplication = Multiplication(self)\n get_multiplication.multiplication_text.configure(text=\"Fill in the boxes\")\n\n def addition(self):\n get_addition = Addition(self)\n get_addition.addition_text.configure(text=\"Fill in the boxes\")\n\n def division(self):\n get_division = Division(self,)\n get_division.division_text.configure(text=\"Fill in the boxes\")\n\n\n def check_question(self):\n starting_questoin = self.cho_num_entry.get()\n\n # Set error background colour (and assum that there are no\n # error at the start\n error_back = \"#ffafaf\"\n has_error = \"no\"\n error_feedback = \"\"\n\n # change background to white (for testing purposes) ...\n self.cho_num_entry.config(bg=\"white\")\n self.amount_error_label.config(text=\"\")\n\n self.addition_btn.config(state=DISABLED)\n self.division_btn.config(state=DISABLED)\n self.multiplication_btn.config(state=DISABLED)\n\n def division(self, stakes):\n starting_questoin = self.question_amount.get()\n Division(self, stakes, starting_questoin)\n\n def check_question(self):\n starting_questoin = self.cho_num_entry.get()\n\n # Set error background colour (and assum that there are no\n # error at the start\n error_back = \"#ffafaf\"\n has_error = \"no\"\n error_feedback = \"\"\n\n # change background to white (for testing purposes) ...\n self.cho_num_entry.config(bg=\"white\")\n self.amount_error_label.config(text=\"\")\n\n self.addition_btn.config(state=DISABLED)\n self.division_btn.config(state=DISABLED)\n self.multiplication_btn.config(state=DISABLED)\n\n try:\n starting_questoin = int(starting_questoin)\n\n if starting_questoin < 0:\n has_error = \"yes\"\n error_feedback = \"You need to enter a number\"\n elif starting_questoin > 20:\n has_error = \"yes\"\n error_feedback = \"unfortunately thats to high\"\n elif starting_questoin >= 1:\n self.addition_btn.config(state=NORMAL)\n self.division_btn.config(state=NORMAL)\n self.multiplication_btn.config(state=NORMAL)\n error_feedback = \"sorry you need a number a bit bigger\"\n\n except ValueError:\n has_error = \"yes\"\n error_feedback = \"Please fill the boxes with whole numbers\"\n\n if has_error == \"yes\":\n self.cho_num_entry.config(bg=error_back)\n self.amount_error_label.config(text=error_feedback)\n\n else:\n self.starting_questoin.set(starting_questoin)\n\n def division(self):\n starting_questoin = self.cho_num_entry.get()\n print(starting_questoin)\n\n Division(self, starting_questoin,)\n\n # hide start up window\n root.withdraw()\n\n\n\nclass Division:\n def __init__(self, partner,starting_questoin):\n starting_questoin = int(starting_questoin)\n background_color = \"#8FF7A7\"\n low_number = 1\n high_number = 10\n number_enter = [low_number,high_number]\n\n # disable button\n partner.addition_btn.config(state=DISABLED)\n partner.division_btn.config(state=DISABLED)\n partner.multiplication_btn.config(state=DISABLED)\n partner.help_button.config(state=DISABLED)\n\n # Set up Geo game one\n self.division_box = Toplevel()\n\n # Set up GUI Frame\n self.division_frame = Frame(self.division_box, width=300, bg=background_color)\n 
self.division_frame.grid()\n # Set up Geo Instruction heading (row 0)\n self.heading = Label(self.division_frame,\n text=\"Division\",\n font=\"arial 20 bold\",bg=background_color)\n self.heading.grid(row=0)\n\n self.questions_lable = Label(self.division_frame,\n text=starting_questoin,\n font=\"arial 10 bold\", fg=\"black\",bg=background_color)\n self.questions_lable.grid(row=1)\n\n # Geo text (label, row 1)\n self.division_text = Label(self.division_frame,\n text=\"Fill the boxes\",\n justify=LEFT,width=50, bg=background_color,wrap=200)\n self.division_text.grid(row=2)\n\n # Dismiss button (row 2)\n self.dismiss_btn = Button(self.division_frame,text=\"Dismiss\",width=10,bg=\"red\",\n font=\"arial 10 bold\",\n command=partial(self.close_division, partner))\n self.dismiss_btn.grid(row=3)\n\n def close_division(self, partner):\n # Put help button back to normal\n partner.addition_btn.config(state=NORMAL)\n partner.division_btn.config(state=NORMAL)\n partner.multiplication_btn.config(state=NORMAL)\n partner.help_button.config(state=NORMAL)\n self.division_box.destroy()\n\n\nclass Addition:\n def __init__(self, partner):\n background_color = \"#8FF7A7\"\n\n # disable button\n partner.addition_btn.config(state=DISABLED)\n partner.division_btn.config(state=DISABLED)\n partner.multiplication_btn.config(state=DISABLED)\n partner.help_button.config(state=DISABLED)\n\n # Set up Geo game one\n self.addition_box = Toplevel()\n\n # Set up GUI Frame\n self.addition_frame = Frame(self.addition_box, width=300, bg=background_color)\n self.addition_frame.grid()\n # Set up Geo Instruction heading (row 0)\n self.heading = Label(self.addition_frame,\n text=\"Addition\",\n font=\"arial 20 bold\",bg=background_color)\n self.heading.grid(row=0)\n # Geo text (label, row 1)\n self.addition_text = Label(self.addition_frame,\n text=\"Fill the boxes\",\n justify=LEFT,width=50, bg=background_color,wrap=200)\n self.addition_text.grid(column=0,row=1)\n\n # Dismiss button (row 2)\n self.dismiss_btn = Button(self.addition_frame,text=\"Dismiss\",width=10,bg=\"red\",\n font=\"arial 10 bold\",\n command=partial(self.close_addition, partner))\n self.dismiss_btn.grid(row=2, pady=10)\n\n def close_addition(self, partner):\n # Put help button back to normal\n partner.addition_btn.config(state=NORMAL)\n partner.division_btn.config(state=NORMAL)\n partner.multiplication_btn.config(state=NORMAL)\n partner.help_button.config(state=NORMAL)\n self.addition_box.destroy()\nclass Multiplication:\n def __init__(self, partner):\n background_color = \"#8FF7A7\"\n\n # disable button\n partner.addition_btn.config(state=DISABLED)\n partner.division_btn.config(state=DISABLED)\n partner.multiplication_btn.config(state=DISABLED)\n partner.help_button.config(state=DISABLED)\n\n # Set up Geo game one\n self.Multiplication_box = Toplevel()\n\n # Set up GUI Frame\n self.Multiplication_frame = Frame(self.Multiplication_box, width=300, bg=background_color)\n self.Multiplication_frame.grid()\n # Set up Geo Instruction heading (row 0)\n self.heading = Label(self.Multiplication_frame,\n text=\"Multiplication\",\n font=\"arial 20 bold\",bg=background_color)\n self.heading.grid(row=0)\n # Geo text (label, row 1)\n self.multiplication_text = Label(self.Multiplication_frame,\n text=\"Fill the boxes\",\n justify=LEFT,width=50, bg=background_color,wrap=200)\n self.multiplication_text.grid(column=0,row=1)\n\n # Dismiss button (row 2)\n self.dismiss_btn = Button(self.Multiplication_frame,text=\"Dismiss\",width=10,bg=\"red\",\n font=\"arial 10 bold\",\n 
command=partial(self.close_multiplication, partner))\n self.dismiss_btn.grid(row=2, pady=10)\n\n def close_multiplication(self, partner):\n # Put help button back to normal\n partner.addition_btn.config(state=NORMAL)\n partner.division_btn.config(state=NORMAL)\n partner.multiplication_btn.config(state=NORMAL)\n partner.help_button.config(state=NORMAL)\n self.Multiplication_box.destroy()\nclass Help:\n def __init__(self, partner):\n background_color = \"#8FF7A7\"\n\n # disable help button\n partner.addition_btn.config(state=DISABLED)\n partner.division_btn.config(state=DISABLED)\n partner.multiplication_btn.config(state=DISABLED)\n partner.help_button.config(state=DISABLED)\n\n # Set up child window (ie: help box)\n self.help_box = Toplevel()\n\n # Set up GUI Frame\n self.help_frame = Frame(self.help_box, width=300, bg=background_color)\n self.help_frame.grid()\n # Set up help heading (row 0)\n self.how_heading = Label(self.help_frame,\n text=\"Help / Instruction\",\n font=\"arial 20 bold\",bg=background_color)\n self.how_heading.grid(row=0)\n # Help text (label, row 1)\n self.help_text = Label(self.help_frame,\n text=\"i have no idea where this is going to be\",\n justify=LEFT,width=50, bg=background_color,wrap=200)\n self.help_text.grid(column=0,row=1)\n\n # Dismiss button (row 2)\n self.dismiss_btn = Button(self.help_frame,text=\"Dismiss\",width=10,bg=\"red\",\n font=\"arial 10 bold\",\n command=partial(self.close_help, partner))\n self.dismiss_btn.grid(row=2, pady=10)\n\n def close_help(self, partner):\n # Put help button back to normal\n partner.addition_btn.config(state=NORMAL)\n partner.division_btn.config(state=NORMAL)\n partner.multiplication_btn.config(state=NORMAL)\n partner.help_button.config(state=NORMAL)\n self.help_box.destroy()\n\n# main routine\nif __name__ == \"__main__\":\n root = Tk()\n root.title(\"Quiz\")\n something = Quiz()\n root.mainloop()\n","sub_path":"04_page.py","file_name":"04_page.py","file_ext":"py","file_size_in_byte":15357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"531464269","text":"'''\nTEST SCRIPT FOR TRIPLE HADAMARD WITH MASK UNIFORM NORMALIZATION AND CLIPPING\nBy Filipe Chagas\n'''\n\nimport sys\nsys.path.append('../src')\nsys.path.append('../src/plugins')\n\nimport pipeline as pl\nimport hadamard as hd\nimport numpy as np\nimport random as rd\nimport cv2 as cv\n\n#pipeline\nmy_pipeline = pl.Pipeline()\n\n#buses\nmy_pipeline.create_bus('input', pl.BusFormat.Triple)\nmy_pipeline.create_bus('mask', pl.BusFormat.Triple)\nmy_pipeline.create_bus('output', pl.BusFormat.Triple)\n\n#pipes\nmy_had = hd.TripleHadamardPipe()\n\n#params\nmy_had.set_param('normalize_mask', True)\nmy_had.set_param('uniform_normalization', True)\nmy_had.set_param('clipping', True)\n\n#build\nmy_pipeline.insert_pipe('my_had', my_had, ['input', 'mask'], ['output'])\nmy_pipeline.set_pipe_layer('my_had', 0)\n\nmy_pipeline.set_layers_sequence([0])\n\n#test\ntolerance = 1\nclip = np.vectorize(lambda x : 255 if x > 255 else (0 if x < 0 else (x)))\n\nif __name__ == '__main__':\n for n in range(100):\n print('TEST ' + str(n)) \n \n #input data\n in_data = np.array(np.random.rand(rd.randint(1,500),rd.randint(1,500), 3)*255, np.uint8)\n print('input')\n print(in_data)\n\n #expected output\n mask = np.array(np.random.rand(rd.randint(1,500),rd.randint(1,500), 3)*255, np.uint8)\n mask_rs = cv.resize(mask, (in_data.shape[1], in_data.shape[0])).astype(np.float)\n mask_unorm = mask_rs / np.max(mask_rs)\n\n expected_out = clip(in_data.astype(np.float) * mask_unorm)\n expected_out = expected_out.astype(np.uint8)\n \n print('expected out')\n print(expected_out)\n\n #process and get output\n my_pipeline.buses['input'].set_data(in_data)\n my_pipeline.buses['mask'].set_data(mask)\n my_pipeline.process()\n out_data = my_pipeline.buses['output'].get_data()\n\n print('out')\n print(out_data)\n\n #check\n diference = out_data - expected_out\n ans0 = (diference > tolerance)\n ans1 = (diference < tolerance)\n \n if (not ans0.all()) or (not ans1.all()):\n print('SUCCESS')\n else:\n print('FAILURE')\n break\n\n my_pipeline.reset_buses()\n\n ","sub_path":"tests/test8.py","file_name":"test8.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"410366146","text":"from urllib.request import Request, urlopen\nfrom bs4 import BeautifulSoup\nfrom discord.ext import commands\n\nstates = [\"AL\", \"AK\", \"AZ\", \"AR\", \"CA\", \"CO\", \"CT\", \"DE\", \"FL\", \"GA\", \"HI\", \"ID\", \"IL\", \"IN\", \"IA\",\n \"KS\", \"KY\", \"LA\", \"ME\", \"MD\", \"MA\", \"MI\", \"MN\", \"MS\", \"MO\", \"MT\", \"NE\", \"NV\", \"NH\", \"NJ\",\n \"NM\", \"NY\", \"NC\", \"ND\", \"OH\", \"OK\", \"OR\", \"PA\", \"RI\", \"SC\", \"SD\", \"TN\", \"TX\", \"UT\", \"VT\",\n \"VA\", \"WA\", \"WV\", \"WI\", \"WY\"]\n\n\nclass Election:\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command()\n async def gainz(self, race_type):\n \"\"\"Say a list of states gained and lost by the dems in a given race on Discord\n\n Keyword arguments:\n race_type -- the type of race to detect\n \"\"\"\n await self.bot.say('Generating gainz (this may take a while).')\n pos_gainz = []\n neg_gainz = []\n for state in states:\n state_gainz = self.get_state_gainz(state, race_type)\n if state_gainz is not None:\n # check if races are + or -\n try:\n if state_gainz.index('+') > -1:\n print(state_gainz)\n pos_gainz.append(state_gainz)\n except ValueError:\n print(state_gainz)\n neg_gainz.append(state_gainz)\n # check for primary and break\n if state_gainz == 'Primary':\n await self.bot.say('Race is currently primary, cannot compute gainz.')\n return\n await self.bot.say('Gainz for ' + race_type.upper() + ': ' + ','.join(pos_gainz) +\n '\\nLosses for ' + race_type.upper() + ': ' + ', '.join(neg_gainz))\n\n @staticmethod\n def get_state_gainz(state, race_type):\n \"\"\"Return whether a state has been gained or lost by the dems\n\n Keyword arguments:\n state -- the state to search\n race_type -- the type of race to detect wins and losses for\n \"\"\"\n race_index = {\n 's1': 0,\n 's2': 1,\n 'gov': 2\n }.get(race_type, 's1')\n incumbent_index = {\n 's1': 1,\n 's2': 2,\n 'gov': 0\n }.get(race_type, 's1')\n # retrieve state data\n url = \"http://oppressive.games/power/state.php?state=\" + state\n request = Request(url, headers={'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) '\n 'AppleWebKit/537.75.14 (KHTML, like Gecko) '\n 'Version/7.0.3 Safari/7046A194A'})\n html = str(urlopen(request).read())\n soup = BeautifulSoup(html, \"lxml\")\n politicians_raw = soup.find_all('table')[1]\n politicians = [[cell.text for cell in row(\"td\")]\n for row in politicians_raw(\"tr\")]\n # retrieve incumbent list\n incumbents_raw = soup.find_all('table')[0]\n incumbents = [[cell.text for cell in row(\"td\")]\n for row in incumbents_raw(\"tr\")]\n incumbents = [[cell.replace('\\\\n', '') for cell in row]\n for row in incumbents]\n # retrieve state race as specified in race_type\n container_raw = soup.find_all(\"div\", {\"class\": \"col-md-6 well\"})[race_index]\n race_raw = container_raw.find('table')\n race_data = [[cell.text for cell in row(\"td\")]\n for row in race_raw(\"tr\")[1:]]\n race_data = [[cell.strip() for cell in row]\n for row in race_data]\n race_data = [[cell.replace(\"\\\\n\",\"\") for cell in row]\n for row in race_data]\n # retrieve democratic politician data\n dem_raw = [x for x in race_data if 'Democratic Party' in x]\n # retrieve incumbent politician\n incumbent_pol = [row for row in politicians if row[0] in incumbents[1][incumbent_index]]\n incumbent_pol = [j for i in incumbent_pol for j in i]\n # race data is empty or primary break\n if not race_data:\n return None\n if race_data[0].__len__() < 3:\n return 'Primary'\n # get index of dem in race\n try:\n 
dem_index = race_data.index(dem_raw[0])\n except IndexError:\n return None\n for row in race_data:\n row[2] = float(row[2][:row[2].find(\"%\")])\n # get highest polling politician\n max_value = ['test', 'test', float(0)]\n for row in race_data:\n if row[2] > max_value[2]:\n max_value = row\n # determine gains or losses\n try:\n if max_value[1] == 'Democratic Party' and incumbent_pol[1] != 'Democratic Party':\n return '+' + state\n elif max_value[1] != 'Democratic Party' and race_data[dem_index][0] in incumbents[1]:\n return '-' + state\n except IndexError:\n if max_value[1] == 'Democratic Party':\n return '+' + state\n return None\n\n\ndef setup(bot):\n bot.add_cog(Election(bot))\n","sub_path":"election.py","file_name":"election.py","file_ext":"py","file_size_in_byte":5079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"328876663","text":"# -*- coding: utf-8 -*-\n\"\"\"\nЗадание 6.2b\n\nСделать копию скрипта задания 6.2a.\n\nДополнить скрипт:\nЕсли адрес был введен неправильно, запросить адрес снова.\n\nОграничение: Все задания надо выполнять используя только пройденные темы.\n\"\"\"\n#start\nwhile True:\n ip = input(\"Enter IP address: \")\n octets = ip.split(\".\")\n\n valid_ip = len(octets) == 4\n\n for i in octets:\n if i.isdigit() and 0 <= int(i) < 256 and valid_ip:\n valid_ip = True\n break\n else :\n valid_ip = False\n if valid_ip:\n break\n else:\n print(\"Wrong ip address\")\n \n\nif valid_ip: \n if int(octets[0]) in range (1,224):\n print(\"unicast\")\n elif int(octets [0]) in range (224, 240):\n print(\"multicast\")\n elif ip == \"255.255.255.255\":\n print(\"local broadcast\")\n elif ip == \"0.0.0.0\":\n print(\"unassigned\")\n else:\n print(\"unused\")\n\n \n \n","sub_path":"exercises/06_control_structures/task_6_2b.py","file_name":"task_6_2b.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"4002284","text":"import requests\nimport re\nimport json\nimport time\nfrom requests.exceptions import RequestException\n\ndef getHtml(url):\n try:\n headers = {\n 'User-Agent': 'Mozilla/5.0(Macintosh;Intel Mac OS X 10_13+3) '\n + 'Applewebit/537.36(KHTML,like Gecko) Chorme/65.0.3325.162 Safari/537.36'\n }\n response = requests.get(url, headers=headers)\n if response.status_code == 200:\n return response.text\n return None\n except RequestException:\n return None\n\ndef Write_To_Text(content):\n with open(\"E:\\豆瓣影评\\差评.txt\",'a',encoding='utf-8') as f:\n f.write(json.dumps(content,ensure_ascii=False)+'\\n\\n')\n\ndef Parse_One_Page(html):\n partern='(.*?)'\n items =re.findall(partern, html)\n for item in items:\n Write_To_Text(item)\n\ndef main(offset):\n url='https://movie.douban.com/' \\\n +'subject/25882296/comments?start='+str(offset)\\\n + '&limit=20&sort=new_score&status=P&percent_type=l'\n html = getHtml(url)\n Parse_One_Page(html)\n\nif __name__ == '__main__':\n for i in range(10):\n main(offset=i*20)\n time.sleep(0)\n\n","sub_path":"getComments.py","file_name":"getComments.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"139485226","text":"\nimport argparse\nimport logging\nimport sys\n\n\nsys.path.append('../../../sr2018')\nfrom components.evaluator.syn_evaluator import SynEvaluator\n\n\ndef test(mode='single'):\n\n ref11 = ['I', 'like', 'fresh', 'green', 'apples']\n ref12 = ['I', 'like', 'green', 'apples']\n hyp1 = ['I', 'like', 'fresh', 'spring', 'apples']\n\n ref21 = ['He', 'is', 'my', 'glorious', 'father']\n ref22 = ['He', 'is', 'my', 'glorious', 'brother']\n hyp2 = ['He', 'is', 'my', 'beloved', 'father']\n\n all_hyps = [hyp1, hyp2]\n\n if mode == 'single':\n logging.debug('Single-reference mode')\n all_refs = [[ref12], [ref22]]\n\n elif mode == 'multi':\n logging.debug('Multi-reference mode')\n all_refs = [[ref11, ref12], [ref21, ref22]]\n\n else:\n raise NotImplementedError()\n\n scores = SynEvaluator.compute_metric_scores(all_refs, all_hyps)\n SynEvaluator.output_scores(scores)\n\n\ndef parse_args():\n argparser = argparse.ArgumentParser()\n argparser.add_argument('--ref', help='References file')\n argparser.add_argument('--hyp', help='Hypotheses file')\n argparser.add_argument('--test', help='Run tests', action='store_true')\n argparser.add_argument('--mode', help='Mode: single or multi-reference', choices=['single', 'multi'])\n\n args = argparser.parse_args()\n return args\n\ndef main():\n logging.basicConfig(level=logging.DEBUG)\n argvs = parse_args()\n\n if argvs.test:\n logging.info('Running tests')\n mode = argvs.mode\n test(mode)\n\n else:\n ref_fn = argvs.ref\n hyp_fn = argvs.hyp\n\n scores = SynEvaluator.read_predictions_and_eval(ref_fn, hyp_fn, normalise=True)\n SynEvaluator.output_scores(scores)\n\nif __name__ == '__main__':\n main()","sub_path":"components/evaluator/syn_eval_script.py","file_name":"syn_eval_script.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"339479929","text":"# -*- coding: utf8 -*-\nfrom datetime import datetime\nimport re\n\nfrom scrapy.http import Request, HtmlResponse,FormRequest\nfrom scrapy.selector import Selector\n\nfrom alascrapy.spiders.base_spiders.ala_spider import AlaSpider\nfrom alascrapy.spiders.base_spiders.bazaarvoice_spider import BVNoSeleniumSpider\nfrom alascrapy.lib.generic import get_full_url, date_format\nimport alascrapy.lib.dao.incremental_scraping as incremental_utils\nfrom alascrapy.items import CategoryItem, ProductItem, ReviewItem, ProductIdItem\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom alascrapy.lib.selenium_browser import SeleniumBrowser\nimport json\n\nclass Gamona_deSpider(AlaSpider):\n name = 'gamona_de'\n allowed_domains = ['gamona.de']\n start_urls = ['http://www.gamona.de/games/reviews.html']\n\n \n def parse(self, response):\n original_url = response.url\n url0='http://www.gamona.de/$invoke/games/reviews.html'\n for i in range(1,100):\n data0={\"name\":\"more\",\"arguments\":[{\"page\":i}]}\n #r=FormRequest(url=url0,formdata=json.dumps(data0),callback=self.level_xhr)\n r = Request(url0, method='POST',body=json.dumps(data0),headers={'Content-Type': 'application/json'},callback=self.level_xhr)\n yield r\n product = response.meta.get(\"product\", {})\n review = response.meta.get(\"review\", {})\n \n url_xpath = \"//li[@class='game']/a/@href\"\n single_url = self.extract(response.xpath(url_xpath))\n if single_url:\n matches = None\n if \"\":\n matches = re.search(\"\", single_url, re.IGNORECASE)\n if matches:\n single_url = matches.group(0)\n else:\n return\n single_url = get_full_url(original_url, single_url)\n \n request = Request(single_url, callback=self.level_2)\n try:\n request.meta[\"product\"] = product\n except:\n pass\n try:\n request.meta[\"review\"] = review\n except:\n pass\n yield request\n def level_xhr(self,response):\n #print 'enter level_xhr'\n t=response.body\n pattern = re.compile(r'(?<=href)(.*?)(?=data\\-)')\n anses = pattern.findall(t)\n for ans in anses:\n url='http://www.gamona.de'+ans[3:-3]\n yield Request(url=url,callback=self.level_2)\n def level_2(self, response):\n \n original_url = response.url\n product = response.meta.get(\"product\", {})\n review = response.meta.get(\"review\", {})\n \n product_xpaths = { \n \n \n \"ProductName\":\"//span[@itemprop='name']/h1/a/text()\",\n \n \n \n \n }\n product = self.init_item_by_xpaths(response, \"product\", product_xpaths)\n product['TestUrl'] = original_url\n picurl = product.get(\"PicURL\", \"\")\n if picurl and picurl[:2] == \"//\":\n product[\"PicURL\"] = \"https:\" + product[\"PicURL\"]\n if picurl and picurl[:1] == \"/\":\n product[\"PicURL\"] = get_full_url(original_url, picurl)\n manuf = product.get(\"ProductManufacturer\", \"\")\n if manuf == \"\" and \"\"[:2] != \"//\":\n product[\"ProductManufacturer\"] = \"\"\n try:\n product[\"OriginalCategoryName\"] = category['category_path']\n except:\n pass\n ocn = product.get(\"OriginalCategoryName\", \"\")\n if ocn == \"\" and \"\"[:2] != \"//\":\n product[\"OriginalCategoryName\"] = \"\"\n review_xpaths = { \n \n \n \"ProductName\":\"//span[@itemprop='name']/h1/a/text()\",\n \n \n \n \"TestDateText\":\"//span[@class='date']/text()\",\n \n \n \n \n \"TestSummary\":\"//div[contains(@class,'articlebody')]/p[1]//text()\",\n \n \n \n \"Author\":\"//a[@rel='author']//span[@itemprop='name']//text()\",\n \n \n \"TestTitle\":\"//span[@itemprop='name']/h2/text()\",\n \n \n \n }\n review = 
self.init_item_by_xpaths(response, \"review\", review_xpaths)\n        review['TestUrl'] = original_url\n        try:\n            review['ProductName'] = product['ProductName']\n            review['source_internal_id'] = product['source_internal_id']\n        except:\n            pass\n        awpic_link = review.get(\"AwardPic\", \"\")\n        if awpic_link and awpic_link[:2] == \"//\":\n            review[\"AwardPic\"] = \"https:\" + review[\"AwardPic\"]\n        if awpic_link and awpic_link[:1] == \"/\":\n            review[\"AwardPic\"] = get_full_url(original_url, awpic_link)\n\n        matches = None\n        field_value = review.get(\"TestDateText\", \"\")\n        if field_value:\n            matches = re.search(\"(\\\\d{2}\\\\.\\\\d{2}\\\\.\\\\d{4})\", field_value, re.IGNORECASE)\n            if matches:\n                review[\"TestDateText\"] = matches.group(1)\n\n        if review[\"TestDateText\"]:\n            review[\"TestDateText\"] = review[\"TestDateText\"].strip()\n            # the captured date is numeric (dd.mm.yyyy), so the format uses %m, not %B\n            review[\"TestDateText\"] = date_format(review[\"TestDateText\"], \"%d.%m.%Y\", [\"en\"])\n\n        review[\"DBaseCategoryName\"] = \"PRO\"\n\n        review[\"SourceTestScale\"] = \"10\"\n\n        yield product\n\n        in_another_page_xpath = \"//p[@class='nextpage']/a/@href\"\n        pros_xpath = \"//div[@class='box pro']/ul//text()\"\n        cons_xpath = \"//div[@class='box contra']/ul//text()\"\n        rating_xpath = \"//span[@class='award']/img/@alt\"\n        award_xpath = \"\"\n        award_pic_xpath = \"\"\n\n        test_verdict_xpath_1 = '//span[@class=\"content\"]/h3/text()'\n\n        review[\"TestVerdict\"] = None\n        in_another_page_url = None\n        if in_another_page_xpath:\n            in_another_page_url = self.extract(response.xpath(in_another_page_xpath))\n        if in_another_page_url:\n            in_another_page_url = get_full_url(response, in_another_page_url)\n            request = Request(in_another_page_url, callback=self.parse_fields_page)\n            request.meta['review'] = review\n\n            request.meta['test_verdict_xpath_1'] = test_verdict_xpath_1\n\n            request.meta['pros_xpath'] = pros_xpath\n            request.meta['cons_xpath'] = cons_xpath\n            request.meta['rating_xpath'] = rating_xpath\n            request.meta['award_xpath'] = award_xpath\n            request.meta['award_pic_xpath'] = award_pic_xpath\n            yield request\n        else:\n            if not review[\"TestVerdict\"]:\n                review[\"TestVerdict\"] = self.extract(response.xpath(test_verdict_xpath_1))\n\n            yield review\n\n    def parse_fields_page(self, response):\n        review = response.meta['review']\n\n        test_verdict_xpath_1 = response.meta['test_verdict_xpath_1']\n\n        if not review[\"TestVerdict\"]:\n            review[\"TestVerdict\"] = self.extract(response.xpath(test_verdict_xpath_1))\n\n        pros_xpath = response.meta['pros_xpath']\n        cons_xpath = response.meta['cons_xpath']\n        rating_xpath = response.meta['rating_xpath']\n        award_xpath = response.meta['award_xpath']\n        award_pic_xpath = response.meta['award_pic_xpath']\n        if pros_xpath:\n            review[\"TestPros\"] = self.extract_all(response.xpath(pros_xpath), ' ; ')\n        if cons_xpath:\n            review[\"TestCons\"] = self.extract_all(response.xpath(cons_xpath), ' ; ')\n        if rating_xpath:\n            review['SourceTestRating'] = self.extract(response.xpath(rating_xpath), '%')\n        if award_xpath:\n            review['award'] = self.extract(response.xpath(award_xpath))\n        if award_pic_xpath:\n            review['AwardPic'] = self.extract(response.xpath(award_pic_xpath))\n        yield review\n\nif __name__ == '__main__':\n    from scrapy.crawler import CrawlerProcess\n\n    process = CrawlerProcess({})\n    process.crawl(Gamona_deSpider)\n    process.start()","sub_path":"alascrapy/spiders/gamona_de.py","file_name":"gamona_de.py","file_ext":"py","file_size_in_byte":8448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"9258834","text":"num=[]\r\n\"\"\"user input\"\"\"\r\n\r\nnumcount=int(input(\"how many numbers do you want to input?\"))\r\n\r\nfor i in range(numcount):\r\n element= int(input(\"enter number:\"))\r\n num.append(element)\r\n \r\nuserNum=[]\r\n\r\nfor i in num:\r\n if i not in userNum:\r\n userNum.append(i)\r\n \r\nprint(num)\r\nprint(userNum)\r\n\r\npoppedElement=userNum.pop()\r\nprint(\"popped elment is :\",poppedElement)\r\nprint(userNum)","sub_path":"Popped element.py","file_name":"Popped element.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"452933662","text":"\nimport itertools\n\ndef isprime(number):\n if number == 0 or number == 1:\n return False\n \n for i in range(2, int(number ** 0.5) + 1):\n if number % i == 0:\n return False\n \n return True\n\n\ndef solution(numbers):\n check_dict = dict()\n answer = 0\n for i in range(1, len(numbers) + 1):\n for item in list(itertools.permutations(numbers, i)):\n number = int(''.join(item))\n if number not in check_dict:\n check_dict[number] = number\n \n if isprime(number):\n answer += 1\n\n return answer\n","sub_path":"programmers/42839.py","file_name":"42839.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"311771257","text":"import numpy as np\nfrom scipy.optimize import curve_fit\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom matplotlib.animation import FFMpegWriter\n\nY1 = np.genfromtxt('./Data/2_1_random.txt')\n\nnSeconds1 = 10\ncol1=len(Y1[0])\nrow1=len(Y1)//col1\nfps1=row1//nSeconds1\n\nfig1 = plt.figure( figsize=(8,8) )\n\nim1 = plt.imshow(Y1[0:col1,0:col1])\n\ndef animate_func1(i):\n im1.set_array(Y1[i*col1:(i+1)*col1,0:col1].copy())\n return [im1]\n\nanim1 = animation.FuncAnimation(fig1, animate_func1, frames = row1, interval = 1000 /fps1 )\n\nanim1.save('Animations/Animation_1_random.mp4', fps=fps1, extra_args=['-vcodec', 'libx264'])\n\nY3 = np.genfromtxt('./Data/2_3_random.txt')\n\nnSeconds3 = 10\ncol3=len(Y3[0])\nrow3=len(Y3)//col3\nfps3=row3//nSeconds3\n\nfig3 = plt.figure( figsize=(8,8) )\n\nim3 = plt.imshow(Y3[0:col3,0:col3])\n\ndef animate_func3(i):\n im3.set_array(Y3[i*col3:(i+1)*col3,0:col3].copy())\n return [im3]\n\nanim3 = animation.FuncAnimation(fig3, animate_func3, frames = row3, interval = 1000 /fps3 )\n\nanim3.save('Animations/Animation_3_random.mp4', fps=fps3, extra_args=['-vcodec', 'libx264'])\n","sub_path":"Blatt9/Animation_Programs/Animate_random.py","file_name":"Animate_random.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"40989472","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\nDOCUMENTATION = r'''\n---\nmodule: online_user_info\nshort_description: Gather information about Online user.\ndescription:\n - Gather information about the user.\nversion_added: \"2.9\"\nauthor:\n - \"Remy Leone (@sieben)\"\nextends_documentation_fragment: online\n'''\n\nEXAMPLES = r'''\n- name: Gather Online user info\n online_user_info:\n register: result\n\n- debug:\n msg: \"{{ result.online_user_info }}\"\n'''\n\nRETURN = r'''\n---\nonline_user_info:\n description: Response from Online API\n returned: success\n type: complex\n sample:\n \"online_user_info\": {\n \"company\": \"foobar LLC\",\n \"email\": \"foobar@example.com\",\n \"first_name\": \"foo\",\n \"id\": 42,\n \"last_name\": \"bar\",\n \"login\": \"foobar\"\n }\n'''\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils.online import (\n Online, OnlineException, online_argument_spec\n)\n\n\nclass OnlineUserInfo(Online):\n\n def __init__(self, module):\n super(OnlineUserInfo, self).__init__(module)\n self.name = 'api/v1/user'\n\n\ndef main():\n module = AnsibleModule(\n argument_spec=online_argument_spec(),\n supports_check_mode=True,\n )\n\n try:\n module.exit_json(\n online_user_info=OnlineUserInfo(module).get_resources()\n )\n except OnlineException as exc:\n module.fail_json(msg=exc.message)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"env/lib/python3.9/site-packages/ansible/modules/cloud/online/online_user_info.py","file_name":"online_user_info.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"245264892","text":"import math\n\n\nf = open(\"dane_ulamki.txt\")\n\nsuma = 0\nfor line in f.readlines():\n a, b = map(int, line.split())\n suma += a // math.gcd(a, b)\n\nprint(suma)\nf.close()\n","sub_path":"zbiór inf/Coding/65/65-3.py","file_name":"65-3.py","file_ext":"py","file_size_in_byte":168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"18494842","text":"import threading\nimport requests\nimport time\n\nthings = []\nthings_size_threshold = 1000\nthings_size_limit = 10\n\n# Collector\nnum_requests_made = 0\ndef collect():\n global num_requests_made\n while True:\n if len(things) < things_size_threshold:\n print(f'Making request number: {num_requests_made}. Length of things: {len(things)}')\n response = requests.get('https://jolynclothing.com/products/printed-jackson-3-onesie-ruggle.json')\n body = response.json\n things.append(body)\n num_requests_made += 1\n else:\n print(f'Size limit reached. Sleeping for 5 secs')\n time.sleep(5)\n\ndef write():\n global things\n while True:\n print('Writer checking if things can be written ... ')\n if len(things) > things_size_limit:\n print('Writing to disk ... ')\n # file_rows = things[:things_size_limit]\n remaining_rows = things[things_size_limit:]\n things = remaining_rows\n time.sleep(2)\n else:\n print('Too early for writer, sleeping ...')\n time.sleep(2)\n\ncollector = threading.Thread(target=collect)\nwriter = threading.Thread(target=write)\n\ncollector.start()\nwriter.start()\ncollector.join()\nwriter.join()\n\n\n\n","sub_path":"PythonMultithreading.py","file_name":"PythonMultithreading.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"130692539","text":"from random import randint\nfrom euler import *\nimport itertools\n\n#####################################\n# CONSTRUCTION OF INTERESTING WORDS #\n#####################################\n\ndef prefixes(x):\n \"\"\" If x is a word, prefixes(x) is a word which is the concatenation\n of the prefixes of x, i.e \"x[:1]x[:2]...x[:len(x)] \"\"\"\n pre = \"\"\n for i in range(len(x)+1):\n pre += x[:i+1]\n return(pre)\n\ndef random(n):\n \"\"\" Construct a random binary word of length n \"\"\"\n res = \"\"\n for i in range(n):\n res+=str(randint(0,1))\n return res\n\n\ndef deBruijn(n, cyclic = True):\n \"\"\"Construct a de Bruijn word of order n over the alphabet {0,1}.\n\n By default, the word is considered cyclic. If the parameter ̀cyclic`\n is set to False, then the word is not cyclic anymore.\n \"\"\"\n gr = constructionGraph(n-1)\n tour = eulerian_tour(gr,'0'*(n-1))\n if cyclic:\n res = \"\" # version mot circulaire\n else:\n res = '0'*(n-2) # version mot pas circulaire\n for e in tour:\n res += e[-1]\n if cyclic:\n return res[1:] #version circulaire\n else:\n return res #version pas circulaire\n\n\ndef TrianglePath(p,l):\n \"\"\"Construct a triangle-paths word (of depth p and chain-length l).\n\n That is: the concatenation of every sequences of length <= p,\n followed by 2**p random chains of length l (where each chain is the\n concatenation of the prefixes of a random word).\n\n \"\"\"\n res = \"\"\n #Triangles\n for i in range(p):\n for x in [\"\".join(seq) for seq in itertools.product(\"01\", repeat=i)]:\n res += x\n\n chains = [\"\".join(seq) for seq in itertools.product(\"01\", repeat=p)]\n for x in chains:\n res += x\n for i in range(l):\n for i in range(len(chains)):\n chains[i] += str(randint(0,1))\n res += chains[i]\n return res\n\n \n\n################\n# USEFUL TOOLS #\n################\n\ndef constructionGraph(n):\n \"\"\"Construct a graph where nodes are labeled by all sequences of size\n n over the alphabet {0,1}. We add an edge labeled by the letter \"a\"\n between a node w and w' if w[1:] = w'[0:-1] and w'[-1] = a.\n\n An eulerian tour in such a graph with parameter (n-1) corresponds\n more or less to a de Bruijn sequence of order n.\n \"\"\"\n g = {}\n nodes = [\"\".join(seq) for seq in itertools.product(\"01\", repeat=n)]\n for no in nodes:\n g[no] = set([no[1:n] + '0', no[1:n] + '1'])\n # g['0'*n].remove('0'*n)\n # g['1'*n].remove('1'*n)\n \n return g\n","sub_path":"src/Clean/words.py","file_name":"words.py","file_ext":"py","file_size_in_byte":2370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"428475141","text":"import os\r\nimport matplotlib.pyplot as plt\r\nimport csv\r\nimport tarfile\r\nimport gzip\r\nimport shutil\r\nimport bz2\r\nimport binascii\r\nimport lzma\r\nimport csv\r\nimport matplotlib.pyplot as plt\r\nimport plotly.plotly as py\r\n\r\ndef printstat():\r\n print(\"Enter the type of compression you want do: \")\r\n print(\"1.Enter the filename as 'fortar.py' for archieving: \")\r\n print(\"2.Enter the filename as 'forbz2.py' for bz2 compression: \")\r\n print(\"3.Enter the filename as 'forgzip.py' for gzip compression: \")\r\n print(\"4.Enter the filename as 'forlzma.py' for lzma compression: \")\r\n print(\"5.Want to see user selection stats (y/n)? \")\r\n\r\ndef userstat():\r\n y=[]\r\n with open('output.csv', newline='') as csvfile:\r\n spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')\r\n for row in spamreader:\r\n y.append(int(', '.join(row)))\r\n plt.plot(y)\r\n plt.ylabel('User choice')\r\n plt.xlabel('Every new entry')\r\n plt.show()\r\n\r\n\r\n \r\ndef manualcall():\r\n y=[]\r\n printstat()\r\n x=input(\"Choose your option : \")\r\n if(x=='y' or x=='n'):\r\n if(x=='y'):\r\n userstat()\r\n else:\r\n manualcall()\r\n else:\r\n y.append(x)\r\n resultFyle = open(\"output.csv\",'a+')\r\n for r in y:\r\n resultFyle.write(r + \"\\n\")\r\n resultFyle.close()\r\n if(int(x)==1):\r\n #tar = input(\"Enter the filename as 'fortar.py' for archieving: \")\r\n #os.system('python ' +str(tar))\r\n for_tar()\r\n \r\n elif(int(x)==2):\r\n #bz = input(\"Enter the filename as 'forbz2.py' for bz2 compression: \")\r\n #os.system('python ' +str(bz))\r\n for_bz()\r\n elif(int(x)==3):\r\n #gzip = input(\"Enter the filename as 'forgzip.py' for gzip compression: \")\r\n #os.system('python ' +str(gzip))\r\n for_gzip()\r\n elif(int(x)==4):\r\n #lzma = input(\"Enter the filename as 'forlzma.py' for lzma compression: \")\r\n #os.system('python ' +str(lzma))\r\n for_lzma()\r\n\r\ndef for_tar():\r\n print(\"tar Archieving\")\r\n flag=True\r\n y=[]\r\n while(flag):\r\n print(\"1.Archieve a file\")\r\n print(\"2.(de)Archieve a file\")\r\n print(\"3.Upload file to drive\")\r\n print(\"4.exit\")\r\n ch=input(\"Enter your choice.\")\r\n if(int(ch) == 1):\r\n #Archive\r\n while(True):\r\n z=input(\"Want to add more file(y/n) : \")\r\n if(z=='y'):\r\n x=input(\"Enter file name : \")\r\n y.append(x)\r\n else:\r\n break\r\n\r\n print ('creating archive')\r\n print (y)\r\n\r\n out = tarfile.open('tarfile_add.tar', mode='w')\r\n for c in y:\r\n out.add(str(c))\r\n\r\n t = tarfile.open('tarfile_add.tar', mode='r')\r\n for member_info in t.getmembers():\r\n print (member_info.name)\r\n\r\n elif(int(ch)==2):\r\n tar = tarfile.open(\"tarfile_add.tar\")\r\n tar.extractall()\r\n tar.close()\r\n elif(int(ch)==3):\r\n r=input(\"Enter file name to upload\")\r\n os.system('python ' +'driveupload.py '+r)\r\n else:\r\n break\r\n\r\n\r\ndef for_gzip():\r\n print(\"compression and deccompression using gzip\")\r\n flag=True\r\n while(flag):\r\n print(\"1.compress a file\")\r\n print(\"2.(de)compress a file\")\r\n print(\"3.Upload file to drive\")\r\n print(\"4.exit\")\r\n ch=input(\"Enter your choice.\")\r\n if(int(ch) == 1):\r\n #compress\r\n x=input(\"Enter file name : \")\r\n z=str(x).split('.')\r\n inF = open(x, 'rb')\r\n s = inF.read()\r\n inF.close()\r\n\r\n outF=gzip.GzipFile(\"compressedByGZIP.gz\",'wb')\r\n outF.write(s)\r\n outF.close()\r\n elif(int(ch) == 2):\r\n #decompress\r\n inF = gzip.GzipFile(\"compressedByGZIP.gz\", 'rb')\r\n s = inF.read()\r\n inF.close()\r\n\r\n outF = 
open(\"x1.\"+str(z[1]), 'wb')\r\n outF.write(s)\r\n outF.close()\r\n elif(int(ch)==3):\r\n r=input(\"Enter file name to upload\")\r\n os.system('python ' +'driveupload.py '+r)\r\n else:\r\n break\r\n\r\n \r\n c=os.path.getsize(x)\r\n d=os.path.getsize(\"compressedByGZIP.gz\")\r\n\r\n\r\n import matplotlib.pyplot as plt; plt.rcdefaults()\r\n import numpy as np\r\n import matplotlib.pyplot as plt\r\n \r\n objects = (\"original\",\"compressed\")\r\n y_pos = np.arange(len(objects))\r\n performance = [c,d]\r\n plt.barh(y_pos, performance, align='center')\r\n plt.yticks(y_pos, objects)\r\n plt.xlabel('ratio')\r\n plt.title('Compression')\r\n \r\n plt.show()\r\n\r\n\r\n\r\ndef for_bz2():\r\n print(\"compression and deccompression using bzip2\")\r\n flag=True\r\n while(flag):\r\n print(\"1.compress a file\")\r\n print(\"2.(de)compress a file\")\r\n print(\"3.Upload file to drive\")\r\n print(\"4.exit\")\r\n ch=input(\"Enter your choice.\")\r\n if(int(ch) == 1):\r\n #compress\r\n x=input(\"Enter file name : \")\r\n z=str(x).split('.')\r\n inF = open(x, 'rb')\r\n s = inF.read()\r\n inF.close()\r\n\r\n outF=bz2.BZ2File(\"compressedByBZ2.bz\",'wb')\r\n outF.write(s)\r\n outF.close()\r\n elif(int(ch) == 2):\r\n #decompress\r\n inF = bz2.BZ2File(\"compressedByBZ2.bz\", 'rb')\r\n s = inF.read()\r\n inF.close()\r\n\r\n outF = open(\"x1.\"+str(z[1]), 'wb')\r\n outF.write(s)\r\n outF.close()\r\n\r\n elif(int(ch)==3):\r\n r=input(\"Enter file name to upload\")\r\n os.system('python ' +'driveupload.py '+r)\r\n\r\n else:\r\n break\r\n\r\n c=os.path.getsize(x)\r\n d=os.path.getsize(\"compressedByBZ2.bz\")\r\n\r\n\r\n import matplotlib.pyplot as plt; plt.rcdefaults()\r\n import numpy as np\r\n import matplotlib.pyplot as plt\r\n \r\n objects = (\"original\",\"compressed\")\r\n y_pos = np.arange(len(objects))\r\n performance = [c,d]\r\n plt.barh(y_pos, performance, align='center')\r\n plt.yticks(y_pos, objects)\r\n plt.xlabel('ratio')\r\n plt.title('Compression')\r\n \r\n plt.show()\r\n\r\n\r\ndef for_lzma():\r\n print(\"compression and decompression using LZma 2\")\r\n flag=True\r\n while(flag):\r\n print(\"1.compress a file\")\r\n print(\"2.(de)compress a file\")\r\n print(\"3.Upload file to drive\")\r\n print(\"4.exit\")\r\n ch=input(\"Enter your choice.\")\r\n if(int(ch) == 1):\r\n #compress\r\n x=input(\"Enter file name : \")\r\n inF = open(x, 'rb')\r\n z=str(x).split('.')\r\n s = inF.read()\r\n inF.close()\r\n\r\n outF=lzma.LZMAFile(\"compressedByXZ.xz\",'wb')\r\n outF.write(s)\r\n outF.close()\r\n\r\n elif(int(ch) == 2):\r\n #decompress\r\n inF = lzma.LZMAFile(\"compressedByXZ.xz\", 'rb')\r\n s = inF.read()\r\n inF.close()\r\n\r\n outF = open(\"x1.\"+str(z[1]), 'wb')\r\n outF.write(s)\r\n outF.close()\r\n elif(int(ch)==3):\r\n r=input(\"Enter file name to upload\")\r\n os.system('python ' +'driveupload.py '+r)\r\n else:\r\n break\r\n\r\n c=os.path.getsize(x)\r\n d=os.path.getsize(\"compressedByXZ.xz\")\r\n\r\n\r\n import matplotlib.pyplot as plt; plt.rcdefaults()\r\n import numpy as np\r\n import matplotlib.pyplot as plt\r\n \r\n objects = (\"original\",\"compressed\")\r\n y_pos = np.arange(len(objects))\r\n performance = [c,d]\r\n plt.barh(y_pos, performance, align='center')\r\n plt.yticks(y_pos, objects)\r\n plt.xlabel('ratio')\r\n plt.title('Compression')\r\n \r\n plt.show()\r\n\r\n\r\n\r\n \r\nif(os.path.getsize(\"output.csv\") == 0):\r\n manualcall()\r\n\r\nelse:\r\n resultFyle = open(\"output.csv\",'r+')\r\n v1=0\r\n v2=0\r\n v3=0\r\n v4=0\r\n print(\"\"\"\r\n1.Preffered one\r\n2.Suggested one(lzma2 
Compression)\r\n3.Manual\r\n4.user stats\r\n    \"\"\")\r\n    ch2 = input(\"choice : \")\r\n\r\n    with open('output.csv', newline='') as csvfile:\r\n        spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')\r\n        for row in spamreader:\r\n            if(int(', '.join(row)) == 1):\r\n                v1 = v1 + 1\r\n            elif(int(', '.join(row)) == 2):\r\n                v2 = v2 + 1\r\n            elif(int(', '.join(row)) == 3):\r\n                v3 = v3 + 1\r\n            elif(int(', '.join(row)) == 4):\r\n                v4 = v4 + 1\r\n    if(int(ch2) == 1):\r\n        if(v1 >= v2 and v1 >= v3 and v1 >= v4):\r\n            print(\"the preferred one is tar archiving \")\r\n            rt = input(\"Do you want to continue (y/n) : \")\r\n            if(rt == 'y'):\r\n                tar_script = 'fortar.py'\r\n                #os.system('python ' + tar_script)\r\n                for_tar()\r\n            else:\r\n                manualcall()\r\n\r\n        elif(v2 >= v1 and v2 >= v3 and v2 >= v4):\r\n            print(\"the preferred one is bz2 compression \")\r\n            rt = input(\"Do you want to continue (y/n) : \")\r\n            if(rt == 'y'):\r\n                bz_script = 'forbz2.py'\r\n                #os.system('python ' + bz_script)\r\n                for_bz2()\r\n            else:\r\n                manualcall()\r\n\r\n        elif(v3 >= v2 and v3 >= v1 and v3 >= v4):\r\n            print(\"the preferred one is gzip compression \")\r\n            rt = input(\"Do you want to continue (y/n) : \")\r\n            if(rt == 'y'):\r\n                # the script name must not be stored in a variable called gzip,\r\n                # which would shadow the gzip module used by for_gzip()\r\n                gzip_script = 'forgzip.py'\r\n                #os.system('python ' + gzip_script)\r\n                for_gzip()\r\n            else:\r\n                manualcall()\r\n\r\n        elif(v4 >= v2 and v4 >= v3 and v4 >= v1):\r\n            print(\"the preferred one is lzma compression \")\r\n            rt = input(\"Do you want to continue (y/n) : \")\r\n            if(rt == 'y'):\r\n                lzma_script = 'forlzma.py'\r\n                #os.system('python ' + lzma_script)\r\n                for_lzma()\r\n            else:\r\n                manualcall()\r\n\r\n    elif(int(ch2) == 2):\r\n        lzma_script = 'forlzma.py'\r\n        #os.system('python ' + lzma_script)\r\n        for_lzma()\r\n\r\n    elif(int(ch2) == 3):\r\n        manualcall()\r\n    elif(int(ch2) == 4):\r\n        userstat()\r\n","sub_path":"first2.py","file_name":"first2.py","file_ext":"py","file_size_in_byte":10245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"468392038","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Exercise link: https://www.hackerrank.com/challenges/missing-stock-prices\n\n#%% Useful links\n# https://stackoverflow.com/questions/24588437/convert-date-to-float-for-linear-regression-on-pandas-data-frame\n# https://scikit-learn.org/stable/modules/impute.html#impute\n# https://scikit-learn.org/stable/modules/generated/sklearn.impute.IterativeImputer.html#sklearn.impute.IterativeImputer\n\n#%% get sample data from file on localdir\nwith open('/home/pefs/Desktop/missing_values.txt', 'r') as f:\n raw = f.read().split('\\n')\nraw = raw[:-2]\nn_quotes = raw.pop(0)\n\n\n#%% get sample data from stdin\nn_quotes = int(input())\nraw = []\nfor _ in range(n_quotes):\n raw.append(input())\n\n#%% data wrangling\nimport pandas as pd\nfrom datetime import datetime\nimport numpy as np\n\nquotes = [el.split('\\t') for el in raw]\n \nfor i in range(len(quotes)):\n if quotes[i][1][0] == 'M':\n quotes[i][1] = ''\n \ndate = list(list(zip(*quotes))[0])\nprice = list(list(zip(*quotes))[1])\n\nidx = []\nfor i in range(len(price)):\n if price[i]=='':\n price[i] = float('NaN')\n idx.append(i)\n else:\n price[i] = float(price[i])\n\ndate = [datetime.strptime(el, '%m/%d/%Y %H:%M:%S') for el in date]\n\ndf = pd.Series(price, index=date)\ndf.index.name = 'date'\ndf = df.reset_index(name='price')\n\nmiss = df[df.price.isnull()].date.values\n#miss = miss.astype('datetime64[D]').astype(int)\nmiss = [[x] for x in miss]\nmiss = np.asarray(miss)\n\ndf.dropna(inplace=True)\ndate, price = [[x] for x in df.date.values], df.price.values\n\nfrom sklearn.model_selection import train_test_split\nx_train, x_test, y_train, y_test = train_test_split(date, price, \n test_size=0.01, \n shuffle=False)\nx_train, y_train = np.asarray(x_train), np.asarray(y_train)\n\n#from sklearn.ensemble import RandomForestRegressor\nfrom sklearn.linear_model import SGDRegressor\nmdl = SGDRegressor(shuffle=False, max_iter=5000, learning_rate='optimal', \n random_state=0, n_iter_no_change=30).fit(x_train, y_train)\n\nprint(*mdl.predict(miss), sep='\\n')\n# filling missing values with interpolation methods, didn't get good results\n'''\nprint(*df.price.interpolate(method='linear').loc[idx].values, sep='\\n') #1.04\nprint(*df.price.interpolate(method='krogh').loc[idx].values, sep='\\n') #7.80\n'''\n\n","sub_path":"statistics_machine_learning/Missing Stock Prices.py","file_name":"Missing Stock Prices.py","file_ext":"py","file_size_in_byte":2418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"397106352","text":"from flask import Flask,request,jsonify\nfrom flask_cors import *\nimport json\n\napp = Flask(__name__)\nCORS(app, supports_credentials=True) \n\n@app.route('/',methods=['GET','POST'])\ndef getpiclist():\n from functions import ZipExtract,MoveFile,DelZipFile,RenameFile,ReturnJpgList\n from PilFunctions import xqimglogo\n # 构建path\n dir_id = request.args.get('id')\n if int(dir_id) <= 1000:\n path = 'G:/1-1000/' + str(dir_id)\n elif int(dir_id) >1000 and int(dir_id) <=2000:\n path = 'G:/1000-2000/' + str(dir_id)\n elif int(dir_id) >2000 and int(dir_id) <=3000:\n path = 'G:/2000-3000/' + str(dir_id)\n\n print('\\n\\n--------解压ZIP文件--------\\n\\n')\n ZipExtract(path)\n print('\\n\\n--------移动文件到根目录--------\\n\\n')\n MoveFile(path)\n print('\\n\\n--------重命名文件--------\\n\\n')\n RenameFile(path)\n print('\\n\\n--------删除ZIP文件--------\\n\\n')\n DelZipFile(path)\n\n print('\\n\\n--------完成ALL DOWN--------\\n\\n')\n\n print(jsonify(ReturnJpgList(path)))\n return jsonify(ReturnJpgList(path))\n\n@app.route('/tim',methods=['GET','POST'])\ndef tim():\n from PilFunctions import st,xq,xqimglogo\n from functions import ClearUpload\n body = request.get_json()\n\n dir_id = body['id']\n if int(dir_id) <= 1000:\n path = 'G:/1-1000/' + str(dir_id)\n elif int(dir_id) >1000 and int(dir_id) <=2000:\n path = 'G:/1000-2000/' + str(dir_id)\n elif int(dir_id) >2000 and int(dir_id) <=3000:\n path = 'G:/2000-3000/' + str(dir_id)\n \n print('\\n\\n--------清空桌面--------\\n\\n')\n ClearUpload()\n\n print('\\n\\n--------制作首图--------\\n\\n')\n piclist=body['piclist']\n ststyle=body['ststyle']\n st(piclist,ststyle)\n\n print('\\n\\n--------制作详情图片--------\\n\\n')\n xq(path)\n\n print('\\n\\n--------图片添加水印--------\\n\\n')\n color = body['color']\n if color != '':\n xqimglogo(path,color)\n\n return 'success'\n\nif __name__ == '__main__':\n # app.debug = True\n app.run()","sub_path":"py/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"645189826","text":"'''\r\nCopyright 2020 Flexera Software LLC\r\nSee LICENSE.TXT for full license text\r\nSPDX-License-Identifier: MIT\r\n\r\nCreated on Nov 19, 2019\r\n\r\n@author: SGeary\r\n'''\r\nimport logging\r\nimport requests\r\nimport sys\r\nimport config\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n#######################################################################\r\n# If the calling app is a flask app then we can use\r\n# the flask abort function to catch exceptions\r\n# so see if its defined in a common config file\r\ntry: \r\n FLASKAPP = config.FLASKAPP\r\nexcept:\r\n FLASKAPP = False\r\n\r\nif FLASKAPP:\r\n from flask import abort\r\n#######################################################################\r\n\r\nFNCI_API = \"FNCI v6 Change Project Owner API\"\r\nENDPOINT_URL = config.v6_BASEURL + \"project/changeProjectOwner/\"\r\n\r\n#-----------------------------------------------------------------------#\r\ndef change_project_owner(projectID, ownerID, authToken):\r\n logger.debug(\"Entering change_project_owner\")\r\n \r\n changeProjectOwnerBody = get_changeProjectOwnerBody(projectID, ownerID) \r\n logger.debug(\"changeProjectOwnerBody: %s\" %changeProjectOwnerBody)\r\n \r\n headers = {'Content-Type': 'application/json', 'Authorization': authToken} \r\n RESTAPI_URL = ENDPOINT_URL\r\n logger.debug(\" RESTAPI_URL: %s\" %RESTAPI_URL)\r\n logger.debug(\" headers: %s\" %headers) \r\n \r\n try:\r\n response = requests.post(RESTAPI_URL, data=changeProjectOwnerBody, headers=headers)\r\n response.raise_for_status()\r\n except requests.exceptions.ConnectionError:\r\n # Connection Error - Is the server up and running?\r\n abort_message = FNCI_API + \" - Error Connecting to FNCI Server - \" + (ENDPOINT_URL).split(\"palamida\")[0] # Get rid of everything after palamida in url\r\n logger.error(\" %s\" %(abort_message))\r\n\r\n if FLASKAPP: \r\n # Using error code 500 (Internal Server Error) to cover connection errors\r\n # in the flask apps\r\n abort(500, FNCI_API + \" - %s\" %abort_message) \r\n else:\r\n print(abort_message)\r\n print(\"Is the FNCI server running?\")\r\n print(\"Exiting script\")\r\n sys.exit() \r\n except requests.exceptions.RequestException as e: # Catch the exception for the logs but process below\r\n logger.error(e)\r\n \r\n # We at least received a response from FNCI so check the status to see\r\n # what happened if there was an error or the expected data\r\n if response.status_code == 200:\r\n logger.debug(\" Call to %s was successful.\" %FNCI_API)\r\n \r\n elif response.status_code == 400:\r\n # Bad Request\r\n logger.error(\"Response code 400 - %s\" %response.text)\r\n if FLASKAPP: \r\n abort(400, FNCI_API + \" - Bad Request - Look at debug log for more details\") \r\n else:\r\n print(\"%s - Error: %s - Bad Request.\" %(FNCI_API, response.status_code ))\r\n print(\" Exiting script\")\r\n sys.exit() \r\n\r\n elif response.status_code == 401:\r\n # Unauthorized Access\r\n logger.error(\" %s - Error: %s - Authentication Failed: JWT token is not valid or user does not have correct permissions.\" %(FNCI_API, response.status_code ))\r\n if FLASKAPP: \r\n abort(401, FNCI_API + \" - Authentication Failed: JWT token is not valid or user does not have correct permissions.\")\r\n else:\r\n print(\"%s - Error: %s - Authentication Failed: JWT token is not valid or user does not have correct permissions.\" %(FNCI_API, response.status_code ))\r\n print(\" Exiting script\")\r\n sys.exit() \r\n\r\n elif response.status_code == 404:\r\n # Not Found\r\n 
logger.error(\" %s - Error: %s - URL endpoint not found: %s\" %(FNCI_API, response.status_code, RESTAPI_URL ))\r\n if FLASKAPP: \r\n abort(400, FNCI_API + \" - Bad Request - URL endpoint not found\") \r\n else:\r\n print(\" %s - Error: %s - URL endpoint not found: %s\" %(FNCI_API, response.status_code, RESTAPI_URL ))\r\n print(\" Exiting script\")\r\n sys.exit() \r\n\r\n elif response.status_code == 405:\r\n # Method Not Allowed\r\n logger.error(\" %s - Error: %s - Method (GET/POST/PUT//DELETE/ETC) Not Allowed.\" %(FNCI_API, response.status_code ))\r\n if FLASKAPP: \r\n abort(405, FNCI_API + \" - Method Not Allowed.\")\r\n else:\r\n print(\" %s - Error: %s - Method (GET/POST/PUT//DELETE/ETC) Not Allowed.\" %(FNCI_API, response.status_code ))\r\n print(\" Exiting script\")\r\n sys.exit() \r\n \r\n elif response.status_code == 500:\r\n # Internal Server Error\r\n logger.error(\" %s - Error: %s - Internal Server Error.\" %(FNCI_API, response.status_code ))\r\n if FLASKAPP: \r\n abort(500, FNCI_API + \" - Internal Server Error.\")\r\n else:\r\n print(\" %s - Error: %s - Internal Server Error.\" %(FNCI_API, response.status_code ))\r\n print(\" Exiting script\")\r\n sys.exit() \r\n#-----------------------------------------------------------#\r\n\r\n \r\ndef get_changeProjectOwnerBody(projectID, ownerID) : \r\n\r\n \r\n changeProjectOwnerBody = ''' {\r\n \"projectId\" :\"''' + str(projectID) + '''\",\r\n \"userId\" :\"''' + str(ownerID) + '''\"\r\n}'''\r\n \r\n return changeProjectOwnerBody \r\n ","sub_path":"FNCI/v6/project/changeProjectOwner.py","file_name":"changeProjectOwner.py","file_ext":"py","file_size_in_byte":5472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"240298070","text":"import sys\nimport os\nimport shutil\nimport io\nimport math\nimport base64\nimport json\nimport lzma\nimport jinja2\nimport falcon\nimport tempfile\nimport subprocess\n\nfrom contextlib import closing\nfrom PIL import Image, ImageDraw, ImageFont\n\nfrom urllib.parse import urlencode\n\nimport pyparsing as pp\nimport browser_json\n\nclass ImageGenerationFailed(Exception):\n pass\n\ndef render_image_error(text):\n text = text.strip()\n with closing(Image.new(\"RGBA\", (600, 300), (0, 0, 0, 0))) as image:\n max_width = 100\n max_height = 0\n \n draw = ImageDraw.Draw(image)\n try:\n font = ImageFont.truetype(font=\"Arial\", size=20)\n except OSError:\n font = ImageFont.truetype(font=\"DejaVuSansMono\", size=20)\n \n x_offset, _ = draw.textsize(\"Error: \", font=font)\n draw.text((0, 0), \"Error: \", fill=(255, 0, 0, 255))\n \n w, h = draw.textsize(text, font=font)\n max_width = min(max(max_width, w + x_offset), image.width)\n max_height = min(max(max_height, h), image.height)\n \n draw.text((x_offset, 0), text, fill=(0, 0, 0, 255))\n \n with closing(image.crop((0, 0, max_width, max_height))) as subimage:\n with closing(io.BytesIO()) as io_out:\n subimage.save(io_out, \"PNG\")\n return io_out.getvalue()\n\ndef generate_image(image_type, scale, code):\n if code.strip() == \"\":\n return render_image_error(\"No code supplied\")\n \n #print(\"------- code start ----------\")\n #print(type(code))\n #print(code)\n \n #print(\"------- code normalized ----------\")\n try:\n code = json.dumps(browser_json.parse_browser_json(code.decode() if isinstance(code, bytes) else code))\n except pp.ParseException as e:\n return render_image_error(\"invalid json syntax\")\n #errMsg = str(e)\n #return render_image_error(\"\\n\".join(errMsg[i*80:(i+1)*80] for i in range(math.ceil(len(errMsg) / 80))))\n #print(code)\n #print(\"------- code end ----------\")\n \n try:\n tmpdir = tempfile.mkdtemp()\n\n filename = os.path.join(tmpdir, \"out\")\n process_result = subprocess.run(\n [\"phantomjs\", \"server/serverside-renderer.js\", \"--silent\", \"--format={}\".format(image_type), \"--scale={}\".format(scale), filename],\n cwd=os.path.join(os.path.dirname(os.path.abspath(__file__)), \"..\", \"..\"),\n input=code if isinstance(code, bytes) else code.encode(),\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n \n #print(process_result.args)\n #print(process_result.returncode)\n #print(process_result.stdout)\n #print(process_result.stderr)\n\n if process_result.returncode == 0:\n with open(filename, \"rb\") as f:\n return f.read()\n else:\n return render_image_error(process_result.stdout.decode())\n finally:\n shutil.rmtree(tmpdir)\n return None\n\ndef derive_host_url(req):\n port_ext = \"\"\n if req.port != 80:\n port_ext = \":{}\".format(req.port)\n return req.scheme + \"://\" + req.host + port_ext\n\nclass HTMLContent:\n def __init__(self):\n self.resource_path = os.path.join(\n os.path.dirname(__file__),\n \"templates\")\n self.jinja_env = jinja2.Environment(\n loader=jinja2.FileSystemLoader(self.resource_path))\n\n def on_get(self, req, response, filename=None):\n if filename is None:\n filename = \"index.html\"\n\n extension = os.path.splitext(filename.lower())[1]\n inferred_mediatype = falcon.MEDIA_TEXT\n if extension == \".html\":\n inferred_mediatype = falcon.MEDIA_HTML\n elif extension == \".css\":\n inferred_mediatype = \"text/css\"\n elif extension == \".js\":\n inferred_mediatype = falcon.MEDIA_JS\n elif extension == \".png\":\n inferred_mediatype = falcon.MEDIA_PNG\n elif 
extension in (\".jpg\", \".jpeg\"):\n inferred_mediatype = falcon.MEDIA_JPEG\n\n try:\n template = self.jinja_env.get_template(filename)\n except jinja2.exceptions.TemplateNotFound:\n raise falcon.HTTPNotFound(\n title=\"Page not found\",\n description=\"The page '{}' does not exist\".format(filename))\n else:\n port_ext = \"\"\n if req.port != 80:\n port_ext = \":{}\".format(req.port)\n rendered_page = template.render(\n hostname=req.host + port_ext,\n protocol=req.scheme)\n response.body = rendered_page\n response.content_type = inferred_mediatype\n\n\ndef verify_image_type(image_type):\n if image_type not in (\"svg\", \"png\"):\n raise falcon.HTTPInternalServerError(\n title=\"Invalid type parameter\",\n description=\"type parameter must be svg or png\")\n\ndef parse_image_scale(scale):\n try:\n scale = float(scale)\n if not (.1 <= scale <= 10.0):\n raise ValueError()\n except ValueError:\n raise falcon.HTTPInternalServerError(\n title=\"Invalid scale parameter\",\n description=\"Scale must be a valid float between .1 and 10.0\")\n else:\n return scale\n\n\ndef compute_compression_prepostfixes():\n single1_compression = lzma.compress(b'{default: [ \"more things!\" ]}')\n single2_compression = lzma.compress(b'{x: \"asdf\", diferetn: 9, [2 3 4 5 1012 ]}' * 5)\n \n global STANDARD_COMPRESSION_PREFIX, STANDARD_COMPRESSION_POSTFIX\n \n l = 0\n for i in range(min(len(single1_compression), len(single2_compression))):\n if single1_compression[i] == single2_compression[i]:\n l += 1\n else:\n break\n STANDARD_COMPRESSION_PREFIX = single1_compression[:l]\n \n l = 0\n for i in range(min(len(single1_compression), len(single2_compression))):\n if single1_compression[-1 - i] == single2_compression[-1 - i]:\n l -= 1\n else:\n break\n STANDARD_COMPRESSION_POSTFIX = \"\" if l == 0 else single1_compression[l:]\ncompute_compression_prepostfixes()\n\ndef compress_text(text):\n compressed_bytes = lzma.compress(\n text.encode() if isinstance(text, str) \n else code)\n\n t = compressed_bytes\n compression_mode = 0\n if compressed_bytes[:len(STANDARD_COMPRESSION_PREFIX)] == STANDARD_COMPRESSION_PREFIX:\n compression_mode |= 1\n compressed_bytes = compressed_bytes[len(STANDARD_COMPRESSION_PREFIX):]\n if len(STANDARD_COMPRESSION_POSTFIX) > 0:\n if compressed_bytes[-len(STANDARD_COMPRESSION_POSTFIX):] == STANDARD_COMPRESSION_POSTFIX:\n compression_mode |= 2\n compressed_bytes = compressed_bytes[:-len(STANDARD_COMPRESSION_POSTFIX)]\n \n return str(compression_mode) + base64.b64encode(compressed_bytes).decode()\n\ndef decompress_text(text):\n mode = chr(text[0])\n try:\n mode = int(mode)\n if not (0 <= mode < (1 << 2)):\n raise ValueError(\"...\")\n except ValueError:\n raise falcon.HTTPInternalServerError(\n title=\"Invalid code\",\n description=\"The supplied code is missing the encoding mode byte.\")\n \n try:\n binary_lzma_code = base64.b64decode(text[1:])\n except ValueError:\n raise falcon.HTTPInternalServerError(\n title=\"Code is not base64 encoded\",\n description=\"The provided code is not encoded in the base64 format\")\n\n if (mode & 1) != 0:\n binary_lzma_code = STANDARD_COMPRESSION_PREFIX + binary_lzma_code\n if (mode & 2) != 0:\n binary_lzma_code = binary_lzma_code + STANDARD_COMPRESSION_POSTFIX\n \n try:\n plain_code = lzma.decompress(binary_lzma_code)\n except lzma.LZMAError:\n raise falcon.HTTPInternalServerError(\n title=\"Code is not compressed\",\n description=\"The provided code is note compressed using the correct lzma compression technique\")\n\n if isinstance(plain_code, str):\n plain_code = 
plain_code.encode()\n\n return plain_code\n\nclass RestAPI:\n def on_get(self, req, response, cmd=\"[None]\"):\n cmd = cmd.lower()\n if cmd == \"gen_image\":\n image_type = req.get_param(\"type\", required=True).lower()\n verify_image_type(image_type)\n\n scale = req.get_param(\"scale\", default=1.0)\n scale = parse_image_scale(scale)\n\n code = req.get_param(\"c\", required=True)\n \n plain_code = decompress_text(code.encode())\n\n image = generate_image(image_type, scale, plain_code)\n if image is None:\n raise falcon.HTTPInternalServerError(\n title=\"Image generation failed\",\n description=\"The code you submitted could not be used to render a wavedrom image\")\n\n response.content_type = {\n \"svg\": \"image/svg+xml\",\n \"png\": falcon.MEDIA_PNG\n }[image_type]\n response.body = image\n else:\n raise falcon.HTTPNotFound(\n title=\"invalid command\",\n description=\"get command {} does not exist\".format(cmd))\n\n def on_post(self, req, response, cmd=\"[None]\"):\n cmd = cmd.lower()\n if cmd == \"gen_image\":\n return self.on_get(req, response, cmd=cmd)\n if cmd == \"generate_link\":\n scale = req.get_param(\"scale\", default=1.0)\n scale = parse_image_scale(scale)\n\n image_type = req.get_param(\"type\", default=\"png\").lower()\n verify_image_type(image_type)\n\n auto_redirect = req.get_param_as_bool(\"redirect\")\n\n code = req.get_param(\"code\")\n if code is None:\n code = req.bounded_stream.read(32768)\n try:\n generate_image(\"svg\", 1.0, code)\n except ImageGenerationFailed:\n raise falcon.HTTPInternalServerError(\n title=\"Invalid WaveDrom code\",\n description=\"The WaveDrom code you submitted cannot be parsed by the WaveDrom generator\")\n\n compressed_code = compress_text((code.decode() if isinstance(code, bytes) else code).strip())\n\n url = \"{hosturl}/rest/gen_image?{options}\".format(\n hosturl=derive_host_url(req),\n options=urlencode({\n \"type\": image_type,\n \"scale\": scale,\n \"c\": compressed_code}))\n\n if auto_redirect:\n raise falcon.HTTPTemporaryRedirect(url)\n\n response.content_type = falcon.MEDIA_TEXT\n response.body = url\n else:\n raise falcon.HTTPNotFound(\n title=\"invalid command\",\n description=\"post command {} does not exist\".format(cmd))\n\nclass StaticRedirect:\n def __init__(self, path):\n self.path = path\n\n def on_get(self, req, response):\n raise falcon.HTTPTemporaryRedirect(self.path)\n\n def on_post(self, req, response):\n raise falcon.HTTPTemporaryRedirect(self.path)\n\nif __name__ in (\"__main__\", \"main\"):\n html_content = HTMLContent()\n rest_api = RestAPI()\n \n fapi = falcon.API()\n\n fapi.req_options.auto_parse_form_urlencoded = True\n\n fapi.add_route(\"/\", StaticRedirect(\"/html/\"))\n fapi.add_route(\"/html/\", StaticRedirect(\"index.html\"))\n\n fapi.add_route(\"/html/{filename}\", html_content)\n\n fapi.add_route(\"/rest/\", rest_api)\n fapi.add_route(\"/rest/{cmd}\", rest_api)\n\n","sub_path":"python/render_server/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
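The compute_compression_prepostfixes trick above works because LZMA output shares constant header and footer bytes across inputs, so those bytes can be stripped before the payload is base64-encoded into a URL and re-attached on the way back. A standalone sketch of the idea (variable names are illustrative, not from the server code):

import lzma

def common_prefix_len(a, b):
    # Length of the identical leading byte run of two byte strings.
    n = 0
    for x, y in zip(a, b):
        if x != y:
            break
        n += 1
    return n

s1 = lzma.compress(b'{"a": 1}')
s2 = lzma.compress(b'{"b": [2, 3]}' * 4)
shared = common_prefix_len(s1, s2)
stripped = s1[shared:]
# The prefix taken from a *different* compression is byte-identical,
# so it can be re-attached before decompressing.
assert lzma.decompress(s2[:shared] + stripped) == b'{"a": 1}'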
+{"seq_id":"533762106","text":"import logging\nimport logging.config\nimport os\nimport re\nimport shutil\nimport sys\nimport wave\n\nfrom process_script.check_noise_annotation import check_noise_annotation_old_norm\nfrom process_script.metada_update import AudioMetadata, write_meta, read_supplement\n\nlogger = logging.getLogger(\"yueyu\")\nlog_path = os.path.dirname(os.getcwd()) + '/Logs/'\nlog_name = log_path + 'log.log'\nfh = logging.FileHandler(log_name, mode='a', encoding=\"utf8\")\nlogger.addHandler(fh)\n\n\nclass Check(object):\n def __init__(self, src, dst, workbook):\n self.src = src\n self.dst = dst\n self.workbook = workbook\n\n def move(self, src_path):\n dst_path = os.path.join(self.dst, os.path.relpath(src_path, self.src))\n dirname = os.path.dirname(dst_path)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n shutil.move(src_path, dst_path)\n shutil.move(src_path.replace('.wav', '.txt'), dst_path.replace('.wav', '.txt'))\n shutil.move(src_path.replace('.wav', '.metadata'), dst_path.replace('.wav', '.metadata'))\n\n def spain(self, userinfo):\n userinfos = {}\n for group, infos in userinfo.items():\n if infos['city'] in userinfos:\n userinfos[infos['city']].update({group: infos})\n else:\n userinfos.update({infos['city']: {group: infos}})\n return userinfos\n\n def checkers(self, option):\n logger.error(\"Start\")\n\n # 用户信息,\n userinfo = read_supplement(self.workbook)\n\n errors = []\n for path, dirs, files in os.walk(self.src):\n for file in files:\n if os.path.splitext(file)[-1] == '.wav':\n audio_f = os.path.join(path, file)\n txt_f = audio_f.replace('.wav', '.txt')\n meta_f = audio_f.replace('.wav', '.metadata')\n\n wav_checker = WAV(audio_f)\n if os.path.exists(txt_f):\n txt_checker = TXT(txt_f)\n else:\n logger.error(\"{}\\t Don't have txt file \".format(audio_f))\n continue\n if os.path.exists(meta_f):\n meta_checker = Metadata(meta_f)\n else:\n logger.error(\"{}\\t Don't have meta file \".format(audio_f))\n continue\n\n if option == 'update':\n # lines = txt_checker.update()\n # txt_checker.check(lines)\n meta_checker.update(userinfo, self.src, self.dst, errors)\n meta_checker.check()\n # wav_checker.check()\n\n elif option == 'check':\n txt_checker.check()\n meta_checker.check()\n wav_checker.check()\n\n # if not txt_checker.flag or not meta_checker.flag or not wav_checker.flag:\n # self.move(audio_f)\n\n logger.error(\"End\")\n\n\nclass File(object):\n GROUP_REGEX = re.compile('(?P[G|Z]\\d+)[A-F\\d_]*(?PS\\d+)\\.')\n\n def __init__(self, filepath):\n self.filepath = filepath\n self.flag = True\n r = self.GROUP_REGEX.search(os.path.basename(filepath))\n if r:\n self.group = r.group('group').strip()\n else:\n self.group = os.path.basename(filepath)\n\n def read_file(self):\n \"\"\"\n 读取文件,捕获编码,如果不是utf8 抛出异常\n :return:\n \"\"\"\n try:\n with open(self.filepath, 'r', encoding='utf-8') as f:\n return f.readlines()\n except UnicodeDecodeError as e:\n logger.error(\"{}\\t not encode utf-8\".format(self.filepath))\n self.flag = False\n\n def is_has_ch(self, lines):\n # 是否含有中文\n z = re.compile(u'[\\u4e00-\\u9fa5]')\n for line in lines:\n if z.search(line):\n self.flag = False\n logger.error(\"{}\\t has chinese\".format(self.filepath))\n return\n\n def write_file(self, lines):\n with open(self.filepath, 'w', encoding='utf8') as f:\n for line in lines:\n f.write(line)\n\n\nclass TXT(File):\n def ch_to_en(self, lines):\n \"\"\"中文标点转英文\"\"\"\n table = {ord(f): ord(t) for f, t in zip('【】;‘’:“”《》,。、?', '[];\\'\\':\"\"<>,. 
?')}\n return [text.translate(table) for text in lines]\n\n def remove(self, lines):\n # Strip slashes and tildes\n new_lines = []\n for line in lines:\n new_lines.append(re.sub('/|~', '', line))\n return new_lines\n\n def is_double_str(self, lines):\n \"\"\"\n Check whether full-width characters are present\n :param lines:\n :return:\n \"\"\"\n double_s = []\n double_str = lambda x: ord(x) == 0x3000 or 0xFF01 <= ord(x) <= 0xFF5E\n for line in lines:\n for x in line:\n if double_str(x):\n double_s.append(x)\n if double_s:\n self.flag = False\n logger.error(\"{}\\t Has double str(quan jiao) is {}\".format(self.filepath, double_s))\n\n def dbc2sbc(self, lines):\n \"\"\"Convert full-width characters to half-width\"\"\"\n new_lines = []\n for line in lines:\n rstring = ''\n for uchar in line:\n inside_code = ord(uchar)\n if inside_code == 0x3000:\n inside_code = 0x0020\n else:\n inside_code -= 0xfee0\n if not (0x0021 <= inside_code and inside_code <= 0x7e):\n rstring += uchar\n continue\n rstring += chr(inside_code)\n new_lines.append(rstring)\n\n return new_lines\n\n def is_one_line(self, lines: list):\n \"\"\"\n Check that the content is exactly one line\n :param lines: text lines\n :return:\n \"\"\"\n if len(lines) == 0:\n self.flag = False\n logger.error(\"{}\\t the file is empty\".format(self.filepath))\n elif len(lines) > 1:\n self.flag = False\n logger.error(\"{}\\t the file is Multi-line\".format(self.filepath))\n else:\n content = lines[0].strip()\n if not content:\n self.flag = False\n logger.error(\"{}\\t the file is line break\".format(self.filepath))\n\n def is_have_digit(self, lines):\n \"\"\"\n Check whether digits are present\n :param lines:\n :return:\n \"\"\"\n P_DIGIT = re.compile(u'\\d+')\n digit = P_DIGIT.findall(lines[0])\n if digit:\n self.flag = False\n logger.error(\"{}\\t contains numbers is {}\".format(self.filepath, digit))\n\n def is_have_symbol(self, lines):\n \"\"\"\n Check whether special characters are present\n :param lines: line contents\n :return:\n \"\"\"\n P_SYMBOL_FULL = re.compile('[#¥{}【】;‘’:“”《》,。、?·&*$^]')\n special_symbol = P_SYMBOL_FULL.findall(lines[0])\n if special_symbol:\n self.flag = False\n logger.error(\"{}\\t contains special symbol is {}\".format(self.filepath, special_symbol))\n\n def update(self):\n # Apply the normalizers\n lines = self.read_file()\n # for updater in [self.ch_to_en, self.dbc2sbc, self.remove]:\n for updater in [self.ch_to_en, self.dbc2sbc]: #\n lines = updater(lines)\n self.write_file(lines)\n return lines\n\n def check(self, lines=None):\n # Run the checks\n if not lines:\n lines = self.read_file()\n self.is_one_line(lines)\n\n # Only if there is no empty-line or multi-line problem, go on to the special character checks\n if self.flag:\n self.is_have_digit(lines)\n self.is_have_symbol(lines)\n self.is_double_str(lines)\n check_noise_annotation_old_norm(self.filepath, lines[0])\n\n\nclass Metadata(File):\n meta_map = {\n 'SES': 'dirname',\n 'DIR': 'dirpath',\n 'FIP': 'dirpath',\n 'SAM': 'frame',\n 'SNB': 'sample_width',\n 'SBF': 'lohi',\n 'SSB': 'per_bits',\n 'QNT': 'type',\n 'NCH': 'channels',\n 'SCD': 'dirname',\n 'LBD': 'mark_file',\n 'LBR': 'length',\n 'ORS': 'text'\n }\n\n def read_meta(self):\n infos = {}\n with open(self.filepath, 'r', encoding='utf-8') as f:\n for line in f:\n info = line.strip().split('\\t')\n if len(info) == 2:\n k, v = info\n infos.update({k: v})\n elif len(info) == 1:\n k, v = info[0], ''\n infos.update({k: v})\n else:\n k, v = info[0], \"\\t\".join(info[1:])\n infos.update({k: v})\n\n return infos\n\n def update(self, userinfo, src, dst, errors):\n if userinfo is not None: # userinfo is a DataFrame, so it cannot be truth-tested directly\n infos = self.read_meta()\n group = infos.get(\"SES\")\n if not group:\n self.flag = False\n logger.error(\"{}\\t SES key is null\".format(self.filepath))\n else:\n if group in userinfo.index:\n
update_info = userinfo.loc[group, :].to_dict()\n # Rewrite the metadata file with the updated values\n tem = 1\n new_content = \"\"\n with open(self.filepath, 'r+', encoding='utf8') as f:\n for line in f:\n type_name = line.strip().split(\"\\t\")[0]\n if type_name in update_info:\n line_content = type_name + \"\\t\" + update_info[type_name] + \"\\n\"\n new_content += line_content\n else:\n new_content += line\n f.seek(0)\n f.truncate()\n f.write(new_content)\n\n\n def check(self):\n z = re.compile(u'[\\u4e00-\\u9fa5]')\n meta_no_null = ['SEX', 'AGE', 'ACC', 'ACT', \"BIR\"]\n lines = self.read_file()\n meta = {}\n\n for line in lines:\n line = line.strip()\n if z.search(line) and 'ORS' not in line:\n self.flag = False\n logger.error(\"{}\\t content contains chinese\".format(self.filepath))\n\n if len(line.split('\\t')) > 3:\n self.flag = False\n logger.error(\"{}\\t content redundant TAB keys\".format(self.filepath))\n elif len(line.split('\\t')) == 3:\n if \"LBR\" in line or \"LBO\" in line:\n pass\n else:\n self.flag = False\n logger.error(\"{}\\t content redundant TAB keys, {}\".format(self.filepath, line.split('\\t')[0]))\n\n elif len(line.split('\\t')) == 1:\n if line.split('\\t')[0] in meta_no_null:\n self.flag = False\n logger.error(\"{}\\t {}\\t key is null\".format(self.filepath, line.split('\\t')[0]))\n else:\n key = line.split('\\t')[0]\n valve = line.split('\\t')[1]\n meta[key] = valve\n # print(meta)\n for m in meta_no_null:\n # print(meta[m])\n if not m in meta.keys():\n self.flag = False\n logger.error(\"{}\\t {}\\t key is null\".format(self.filepath, m))\n else:\n if not meta['SEX'] in ['Male', 'Female']:\n self.flag = False\n logger.error(\"{}\\t value format is err\".format(self.filepath))\n\n\nclass WAV(File):\n min_length = 15\n audio_channel = 1\n sample_width = 2\n framerate = [16000, 22050, 44100]\n\n def check(self):\n fsize = os.path.getsize(self.filepath)\n txt_file = self.filepath.replace('.wav', '.txt')\n meta_file = self.filepath.replace('.wav', '.metadata')\n\n if not os.path.exists(txt_file) or not os.path.exists(meta_file):\n self.flag = False\n logger.error(\"{}\\t missing files\".format(self.filepath))\n\n if fsize / float(1024) < self.min_length:\n self.flag = False\n logger.error(\"{}\\t size error\".format(self.filepath))\n else:\n with wave.open(self.filepath, 'rb') as f:\n if not f.getnchannels() == self.audio_channel:\n self.flag = False\n logger.error(\"{}\\t channel error\".format(self.filepath))\n\n if not f.getframerate() in self.framerate:\n self.flag = False\n logger.error(\"{}\\t sample error\".format(self.filepath))\n if not f.getsampwidth() == self.sample_width:\n self.flag = False\n logger.error(\"{}\\t sample width error\".format(self.filepath))\n\n\nif __name__ == '__main__':\n # root, audio_size, audio_sample, audio_channel, meta_key, sy_list = read_ini('config.txt')\n\n try:\n # Command-line usage\n src_path = sys.argv[1]\n dst_path = sys.argv[2]\n workbook = sys.argv[3]\n option = sys.argv[4]\n except Exception as e:\n # IDE usage (hard-coded paths)\n # src_path = r'\\\\IT-20190729TRCT\\数据备份_liuxd\\apy161101028_g_351人意大利语手机采集语音数据\\完整数据包_processed\\data'\n # src_path = r'\\\\IT-20190729TRCT\\数据备份_liuxd\\apy161101028_r_215小时意大利语手机采集语音数据_朗读\\完整数据包_加密后数据\\data'\n # src_path = r'\\\\IT-20190729TRCT\\数据备份_liuxd\\apy161101033_g_405人法语手机采集语音数据\\完整数据包_processed\\data'\n # src_path = r'\\\\IT-20190729TRCT\\数据备份_liuxd\\apy161101033_r_232小时法语手机采集语音数据\\完整数据包_processed\\data'\n # src_path = r'\\\\IT-20190729TRCT\\数据备份_liuxd\\apy161101034_g_343人西班牙语手机采集语音数据\\完整数据包_processed\\data\\category'\n # src_path =
r'\\\\IT-20190729TRCT\\数据备份_liuxd\\apy161101034_r_227小时西班牙语手机采集语音数据\\完整数据包_processed\\data'\n # src_path = r'\\\\IT-20190729TRCT\\数据备份_liuxd\\apy170801048_338小时西班牙语手机采集语音数据\\完整数据包_processed\\data'\n src_path = r'\\\\IT-20190729TRCT\\数据备份_liuxd\\apy170901049_347小时意大利语手机采集语音数据\\完整数据包_加密后数据\\data'\n\n # src_path = r'\\\\10.10.30.14\\刘晓东\\数据分类\\语音数据\\apy161101031_r_215小时美式英语手机采集语音数据\\完整数据包_processed\\data'\n # src_path = r'\\\\10.10.30.14\\刘晓东\\数据分类\\语音数据\\apy161101031_g_344人美式英语手机采集语音数据\\完整数据包_processed\\data'\n # src_path = r'\\\\10.10.30.14\\刘晓东\\数据分类\\语音数据\\apy161101032_g_357人英式英语手机采集语音数据\\完整数据包_processed\\data'\n # src_path = r'\\\\10.10.30.14\\刘晓东\\数据分类\\语音数据\\apy161101032_r_199小时英式英语手机采集语音数据\\完整数据包_processed\\data'\n # src_path = r'\\\\10.10.30.14\\刘晓东\\数据分类\\语音数据\\APY161101011_201小时北美英语手机采集语音数据\\完整数据包_processed\\data\\androidcategory'\n # src_path = r'\\\\10.10.30.14\\刘晓东\\数据分类\\语音数据\\APY161101011_201小时北美英语手机采集语音数据\\完整数据包_processed\\data\\desktopcategory'\n # src_path = r'\\\\10.10.30.14\\刘晓东\\数据分类\\语音数据\\APY161101011_201小时北美英语手机采集语音数据\\完整数据包_processed\\data\\iphonecategory'\n\n # src_path = r'\\\\10.10.30.14\\杨明明\\修改测试demo\\data'\n dst_path = ''\n workbook = r''\n # option = 'update'\n option = 'check'\n\n print(src_path)\n c = Check(src_path, dst_path, workbook)\n c.checkers(option)\n","sub_path":"process_script/audio_check.py","file_name":"audio_check.py","file_ext":"py","file_size_in_byte":15971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"122037939","text":"from math import gcd\n\nfractions = []\nfor d in range(999900, 1000001):\n n = int(3 * d / 7)\n fractions.append([n / d, n, d])\nfor res in sorted(fractions)[::-1]:\n if gcd(res[1], res[2]) == 1:\n print(res[1])\n break\n","sub_path":"problems/71.py","file_name":"71.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"72448481","text":"import boto3\nimport json\nimport decimal\n\ndynamodb = boto3.resource('dynamodb')\n\nclass DecimalEncoder(json.JSONEncoder):\n def default(self, o):\n if isinstance(o, decimal.Decimal):\n if o % 1 > 0:\n return float(o)\n else:\n return int(o)\n return super(DecimalEncoder, self).default(o)\n\ndef lambda_handler(event, context):\n usertable = dynamodb.Table(\"user\")\n response = usertable.scan()\n\n return {\n 'statusCode' : 200,\n 'body' : json.dumps(response['Items'], cls=DecimalEncoder),\n 'headers' : {\n 'Content-Type' : 'application/json'\n }\n }","sub_path":"dynamoDBread.py","file_name":"dynamoDBread.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"44062206","text":"m = 0\nmn = 0\nfor c in range(1, 6):\n p = float(input(f\"Peso {c}ª Pessoa: \"))\n if c == 1:\n m = p\n mn = p\n else:\n if p >= m:\n m = p\n if p <= mn:\n mn = p\nprint(f\"Maior = {m} Kg\\nMenor = {mn} Kg\")\n","sub_path":"ExCursoEmVídeo(Python)/ex55.py","file_name":"ex55.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"84366026","text":"import torch\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndef diagonala( tensor2d):\n x = tensor2d.size()\n if x[0]!=x[1]:\n return -1\n for x in range (x[0]):\n print(\"[\", x , \",\", x, \"]: \", tensor2d[x, x], sep='')\n return 0\n\na = [ [1,2,3], [1,1,2], [1,2,4]] #basic 2d tensor initialization\nA = torch.tensor(a)\nc = A.size()\nprint(A, A.ndimension(), A.shape)\n\ndiagonala(A) #index testing function\n\nB = 2*A\nC = torch.mm(A, B)\nC[1:3, 1] = 0\nprint(C)\n\na = np.lin\n\n\n","sub_path":"1 Tensor and Gradients/2d_tensors.py","file_name":"2d_tensors.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"250876217","text":"\n\"\"\"\n\n어간 추출, 원형 복원\n • PorterStemmer 어간 추출 알고리즘\n • LancasterStemmer 어간 추출 알고리즘\n • 원형 복원(lemmatizing)\n\n\"\"\"\n\n\n\nwords = ['lives', 'dies', 'flies', 'died']\n\n\n# ------------------------------------------------------------\n# PorterStemmer 어간 추출 알고리즘\n\nfrom nltk.stem import PorterStemmer\nst = PorterStemmer()\nresult = [st.stem(w) for w in words]\nprint(result)\n# ['live', 'die', 'fli', 'die']\n\n\n# ------------------------------------------------------------\n# LancasterStemmer 어간 추출 알고리즘\nfrom nltk.stem import LancasterStemmer\nst = LancasterStemmer()\nresult = [st.stem(w) for w in words]\nprint(result)\n# ['liv', 'die', 'fli', 'died']\n\n\n# 결과를 보면\n# 두 어간 추출 알고리즘으로 인한 결과가 다른 것을 알 수 있습니다.\n\n\n\n# ------------------------------------------------------------\n# 원형 복원(lemmatizing)\n\nfrom nltk.stem import WordNetLemmatizer\nlm = WordNetLemmatizer()\nresult = [lm.lemmatize(w) for w in words]\nprint(result)\n# ['life', 'dy', 'fly', 'died']\n\n\n# \"died\"의 pos=\"v\" 옵션으로 접미사나 어미를 제거한 형태로 원형복원\nprint(lm.lemmatize(\"died\", pos=\"v\"))\n# die\n","sub_path":"PythonPackages/src/NLTK/ex03.py","file_name":"ex03.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"620491101","text":"from django.conf.urls import url\nfrom . import views\nurlpatterns = [url(r'^index/$', views.index, name='index'),\n url(r'^(?P[0-9]+)/$', views.question, name='question'),\n url(r'^first/$', views.first, name='first'),\n url(r'^second/$', views.update, name='second'),\n url(r'^image/$', views.image, name='image'),\n url(r'^check/$', views.check, name='check'),\n url(r'^background/$', views.background, name='background'),\n\n ]","sub_path":"styles/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"511958441","text":"from pprint import pprint\n\ntrainings = { \"course1\":{\"title\":\"Python Training Course for Beginners\", \n \"location\":\"Frankfurt\", \n \"trainer\":\"Steve G. Snake\"},\n \"course2\":{\"title\":\"Intermediate Python Training\",\n \"location\":\"Berlin\",\n \"trainer\":\"Ella M. Charming\"},\n \"course3\":{\"title\":\"Python Text Processing Course\",\n \"location\":\"München\",\n \"trainer\":\"Monica A. Snowdon\"}\n }\n\ntrainings2 = trainings.copy()\n\ntrainings[\"course2\"] = {\"title\":\"Perl Seminar for Beginners\",\n \"location\":\"Ulm\",\n \"trainer\":\"James D. Morgan\"}\npprint(trainings2)","sub_path":"shallowcopy_pprint.py","file_name":"shallowcopy_pprint.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"595670283","text":"from cloud.aws import *\nfrom cloud.response import Response\nimport cloud.shortuuid as shortuuid\n\n\n# Define the input output format of the function.\n# This information is used when creating the *SDK*.\ninfo = {\n 'input_format': {\n 'guest_id': 'str?',\n },\n 'output_format': {\n 'guest_id': 'str',\n 'session_id': 'str',\n }\n}\n\n\ndef do(data, boto3):\n body = {}\n recipe = data['recipe']\n params = data['params']\n app_id = data['app_id']\n\n guest_id = params.get('guest_id', None)\n table_name = 'auth-{}'.format(app_id)\n\n login_conf = recipe['login_method']['guest_login']\n default_group_name = login_conf['default_group_name']\n enabled = login_conf['enabled']\n if enabled == 'true':\n enabled = True\n elif enabled == 'false':\n enabled = False\n\n if not enabled:\n body['error'] = '6'\n body['message'] = '게스트 로그인이 비활성화 상태입니다.'\n return Response(body)\n\n dynamo = DynamoDB(boto3)\n\n if guest_id:\n result = dynamo.get_item(table_name, guest_id)\n if result.get('Item', None):\n session_item = {\n 'userId': guest_id\n }\n dynamo.put_item(table_name, 'session', session_item)\n body['message'] = '게스트 로그인 성공'\n return Response(body)\n else:\n body['error'] = '7'\n body['message'] = '해당 게스트가 없습니다'\n return Response(body)\n else:\n guest_id = shortuuid.uuid()\n email = '{}@guest.com'.format(shortuuid.uuid())\n item = {\n 'email': email,\n 'group': default_group_name,\n 'extra': {},\n 'loginMethod': 'guest_login',\n }\n dynamo.put_item(table_name, 'user', item, item_id=guest_id)\n session_id = shortuuid.uuid()\n session_item = {\n 'userId': guest_id\n }\n dynamo.put_item(table_name, 'session', session_item, item_id=session_id)\n body['session_id'] = session_id\n body['guest_id'] = guest_id\n body['message'] = '게스트 로그인 성공'\n return Response(body)\n\n","sub_path":"aws_interface/cloud/auth/guest.py","file_name":"guest.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"256482171","text":"''' This example shows how WakeUpBehaviour works. The agent\nprints a message in the screen.\n'''\n\n# Needed imports\nfrom pade.behaviours.types import WakeUpBehaviour\nfrom pade.core.agent import Agent\nfrom pade.misc.utility import display_message, start_loop\n\n# Defining the LaterAgent (inherits from Agent class)\nclass LateAgent(Agent):\n\n\t# This method will execute at agent startup\n\tdef setup(self):\n\t\t# The behaviour is created with two args, where\n\t\t# the second is a time (in seconds) to behaviour\n\t\t# waits.\n\t\tbehaviour = AmILate(self, 5)\n\t\t# This adds a behaviour in the agent\n\t\tself.add_behaviour(behaviour)\n\n\n# Defining the AmILate behaviour\nclass AmILate(WakeUpBehaviour):\n\n\t# This method executes the main actions of behaviour\n\tdef on_wake(self):\n\t\tdisplay_message(self.agent, 'Am I late?')\n\n\n# This starts the agents with PADE\nif __name__ == '__main__':\n\t# Defining a LateAgent object\n\tlateagent = LateAgent('late')\n\t# Creating a list with agents that will be executed\n\tagents_list = [lateagent]\n\t# Passing the agent list to main loop of PADE\n\tstart_loop(agents_list)","sub_path":"examples/behaviours-and-messages/LateAgent.py","file_name":"LateAgent.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"570274507","text":"\n\nfrom xai.brain.wordbase.nouns._frailty import _FRAILTY\n\n#calss header\nclass _FRAILTIES(_FRAILTY, ):\n\tdef __init__(self,): \n\t\t_FRAILTY.__init__(self)\n\t\tself.name = \"FRAILTIES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"frailty\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_frailties.py","file_name":"_frailties.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"289169908","text":"#!/usr/bin/env python3\n\nimport skilstak.colors as c\nimport random\n\nanswers = [\n \"yes\",\n \"no\",\n \"maybe\"\n]\n\ndef main():\n print(c.clear + c.multi(\"Welcome to the magical eightball\"))\n while True:\n answer = random.choice(answers)\n input(c.rc() + \">>> \" + c.reset)\n print(c.rc() + answer)\n\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt:\n print(c.clear)\n exit()\n\n\n","sub_path":"py/eightball/eightball.py","file_name":"eightball.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"630778203","text":"from numpy import *\r\nimport numpy as np \r\nimport matplotlib.pyplot as plt \r\n \r\n\r\ndef euclDistance(vector1, vector2): \r\n return sqrt(sum(power(vector2 - vector1, 2))) \r\n \r\ndef Centroids(dataSet, k): \r\n numpixel, attr = dataSet.shape \r\n centroids = zeros((k, attr)) \r\n for j in range(k): \r\n i = random.randint(0, numpixel)\r\n centroids[j , :] = dataSet[i, :] \r\n return centroids \r\n \r\ndef kmeans(dataSet, k): \r\n numpixel = dataSet.shape[0] \r\n Assign = mat(zeros((numpixel, 2))) \r\n Changed = True \r\n centroids =Centroids(dataSet, k) \r\n \r\n while Changed: \r\n Changed = False \r\n for i in range(numpixel): \r\n minDistance = 1000000.0 \r\n minIndex = 0 \r\n for j in range(k): \r\n distance = euclDistance( dataSet[i, :], centroids[j, :]) \r\n if minDistance > distance: \r\n minDistance = distance \r\n minIndex = j \r\n if Assign[i, 0] != minIndex: \r\n Changed = True \r\n Assign[i, :] = minIndex,minDistance**2\r\n \r\n for j in range(k):\r\n centroidj=Assign[:,0].A==j\r\n dataj=nonzero(centroidj)\r\n clusterj=dataSet[dataj[0]] \r\n centroids[j, :] = mean(clusterj, axis = 0)\r\n \r\n return centroids, Assign\r\n\r\n \r\ndef result(file_name, data):\r\n \r\n m, n = np.shape(data)\r\n f = open(file_name, \"w\")\r\n for i in range(m):\r\n pixel = []\r\n for j in range(n):\r\n pixel.append(str(data[i, j]))\r\n f.write(\"\\t\".join(pixel) + \"\\n\")\r\n f.close()\r\n","sub_path":"Kmeans.py","file_name":"Kmeans.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"292457335","text":"from pypokerengine.players import BasePokerPlayer\nfrom pypokerengine.utils.card_utils import gen_cards, estimate_hole_card_win_rate\nimport random as rand\nimport pprint\n\nclass Honest22Player(BasePokerPlayer):\n\n\t# Number of simulation\n\tNB_SIMULATION = 250\n\n\t# Minumum rounds needed to collect base data for 2nd heuristic\n\tMIN_NUM_DATA_COLLECTED = 100\n\n\t# Psuedo infinite\n\tPOS_INF = 10000.0\n\n\t# Street name constant\n\tSTREET_ZERO_CARD = \"preflop\"\n\tSTREET_THREE_CARD = \"flop\"\n\tSTREET_FOUR_CARD = \"turn\"\n\tSTREET_FIVE_CARD = \"river\"\n\n\t# List of streets\n\tLIST_OF_STREET = [STREET_ZERO_CARD, STREET_THREE_CARD, STREET_FOUR_CARD, STREET_FIVE_CARD]\n\n\t# Action name constant\n\tFOLD = \"fold\"\n\tCALL = \"call\"\n\tRAISE = \"raise\"\n\n\t# Action index constant\n\tFOLD_INDEX = 0\n\tCALL_INDEX = 1\n\tRAISE_INDEX = 2\n\n\t# Game constant (declared here as it is difficult to check during game)\n\t# ENGINE BUG: The engine uses 20 for preflop, flop and 40 for turn, river \n\t# instead of 10 like the project specification stated\n\tRAISE_AMOUNT_PREFLOP = 20\n\tRAISE_AMOUNT_FLOP = 20\n\tRAISE_AMOUNT_TURN = 40\n\tRAISE_AMOUNT_RIVER = 40\n\n\t# Limited poker constant\n\tNUM_STREET_PER_ROUND = 4\n\tNUM_RAISE_PER_STREET = 4\n\tNUM_RAISE_PER_ROUND_PER_PLAYER = 4\n\n\t# Player turn constant\n\tPLAYER_TURN = True\n\tOPPONENT_TURN = False\n\n\t# Street index\n\tSTREET_INDEX_DICT = {\n\t\tSTREET_ZERO_CARD: 0,\n\t\tSTREET_THREE_CARD: 1,\n\t\tSTREET_FOUR_CARD: 2,\n\t\tSTREET_FIVE_CARD: 3\n\t}\n\n\t# Raise amount dictionary\n\tRAISE_AMOUNT_DICT = {\n\t\tSTREET_ZERO_CARD: RAISE_AMOUNT_PREFLOP,\n\t\tSTREET_THREE_CARD: RAISE_AMOUNT_FLOP,\n\t\tSTREET_FOUR_CARD: RAISE_AMOUNT_TURN,\n\t\tSTREET_FIVE_CARD: RAISE_AMOUNT_RIVER,\n\t\t0: RAISE_AMOUNT_PREFLOP,\n\t\t1: RAISE_AMOUNT_FLOP,\n\t\t2: RAISE_AMOUNT_TURN,\n\t\t3: RAISE_AMOUNT_RIVER\n\t}\n\n\t# Convert card letter to number\n\tCARD_NUM_DICT = {\n\t\t\"2\": 2,\n\t\t\"3\": 3,\n\t\t\"4\": 4,\n\t\t\"5\": 5,\n\t\t\"6\": 6,\n\t\t\"7\": 7,\n\t\t\"8\": 8,\n\t\t\"9\": 9,\n\t\t\"T\": 10,\n\t\t\"J\": 11,\n\t\t\"Q\": 12,\n\t\t\"K\": 13,\n\t\t\"A\": 14\n\t}\n\n\n\t# Hold history of Opponent's action and outcome\n\t# To search: self.RAISE_HISTORY[did_player_win][is_player_big_blind][street][num_raises]\n\tRAISE_HISTORY = {\n\t\t# Roundts won\n\t\tTrue: {\n\t\t\t# Player is big blind\n\t\t\tTrue: {\n\t\t\t\t#Streets history (index: number_of_raises, value: number_of_rounds)\n\t\t\t\tSTREET_ZERO_CARD: [0, 0, 0, 0, 0],\n\t\t\t\tSTREET_THREE_CARD: [0, 0, 0, 0, 0],\n\t\t\t\tSTREET_FOUR_CARD: [0, 0, 0, 0, 0],\n\t\t\t\tSTREET_FIVE_CARD: [0, 0, 0, 0, 0]\n\t\t\t},\n\t\t\t# Opponent is big blind\n\t\t\tFalse: {\n\t\t\t\t#Streets history (index: number_of_raises, value: number_of_rounds)\n\t\t\t\tSTREET_ZERO_CARD: [0, 0, 0, 0, 0],\n\t\t\t\tSTREET_THREE_CARD: [0, 0, 0, 0, 0],\n\t\t\t\tSTREET_FOUR_CARD: [0, 0, 0, 0, 0],\n\t\t\t\tSTREET_FIVE_CARD: [0, 0, 0, 0, 0]\n\t\t\t}\n\t\t},\n\t\t# Rounds lost\n\t\tFalse: {\n\t\t\t# Player is big blind\n\t\t\tTrue: {\n\t\t\t\t# Streets history (index: number_of_raises, value: number_of_rounds)\n\t\t\t\tSTREET_ZERO_CARD: [0, 0, 0, 0, 0],\n\t\t\t\tSTREET_THREE_CARD: [0, 0, 0, 0, 0],\n\t\t\t\tSTREET_FOUR_CARD: [0, 0, 0, 0, 0],\n\t\t\t\tSTREET_FIVE_CARD: [0, 0, 0, 0, 0]\n\t\t\t},\n\t\t\t# Opponent is big blind\n\t\t\tFalse: {\n\t\t\t\t# Streets history (index: number_of_raises, value: number_of_rounds)\n\t\t\t\tSTREET_ZERO_CARD: [0, 0, 0, 0, 0],\n\t\t\t\tSTREET_THREE_CARD: [0, 0, 0, 0, 
0],\n\t\t\t\tSTREET_FOUR_CARD: [0, 0, 0, 0, 0],\n\t\t\t\tSTREET_FIVE_CARD: [0, 0, 0, 0, 0]\n\t\t\t}\n\t\t}\n\t}\n\n\t# Hold win rates history given the player who is big blind this round\n\t# To search: self.WIN_RATES_FROM_RAISE_HISTORY[street][num_raises]\n\tWIN_RATES_FROM_RAISE_HISTORY = {\n\t\t# Streets history (index: number_of_raises, value: win_rates)\n\t\tSTREET_ZERO_CARD: [0.0, 0.0, 0.0, 0.0, 0.0],\n\t\tSTREET_THREE_CARD: [0.0, 0.0, 0.0, 0.0, 0.0],\n\t\tSTREET_FOUR_CARD: [0.0, 0.0, 0.0, 0.0, 0.0],\n\t\tSTREET_FIVE_CARD: [0.0, 0.0, 0.0, 0.0, 0.0]\n\t}\n\n\tBET_AVG_KEY = \"avg\"\n\tBET_NUM_KEY = \"num\"\n\tBET_TOT_KEY = \"total\"\n\n\tBET_AMOUNT_HISTORY = {\n\t\t# Streets history (current_amount: {BET_AVG_KEY: avg, BET_NUM_KEY: num, BET_TOT_KEY: total})\n\t\tSTREET_ZERO_CARD: {},\n\t\tSTREET_THREE_CARD: {},\n\t\tSTREET_FOUR_CARD: {}\n\t}\n\n\t# Hold card probability look up table\n\t# To search: self.PREFLOP_EXPECTED_VALUE[is_same_shape][lower_card_number][higher_card_number]\n\t\"\"\" TO BE FILLED HERE \"\"\"\n\tPREFLOP_EXPECTED_VALUE = {\n\t\t# Same shape (suited)\n\t\tTrue: {\n\t\t\t# Lower card number\n\t\t\t\"2\": {\n\t\t\t\t# Higher card number\n\t\t\t\t\"3\": -0.2803, \"4\": -0.2634 , \"5\": -0.2430 , \"6\": -0.2466 , \"7\": -0.2369 ,\n\t\t\t\t\"8\": -0.1946, \"9\": -0.1517 , \"T\": -0.1032 , \"J\": -0.0524 , \"Q\": 0.0034 ,\n\t\t\t\t\"K\": 0.0642 , \"A\": 0.1476\n\t\t\t},\n\t\t\t\"3\": {\n\t\t\t\t\"4\": -0.2272, \"5\": -0.2061, \"6\": -0.2093, \"7\": -0.1993, \"8\": -0.1825,\n\t\t\t\t\"9\": -0.1347, \"T\": -0.0861, \"J\": -0.0354, \"Q\": 0.0204, \"K\": 0.0811,\n\t\t\t\t\"A\": 0.1644\n\t\t\t},\n\t\t\t\"4\": {\n\t\t\t\t\"5\": -0.1709, \"6\": -0.1733, \"7\": -0.1630, \"8\": -0.1460, \"9\": -0.1228,\n\t\t\t\t\"T\": -0.0694, \"J\": -0.0186, \"Q\": 0.0371, \"K\": 0.0977, \"A\": 0.1807\n\t\t\t},\n\t\t\t\"5\": {\n\t\t\t\t\"6\": -0.1373, \"7\": -0.1265, \"8\": -0.1091, \"9\": -0.0856, \"T\": -0.0557,\n\t\t\t\t\"J\": -0.0003, \"Q\": 0.0554, \"K\": 0.1159, \"A\": 0.1985\n\t\t\t},\n\t\t\t\"6\": {\n\t\t\t\t\"7\": -0.0926, \"8\": -0.0751, \"9\": -0.0514, \"T\": -0.0212, \"Q\": 0.0723,\n\t\t\t\t\"J\": 0.0121, \"K\": 0.1328, \"A\": 0.1981\n\t\t\t},\n\t\t\t\"7\": {\n\t\t\t\t\"8\": -0.0413, \"9\": -0.0174, \"T\": 0.0128, \"J\": 0.0465, \"Q\": 0.0860,\n\t\t\t\t\"K\": 0.1580, \"A\": 0.2197\n\t\t\t},\n\t\t\t\"8\": {\n\t\t\t\t\"9\": 0.0016, \"T\": 0.0467, \"J\": 0.0803, \"Q\": 0.1204, \"K\": 0.1662,\n\t\t\t\t\"A\": 0.2389\n\t\t\t},\n\t\t\t\"9\": {\n\t\t\t\t\"T\": 0.0806, \"J\": 0.1132, \"Q\": 0.1533, \"K\": 0.1998, \"A\": 0.2556\n\t\t\t},\n\t\t\t\"T\": {\n\t\t\t\t\"J\": 0.1506, \"Q\": 0.1894, \"K\": 0.2358, \"A\": 0.2920\n\t\t\t},\n\t\t\t\"J\": { \n\t\t\t\t\"Q\": 0.2052, \"K\": 0.2513, \"A\": 0.3079\n\t\t\t},\n\t\t\t\"Q\": {\n\t\t\t\t\"K\": 0.2680, \"A\": 0.3242\n\t\t\t},\n\t\t\t\"K\": {\n\t\t\t\t\"A\": 0.3409\n\t\t\t}\n\t\t},\n\t\t# Different shape (unsuited)\n\t\tFalse: {\n\t\t\t# Lower card number\n\t\t\t\"2\": {\n\t\t\t\t# Higher card number\n\t\t\t\t\"2\": 0.0067 , \"3\": -0.3539 , \"4\": -0.3360 , \"5\": -0.3143 , \"6\": -0.3185 ,\n\t\t\t\t\"7\": -0.3083, \"8\": -0.2634 , \"9\": -0.2180 , \"T\": -0.1666 , \"J\": -0.1130 ,\n\t\t\t\t\"Q\": -0.0541, \"K\": 0.0102 , \"A\": 0.0986\n\t\t\t},\n\t\t\t\"3\": {\n\t\t\t\t\"3\": 0.0739, \"4\": -0.2971, \"5\": -0.2747, \"6\": -0.2784, \"7\": -0.2680,\n\t\t\t\t\"8\": -0.2503, \"9\": -0.1996, \"T\": -0.1481, \"J\": -0.0945, \"Q\": -0.0356,\n\t\t\t\t\"K\": -0.0285, \"A\": 0.1169\n\t\t\t},\n\t\t\t\"4\": {\n\t\t\t\t\"4\": 0.1405, \"5\": -0.2369, \"6\": 
-0.2398, \"7\": -0.2290, \"8\": -0.2111, \n\t\t\t\t\"9\": -0.1866, \"T\": -0.1299, \"J\": -0.0763, \"Q\": -0.0174, \"K\": 0.0465, \n\t\t\t\t\"A\": 0.1346\n\t\t\t},\n\t\t\t\"5\": {\n\t\t\t\t\"5\": 0.2065, \"6\": -0.2011, \"7\": -0.1898, \"8\": -0.1714, \"9\": -0.1466, \n\t\t\t\t\"T\": -0.1150, \"J\": -0.0564, \"Q\": 0.0024, \"K\": 0.0663, \"A\": 0.1539\n\t\t\t},\n\t\t\t\"6\": {\n\t\t\t\t\"6\": 0.2657, \"7\": -0.1535, \"8\": -0.1353, \"9\": -0.1102, \"T\": -0.0782, \n\t\t\t\t\"J\": -0.0431, \"Q\": 0.0205, \"K\": 0.0845, \"A\": 0.1536\n\t\t\t},\n\t\t\t\"7\": {\n\t\t\t\t\"7\": 0.3247, \"8\": -0.0990, \"9\": -0.0740, \"T\": -0.0418, \"J\": -0.0064, \n\t\t\t\t\"Q\": 0.0353, \"K\": 0.1037, \"A\": 0.1768 \n\t\t\t},\n\t\t\t\"8\": {\n\t\t\t\t\"8\": 0.3833, \"9\": -0.0381, \"T\": -0.0056, \"J\": 0.0298, \"Q\": 0.0720, \n\t\t\t\t\"K\": 0.1204, \"A\": 0.1975\n\t\t\t},\n\t\t\t\"9\": {\n\t\t\t\t\"9\": 0.4411, \"T\": 0.0306, \"J\": 0.0650, \"Q\": 0.1072, \"K\": 0.1562, \n\t\t\t\t\"A\": 0.2155\n\t\t\t},\n\t\t\t\"T\": {\n\t\t\t\t\"T\":0.5002, \"J\": 0.1050, \"Q\": 0.1458, \"K\": 0.1948, \"A\": 0.2544\n\t\t\t},\n\t\t\t\"J\": { \n\t\t\t\t\"J\": 0.5494, \"Q\": 0.1627, \"K\": 0.2114, \"A\": 0.2713\n\t\t\t},\n\t\t\t\"Q\": {\n\t\t\t\t\"Q\": 0.5985, \"K\": 0.2291, \"A\": 0.2886\n\t\t\t},\n\t\t\t\"K\": {\n\t\t\t\t\"K\": 0.6479, \"A\": 0.3064\n\t\t\t},\n\t\t\t\"A\": {\n\t\t\t\t\"A\": 0.7041\n\t\t\t}\n\t\t}\n\t}\n\n\n\tdef __init__(self):\n\t\tBasePokerPlayer.__init__(self)\n\t\t# To be re-initialized at the start of each game\n\t\t# Agent info\n\t\t# self.uuid is already initialized by the engine\n\t\tself.name = \"agent22\"\n\t\tself.seat_pos = 0\n\t\t# Rule info\n\t\tself.small_blind_amount = 0\n\t\tself.big_blind_amount = 0\n\t\tself.max_round = 1000\n\t\t# To be re-initialized at the start of each round\n\t\t# Round info\n\t\tself.round_count = 0\n\t\tself.big_blind_seat_pos = 0\n\t\tself.is_player_big_blind = True\n\t\tself.hole_card = []\n\t\tself.player_stack_at_start_of_round = 10000\n\t\tself.opponent_stack_at_start_of_round = 10000\n\t\tself.prev_outcome = 0\n\t\t# To be re-initialized at the start of each street\n\t\t# Street info\n\t\tself.street = self.STREET_ZERO_CARD\n\t\tself.is_start_of_street = True\n\t\tself.community_card = []\n\t\tself.player_bet_at_start_of_street = 0\n\t\tself.opponent_bet_at_start_of_street = 0\n\t\tself.bet_at_end_of_street = []\n\t\tself.remaining_raise_this_street = self.NUM_RAISE_PER_STREET\t#set to 4\n\t\tself.remaining_player_raise_this_round = self.NUM_RAISE_PER_ROUND_PER_PLAYER\t#set to 4\n\t\tself.remaining_opponent_raise_this_round = self.NUM_RAISE_PER_ROUND_PER_PLAYER\t#set to 4\n\t\tself.preflop_expected_value = 0.0\n\t\tself.winning_probability = 0.5\n\t\tself.opp_heuristic_weight = 0.0\n\t\t# To be re-initialized at the start of each update\n\t\t# Current info (will be reinitialize from game info)\n\t\tself.player_stack = 10000\n\t\tself.opponent_stack = 10000\n\t\tself.player_bet = 0 \n\t\tself.opponent_bet = 0\n\t\tself.last_action = {}\n\t\t# Pre-calculating and populating data for estimating the future raising amount\n\t\tself.avg_raise_amount_remaining_street = []\n\t\tself.pre_calculate_avg_raise_amount_remaining_street()\n\n\tdef declare_action(self, valid_actions, hole_card, round_state):\n\t\tcall_action_info = valid_actions[self.best_action(valid_actions)]\n\t\taction = call_action_info[\"action\"]\n\t\treturn action\t# action returned here is sent to the poker engine\n\n\tdef receive_game_start_message(self, game_info):\n\t\t# initialize game 
infomation\n\t\tself.small_blind_amount = game_info[\"rule\"][\"small_blind_amount\"]\n\t\tself.big_blind_amount = 2 * self.small_blind_amount\n\t\tself.max_round = game_info[\"rule\"][\"max_round\"]\n\t\t# initialize personal data\n\t\tfor i in range(0, len(game_info[\"seats\"])):\n\t\t\tif game_info[\"seats\"][i][\"uuid\"] == self.uuid:\n\t\t\t\tself.seat_pos = i\n\t\t\t\tself.name = game_info[\"seats\"][i][\"name\"]\n\t\t# initialize money stack\n\t\tself.player_stack_at_start_of_round = game_info[\"rule\"][\"initial_stack\"]\n\t\tself.opponent_stack_at_start_of_round = game_info[\"rule\"][\"initial_stack\"]\n\t\tself.player_stack = self.player_stack_at_start_of_round\n\t\tself.opponent_stack = self.opponent_stack_at_start_of_round\n\t\t# DEBUG\n\t\t# pprint.pprint(game_info)\n\t\t# print(\"-----GAME START-----\")\n\t\t# print(\"name: \" + str(self.name))\n\t\t# print(\"uuid: \" + str(self.uuid))\n\t\t# print(\"small_blind_amount: \" + str(self.small_blind_amount))\n\t\t# print(\"big_blind_amount: \" + str(self.big_blind_amount))\n\t\t# print(\"max_round: \" + str(self.max_round))\n\t\t# print(\"seat_pos: \" + str(self.seat_pos))\n\t\t# print(\"--------------------\")\n\t\t# pass\n\n\tdef receive_round_start_message(self, round_count, hole_card, seats):\n\t\t# Initialize round info\n\t\tself.round_count = round_count\n\t\tself.hole_card = list(hole_card)\n\t\tself.remaining_player_raise_this_round = self.NUM_RAISE_PER_ROUND_PER_PLAYER\n\t\tself.remaining_opponent_raise_this_round = self.NUM_RAISE_PER_ROUND_PER_PLAYER\n\t\t# Initialize last action\n\t\tself.last_action = {}\n\t\t# Initialize bet amount at end of street\n\t\tself.bet_at_end_of_street = []\n\t\t# Initialize for preflop street\n\t\tself.player_bet_at_start_of_street = 0\n\t\tself.opponent_bet_at_start_of_street = 0\n\t\tself.remaining_raise_this_street = self.NUM_RAISE_PER_STREET\n\t\t# Update stack and initialize small/big blind bet\n\t\tfor i in range(0, len(seats)):\n\t\t\tif seats[i][\"uuid\"] == self.uuid:\n\t\t\t\tself.player_stack = seats[i][\"stack\"]\n\t\t\t\tself.player_bet = self.player_stack_at_start_of_round - self.player_stack\n\t\t\t\t# big blind/all in is considered as a raise\n\t\t\t\tif self.player_bet > self.small_blind_amount:\n\t\t\t\t\tself.remaining_raise_this_street -= 1\n\t\t\t\t\tself.big_blind_seat_pos = i\n\t\t\telse:\n\t\t\t\tself.opponent_stack = seats[i][\"stack\"]\n\t\t\t\tself.opponent_bet = self.opponent_stack_at_start_of_round - self.opponent_stack\n\t\t\t\t# big blind/all in is considered as a raise\n\t\t\t\tif self.opponent_bet > self.small_blind_amount:\n\t\t\t\t\tself.remaining_raise_this_street -= 1\n\t\t\t\t\tself.big_blind_seat_pos = i\n\t\tself.is_player_big_blind = (self.seat_pos == self.big_blind_seat_pos)\n\t\t# DEBUG\n\t\t# pprint.pprint(hole_card)\n\t\t# pprint.pprint(seats)\n\t\t# print(\"-----ROUND START-----\")\n\t\t# print(\"round_count: \" + str(self.round_count))\n\t\t# print(\"big_blind_seat_pos: \" + str(self.big_blind_seat_pos))\n\t\t# print(\"hole_card: \")\n\t\t# pprint.pprint(self.hole_card)\n\t\t# print(\"player_stack_at_start_of_round: \" + str(self.player_stack_at_start_of_round))\n\t\t# print(\"opponent_stack_at_start_of_round: \" + str(self.opponent_stack_at_start_of_round))\n\t\t# print(\"---------------------\")\n\t\t# pass\n\n\tdef receive_street_start_message(self, street, round_state):\n\t\t# Initialize street info\n\t\tself.street = street\n\t\tself.is_start_of_street = True\n\t\tself.community_card = list(round_state[\"community_card\"])\n\t\t# 
Initialize for non-preflop street\n\t\tif self.street != self.STREET_ZERO_CARD:\n\t\t\tself.remaining_raise_this_street = self.NUM_RAISE_PER_STREET\n\t\t\tself.player_bet_at_start_of_street = self.player_bet\n\t\t\tself.opponent_bet_at_start_of_street = self.opponent_bet\n\t\t\tself.bet_at_end_of_street.append(self.player_bet_at_start_of_street)\n\t\t# Update stacks\n\t\tfor i in range(0, len(round_state[\"seats\"])):\n\t\t\tif round_state[\"seats\"][i][\"uuid\"] == self.uuid:\n\t\t\t\tself.player_stack = round_state[\"seats\"][i][\"stack\"]\n\t\t\telse:\n\t\t\t\tself.opponent_stack = round_state[\"seats\"][i][\"stack\"]\n\t\t# Recalculate winning probability\n\t\tself.re_calculate_probability()\n\t\t# DEBUG\n\t\t# pprint.pprint(round_state)\n\t\t# print(\"-----STREET START-----\")\n\t\t# print(\"street: \" + str(self.street))\n\t\t# print(\"raise_amount: \" + str(self.raise_amount(self.street)))\n\t\t# print(\"avg_raise_amount_remaining_street: \" \n\t\t#\t\t+ str(self.avg_raise_amount_remaining_street[self.STREET_INDEX_DICT[self.street]]))\n\t\t# pprint.pprint(self.community_card)\n\t\t# print(\"player_bet_at_start_of_street: \" + str(self.player_bet_at_start_of_street))\n\t\t# print(\"opponent_bet_at_start_of_street: \" + str(self.opponent_bet_at_start_of_street))\n\t\t# print(\"player_bet: \" + str(self.player_bet))\n\t\t# print(\"opponent_bet: \" + str(self.opponent_bet))\n\t\t# print(\"----------------------\")\n\t\t# pass\n\n\t# ENGINE BUG (RARE): Poker game might send update message 2 times\n\t# In order to update the number of remaining raises while dealing with this bug,\n\t# we make use of the fact that a player cannot perform 2 identical raise actions \n\t# in a row without letting the opponent do anything\n\tdef receive_game_update_message(self, action, round_state):\n\t\t# Check for repeating bug\n\t\tif ((action != self.last_action) or (self.is_start_of_street == True)):\n\t\t\t# Update bet amount\n\t\t\tif action[\"player_uuid\"] == self.uuid:\n\t\t\t\tif action[\"action\"] != self.FOLD:\n\t\t\t\t\tself.player_bet = self.player_bet_at_start_of_street + action[\"amount\"]\n\t\t\t\tif action[\"action\"] == self.RAISE:\n\t\t\t\t\tself.remaining_raise_this_street -= 1\n\t\t\t\t\tself.remaining_player_raise_this_round -= 1\n\t\t\telse:\n\t\t\t\tif action[\"action\"] != self.FOLD:\n\t\t\t\t\tself.opponent_bet = self.opponent_bet_at_start_of_street + action[\"amount\"]\n\t\t\t\tif action[\"action\"] == self.RAISE:\n\t\t\t\t\tself.remaining_raise_this_street -= 1\n\t\t\t\t\tself.remaining_opponent_raise_this_round -= 1\n\t\t\t# Update stacks\n\t\t\tfor i in range(0, len(round_state[\"seats\"])):\n\t\t\t\tif round_state[\"seats\"][i][\"uuid\"] == self.uuid:\n\t\t\t\t\tself.player_stack = round_state[\"seats\"][i][\"stack\"]\n\t\t\t\telse:\n\t\t\t\t\tself.opponent_stack = round_state[\"seats\"][i][\"stack\"]\n\t\t\t# An action has been performed since the start of the street\n\t\t\tself.is_start_of_street = False\n\t\t# Update last action\n\t\tself.last_action = dict(action)\n\t\t# DEBUG\n\t\t# pprint.pprint(action)\n\t\t# pprint.pprint(round_state)\n\t\t# print(\"-----GAME UPDATE-----\")\n\t\t# print(\"player_bet: \" + str(self.player_bet))\n\t\t# print(\"opponent_bet: \" + str(self.opponent_bet))\n\t\t# print(str(self.player_stack))\n\t\t# print(\"opponent_stack: \" + str(self.opponent_stack))\n\t\t# print(\"remaining_raise_this_street: \" + str(self.remaining_raise_this_street))\n\t\t# print(\"remaining_player_raise_this_round: \" +
str(self.remaining_player_raise_this_round))\n\t\t# print(\"remaining_opponent_raise_this_round: \" + str(self.remaining_opponent_raise_this_round))\n\t\t# print(\"---------------------\")\n\t\t# pass\n\n\tdef receive_round_result_message(self, winners, hand_info, round_state):\n\t\t# Update bet amount history\n\t\tif (self.last_action[\"action\"] != self.FOLD):\n\t\t\tfinal_bet = self.player_bet\n\t\t\tif (len(self.bet_at_end_of_street) == (self.NUM_STREET_PER_ROUND - 1)):\n\t\t\t\tfor i in range(0, self.NUM_STREET_PER_ROUND - 1):\n\t\t\t\t\tstreet = self.LIST_OF_STREET[i]\n\t\t\t\t\tbet = self.bet_at_end_of_street[i]\n\t\t\t\t\tif bet in self.BET_AMOUNT_HISTORY[street].keys():\n\t\t\t\t\t\tself.BET_AMOUNT_HISTORY[street][bet][self.BET_TOT_KEY] += final_bet\n\t\t\t\t\t\tself.BET_AMOUNT_HISTORY[street][bet][self.BET_NUM_KEY] += 1\n\t\t\t\t\t\tself.BET_AMOUNT_HISTORY[street][bet][self.BET_AVG_KEY] = (\n\t\t\t\t\t\t\tself.BET_AMOUNT_HISTORY[street][bet][self.BET_TOT_KEY]\n\t\t\t\t\t\t\t/ float(self.BET_AMOUNT_HISTORY[street][bet][self.BET_NUM_KEY]))\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.BET_AMOUNT_HISTORY[street][bet] = {\n\t\t\t\t\t\t\tself.BET_AVG_KEY: final_bet,\n\t\t\t\t\t\t\tself.BET_NUM_KEY: 1,\n\t\t\t\t\t\t\tself.BET_TOT_KEY: final_bet\n\t\t\t\t\t\t}\n\n\t\t# Update stacks\n\t\tfor i in range(0, len(round_state[\"seats\"])):\n\t\t\tif round_state[\"seats\"][i][\"uuid\"] == self.uuid:\n\t\t\t\tself.player_stack = round_state[\"seats\"][i][\"stack\"]\n\t\t\telse:\n\t\t\t\tself.opponent_stack = round_state[\"seats\"][i][\"stack\"]\n\n\t\tself.prev_outcome = self.player_stack - self.player_stack_at_start_of_round\n\t\tself.player_stack_at_start_of_round = self.player_stack\n\t\tself.opponent_stack_at_start_of_round = self.opponent_stack\n\n\t\thas_won = (winners[0][\"name\"] == self.name)\n\t\t\n\t\t# We can only evaluate how powerful the opponent is given their actions if we did not fold\n\t\tif has_won or (self.last_action[\"action\"] != self.FOLD):\n\t\t\tnum_raises = [0, 0, 0, 0]\n\t\t\tfor street in round_state[\"action_histories\"].keys():\n\t\t\t\t# Calculate number of raises per street\n\t\t\t\tfor turn in round_state[\"action_histories\"][street]:\n\t\t\t\t\tif turn[\"action\"] == \"RAISE\" and turn[\"uuid\"] != self.uuid:\n\t\t\t\t\t\tnum_raises[self.STREET_INDEX_DICT[street]] += 1\n\n\t\t\t# Calculate number of raises by the end of each street\n\t\t\tfor i in range(self.NUM_STREET_PER_ROUND - 1, 0, -1):\n\t\t\t\tfor j in range(0, i):\n\t\t\t\t\tnum_raises[i] += num_raises[j]\n\n\t\t\tfor street in self.LIST_OF_STREET:\n\t\t\t\tnum_opp_raises = num_raises[self.STREET_INDEX_DICT[street]]\n\t\t\t\tself.RAISE_HISTORY[has_won][self.is_player_big_blind][street][num_opp_raises] += 1\n\n\t\t\t# Note: it can be mathematically proven that we only need to update \n\t\t\t# entries with number of raises by the end of each street of this round\n\t\t\tfor street in self.WIN_RATES_FROM_RAISE_HISTORY.keys():\n\t\t\t\tif street in round_state[\"action_histories\"].keys():\n\t\t\t\t\traises = num_raises[self.STREET_INDEX_DICT[street]]\n\t\t\t\t\tself.WIN_RATES_FROM_RAISE_HISTORY[street][raises] = (\n\t\t\t\t\t\tself.win_chance_from_raise_history(street, raises))\n\n\t\t# DEBUG\n\t\t# print(self.RAISE_HISTORY)\n\t\t# pprint.pprint(winners)\n\t\t# pprint.pprint(hand_info)\n\t\t# pprint.pprint(round_state)\n\t\t# print(\"-----ROUND RESULT-----\")\n\t\t# print(str(self.player_stack - 10000))\n\t\t# print(\"opponent_stack: \" + str(self.opponent_stack))\n\t\t# 
print(\"----------------------\")\n\t\t# pass\n\n\tdef raise_amount(self, street):\n\t\t# Default result: RAISE_AMOUNT_PREFLOP\n\t\tresult = self.RAISE_AMOUNT_DICT.get(street, self.RAISE_AMOUNT_PREFLOP)\n\t\treturn result\n\n\t# First call of the heuristic minimax search, return best action index\n\t# Special case: call in the very first turn of the street will not end the street\n\tdef best_action(self, valid_actions):\n\t\tbet_diff = self.opponent_bet - self.player_bet\n\t\t# Initialize with fold action\n\t\tbest_outcome = (-1) * self.player_bet\n\t\tbest_action_index = self.FOLD_INDEX\n\t\t# Check call action\n\t\tcall_outcome = 0\n\t\tif self.is_start_of_street == True:\n\t\t\tcall_outcome = self.heuristic_minimax(\t#player calls, opponent turn\n\t\t\t\t\t\t\t\tself.OPPONENT_TURN,\n\t\t\t\t\t\t\t\tself.opponent_bet,\n\t\t\t\t\t\t\t\tself.opponent_bet,\n\t\t\t\t\t\t\t\tself.player_stack - bet_diff,\n\t\t\t\t\t\t\t\tself.opponent_stack,\n\t\t\t\t\t\t\t\tself.remaining_player_raise_this_round,\n\t\t\t\t\t\t\t\tself.remaining_opponent_raise_this_round,\n\t\t\t\t\t\t\t\tself.remaining_raise_this_street,\n\t\t\t\t\t\t\t\tbest_outcome,\n\t\t\t\t\t\t\t\tself.POS_INF)\n\t\telse:\n\t\t\tcall_outcome = self.expected_outcome(\t#player calls, street ends\n\t\t\t\t\t\t\t\tself.opponent_bet,\n\t\t\t\t\t\t\t\tself.player_stack - bet_diff,\n\t\t\t\t\t\t\t\tself.opponent_stack,\n\t\t\t\t\t\t\t\tself.remaining_player_raise_this_round,\n\t\t\t\t\t\t\t\tself.remaining_opponent_raise_this_round)\n\t\tif call_outcome >= best_outcome:\n\t\t\tbest_outcome = call_outcome\n\t\t\tbest_action_index = self.CALL_INDEX\n\t\t# Check raise action\n\t\tif len(valid_actions) == 3:\n\t\t\traise_outcome = 0\n\t\t\traise_amount_this_street = self.raise_amount(self.street)\n\t\t\tplayer_has_enough_money = (self.player_stack >= (bet_diff + raise_amount_this_street))\t#flag\n\t\t\topponent_has_enough_money = (self.opponent_stack >= raise_amount_this_street)\t#flag\n\t\t\tif (player_has_enough_money and opponent_has_enough_money):\n\t\t\t\traise_outcome = self.heuristic_minimax(\t#player raises\n\t\t\t\t\t\t\t\t\tself.OPPONENT_TURN,\n\t\t\t\t\t\t\t\t\tself.opponent_bet + raise_amount_this_street,\n\t\t\t\t\t\t\t\t\tself.opponent_bet,\n\t\t\t\t\t\t\t\t\tself.player_stack - bet_diff - raise_amount_this_street,\n\t\t\t\t\t\t\t\t\tself.opponent_stack,\n\t\t\t\t\t\t\t\t\tself.remaining_player_raise_this_round - 1,\n\t\t\t\t\t\t\t\t\tself.remaining_opponent_raise_this_round,\n\t\t\t\t\t\t\t\t\tself.remaining_raise_this_street - 1,\n\t\t\t\t\t\t\t\t\tbest_outcome,\n\t\t\t\t\t\t\t\t\tself.POS_INF)\n\t\t\telse:\n\t\t\t\tlast_raise_amount = min(self.player_stack - bet_diff, self.opponent_stack)\n\t\t\t\traise_outcome = self.heuristic_minimax(\t#player raises to the highest possible remaining amount\n\t\t\t\t\t\t\t\t\tself.OPPONENT_TURN,\n\t\t\t\t\t\t\t\t\tself.opponent_bet + last_raise_amount,\n\t\t\t\t\t\t\t\t\tself.opponent_bet,\n\t\t\t\t\t\t\t\t\tself.player_stack - bet_diff - last_raise_amount,\n\t\t\t\t\t\t\t\t\tself.opponent_stack,\n\t\t\t\t\t\t\t\t\tself.remaining_player_raise_this_round - 1,\n\t\t\t\t\t\t\t\t\tself.remaining_opponent_raise_this_round,\n\t\t\t\t\t\t\t\t\tself.remaining_raise_this_street - 1,\n\t\t\t\t\t\t\t\t\tbest_outcome,\n\t\t\t\t\t\t\t\t\tself.POS_INF)\n\t\t\tif raise_outcome >= best_outcome:\n\t\t\t\tbest_outcome = raise_outcome\n\t\t\t\tbest_action_index = self.RAISE_INDEX\n\t\treturn best_action_index\n\n\t# Alpha-beta prunning heuristic minimax algorithm\n\t# Special case in the first turn of the street is 
already checked in best_action\n\tdef heuristic_minimax(self,\n\t\t\t\t\t\tplayer_turn,\n\t\t\t\t\t\tplayer_bet,\n\t\t\t\t\t\topponent_bet,\n\t\t\t\t\t\tplayer_stack,\n\t\t\t\t\t\topponent_stack,\n\t\t\t\t\t\tremaining_player_raise_this_round,\n\t\t\t\t\t\tremaining_opponent_raise_this_round,\n\t\t\t\t\t\tremaining_raise_this_street,\n\t\t\t\t\t\talpha,\n\t\t\t\t\t\tbeta\n\t\t\t\t\t\t):\n\t\tbest_outcome = 0\n\t\tcall_outcome = 0\n\t\traise_outcome = 0\n\t\traise_amount_this_street = self.raise_amount(self.street)\n\t\t# Max player\n\t\tif player_turn == self.PLAYER_TURN: \t# if it is player's turn\n\t\t\tbest_outcome = (-1) * player_bet \t# initialize with folding\n\t\t\tif (best_outcome >= beta):\n\t\t\t\treturn best_outcome\t\t\t\t# prunning\n\t\t\tbet_diff = opponent_bet - player_bet\n\t\t\t# Check call outcome\n\t\t\tcall_outcome = self.expected_outcome(\n\t\t\t\t\t\t\t\topponent_bet,\n\t\t\t\t\t\t\t\tplayer_stack - bet_diff,\n\t\t\t\t\t\t\t\topponent_stack,\n\t\t\t\t\t\t\t\tremaining_player_raise_this_round,\n\t\t\t\t\t\t\t\tremaining_opponent_raise_this_round)\n\t\t\tif call_outcome >= best_outcome:\n\t\t\t\tbest_outcome = call_outcome\n\t\t\tif (best_outcome >= beta):\n\t\t\t\treturn best_outcome\t\t\t\t# prunning\n\t\t\t# Check raise outcome\n\t\t\tif remaining_raise_this_street > 0:\t#make sure there is eligible number of raises left in street\n\t\t\t\tif remaining_player_raise_this_round > 0:\t#check if player is still eligible to raise\n\t\t\t\t\tif ((player_stack > bet_diff) and (opponent_stack > 0)):\n\t\t\t\t\t\tplayer_has_enough_money = (player_stack >= (bet_diff + raise_amount_this_street))\n\t\t\t\t\t\topponent_has_enough_money = (opponent_stack >= raise_amount_this_street)\n\t\t\t\t\t\tif (player_has_enough_money and opponent_has_enough_money):\n\t\t\t\t\t\t\traise_outcome = self.heuristic_minimax(\t#recursive miniMax\n\t\t\t\t\t\t\t\t\t\t\t\tself.OPPONENT_TURN,\n\t\t\t\t\t\t\t\t\t\t\t\topponent_bet + raise_amount_this_street,\n\t\t\t\t\t\t\t\t\t\t\t\topponent_bet,\n\t\t\t\t\t\t\t\t\t\t\t\tplayer_stack - bet_diff - raise_amount_this_street,\n\t\t\t\t\t\t\t\t\t\t\t\topponent_stack,\n\t\t\t\t\t\t\t\t\t\t\t\tremaining_player_raise_this_round - 1,\n\t\t\t\t\t\t\t\t\t\t\t\tremaining_opponent_raise_this_round,\n\t\t\t\t\t\t\t\t\t\t\t\tremaining_raise_this_street - 1,\n\t\t\t\t\t\t\t\t\t\t\t\tmax(alpha, best_outcome),\n\t\t\t\t\t\t\t\t\t\t\t\tbeta)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tlast_raise_amount = min(player_stack - bet_diff, opponent_stack)\n\t\t\t\t\t\t\traise_outcome = self.heuristic_minimax(\t#recursive miniMax\n\t\t\t\t\t\t\t\t\t\t\t\tself.OPPONENT_TURN,\n\t\t\t\t\t\t\t\t\t\t\t\topponent_bet + last_raise_amount,\n\t\t\t\t\t\t\t\t\t\t\t\topponent_bet,\n\t\t\t\t\t\t\t\t\t\t\t\tplayer_stack - bet_diff - last_raise_amount,\n\t\t\t\t\t\t\t\t\t\t\t\topponent_stack,\n\t\t\t\t\t\t\t\t\t\t\t\tremaining_player_raise_this_round - 1,\n\t\t\t\t\t\t\t\t\t\t\t\tremaining_opponent_raise_this_round,\n\t\t\t\t\t\t\t\t\t\t\t\tremaining_raise_this_street - 1,\n\t\t\t\t\t\t\t\t\t\t\t\tmax(alpha, best_outcome),\n\t\t\t\t\t\t\t\t\t\t\t\tbeta)\n\t\t\t\t\t\tif raise_outcome >= best_outcome:\n\t\t\t\t\t\t\tbest_outcome = raise_outcome\n\t\t# Min player\n\t\telse:\n\t\t\tbest_outcome = opponent_bet\n\t\t\tif (best_outcome <= alpha):\n\t\t\t\treturn best_outcome\t\t\t\t# prunning\n\t\t\tbet_diff = player_bet - opponent_bet\n\t\t\t# Check call outcome\n\t\t\tcall_outcome = self.expected_outcome(\n\t\t\t\t\t\t\t\tplayer_bet,\n\t\t\t\t\t\t\t\tplayer_stack,\n\t\t\t\t\t\t\t\topponent_stack - 
bet_diff,\n\t\t\t\t\t\t\t\tremaining_player_raise_this_round,\n\t\t\t\t\t\t\t\tremaining_opponent_raise_this_round)\n\t\t\tif call_outcome <= best_outcome:\n\t\t\t\tbest_outcome = call_outcome\n\t\t\tif (best_outcome <= alpha):\n\t\t\t\treturn best_outcome\t\t\t\t# prunning\n\t\t\t# Check raise outcome\n\t\t\tif remaining_raise_this_street > 0:\n\t\t\t\tif remaining_opponent_raise_this_round > 0:\n\t\t\t\t\tif ((opponent_stack > bet_diff) and (player_stack > 0)):\n\t\t\t\t\t\topponent_has_enough_money = (opponent_stack >= (bet_diff + raise_amount_this_street))\n\t\t\t\t\t\tplayer_has_enough_money = (player_stack >= raise_amount_this_street)\n\t\t\t\t\t\tif (player_has_enough_money and opponent_has_enough_money):\n\t\t\t\t\t\t\traise_outcome = self.heuristic_minimax(\n\t\t\t\t\t\t\t\t\t\t\t\tself.PLAYER_TURN,\n\t\t\t\t\t\t\t\t\t\t\t\tplayer_bet,\n\t\t\t\t\t\t\t\t\t\t\t\tplayer_bet + raise_amount_this_street,\n\t\t\t\t\t\t\t\t\t\t\t\tplayer_stack,\n\t\t\t\t\t\t\t\t\t\t\t\topponent_stack - bet_diff - raise_amount_this_street,\n\t\t\t\t\t\t\t\t\t\t\t\tremaining_player_raise_this_round,\n\t\t\t\t\t\t\t\t\t\t\t\tremaining_opponent_raise_this_round - 1,\n\t\t\t\t\t\t\t\t\t\t\t\tremaining_raise_this_street - 1,\n\t\t\t\t\t\t\t\t\t\t\t\talpha,\n\t\t\t\t\t\t\t\t\t\t\t\tmin(beta, best_outcome))\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tlast_raise_amount = min(opponent_stack - bet_diff, player_stack)\n\t\t\t\t\t\t\traise_outcome = self.heuristic_minimax(\n\t\t\t\t\t\t\t\t\t\t\t\tself.PLAYER_TURN,\n\t\t\t\t\t\t\t\t\t\t\t\tplayer_bet,\n\t\t\t\t\t\t\t\t\t\t\t\tplayer_bet + last_raise_amount,\n\t\t\t\t\t\t\t\t\t\t\t\tplayer_stack,\n\t\t\t\t\t\t\t\t\t\t\t\topponent_stack - bet_diff - last_raise_amount,\n\t\t\t\t\t\t\t\t\t\t\t\tremaining_player_raise_this_round,\n\t\t\t\t\t\t\t\t\t\t\t\tremaining_opponent_raise_this_round - 1,\n\t\t\t\t\t\t\t\t\t\t\t\tremaining_raise_this_street - 1,\n\t\t\t\t\t\t\t\t\t\t\t\talpha,\n\t\t\t\t\t\t\t\t\t\t\t\tmin(beta, best_outcome))\n\t\t\t\t\t\tif raise_outcome <= best_outcome:\n\t\t\t\t\t\t\tbest_outcome = raise_outcome\n\t\treturn best_outcome\n\n\tdef expected_outcome(self, \n\t\t\t\t\t\tbet_amount,\n\t\t\t\t\t\tplayer_stack, \n\t\t\t\t\t\topponent_stack,\n\t\t\t\t\t\tremaining_player_raise_this_round, \n\t\t\t\t\t\tremaining_opponent_raise_this_round):\n\t\tnum_opponent_raise = self.NUM_RAISE_PER_ROUND_PER_PLAYER - remaining_opponent_raise_this_round\n\t\tif self.street == self.STREET_FIVE_CARD:\n\t\t\texpected_bet = bet_amount\n\t\telse:\n\t\t\tif ((self.round_count > self.MIN_NUM_DATA_COLLECTED)\n\t\t\t\tand (bet_amount in self.BET_AMOUNT_HISTORY[self.street].keys())):\n\t\t\t\texpected_bet = self.BET_AMOUNT_HISTORY[self.street][bet_amount][self.BET_AVG_KEY]\n\t\t\telse:\n\t\t\t\texpected_bet = self.naive_expected_bet_amount(\n\t\t\t\t\t\t\t\t\tbet_amount, \n\t\t\t\t\t\t\t\t\tplayer_stack, \n\t\t\t\t\t\t\t\t\topponent_stack,\n\t\t\t\t\t\t\t\t\tremaining_player_raise_this_round,\n\t\t\t\t\t\t\t\t\tremaining_opponent_raise_this_round)\n\t\texpected_value = self.evaluate_value(expected_bet, num_opponent_raise)\n\t\treturn expected_value\n\n\tdef naive_expected_bet_amount(self, \n\t\t\t\t\t\t\t\tbet_amount, \n\t\t\t\t\t\t\t\tplayer_stack, \n\t\t\t\t\t\t\t\topponent_stack, \n\t\t\t\t\t\t\t\tremaining_player_raise_this_round, \n\t\t\t\t\t\t\t\tremaining_opponent_raise_this_round):\n\t\tnum_player_raise = self.NUM_RAISE_PER_ROUND_PER_PLAYER - remaining_player_raise_this_round\n\t\tnum_opponent_raise = self.NUM_RAISE_PER_ROUND_PER_PLAYER - 
remaining_opponent_raise_this_round\n\t\tstreet_index = self.STREET_INDEX_DICT[self.street]\n\t\tnum_street = street_index + 1\n\t\tnum_remaining_street = self.NUM_STREET_PER_ROUND - num_street\n\t\tavg_raise_amount_remaining_street = self.avg_raise_amount_remaining_street[street_index]\n\t\texpected_num_player_future_raise = min(remaining_player_raise_this_round, \n\t\t\t\t\t\t\t\t\t\t\t\tfloat(num_player_raise) / num_street * num_remaining_street)\n\t\texpected_num_opponent_future_raise = min(remaining_opponent_raise_this_round, \n\t\t\t\t\t\t\t\t\t\t\t\tfloat(num_opponent_raise) / num_street * num_remaining_street)\n\t\tremaining_possible_total_raise = num_remaining_street * self.NUM_RAISE_PER_STREET\n\t\texpected_increase_bet = min(player_stack, \n\t\t\t\t\t\t\t\t\topponent_stack,\n\t\t\t\t\t\t\t\t\t((expected_num_player_future_raise + expected_num_opponent_future_raise) \n\t\t\t\t\t\t\t\t\t\t* avg_raise_amount_remaining_street),\n\t\t\t\t\t\t\t\t\tremaining_possible_total_raise * avg_raise_amount_remaining_street)\n\t\texpected_bet = bet_amount + expected_increase_bet\n\t\treturn expected_bet\n\n\tdef evaluate_value(self, bet_amount, num_opponent_raise):\n\t\tfirst_card = self.hole_card[0]\n\t\tsecond_card = self.hole_card[1]\n\t\tvalue = 0\n\t\tif self.street == self.STREET_ZERO_CARD:\n\t\t\tcard_heuristic = bet_amount * self.preflop_expected_value\n\t\t\t# card_heuristic = bet_amount * (2 * self.winning_probability - 1)\n\t\telse:\t#not in PREFLOP\n\t\t\t# E = P(W) * B - (1 - P(W)) * B\n\t\t\tcard_heuristic = bet_amount * (2 * self.winning_probability - 1)\n\t\twin_rates_from_raise_history = self.WIN_RATES_FROM_RAISE_HISTORY[self.street][num_opponent_raise]\n\t\topp_heuristic = bet_amount * (2 * win_rates_from_raise_history - 1)\n\t\tif ((win_rates_from_raise_history >= 0.0) \n\t\t\tand (win_rates_from_raise_history <= 1.0) \n\t\t\tand (self.round_count > self.MIN_NUM_DATA_COLLECTED)):\n\t\t\tvalue = (1 - self.opp_heuristic_weight) * card_heuristic + self.opp_heuristic_weight * opp_heuristic\n\t\telse:\n\t\t\tvalue = card_heuristic\n\t\treturn value\n\n\tdef re_calculate_probability(self):\n\t\t# if in PREFLOP, we check against expected value and reverse the equation \n\t\tif self.street == self.STREET_ZERO_CARD:\n\t\t\tfirst_card = self.hole_card[0]\n\t\t\tsecond_card = self.hole_card[1]\n\t\t\t\n\t\t\tif self.CARD_NUM_DICT[first_card[1]] > self.CARD_NUM_DICT[second_card[1]]: #check number\n\t\t\t\tlower_card_number = second_card[1]\n\t\t\t\thigher_card_number = first_card[1]\n\t\t\telse:\n\t\t\t\tlower_card_number = first_card[1]\n\t\t\t\thigher_card_number = second_card[1]\n\t\t\tif first_card[0] == second_card[0]: #check same shape\n\t\t\t\tis_same_shape = True\n\t\t\telse:\n\t\t\t\tis_same_shape = False\n\t\t\t#reverse engineer equation, 2*Pr(win) = (Expected Value Per Bet) + 1\n\t\t\tself.preflop_expected_value = (\n\t\t\t\tself.PREFLOP_EXPECTED_VALUE[is_same_shape][lower_card_number][higher_card_number])\n\t\t\tself.winning_probability = (self.preflop_expected_value + 1) / 2\n\n\t\t#when not in PREFLOP\n\t\telse:\n\t\t\tself.winning_probability = estimate_hole_card_win_rate(\n\t\t\t\t\t\t\t\t\t\t\tnb_simulation = self.NB_SIMULATION,\n\t\t\t\t\t\t\t\t\t\t\tnb_player = 2,\n\t\t\t\t\t\t\t\t\t\t\thole_card = gen_cards(list(self.hole_card)),\n\t\t\t\t\t\t\t\t\t\t\tcommunity_card = gen_cards(list(self.community_card)))\n\n\tdef pre_calculate_avg_raise_amount_remaining_street(self):\n\t\ttotal_raise_amount_value = 0\n\t\tfor i in range(0, self.NUM_STREET_PER_ROUND):\t#calculating 
raise amount sum of all 4 streets\n\t\t\ttotal_raise_amount_value += self.RAISE_AMOUNT_DICT[i]\n\t\tnum_remaining_street = self.NUM_STREET_PER_ROUND\t#set to 4\n\t\tfor i in range(0, self.NUM_STREET_PER_ROUND):\t#calculate avg remaining raise for each street\n\t\t\ttotal_raise_amount_value -= self.RAISE_AMOUNT_DICT[i]\n\t\t\tnum_remaining_street -= 1\n\t\t\tavg_raise_value = 0\n\t\t\tif num_remaining_street != 0:\n\t\t\t\tavg_raise_value = float(total_raise_amount_value) / num_remaining_street\n\t\t\tself.avg_raise_amount_remaining_street.append(avg_raise_value)\n\n\tdef rounds_won(self, street_name):\n\t\twon = 0\n\t\tfor x in self.RAISE_HISTORY[True][self.is_player_big_blind][street_name]:\n\t\t\twon += x\n\t\treturn won\n\n\tdef rounds_lost(self, street_name):\n\t\tlost = 0\n\t\tfor x in self.RAISE_HISTORY[False][self.is_player_big_blind][street_name]:\n\t\t\tlost += x\n\t\treturn lost\n\n\tdef rounds_with_specific_raises(self, street_name, num_raises):\n\t\tresult = (self.RAISE_HISTORY[False][self.is_player_big_blind][street_name][num_raises] \n\t\t\t\t\t+ self.RAISE_HISTORY[True][self.is_player_big_blind][street_name][num_raises])\n\t\treturn result\n \n\t# Use definition of conditional probability to calculate prob of winning given the num of raises made by opponent\n\t# Return a probability between 0.0 and 1.0 on success, and -1.0 if it cannot be computed\n\tdef win_chance_from_raise_history(self, street, num_raises):\n\t\tnum_wins = self.rounds_won(street)\n\t\tnum_lost = self.rounds_lost(street)\n\t\tprob_player_win_given_opp_raises = -1.0\t#-1.0 tells the caller to fall back to the card winning probability\n\t\tif (num_wins + num_lost != 0):\n\t\t\tprob_cur_opp_raises_and_player_win = (self.RAISE_HISTORY[True][self.is_player_big_blind][street][num_raises] \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t/ float(num_wins + num_lost))\n\t\t\tprob_opp_raises = self.rounds_with_specific_raises(street, num_raises) / float(num_wins + num_lost)\n\t\t\tif prob_opp_raises != 0:\n\t\t\t\tprob_player_win_given_opp_raises = prob_cur_opp_raises_and_player_win / prob_opp_raises\n\t\treturn prob_player_win_given_opp_raises\n\ndef setup_ai():\n\treturn Honest22Player()\n","sub_path":"honest22player.py","file_name":"honest22player.py","file_ext":"py","file_size_in_byte":32960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
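A standalone sketch of the conditional-probability estimate used by win_chance_from_raise_history in the record above: P(win | opponent raised k times) = P(win and k raises) / P(k raises), both estimated from round counts. The flat win/loss lists and the toy numbers are hypothetical, not the record's RAISE_HISTORY layout.

def win_chance_given_raises(wins_by_raises, losses_by_raises, k):
    # wins_by_raises[k] = rounds won in which the opponent raised k times
    total = sum(wins_by_raises) + sum(losses_by_raises)
    if total == 0:
        return -1.0                                    # no data yet: cannot compute
    p_win_and_k = wins_by_raises[k] / float(total)
    p_k = (wins_by_raises[k] + losses_by_raises[k]) / float(total)
    return p_win_and_k / p_k if p_k > 0 else -1.0

print(win_chance_given_raises([10, 5, 3, 0], [4, 6, 9, 2], 2))   # 3/(3+9) = 0.25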
+{"seq_id":"306589208","text":"#!/usr/bin/env python3\n\n\"\"\"\nCreated on 14 Mar 2019\n\n@author: Bruno Beloff (bruno.beloff@southcoastscience.com)\n\nsource repo: scs_airnow\n\nDESCRIPTION\nThe airnow_downloader utility is used to\n\nSYNOPSIS\nairnow_downloader.py -t ORG GROUP LOC TOPIC -s START -e END [-d DIR] [-v]\n\nEXAMPLES\n./airnow_downloader.py -t unep ethiopia 1 particulates -s 2019-03-20T00:00:00Z -e 2019-03-21T00:00:00Z -d data -v\n\nSEE ALSO\nscs_analysis/aqcsv_mapper\nscs_analysis/aqcsv_task_manager\n\"\"\"\n\nimport os\nimport sys\n\nfrom scs_airnow.cmd.cmd_airnow_downloader import CmdAirNowDownloader\nfrom scs_airnow.helper.airnow_availability import AirNowAvailability\n\nfrom scs_core.aqcsv.connector.airnow_mapping_task import AirNowMappingTaskList\n\nfrom scs_core.data.datum import Datum\n\nfrom scs_core.sys.filesystem import Filesystem\nfrom scs_core.sys.subprocess import Pipe\n\nfrom scs_host.sys.host import Host\n\n\n# TODO: fix the issue of locality for external scripts\n\n# --------------------------------------------------------------------------------------------------------------------\n\nif __name__ == '__main__':\n\n # ----------------------------------------------------------------------------------------------------------------\n # cmd...\n\n cmd = CmdAirNowDownloader()\n\n if not cmd.is_valid_start():\n print(\"airnow_downloader: invalid format for start datetime.\", file=sys.stderr)\n exit(2)\n\n if not cmd.is_valid_end():\n print(\"airnow_downloader: invalid format for end datetime.\", file=sys.stderr)\n exit(2)\n\n if not Datum.is_numeric(cmd.task_loc):\n print(\"airnow_downloader: the loc value %s should be an integer.\" % cmd.task_loc, file=sys.stderr)\n exit(2)\n\n if not cmd.is_valid():\n cmd.print_help(sys.stderr)\n exit(2)\n\n if cmd.verbose:\n print(\"airnow_downloader: %s\" % cmd, file=sys.stderr)\n sys.stderr.flush()\n\n\n try:\n # ------------------------------------------------------------------------------------------------------------\n # resources...\n\n # MappingTask...\n tasks = AirNowMappingTaskList.load(Host)\n task = tasks.item((cmd.task_org, cmd.task_group, int(cmd.task_loc), cmd.task_topic))\n\n if task is None:\n print(\"airnow_downloader: task not found.\", file=sys.stderr)\n exit(1)\n\n if cmd.verbose:\n print(\"airnow_downloader: %s\" % task, file=sys.stderr)\n sys.stderr.flush()\n\n # files...\n task_prefix = task.file_prefix()\n dir_name = task.site_code if cmd.dir is None else os.path.join(cmd.dir, task.site_code)\n file_prefix = task_prefix if cmd.file_prefix is None else cmd.file_prefix\n\n file_path = os.path.join(dir_name, file_prefix)\n\n if cmd.verbose:\n print(\"airnow_downloader: file group: %s\" % file_path, file=sys.stderr)\n sys.stderr.flush()\n\n\n # ------------------------------------------------------------------------------------------------------------\n # validation...\n\n # datetimes...\n start = cmd.start.as_iso8601()\n end = cmd.end.as_iso8601()\n\n # data availability...\n if cmd.check:\n result = AirNowAvailability.check(\"airnow_downloader\", task, cmd.end, cmd.verbose)\n\n if result != AirNowAvailability.OK:\n exit(result)\n\n\n # ------------------------------------------------------------------------------------------------------------\n # run: directories...\n\n if cmd.verbose:\n print(\"airnow_downloader: making directories...\", end='', file=sys.stderr)\n sys.stderr.flush()\n\n Filesystem.mkdir(dir_name)\n\n if cmd.verbose:\n print(\"done.\", file=sys.stderr)\n\n\n # 
------------------------------------------------------------------------------------------------------------\n # run: env download...\n\n env_filename = file_path + '-' + task.topic + '.csv'\n\n if cmd.verbose:\n print(\"airnow_downloader: downloading %s data...\" % task.topic, end='', file=sys.stderr)\n sys.stderr.flush()\n\n p = Pipe(('./aws_topic_history.py', task.environment_path(), '-s', start, '-e', end),\n ['./node.py', 'rec', 'tag', 'src'] + ['val.' + param for param in task.parameters],\n ('./sample_aggregate.py', '-c', task.checkpoint),\n ('./csv_writer.py', env_filename))\n\n return_code = p.wait()\n\n if return_code > 0:\n print(\"airnow_downloader: %s download failed with exit code %s.\" % (task.topic, return_code),\n file=sys.stderr)\n exit(return_code)\n\n if cmd.verbose:\n print(\"done.\", file=sys.stderr)\n\n\n # ------------------------------------------------------------------------------------------------------------\n # run: status download...\n\n status_filename = file_path + '-status.csv'\n\n if cmd.verbose:\n print(\"airnow_downloader: downloading status data...\", end='', file=sys.stderr)\n sys.stderr.flush()\n\n p = Pipe(('./aws_topic_history.py', task.status_path(), '-s', start, '-e', end),\n ('./node.py', 'rec', 'tag', 'val.tz', 'val.sch', 'val.gps', 'val.airnow'),\n ('./sample_aggregate.py', '-c', task.checkpoint),\n ('./csv_writer.py', status_filename))\n\n return_code = p.wait()\n\n if return_code > 0:\n print(\"airnow_downloader: status download failed with exit code %s.\" % return_code, file=sys.stderr)\n exit(return_code)\n\n if cmd.verbose:\n print(\"done.\", file=sys.stderr)\n\n\n # ------------------------------------------------------------------------------------------------------------\n # run: join...\n\n joined_filename = file_path + '-joined.csv'\n\n if cmd.verbose:\n print(\"airnow_downloader: joining...\", end='', file=sys.stderr)\n sys.stderr.flush()\n\n p = Pipe(('./csv_join.py', '-i', '-l', task.topic, 'rec', env_filename, '-r', 'status', 'rec', status_filename),\n ('./csv_writer.py', joined_filename))\n\n return_code = p.wait()\n\n if return_code > 0:\n print(\"airnow_downloader: join failed with exit code %s.\" % return_code, file=sys.stderr)\n exit(return_code)\n\n if cmd.verbose:\n print(\"done.\", file=sys.stderr)\n\n\n # ----------------------------------------------------------------------------------------------------------------\n # end...\n\n except KeyboardInterrupt:\n print(file=sys.stderr)\n","sub_path":"src/scs_airnow/airnow_downloader.py","file_name":"airnow_downloader.py","file_ext":"py","file_size_in_byte":6627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
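The downloader above chains small CLI tools through scs_core's Pipe helper (topic history -> node extraction -> aggregation -> CSV). A rough standard-library equivalent of one producer/consumer stage on a Unix host, with echo and cat as placeholder commands standing in for the scs scripts:

import subprocess

# hypothetical stand-ins for ('./aws_topic_history.py', ...) piped into ('./csv_writer.py', ...)
producer = subprocess.Popen(['echo', '{"rec": 1}'], stdout=subprocess.PIPE)
consumer = subprocess.Popen(['cat'], stdin=producer.stdout, stdout=subprocess.PIPE)
producer.stdout.close()                    # let the producer see SIGPIPE if the consumer exits early
out, _ = consumer.communicate()
print(out.decode(), consumer.returncode)   # checking the return code is the Pipe.wait() analogue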
+{"seq_id":"204392930","text":"import numpy as np\n\ndef unity_lut_1d():\n lutmono = np.arange(0, 32768, 32, dtype=np.uint16)\n lut = np.stack([lutmono]*3, axis=0)\n return lut\n\ndef unity_lut_3d(n=33):\n spacing = complex(0,n)\n lut = np.mgrid[0.:4095.:spacing,0.:4095.:spacing,0.:4095.:spacing]\n lut = np.rint(lut).astype(np.uint16)\n lut = np.transpose(lut, axes=(1,2,3,0))\n lut = np.flip(lut, axis=-1)\n return lut\n\ndef read_cube_file(filename):\n nheader = 0\n lut_1d_size = None\n lut_3d_size = None\n domain_min = None\n domain_max = None\n with open(filename, \"r\") as f:\n for line in f:\n icomment = line.find(\"#\")\n if icomment>=0:\n line = line[:icomment]\n \n splitline = line.split()\n if splitline:\n keyword = splitline[0]\n else:\n keyword = None\n \n if keyword is None:\n pass\n elif keyword == \"TITLE\":\n pass\n elif keyword == \"LUT_1D_SIZE\":\n lut_1d_size = int(splitline[1])\n if lut_1d_size<2 or lut_1d_size>65536:\n raise ValueError(f\"Invalid value {lut_1d_size} for LUT_1D_SIZE, must be in range [2,65536].\")\n elif keyword == \"LUT_3D_SIZE\":\n lut_3d_size = int(splitline[1])\n if lut_3d_size<2 or lut_3d_size>256:\n raise ValueError(f\"Invalid value {lut_3d_size} for LUT_3D_SIZE, must be in range [2,256].\")\n elif keyword == \"DOMAIN_MIN\":\n domain_min = np.genfromtxt([line], usecols=(1,2,3), dtype=np.float64)\n if domain_min.shape != (3,):\n raise ValueError(\"DOMAIN_MIN must provide exactly 3 values.\")\n if np.amin(domain_min) < -1e37 or np.amax(domain_min) > 1e37:\n raise ValueError(\"Invalid value in DOMAIN_MIN, must be in range [-1e37,1e37].\")\n elif keyword == \"DOMAIN_MAX\":\n domain_max = np.genfromtxt([line], usecols=(1,2,3), dtype=np.float64)\n if domain_max.shape != (3,):\n raise ValueError(\"DOMAIN_MIN must provide exactly 3 values.\")\n if np.amin(domain_max) < -1e37 or np.amax(domain_max) > 1e37:\n raise ValueError(\"Invalid value in DOMAIN_MAX, must be in range [-1e37,1e37].\")\n else:\n break\n \n nheader += 1\n \n if lut_1d_size and lut_3d_size:\n raise ValueError(\"Cannot specify both LUT_1D_SIZE and LUT_3D_SIZE.\")\n \n if not lut_1d_size and not lut_3d_size:\n raise ValueError(\"Must specify one of LUT_1D_SIZE or LUT_3D_SIZE.\")\n \n if domain_min is None:\n domain_min = np.zeros((3,), dtype=np.float64)\n \n if domain_max is None:\n domain_max = np.ones((3,), dtype=np.float64)\n \n lut = np.genfromtxt(filename, skip_header=nheader, comments=\"#\", dtype=np.float64)\n if np.amin(lut) < -1e37 or np.amax(lut) > 1e37:\n raise ValueError(\"Invalid value in DOMAIN_MAX, must be in range [-1e37,1e37].\")\n \n domain_min = np.reshape(domain_min, (1,3))\n domain_max = np.reshape(domain_max, (1,3))\n \n #shift and scale lut to range [0.,1.]\n lut = (lut-domain_min)/(domain_max-domain_min)\n \n if lut_1d_size:\n if lut.shape != (lut_1d_size,3):\n raise ValueError(f\"Expected shape {(lut_1d_size,3)} for 1D LUT, but got {lut.shape}.\")\n #convert to integer with appropriate range\n lut = np.rint(lut*32767.).astype(np.uint16)\n #transpose to get the correct element order\n lut = np.transpose(lut)\n elif lut_3d_size:\n if lut.shape != (lut_3d_size**3, 3):\n raise ValueError(f\"Expected shape {(lut_3d_size**3, 3)} for 3D LUT, but got {lut.shape}.\")\n lut = np.reshape(lut, (lut_3d_size, lut_3d_size, lut_3d_size, 3))\n lut = np.rint(lut*4095.).astype(np.uint16)\n \n return lut\n\ndef read_cal_file(filename):\n nheader = 0\n with open(filename, \"r\") as f:\n caldata = f.readlines()\n \n dataidx = caldata.index(\"BEGIN_DATA\\n\")\n lut_1d_size_in = 
int(caldata[dataidx-1].split()[1])\n \n lut = np.genfromtxt(caldata[dataidx+1:dataidx+1+lut_1d_size_in], dtype=np.float64)\n \n if lut.shape != (lut_1d_size_in,4):\n raise ValueError(f\"Expected shape {(lut_1d_size_in,3)} for 1D LUT, but got {lut.shape}.\")\n\n lut_1d_size = 1024\n \n #interpolate if necessary\n if lut_1d_size_in != lut_1d_size:\n x = np.linspace(0., 1., lut_1d_size, dtype=np.float64)\n lutcomponents = []\n for i in range(1,4):\n lutcomponent = np.interp(x, lut[:,0], lut[:,i])\n lutcomponents.append(lutcomponent)\n lut = np.stack(lutcomponents, axis=-1)\n else:\n lut = lut[:,1:]\n \n #convert to integer with appropriate range\n lut = np.rint(32767.*lut).astype(np.uint16)\n #transpose to get the correct element order\n lut = np.transpose(lut)\n \n return lut\n","sub_path":"aiopylgtv/lut_tools.py","file_name":"lut_tools.py","file_ext":"py","file_size_in_byte":5044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
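read_cube_file above shifts LUT values from [DOMAIN_MIN, DOMAIN_MAX] into [0., 1.] and then quantizes to the device's integer range (0-32767 for 1D tables, 0-4095 for 3D). A minimal numpy sketch of that same shift-scale-quantize step on made-up values:

import numpy as np

domain_min = np.array([[0., 0., 0.]])
domain_max = np.array([[2., 2., 2.]])
lut = np.array([[0.0, 0.5, 1.0], [1.5, 2.0, 2.0]])

lut = (lut - domain_min) / (domain_max - domain_min)   # rescale to [0., 1.]
lut12 = np.rint(lut * 4095.).astype(np.uint16)         # quantize to 12 bits
print(lut12)   # [[   0 1024 2048]
               #  [3071 4095 4095]]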
+{"seq_id":"459593522","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 21 11:15:01 2018\n\n@author: sujania\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# 05s00-13s02\ncc_inv = np.array([-1.024446e+01, -1.111985e-01, -4.172265e+00, 7.545845e+00, -2.760083e+00])\ncc_wh = np.array([-23.249747946970412, 0., 0., 0., 0.])\ncc_trp = np.array([-7.8685474666240403, 0., 0., 0., 0.])\ncc_bgt = np.array([-23.243773926407343, 0., 0., 0., 0.])\ncc_rwz = np.array([-19.445862735380029, 0., 0., 0., 0.])\ncc_s20rts = np.array([-1.5207712954483830, -0.79245287657720009, \n -1.8023340224734921, 2.5102791743920361, -3.7194176036023707])\ncc_trmp_s20rts = cc_trp + cc_s20rts\ncc_bgt_s20rts = cc_bgt + cc_s20rts\n\nfig = plt.figure(figsize=(10,3.5))\nax = fig.add_subplot(1,1,1)\nax.plot(cc_inv, '-^', color='r', label='${}_5S_0-{}_{13}S_2$', linewidth=2.5)\nax.plot(cc_bgt_s20rts, '-^', color='b', label='IC Beghein-Trampert + S20RTS', linewidth=2)\n#ax.plot(cc_s20rts, '-^', color='b', label='S20RTS')\n#ax.plot(cc_wh, '-^', color='g', label='IC Woodhouse')\n#ax.plot(cc_bgt, '-^', color='c', label='IC Beghein-Trampert')\n#ax.plot(cc_trp, '-^', color='plum', label='IC Tromp')\n#ax.plot(cc_rwz, '-^', color='m', label='IC Romanowicz')\nax.set_xticks(range(len(cc_inv)))\nplt.axhline(y=0, color='darkgray', linestyle='-', linewidth=5, zorder=0)\nplt.text(2, 1.5, 'PREM', fontsize=15, backgroundcolor='w', va='bottom', ha='center', zorder=0)\nax.tick_params(axis = 'both', which = 'major', labelsize = 15, zorder=0)\ncst = ['$Re[c_{20}]$', '$Re[c_{21}]$', '$Im[c_{21}]$', '$Re[c_{22}]$', '$Im[c_{22}]$']\nax.set_xticks(range(len(cst)))\nax.set_xticklabels(cst, rotation='horizontal', fontsize=15)\n\n# Shrink current axis's height by 10% on the bottom\nbox = ax.get_position()\n# Put a legend below current axis\nax.legend(loc='lower right',\n fancybox=True, shadow=True, ncol=2, fontsize=16)\n#plt.title('Cross-coupling coefficients')\nfig.savefig('cc_coeffs_05s00_13s02', dpi=350) \n","sub_path":"frospy/tests/todo/Su/plot_cc_coeff.py","file_name":"plot_cc_coeff.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"28971834","text":"import sqlite3\n\ndef databaseentry(ir,hr):\n con = sqlite3.connect('static/data.db')\n params = dict()\n try:\n with con:\n cur = con.cursor()\n cur.execute('INSERT OR IGNORE INTO data (ir,hr) VALUES (?, ?)', (ir,hr))\n con.commit()\n params['status'] = \"success\" \n except:\n params['status'] = \"fail\"\n return params\n\ndef databaseread():\n con = sqlite3.connect('static/data.db')\n params = dict()\n try:\n with con:\n cur = con.cursor()\n cur.execute(\"Select * from data order by id desc limit 1\")\n rows = cur.fetchall()\n for row in rows:\n id = row[0]\n ir = row[1]\n hr = row[2]\n params['id'] = id\n params['ir'] = ir\n params['hr'] = hr\n except:\n params['status'] = \"fail\"\n return params\n","sub_path":"API/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"275268080","text":"#!/usr/bin/python3\n# -*- coding:utf-8 -*-\n# Authour:Dreamer\n# Tmie:2018.6.22\n\n# 1.创建窗口\n# 2.加载背景图片\n# 3.背景图片贴到窗口\n# 4.刷新窗口\n\nimport pygame # 模块分两种 静态模块.py 动态模块.py (windows .dll .pyd) linux .os\nimport sys\nfrom pygame.locals import *\nimport time\nimport random\n\n# 模拟常量:用大写定义后不要再改了\nWINDOW_WIDTH = 512\nWINDOW_HEIGHT = 760\n\n\nclass BasePlane:\n\n def __init__(self, img_path, x, y, window):\n self.img = pygame.image.load(img_path) # 英雄图片路径\n # 英雄飞机第一次出现在窗口的位置\n self.x = x\n self.y = y\n # 窗口\n self.window = window\n\n def display(self):\n \"\"\"显示英雄飞机图片\"\"\"\n self.window.blit(self.img, (self.x, self.y))\n\n\nclass Bullet:\n \"\"\"子弹\"\"\"\n\n def __init__(self, img_path, x, y, window):\n self.img = pygame.image.load(img_path) # 子弹图片路径\n # 子弹出现在窗口的位置\n self.x = x\n self.y = y\n # 子弹所在的窗口\n self.window = window\n\n def display(self):\n \"\"\"显示子弹图片\"\"\"\n self.window.blit(self.img, (self.x, self.y))\n\n def bullet_move(self):\n \"\"\"子弹移动\"\"\"\n self.y -= 10\n\n def __del__(self):\n \"\"\"对象释放时自动调用\"\"\"\n print(\"子弹销毁了!\")\n\n\nclass HeroPlane(BasePlane):\n \"\"\"英雄飞机\"\"\"\n\n def __init__(self, img_path, x, y, window):\n super().__init__(img_path, x, y, window)\n\n self.bullets = [] # 保存所有发射的子弹\n\n def display(self):\n \"\"\"显示英雄飞机图片\"\"\"\n self.window.blit(self.img, (self.x, self.y))\n\n def move_left(self):\n \"\"\"飞机向左移\"\"\"\n if (self.x - 8) >= 0:\n self.x -= 8\n\n def move_right(self):\n \"\"\"飞机向右移\"\"\"\n if (self.x + 8) <= 395:\n self.x += 8\n\n def move_down(self):\n \"\"\"飞机向下移\"\"\"\n if (self.y + 8) <= 700:\n self.y += 8\n\n def move_up(self):\n \"\"\"飞机向上移\"\"\"\n if 0 < (self.y - 8):\n self.y -= 8\n\n def fire(self):\n \"\"\"发射子弹\"\"\"\n # 创建子弹对象\n # 子弹的x = 飞机的x + 飞机的宽度 * 0.5 - 子弹宽度 * 0.5\n # 子弹的y = 飞机的y - 子弹的高\n bullet = Bullet(\"res/bullet_9.png\", self.x + 50, self.y - 31, self.window)\n # bullet.display()\n # 把发射过的子弹添加到列表中保存起来\n self.bullets.append(bullet)\n\n def display_bullet(self):\n \"\"\"处理子弹图\"\"\"\n del_bullet = []\n for bullet in self.bullets: # 取出一个个子弹\n if bullet.y >= 31:\n bullet.display() # 每一个子弹重复贴图\n bullet.bullet_move() # 让子弹飞\n else:\n # self.bullets.remove(bullet) # 让超出边界的子弹销毁\n del_bullet.append(bullet)\n for bullet1 in del_bullet:\n self.bullets.remove(bullet1)\n\n\nclass EnemyPlane(BasePlane):\n \"\"\"敌机\"\"\"\n\n def enemy_move(self):\n \"\"\"敌机移动\"\"\"\n self.y += 5\n if self.y >= WINDOW_HEIGHT:\n self.y = random.randint(-300, -68)\n self.x = random.randint(0, WINDOW_WIDTH - 100)\n self.img = pygame.image.load(\"res/img-plane_%d.png\" % random.randint(1, 7))\n\n\n# 程序的主入口\ndef main():\n # 1.初始化pygam库。让计算机硬件做装备,如果想要音效或文字\n pygame.init()\n\n # 2.创建窗口\n window = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))\n\n # 3.加载图片文件,返回图片对象\n # 加载背景图片\n bg_img = pygame.image.load(\"res/img_bg_level_1.jpg\")\n\n # 创建英雄飞机对象\n hero_plane = HeroPlane(\"res/hero2.png\", 196, 500, window)\n\n # 创建敌机\n enemy_list = []\n for _ in range(105):\n enemy_plane = EnemyPlane(\"res/img-plane_%d.png\" % random.randint(1, 7), random.randint(0, WINDOW_WIDTH - 100),\n random.randint(-300, -68), window)\n enemy_list.append(enemy_plane)\n while True:\n # 4.贴图(指定坐标,将图片绘制到窗口)\n window.blit(bg_img, (0, 0))\n\n # 添加飞机到窗口上\n hero_plane.display()\n\n # 添加敌机到窗口上\n for enemy_plane in enemy_list:\n enemy_plane.display()\n enemy_plane.enemy_move()\n # 把子弹对象重新贴图\n # for bullet in hero_plane.bullets: # 取出一个个子弹\n # bullet.display() # 每一个子弹重复贴图\n # bullet.bullet_move() # 让子弹飞\n hero_plane.display_bullet()\n # 5.刷新界面,不刷新不会更新内容\n 
pygame.display.update()\n\n # 检测事件\n for event in pygame.event.get():\n # 1.鼠标点击关闭窗口事件\n if event.type == QUIT:\n print(\"点击关闭窗口按钮\")\n sys.exit() # 关闭程序\n # 2.键盘按下事件\n if event.type == KEYDOWN:\n # # 判断用户按键操作\n # if event.key == K_LEFT or event.key == K_a:\n # x -= 5\n # print(\"left\")\n # if event.key == K_RIGHT or event.key == K_d:\n # x += 5\n # print(\"right\")\n # if event.key == K_DOWN or event.key == K_w:\n # y += 5\n # print(\"down\")\n # if event.key == K_UP or event.key == K_s:\n # y -= 5\n # print(\"up\")\n if event.key == K_SPACE:\n print(\"space\")\n hero_plane.fire()\n # 3.键盘获取长按事件\n # 获取当前键盘所有按键的状态(按下、没有按下),返回bool元组(0,0,0,1,0,0,0)\n pressed_keys = pygame.key.get_pressed()\n\n if pressed_keys[K_LEFT] or pressed_keys[K_a]:\n hero_plane.move_left()\n elif pressed_keys[K_RIGHT] or pressed_keys[K_d]:\n hero_plane.move_right()\n elif pressed_keys[K_DOWN] or pressed_keys[K_s]:\n hero_plane.move_down()\n elif pressed_keys[K_UP] or pressed_keys[K_w]:\n hero_plane.move_up()\n\n # 让每一次的循环停0.01,降低循环的执行速度\n time.sleep(0.01) # 降低CPU的消耗\n print(len(hero_plane.bullets))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Python12/PlaneWar/08-抽取基类.py","file_name":"08-抽取基类.py","file_ext":"py","file_size_in_byte":6604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
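display_bullet in the record above collects off-screen bullets in del_bullet and removes them after the loop; removing from a list while iterating over it skips elements. A tiny standalone illustration of the pitfall and the safe rebuild (equivalent to the record's deferred del_bullet list):

ys = [10, 20, 700, 800, 790]

buggy = list(ys)
for y in buggy:           # BUG: each removal shifts the list, skipping the next element
    if y >= 700:
        buggy.remove(y)
print(buggy)              # [10, 20, 800] -- 800 was skipped and survived

safe = [y for y in ys if y < 700]
print(safe)               # [10, 20]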
+{"seq_id":"223113415","text":"from bs4 import BeautifulSoup\nfrom selenium import webdriver\nimport sys\nimport re\nimport os\nimport boto3\nfrom datetime import datetime as dt\n\n\ndef get_content(target_url):\n paths = \"{}/phantomjs\".format(os.getenv(\"LAMBDA_TASK_ROOT\"))\n\n service_args = ['--ignore-ssl-errors=yes']\n driver = webdriver.PhantomJS(\n executable_path=paths,\n desired_capabilities={\n 'phantomjs.page.settings.userAgent': \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36\",\n },\n service_args=service_args,\n service_log_path=os.path.devnull \n )\n print(\"target_url: {}\".format(target_url))\n driver.get(target_url)\n\n data = driver.page_source.encode(\"utf-8\")\n soup = BeautifulSoup(data, \"lxml\")\n print(soup.title.string)\n return soup.prettify()\n\n\ndef create_html_file(content):\n tmp_dir = '/tmp'\n\n cur_ts = dt.now().strftime('%Y%m%d_%H%M%S')\n file_path = \"{}/{}.html\".format(tmp_dir, cur_ts)\n with open(file_path, 'w') as file:\n file.write(content)\n return file_path\n\n\ndef upload_text_s3bucket(upload_file_path, s3_bucket, key):\n print(\"upload text. file_path:{}, upload_s3_bucket:{}, key:{}\".format(upload_file_path, s3_bucket, key))\n bucket = boto3.resource('s3').Bucket(s3_bucket)\n bucket.upload_file(upload_file_path, key)\n\n\ndef lambda_handler(event, context):\n\n current_dt = dt.now().strftime('%Y%m%d%H%M%S')\n upload_s3_bucket = os.getenv(\"UPLOAD_S3_BUCKET\")\n target_url = os.getenv(\"CRAWLING_TARGET_URL\")\n target_url_without_slash = target_url.replace(\"/\", \"_\")\n\n # get content\n content = get_content(target_url)\n\n # save\n saved_file_path = create_html_file(content)\n\n # upload\n upload_text_s3bucket(saved_file_path, upload_s3_bucket, \"crawling_result/{}/{}.html\".format(target_url_without_slash, current_dt))","sub_path":"crawling/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"443886372","text":"\nclass Tag:\n\n def __init__(self, tagname1=None, tagname2=None, tagslug=None, tagdescription=None):\n # None служебная конструкция, которая позволяет по умолчанию заполнять поле класса пустышкой\n # если при инициализации класса и конструктора, это поле не было указано\n self.tagname1 = tagname1\n self.tagname2 = tagname2\n self.tagslug = tagslug\n self.tagdescription = tagdescription","sub_path":"model/tag.py","file_name":"tag.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"255300759","text":"import maya.cmds as cmds\nimport Window\nreload(Window)\n\n\nclass Locator(Window.Window):\n def __init__(self, name='Locator Window'):\n Window.Window.__init__(self, name)\n\n def CreateUI(self):\n ''' Creates a UI for this tool '''\n self.DelUI()\n\n self.mWin = cmds.window(self.mWin, height=175, title=\"Create Locator at Center\", width=350)\n self.mLayout = cmds.columnLayout(adjustableColumn=True, height=100, rowSpacing=10, width=275)\n column = cmds.columnLayout(adjustableColumn=True, columnAttach=[\"both\", 25], parent=self.mLayout,\n rowSpacing=5)\n cmds.button(command=lambda *args: self.CreateLoc(2), height=25, label='Rotated Pivot', parent=column)\n cmds.button(command=lambda *args: self.CreateLoc(1), height=25, label='Center Pivot', parent=column)\n\n cmds.showWindow(self.mWin)\n\n def CreateLoc(self, option=1):\n ''' Creates a locator at the center of selection, or the pivot of each object, defaults to former '''\n sels = cmds.ls(sl=True)\n\n if option == 1:\n bbox = cmds.exactWorldBoundingBox(sels)\n pivot = [(bbox[0] + bbox[3]) / 2, (bbox[1] + bbox[4]) / 2, (bbox[2] + bbox[5]) / 2]\n\n loc = cmds.spaceLocator()[0]\n cmds.xform(loc, t=pivot, ws=True)\n\n elif option == 2:\n for sel in sels:\n pivot = cmds.xform(sel, q=True, t=True, ws=True)\n rot = cmds.xform(sel, q=True, ro=True, ws=True)\n loc = cmds.spaceLocator()[0]\n cmds.xform(loc, t=pivot, ws=True)\n cmds.xform(loc, ro=rot, ws=True)\n","sub_path":"MayaModels/scripts/Locator.py","file_name":"Locator.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"416392090","text":"import com.ihsan.foundation.pobjecthelper as phelper\nimport com.ihsan.timeutils as timeutils\nimport pyFlexcel\nimport sys\n\ndef formSetDataEx(uideflist, Parameter):\n if Parameter.DatasetCount == 0: return\n\n key=Parameter.FirstRecord.key\n config = uideflist.config\n helper = phelper.PObjectHelper(config)\n uploaded = helper.GetObject('UploadedStatement',key)\n\n uip = uideflist.uipart\n rec = uip.Dataset.AddRecord()\n rec.bulan = uploaded.StatementMonth\n rec.tahun = uploaded.StatementYear\n rec.picname = uploaded.LBranchBankPIC.PICName\n rec.cabang = uploaded.LBranchBankPIC.LBranch.BranchName\n\n\n","sub_path":"dialogs/Transaksi/fPeragaanUploadFileAngsuran_data.py","file_name":"fPeragaanUploadFileAngsuran_data.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"453730512","text":"#from datetime import datetime, timezone\nimport time\nimport pymysql\nimport sys\n\ndb_connection = {\n 'remote': {\n 'DBHost': 'mysqldb1.cmwln1uaaae0.ap-south-1.rds.amazonaws.com',\n 'DBUser': 'admin',\n 'DBPass': 'smartx123',\n 'DBName': 'waterfall'\n },\n 'local':{\n 'DBHost': 'localhost',\n 'DBUser': 'mounika',\n 'DBPass': 'Mounika@123',\n 'DBName': 'waterfall'\n }\n}\n\n\ndef get_cursor(db_conn_type):\n \"\"\"\n Create cursor for local and remote mysql server\n args: db_conn_type : (remote, local)\n :return: db, crsr\n \"\"\"\n db = pymysql.connect(host=db_connection[db_conn_type]['DBHost'],\n user=db_connection[db_conn_type]['DBUser'],\n passwd=db_connection[db_conn_type]['DBPass'],\n db=db_connection[db_conn_type]['DBName'])\n crsr = db.cursor()\n return db, crsr\n\n\n\ndef sync_local_to_remote_db():\n \"\"\"\n Sync records in the local database to remote database;\n :return:\n \"\"\"\n try:\n db_local, cur_local = get_cursor('local')\n db_remote, cur_remote = get_cursor('remote')\n\n # get last record id from remote database\n cmd_last_record = '''\n SELECT * FROM temp_reading\n ORDER BY id DESC \n LIMIT 1 ;\n '''\n cur_remote.execute(cmd_last_record)\n last_record_id = cur_remote.fetchall()[0][0]\n print(\"Remote DB last record id: \", last_record_id)\n #update new records from local db to remote db\n\n cmd_new_local_records = '''\n SELECT * FROM temp_reading\n WHERE id > {last_id} \n '''.format(last_id = last_record_id)\n\n cur_local.execute(cmd_new_local_records)\n local_records = cur_local.fetchall()\n\n if len(local_records) == 0:\n print('Local and Remote DBs are in sync..')\n for each_record in local_records:\n\n cmd_update = '''\n INSERT INTO temp_reading (id, timestamp, current_temp, high, low) \n VALUES {values}\n '''.format(values=(each_record[0],\n each_record[1].strftime(\"%Y-%m-%d, %H:%M:%S\"),\n float(each_record[2]),\n float(each_record[3]),\n float(each_record[4])))\n cur_remote.execute(cmd_update)\n db_remote.commit()\n print(\"Successfully inserted:\", each_record)\n\n except Exception as e:\n print(e)\n sys.exit(1)\n\nif __name__ == '__main__':\n sync_local_to_remote_db()","sub_path":"agri/update_cloud_db.py","file_name":"update_cloud_db.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"587321668","text":"import csv\nimport argparse\nimport sys\nsys.path.append(\"../\")\n\nfrom app_package import db\nfrom app_package.models import Player, Transaction, InternalState\nfrom scheduler.params_operations import *\nfrom scheduler.state_operations import *\n\nPARAMS_FILE = \"../scheduler/params.json\"\n\n\ndef print_table(my_dict, col_list=None):\n \"\"\"\n Pretty print a list of dicts (my_dict) as a table. You can specify the order of\n the columns in col_list.\n \"\"\"\n\n if not col_list:\n col_list = list(my_dict[0].keys() if my_dict else [])\n my_list = [col_list]\n for item in my_dict:\n my_list.append([str(item[col]) for col in col_list])\n col_size = [max(map(len, col)) for col in zip(*my_list)]\n format_str = ' | '.join([\"{{:<{}}}\".format(i) for i in col_size])\n my_list.insert(1, ['-' * i for i in col_size])\n for item in my_list:\n print(format_str.format(*item))\n\n\ndef clear_all_tables():\n print(\"--Clearing all tables...\")\n meta = db.metadata\n for table in reversed(meta.sorted_tables):\n db.session.execute(table.delete())\n db.session.commit()\n\n\ndef create_players(players_file=None):\n \"\"\"\n Generate db entries with initialization data from players_file.\n \"\"\"\n print(\"--Generating initial players database...\")\n params = get_params(PARAMS_FILE)\n with open(players_file) as f:\n reader = csv.DictReader(f)\n for i, row in enumerate(reader):\n p = Player(playername=row[\"playername\"],\n email=row[\"email\"],\n credit=params[\"starter_credit\"],\n about_me=row[\"about_me\"],\n avatar_path=\"avatars/{}.jpg\".format(str(i+1)))\n p.set_password(row[\"password\"])\n db.session.add(p)\n db.session.commit()\n\n\ndef view_players():\n all_players = Player.query.all()\n player_list = [p.__dict__ for p in all_players]\n print_table(player_list, [\"id\", \"playername\", \"credit\", \"score\", \"avatar_path\"])\n\n\ndef init_state():\n print(\"--Generating initial internal state...\")\n params = get_params(PARAMS_FILE)\n init_counter = params[\"refill\"][\"frequency_min\"]\n state = InternalState(counter=init_counter,\n is_challenge_open=False,\n is_final_vote_open=False,\n is_leaderboard_open=False)\n db.session.add(state)\n db.session.commit()\n\n\ndef view_state():\n state = get_state()\n print_table([state[1]])\n\n\ndef simple_vote(sender_name, recipient_name, amount):\n sender = Player.query.filter(Player.playername==sender_name).first()\n sender.credit -= amount\n recipient = Player.query.filter(Player.playername==recipient_name).first()\n recipient.score += amount\n print(\"{} gives {} to {}\".format(sender_name, amount, recipient_name))\n\n\n","sub_path":"scheduler/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"156446278","text":"import logging\nfrom os import getenv\n\nfrom squid_py import ConfigProvider\nfrom brizo.constants import ConfigSections\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_provider_account(ocean_instance):\n address = ConfigProvider.get_config().parity_address\n logger.info(f'address: {address}, {ocean_instance.accounts.accounts_addresses}')\n for acc in ocean_instance.accounts.list():\n if acc.address.lower() == address.lower():\n return acc\n\n\ndef get_env_property(env_variable, property_name):\n return getenv(\n env_variable,\n ConfigProvider.get_config().get(ConfigSections.OSMOSIS, property_name)\n )\n\n\ndef get_metadata(ddo):\n try:\n for service in ddo['service']:\n if service['type'] == 'Metadata':\n return service['metadata']\n except Exception as e:\n logger.error(\"Error getting the metatada: %s\" % e)\n\n\ndef check_required_attributes(required_attributes, data, method):\n assert isinstance(data, dict), 'invalid payload format.'\n logger.info('got %s request: %s' % (method, data))\n if not data:\n logger.error('%s request failed: data is empty.' % method)\n return 'payload seems empty.', 400\n for attr in required_attributes:\n if attr not in data:\n logger.error('%s request failed: required attr %s missing.' % (method, attr))\n return '\"%s\" is required in the call to %s' % (attr, method), 400\n return None, None\n","sub_path":"brizo/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"194083289","text":"from random import choice\n\nclass HangmanGame(object):\n def __init__(self, word=None, hangmanbody=None):\n if hangmanbody is None:\n hangmanbody = [\n ' x x ',\n ' n ',\n ' \\\\_|_/ ',\n ' | ',\n ' / \\\\ ',\n ' d b '\n ]\n self.hangmanbody = hangmanbody\n\n if word is None:\n word = self.get_random_word()\n self.secret = word\n print(\"SECRET: '%s'\" % self.secret)\n self.guesslist = []\n self.failcount = 0\n self.donecount = 0\n self.blank = '*'*len(self.secret)\n\n @classmethod\n def get_random_word(cls):\n f=open('words_list.txt')\n words=f.readlines()\n f.close\n word = choice(words)\n if word[-1] == \"\\n\":\n word = word[:-1]\n return word\n\n def play_game(self):\n while True:\n self.guesslist.sort()\n self.show_hangman()\n print(self.blank)\n print('You have guessed: %s' % self.guesslist)\n print('You have %s strikes left.' % (6-self.failcount))\n\n guess=input(\"Guess a letter: \").lower()\n \n if not self.is_guess_valid(guess):\n continue\n \n self.guesslist += guess\n\n if guess in self.secret:\n for i in range(len(self.secret)):\n if guess == self.secret[i]:\n self.blank = self.blank[:i] + self.secret[i] + self.blank[i+1:]\n self.donecount+=1\n else:\n print('Nope! %s strikes left,' % (5-self.failcount))\n self.failcount += 1\n\n print('\\n')\n\n if self.failcount == 6:\n self.show_hangman()\n print(\"GAME OVER....the secret word was '%s'.\" % self.secret)\n break\n elif self.donecount == len(self.secret):\n print(\"YOU WIN!!! The word was '%s'.\" % self.secret)\n break\n\n def is_guess_valid(self, guess):\n if len(guess)!=1:\n print('One guess at a time.')\n return False\n \n if not guess.isalpha():\n print('This needs to be a letter dipwad.')\n return False\n \n if guess in self.guesslist and self.secret:\n print('You already guessed %s.'%guess)\n return False\n \n return True\n\n def show_hangman(self):\n if self.failcount > 0:\n print(\"\\n\".join(self.hangmanbody[0:self.failcount]))\n if self.failcount<6:\n print('\\n'*(5-self.failcount))\n\n\ndef hangman_intro():\n questions3=0\n while questions3<3:\n play=input('Wanna play hangman? y/n: ')\n questions3 +=1\n if play=='y':\n break\n else:\n print('Haha, I must have misheard you. I SAID:')\n if questions3==3:\n print(\"WRONG. We're playing now.\")\n break\n\n\n\n\n\n#THE BEGINNING OF IT ALL\nhangman_intro()\nwhile True:\n hangmaninstance = HangmanGame()\n hangmaninstance.play_game()\n print(\"YOU ARE PLAYING AGAIN! DO IT BETTER THIS TIME!\")\n","sub_path":"hangman_class.py","file_name":"hangman_class.py","file_ext":"py","file_size_in_byte":2721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"276799127","text":"from django.core.paginator import Paginator\nfrom django.shortcuts import render, get_object_or_404\nfrom .models import Product, Cart,OderTracking,Transactions\nfrom django.contrib.auth.decorators import login_required\n\n\n# Create your views here.\ndef index(request):\n product = Product.objects.filter(Trending=True)\n new_arrival = Product.objects.filter(new_Arraivel=True)\n return render(request, \"index.html\", {'products': product, 'newarrival': new_arrival})\n\n\ndef Cart_View(request):\n products = Product.objects.filter(category=request.GET.get('category'))\n new_arrival = Product.objects.filter(new_Arraivel=True)\n mycontext = {\n 'products': new_arrival\n }\n return render(request, \"pages/cart.html\", mycontext)\n\n\ndef Shop(request):\n product = Product.objects.all()\n paginator = Paginator(product, 10)\n page = request.GET.get('page')\n products = paginator.get_page(page)\n context = {\n 'products': products\n }\n return render(request, \"pages/shop.html\", context)\n\n\ndef About(request):\n return render(request, \"pages/about.html\")\n\n\n@login_required(login_url=\"/login/\")\ndef CheckOut(request):\n current_user = request.user\n if request.method == 'POST':\n transactions = Transactions.objects.get_or_create(user=current_user)\n fname = request.POST['first_name']\n lname =request.POST['last_name']\n country =request.POST['Country']\n streetaddress =request.POST['streetaddress']\n appartment =request.POST['appartment']\n town =request.POST['town']\n postcode =request.POST['postcode']\n phone =request.POST['phone']\n total =request.POST['money']\n email =request.POST['email']\n payment =request.POST['payment']\n \n transactions[0].first_name=fname\n transactions[0].last_name=lname\n transactions[0].state_country=country\n transactions[0].street_Address=streetaddress\n transactions[0].appertment=appartment\n transactions[0].town=town\n transactions[0].postcode=postcode\n transactions[0].phone=phone\n transactions[0].email=email\n transactions[0].payment_Method=payment\n transactions[0].Total=total\n print(transactions[0].appertment)\n transactions[0].save() \n products = Product.objects.all()\n context = {\n 'products': products,\n }\n return render(request, \"pages/checkout.html\",context)\n\n@login_required(login_url=\"/login/\")\ndef OrderStatus(request):\n Ordertracker = OderTracking.objects.filter()\n context ={\n 'OderTracking':Ordertracker\n }\n return render(request, \"accounts/ProductDeliveryStatus.html\",context)\n\ndef Product_Details(request, id, Product_slug):\n product = get_object_or_404(Product, id=id, Product_slug=Product_slug, available=True)\n return render(request, \"pages/product-single.html\", {'product': product})\n\n\ndef Contact(request):\n return render(request, \"pages/contact.html\")\n\n","sub_path":"shop/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"538202623","text":"import agent\nimport ctp_api\nimport lts_api\nimport base\nimport time\nimport logging\nimport mysqlaccess\nimport datetime\nimport misc\n\ndef save_LTS(user, insts, run_date):\n app_name = 'SaveAgent'\n my_agent = agent.SaveAgent(name = app_name, trader = None, cuser = None, instruments=insts, daily_data_days=0, min_data_days=0, tday = run_date)\n lts_api.make_user(my_agent, user, insts)\n try:\n while 1: time.sleep(1)\n except KeyboardInterrupt:\n my_agent.mdapis = []; my_agent.trader = None\n\ndef filter_main_cont(sdate):\n insts, prods = mysqlaccess.load_alive_cont(sdate)\n main_cont = {}\n for pc in prods:\n main_cont[pc], exch = mysqlaccess.prod_main_cont_exch(pc)\n main_insts = []\n for inst in insts:\n pc = misc.inst2product(inst)\n mth = int(inst[-2:])\n if mth in main_cont[pc]:\n main_insts.append(inst)\n return main_insts\n \ndef save_all(tday, prod_md = misc.PROD_USER):\n logging.basicConfig(filename=\"save_all_agent.log\",level=logging.INFO,format='%(name)s:%(funcName)s:%(lineno)d:%(asctime)s %(levelname)s %(message)s')\n save_insts = filter_main_cont(tday)\n app_name = 'SaveAgent'\n config = {'daily_data_days': 0, 'min_data_days': 0}\n my_agent = agent.SaveAgent(name = app_name, trader = None, cuser = None, instruments=save_insts,tday = tday, config = config)\n ctp_api.make_user(my_agent, prod_md)\n try:\n while 1:\n time.sleep(1)\n \n except KeyboardInterrupt:\n my_agent.mdapis = []; my_agent.trader = None\n\ndef save_lts_test(tday):\n logging.basicConfig(filename=\"save_lts_test.log\",level=logging.INFO,format='%(name)s:%(funcName)s:%(lineno)d:%(asctime)s %(levelname)s %(message)s')\n save_insts = ['510050', '510050C1502M02500', '510050P1502M02500']\n save_LTS(misc.LTS_SO_USER,save_insts)\n pass\n\nif __name__ == '__main__':\n save_all()\n pass","sub_path":"test_run.py","file_name":"test_run.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"298128815","text":"import torch\nimport torch.nn.functional as F\nimport torch.nn as nn\n\n\ndef conv1x1(in_planes, out_planes, stride=1):\n \"\"\"1x1 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, padding=1, bias=True)\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return\n\n\nclass ResBlock(nn.Module):\n \"\"\"UNet 两层卷积块\n 收缩模块经过了两次卷积操作,每一次卷积之后都进行一次 relu 操作\n 参数:\n in_channels: 输入的通道数。\n out_channels: 输出的通道数。\n kernel_size: 卷积核的大小。默认使用 3×3 的卷积核\n stride: 卷积核移动步长。默认为 1\n padding: 填充。默认无填充\n bias: 卷积后的偏置。默认添加偏置\n\n 示例:\n contracting_block_1 = ContractingBlock(3, 32)\n contracting_block_2 = ContractingBlock(3, 32, 3, 1, 1, True)\n \"\"\"\n\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1):\n super(ResBlock, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=True)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=True)\n self.bn2 = nn.BatchNorm2d(planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion * planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion * planes)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.bn2(self.conv2(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass UpSamplingBlock(nn.Module):\n \"\"\"UNet 上采样和拼接模块\n 收缩模块经过了两次卷积操作,每一次卷积之后都进行一次 relu 操作\n 参数:\n in_channels: 输入的通道数。\n out_channels: 输出的通道数。\n kernel_size: 卷积核的大小。默认使用 2×2 的卷积核\n stride: 卷积核移动步长。默认为 1\n padding: 填充。默认无填充\n bias: 卷积后的偏置。默认添加偏置\n\n 示例:\n contracting_block_1 = ContractingBlock(3, 32)\n contracting_block_2 = ContractingBlock(3, 32, 3, 1, 1, True)\n \"\"\"\n\n def __init__(self, in_channels, out_channels, kernel_size=2, stride=2, padding=0, output_padding=1, bias=True):\n super(UpSamplingBlock, self).__init__()\n self.tran_conv = nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride=stride)\n\n def forward(self, x, concat_feature):\n tran_conv_out = self.tran_conv(x)\n\n out = torch.cat((concat_feature, tran_conv_out), dim=1)\n\n return out\n\n\nclass ChannelAttention(nn.Module):\n def __init__(self, in_planes, ratio=16):\n super(ChannelAttention, self).__init__()\n\n out_channels = 1 if 0 == (in_planes // 16) else in_planes // 16\n\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.max_pool = nn.AdaptiveMaxPool2d(1)\n\n self.fc1 = nn.Conv2d(in_planes, out_channels, 1, bias=False)\n self.relu1 = nn.ReLU()\n self.fc2 = nn.Conv2d(out_channels, in_planes, 1, bias=False)\n\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n avg_out = self.fc2(self.relu1(self.fc1(self.avg_pool(x))))\n max_out = self.fc2(self.relu1(self.fc1(self.max_pool(x))))\n out = avg_out + max_out\n return self.sigmoid(out)\n\n\nclass SpatialAttention(nn.Module):\n def __init__(self, kernel_size=7):\n super(SpatialAttention, self).__init__()\n\n assert kernel_size in (3, 7), 'kernel size must be 3 or 7'\n padding = 3 if kernel_size == 7 else 1\n\n self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n avg_out = torch.mean(x, dim=1, keepdim=True)\n max_out, _ = torch.max(x, dim=1, keepdim=True)\n x = torch.cat([avg_out, max_out], dim=1)\n x = self.conv1(x)\n return self.sigmoid(x)\n\n\nclass 
ChannelAttentionBasicBlock(nn.Module):\n\n    def __init__(self, channels):\n        super(ChannelAttentionBasicBlock, self).__init__()\n\n        self.ca = ChannelAttention(channels)\n\n    def forward(self, x):\n        out = x\n\n        out = self.ca(out) * out\n\n        return out\n\n\nclass FusionBasicBlock(nn.Module):\n\n    def __init__(self, conv_num, tran_conv_num, depth, target_channel, in_channel_list=None, cbam_block=True):\n        super(FusionBasicBlock, self).__init__()\n\n        self.cbam_block = cbam_block\n\n        if in_channel_list is None:\n            in_channel_list = [1, 32, 64, 128, 256]\n\n        self.layers = nn.ModuleList()\n\n        for i in range(conv_num):\n            sequential = nn.Sequential(\n                nn.Conv2d(in_channel_list[i], target_channel, 3, padding=1),\n                nn.MaxPool2d(2 ** (depth - i - 1), 2 ** (depth - i - 1)),\n\n            )\n\n            if cbam_block:\n                sequential.add_module('attention', ChannelAttentionBasicBlock(target_channel))\n\n            self.layers.append(sequential)\n\n        for i in range(tran_conv_num):\n            sequential = nn.Sequential(\n                nn.ConvTranspose2d(in_channel_list[depth + i], target_channel, 2 ** (i + 1), stride=2 ** (i + 1)),\n            )\n\n            if cbam_block:\n                sequential.add_module('attention', ChannelAttentionBasicBlock(target_channel))\n\n            self.layers.append(sequential)\n\n        if cbam_block:\n            self.sa = SpatialAttention()\n\n    def forward(self, node_list, current_node=None):\n        out = current_node\n\n        for index, item in enumerate(self.layers):\n            if out is None:\n                out = item(node_list[index])\n            else:\n                out = out + item(node_list[index])\n\n        if self.cbam_block:\n            out = self.sa(out) * out\n\n        return out\n\n\nclass MultiResolutionBlock(nn.Module):\n    def __init__(self):\n        super(MultiResolutionBlock, self).__init__()\n\n        # layer 1 (node 1)\n        self.fb_basic_block_1_1 = FusionBasicBlock(0, 0, 1, 1)\n\n        # layer 2 (creates node 2)\n        self.fb_basic_block_2_1 = FusionBasicBlock(1, 0, 2, 32) # 0 down 1 up\n\n        # layer 3 (updates nodes 1 and 2, creates node 3)\n        # node 1 update (upsampled node 2 + node 1)\n        self.fb_basic_block_3_1 = FusionBasicBlock(0, 1, 1, 1)\n        # node 2 update (downsampled node 1 + node 2)\n        self.fb_basic_block_3_2 = FusionBasicBlock(1, 0, 2, 32)\n        # create node 3 (downsampled node 1 + downsampled node 2)\n        self.fb_basic_block_3_3 = FusionBasicBlock(2, 0, 3, 64)\n\n        # layer 4 (updates nodes 1-3, creates node 4)\n        # node 1 update (upsampled node 2 + upsampled node 3 + node 1)\n        self.fb_basic_block_4_1 = FusionBasicBlock(0, 2, 1, 1)\n        # node 2 update (downsampled node 1 + node 2 + upsampled node 3)\n        self.fb_basic_block_4_2 = FusionBasicBlock(1, 1, 2, 32)\n        # node 3 update (downsampled node 1 + downsampled node 2 + node 3)\n        self.fb_basic_block_4_3 = FusionBasicBlock(2, 0, 3, 64)\n        # create node 4 (downsampled nodes 1-3)\n        self.fb_basic_block_4_4 = FusionBasicBlock(3, 0, 4, 128)\n\n        # layer 5 (updates nodes 1-4, creates node 5)\n        # node 1 update (upsampled nodes 2-4 + node 1)\n        self.fb_basic_block_5_1 = FusionBasicBlock(0, 3, 1, 1)\n        # node 2 update (downsampled node 1 + node 2 + upsampled nodes 3-4)\n        self.fb_basic_block_5_2 = FusionBasicBlock(1, 2, 2, 32)\n        # node 3 update (downsampled nodes 1-2 + node 3 + upsampled node 4)\n        self.fb_basic_block_5_3 = FusionBasicBlock(2, 1, 3, 64)\n        # node 4 update (downsampled nodes 1-3 + node 4)\n        self.fb_basic_block_5_4 = FusionBasicBlock(3, 0, 4, 128)\n        # create node 5 (downsampled nodes 1-4)\n        self.fb_basic_block_5_5 = FusionBasicBlock(4, 0, 5, 256)\n\n    def forward(self, x):\n        # layer 1\n        input_rel_1 = self.fb_basic_block_1_1([x], current_node=x)\n\n        # layer 2\n        input_rel_2 = self.fb_basic_block_2_1([input_rel_1])\n\n        # layer 3\n        input_rel_1 = self.fb_basic_block_3_1([input_rel_2], input_rel_1)\n        input_rel_2 = self.fb_basic_block_3_2([input_rel_1], input_rel_2)\n        input_rel_3 = self.fb_basic_block_3_3([input_rel_1, input_rel_2])\n\n        # layer 4\n        input_rel_1 = self.fb_basic_block_4_1([input_rel_2, input_rel_3], input_rel_1)\n        input_rel_2 = 
self.fb_basic_block_4_2([input_rel_1, input_rel_3], input_rel_2)\n        input_rel_3 = self.fb_basic_block_4_3([input_rel_1, input_rel_2], input_rel_3)\n        input_rel_4 = self.fb_basic_block_4_4([input_rel_1, input_rel_2, input_rel_3])\n\n        # layer 5\n        input_rel_1 = self.fb_basic_block_5_1([input_rel_2, input_rel_3, input_rel_4], input_rel_1)\n        input_rel_2 = self.fb_basic_block_5_2([input_rel_1, input_rel_3, input_rel_4], input_rel_2)\n        input_rel_3 = self.fb_basic_block_5_3([input_rel_1, input_rel_2, input_rel_4], input_rel_3)\n        input_rel_4 = self.fb_basic_block_5_4([input_rel_1, input_rel_2, input_rel_3], input_rel_4)\n        input_rel_5 = self.fb_basic_block_5_5([input_rel_1, input_rel_2, input_rel_3, input_rel_4])\n\n        return [input_rel_1, input_rel_2, input_rel_3, input_rel_4, input_rel_5]\n\n\nclass FullyAggregationBlock(nn.Module):\n    def __init__(self):\n        super(FullyAggregationBlock, self).__init__()\n\n        # stage 1\n        # node 1 + upsampled node 2 + upsampled node 3 + upsampled node 4\n        self.fb_basic_block_1 = FusionBasicBlock(0, 3, 1, 32, in_channel_list=[32, 64, 128, 256, 512], cbam_block=True)\n        # stage 2\n        # downsampled node 1 + node 2 + upsampled node 3 + upsampled node 4\n        self.fb_basic_block_2 = FusionBasicBlock(1, 2, 2, 64, in_channel_list=[32, 64, 128, 256, 512], cbam_block=True)\n        # stage 3\n        # downsampled node 1 + downsampled node 2 + node 3 + upsampled node 4\n        self.fb_basic_block_3 = FusionBasicBlock(2, 1, 3, 128, in_channel_list=[32, 64, 128, 256, 512], cbam_block=True)\n        # stage 4\n        # downsampled node 1 + downsampled node 2 + downsampled node 3 + node 4\n        self.fb_basic_block_4 = FusionBasicBlock(3, 0, 4, 256, in_channel_list=[32, 64, 128, 256, 512], cbam_block=True)\n\n    def forward(self, x):\n        [l1, l2, l3, l4] = x\n        l1 = self.fb_basic_block_1([l2, l3, l4], l1)\n        l2 = self.fb_basic_block_2([l1, l3, l4], l2)\n        l3 = self.fb_basic_block_3([l1, l2, l4], l3)\n        l4 = self.fb_basic_block_4([l1, l2, l3], l4)\n\n        return [l1, l2, l3, l4]\n\n\nclass MultiInputBlock(nn.Module):\n    def __init__(self):\n        super(MultiInputBlock, self).__init__()\n\n        # layer 1\n\n        # layer 2\n        self.conv_2 = nn.Conv2d(1, 32, 3, padding=1)\n        self.down_sampling_2 = nn.MaxPool2d(2, 2)\n\n        # layer 3\n        self.conv_3 = nn.Conv2d(32, 64, 3, padding=1)\n        self.down_sampling_3 = nn.MaxPool2d(2, 2)\n\n        # layer 4\n        self.conv_4 = nn.Conv2d(64, 128, 3, padding=1)\n        self.down_sampling_4 = nn.MaxPool2d(2, 2)\n\n        # layer 5\n        self.conv_5 = nn.Conv2d(128, 256, 3, padding=1)\n        self.down_sampling_5 = nn.MaxPool2d(2, 2)\n\n    def forward(self, x):\n        layer_1 = x\n        layer_2 = self.down_sampling_2(self.conv_2(layer_1))\n        layer_3 = self.down_sampling_3(self.conv_3(layer_2))\n        layer_4 = self.down_sampling_4(self.conv_4(layer_3))\n        layer_5 = self.down_sampling_5(self.conv_5(layer_4))\n\n        return [layer_1, layer_2, layer_3, layer_4, layer_5]\n\n\nclass UNetMI(nn.Module):\n    def __init__(self):\n        super().__init__()\n\n        self.mi_block = MultiInputBlock()\n\n        self.conv_block_1 = ResBlock(1, 32)\n        self.down_sampling_1 = nn.MaxPool2d(2, 2)\n        self.conv_block_2 = ResBlock(32, 64)\n        self.down_sampling_2 = nn.MaxPool2d(2, 2)\n        self.conv_block_3 = ResBlock(64, 128)\n        self.down_sampling_3 = nn.MaxPool2d(2, 2)\n        self.conv_block_4 = ResBlock(128, 256)\n        self.down_sampling_4 = nn.MaxPool2d(2, 2)\n        self.conv_block_5 = ResBlock(256, 512)\n\n        self.up_sampling_1 = UpSamplingBlock(512, 256)\n        self.conv_block_6 = ResBlock(512, 256)\n        self.up_sampling_2 = UpSamplingBlock(256, 128)\n        self.conv_block_7 = ResBlock(256, 128)\n        self.up_sampling_3 = UpSamplingBlock(128, 64)\n        self.conv_block_8 = ResBlock(128, 64)\n        self.up_sampling_4 = UpSamplingBlock(64, 32)\n        self.conv_block_9 = ResBlock(64, 32)\n\n        self.out = nn.Conv2d(32, 2, 1)\n\n    def forward(self, 
x):\n [input_l1, input_l2, input_l3, input_l4, input_l5] = self.mi_block(x)\n\n conv_block_out_1 = self.conv_block_1(input_l1)\n conv_block_out_2 = self.conv_block_2(input_l2)\n conv_block_out_3 = self.conv_block_3(input_l3)\n conv_block_out_4 = self.conv_block_4(input_l4)\n conv_block_out_5 = self.conv_block_5(input_l5)\n\n conv_block_out_6 = self.conv_block_6(self.up_sampling_1(conv_block_out_5, conv_block_out_4))\n conv_block_out_7 = self.conv_block_7(self.up_sampling_2(conv_block_out_6, conv_block_out_3))\n conv_block_out_8 = self.conv_block_8(self.up_sampling_3(conv_block_out_7, conv_block_out_2))\n conv_block_out_9 = self.conv_block_9(self.up_sampling_4(conv_block_out_8, conv_block_out_1))\n\n out = self.out(conv_block_out_9)\n\n return out\n","sub_path":"models/UNetMI.py","file_name":"UNetMI.py","file_ext":"py","file_size_in_byte":14049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
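+A quick shape sanity check for the UNetMI model above — a sketch assuming a single-channel input whose height and width are multiples of 16, since the network downsamples four times:
+
+    import torch
+    from models.UNetMI import UNetMI  # import path taken from the record's sub_path
+
+    model = UNetMI()
+    x = torch.randn(1, 1, 64, 64)  # (batch, channels, H, W)
+    with torch.no_grad():
+        y = model(x)
+    print(y.shape)  # expected: torch.Size([1, 2, 64, 64])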
+{"seq_id":"467132176","text":"from velociraptor.observations.objects import ObservationalData\n\nimport unyt\nimport numpy as np\nimport os\nimport sys\n\n# Exec the master cosmology file passed as first argument\nwith open(sys.argv[1], \"r\") as handle:\n exec(handle.read())\n\n# Cosmology\nh_sim = cosmology.h\nOmega_b = 0.0486 # Cosmology assumed by the M-TNG model\nOmega_m = 0.3089 # Cosmology assumed by the M-TNG model\n\ninput_filename = \"../raw/mtng_hmsm.txt\"\n\noutput_filename = \"MillenniumTNG_ratio.hdf5\"\noutput_directory = \"../\"\n\nif not os.path.exists(output_directory):\n os.mkdir(output_directory)\n\nprocessed = ObservationalData()\nraw = np.loadtxt(input_filename)\n\nM_200 = raw[:, 0] * unyt.Solar_Mass\nratio = raw[:, 1] * unyt.dimensionless\nratio *= Omega_b / Omega_m\n\n# Meta-data\ncomment = ()\ncitation = \"Pakmor et al. (2022) (MTNG)\"\nbibcode = \"2022arXiv221010060P\"\nname = \"Stellar mass - halos mass relation from Millennium-TNG (Fig. 2)\"\nplot_as = \"line\"\nredshift = 0.0\nh = h_sim\n\n# Write everything\nprocessed = ObservationalData()\nprocessed.associate_x(\n M_200,\n scatter=None,\n comoving=False,\n description=\"Halo Mass ($M_{200, {\\rm crit}}$)\",\n)\nprocessed.associate_y(\n ratio,\n scatter=None,\n comoving=True,\n description=\"Galaxy Stellar Mass / Halo Mass ($M_* / M_{200, {\\rm crit}}$)\",\n)\nprocessed.associate_citation(citation, bibcode)\nprocessed.associate_name(name)\nprocessed.associate_comment(comment)\nprocessed.associate_redshift(redshift)\nprocessed.associate_plot_as(plot_as)\nprocessed.associate_cosmology(cosmology)\n\noutput_path = f\"{output_directory}/{output_filename}\"\n\nif os.path.exists(output_path):\n os.remove(output_path)\n\nprocessed.write(filename=output_path)\n","sub_path":"data/GalaxyStellarMassHaloMass/conversion/convertMTNG.py","file_name":"convertMTNG.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"46010126","text":"\"\"\"\npicture.py\nAuthor: Katie Naughton\nCredit: I worked alone. \n\nAssignment:\n\nUse the ggame library to \"paint\" a graphical picture of something (e.g. a house, a face or landscape).\n\nUse at least:\n1. Three different Color objects.\n2. Ten different Sprite objects.\n3. One (or more) RectangleAsset objects.\n4. One (or more) CircleAsset objects.\n5. One (or more) EllipseAsset objects.\n6. One (or more) PolygonAsset objects.\n\nSee:\nhttps://github.com/HHS-IntroProgramming/Standards-and-Syllabus/wiki/Displaying-Graphics\nfor general information on how to use ggame.\n\nSee:\nhttp://brythonserver.github.io/ggame/\nfor detailed information on ggame.\n\n\"\"\"\nfrom ggame import App, Color, LineStyle, Sprite, RectangleAsset, CircleAsset, EllipseAsset, PolygonAsset\n\n# colors, no transparency\nred = Color(0xff0000, 1.0)\ngreen = Color(0x00ff00, 1.0)\nblue = Color(0x0000ff, 1.0)\nblack = Color(0x000000, 1.0)\nyellow = Color(0xffff00,1.0)\nlightblue = Color(0X00ffff,1.0)\ndarkgreen = Color(0x006400, 1.0)\ndarkblue = Color(0x483D8B, 1.0)\nwhite = Color(0xf8f8ff, 1.0)\n\n\n#line\nthinline=LineStyle(1, black)\n\n#line 2\nthinline2=LineStyle(1, darkblue)\n\n#sky day\nrectangle4=RectangleAsset (475, 1000, thinline, lightblue)\nSprite(rectangle4, (0, 0))\n\n#sky night\nrectangle6=RectangleAsset (460, 1000, thinline, darkblue)\nSprite(rectangle6, (475, 0))\n\n#house\nrectangle=RectangleAsset (250, 250, thinline, black)\nSprite(rectangle,(350, 150))\n\n#grass day\nrectangle2=RectangleAsset (475, 300, thinline, green)\nSprite(rectangle2, (0, 400))\n\n#grass night\nrectangle5=RectangleAsset (460, 350, thinline, darkgreen)\nSprite(rectangle5, (475, 400))\n\n#door\nrectangle3=RectangleAsset (100, 100, thinline, red)\nSprite(rectangle3, (400, 300))\n\n#doorknob\nellipse=EllipseAsset (5, 10, thinline, blue)\nSprite(ellipse, (425, 350))\n\n#roof\ntriangle=PolygonAsset([(1,100),(200,1),(400, 100)], thinline, black)\nSprite(triangle, (275,75))\n\n#sun\ncircle=CircleAsset(50, thinline, yellow)\nSprite(circle, (0,0))\n\n#moonwhite\ncircle2=CircleAsset(50, thinline, white)\nSprite(circle2, (800,0))\n\n#moondarkblue\ncircle3=CircleAsset(40, thinline2, darkblue)\nSprite(circle3, (800,0))\n\n\n\n\n\n\n\n\nmyapp = App()\nmyapp.run()\n","sub_path":"picture.py","file_name":"picture.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"302366132","text":"import pygame\nfrom Animations.secretAgent import SecretAgent\nfrom Maps.map import Map\nfrom Menu.mainMenu import MainMenu\n\nSOURCE_PICTURE = \"Properties/agent_sprites.png\"\nSIDEBAR_PICTURE = \"Properties/Sidebar.png\"\nTEST_MAP = \"Properties/map2.csv\"\nMENU_FILE = \"Properties/menu.csv\"\nBACKGROUND_COLOR = (86, 118, 255)\nDISPLAY_SIZE = (500, 320)\n\n\ndef main():\n while 1:\n screen = pygame.display.set_mode(DISPLAY_SIZE, pygame.DOUBLEBUF)\n clock = pygame.time.Clock()\n entire_map = Map(SOURCE_PICTURE, TEST_MAP)\n menu = MainMenu(SIDEBAR_PICTURE, MENU_FILE)\n agent = SecretAgent(SOURCE_PICTURE)\n while 1:\n if menu.key_check():\n break\n menu.draw_menu(screen)\n pygame.display.flip()\n\n while 1:\n clock.tick(30)\n agent.moveAgent()\n entire_map.move(agent.position)\n entire_map.collisionDetection(agent)\n screen.fill(BACKGROUND_COLOR)\n entire_map.draw(screen, DISPLAY_SIZE)\n agent.draw(screen, DISPLAY_SIZE)\n pygame.display.flip()\n if agent.return_to_menu and not agent.jump and not agent.isJumping:\n break\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"267758726","text":"import logging\nimport sys\n\nimport structlog\nfrom flask import Flask, redirect, url_for\nfrom flask_admin import Admin\nfrom flask_assets import Environment, Bundle\nfrom flask_qrcode import QRcode\n\nfrom website.constants import FLASK_SECRET_KEY\nfrom website.extensions import cache\nfrom website.views.home_view import HomeView\n\n\nclass App(Flask):\n def __init__(self):\n super().__init__(__name__)\n if __name__ != '__main__':\n gunicorn_logger = logging.getLogger('gunicorn.error')\n self.logger.handlers = gunicorn_logger.handlers\n self.logger.setLevel(gunicorn_logger.level)\n\n logging.basicConfig(\n format=\"%(message)s\", stream=sys.stdout, level=logging.INFO\n )\n structlog.configure(\n processors=[\n structlog.processors.KeyValueRenderer(\n key_order=[\"event\", \"request_id\"]\n )\n ],\n context_class=structlog.threadlocal.wrap_dict(dict),\n logger_factory=structlog.stdlib.LoggerFactory(),\n )\n assets = Environment(self)\n\n js = Bundle('js/app.js',\n filters='jsmin', output='gen/packed.js')\n assets.register('js_all', js)\n\n cache.init_app(self)\n QRcode(self)\n self.debug = False\n self.config['SECRET_KEY'] = FLASK_SECRET_KEY\n\n @self.route('/')\n def index():\n return redirect(url_for('home.index'))\n\n @self.errorhandler(404)\n def page_not_found(e):\n return redirect(url_for('home.index'))\n\n self.admin = Admin(app=self, url='/')\n\n home_view = HomeView(name='Home', endpoint='home')\n self.admin.add_view(home_view)\n\n\nif __name__ == '__main__':\n app = App()\n app.debug = True\n app.run(port=5001, use_reloader=True, use_debugger=True)\n","sub_path":"website/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"293664793","text":"#!/usr/bin/python3\n\nimport subprocess\nimport sys\n\n\ndef run_cmd(correction_file, args):\n args.insert(0, correction_file)\n\n if correction_file.endswith('.py'):\n args.insert(0, 'python3')\n elif correction_file.endswith('.pl'):\n args.insert(0, 'perl')\n elif correction_file.endswith('.go'):\n args.insert(0, 'run')\n args.insert(0, 'go')\n\n child = subprocess.Popen(args, stdout=subprocess.PIPE)\n out = child.communicate()[0]\n ret = child.returncode\n return out.decode(), ret\n\n\ndef check(correction_file, secret):\n \"\"\"\n Check if the corrected source code is still usable\n \"\"\"\n checks = [\n {\n 'params': ['odtokjupfpenmtyo'],\n 'response': 'here is the encrypted text',\n 'message': 'Encryption API is broken. Expecting to find: \"here is the encrypted text\" in the response\\n\\n'\n 'Your code output: \\n\\n{}',\n },\n {\n 'params': [''],\n 'response': 'here is the encrypted text',\n 'message': 'Encryption API is broken. Without user input, return example found in secret.\\n '\n 'Expecting to find: \"here is the encrypted text\" in the response\\n\\n'\n 'Your code output: \\n\\n{}',\n },\n {\n 'params': ['tooshort'],\n 'response': 'encryption problem!',\n 'message': 'Encryption API is broken. Expecting : \"Encryption problem!\"\\n\\n'\n 'Your code output: \\n\\n{}',\n },\n ]\n\n for _check in checks:\n\n out, return_code = run_cmd(correction_file, _check['params'])\n if return_code != 0:\n print(\"Invalid execution : {}\".format(out))\n sys.exit(1)\n if _check['response'] not in out.lower():\n print(_check['message'].format(out))\n return False\n\n return True\n\n\ndef main():\n secret = sys.argv[1]\n correction_file = sys.argv[2]\n return_code = 0 if check(correction_file, secret) else 2\n sys.exit(return_code)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"challs/data_exposure.dir/check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"176409729","text":"ignore = '\":;,.-+=/\\|[]{}()*^&'\n\n\ndef word_count(s):\n # Your code here\n result = {}\n filtered_text = [\n word.strip(ignore).lower()\n for word in s.split()\n if word.strip(ignore)\n ]\n for text in filtered_text:\n result[text] = result.get(text, 0) + 1\n\n return result\n\n\nif __name__ == \"__main__\":\n print(word_count(\"\"))\n print(word_count(\"Hello\"))\n print(word_count('Hello, my cat. And my cat doesn\\'t say \"hello\" back.'))\n print(word_count('This is a test of the emergency broadcast network. This is only a test.'))\n","sub_path":"applications/word_count/word_count.py","file_name":"word_count.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"111463753","text":"# The edit distance between two strings refers to the minimum number of character\n# insertions, deletions, and substitutions required to change one string to the\n# other. For example, the edit distance between “kitten” and “sitting” is three:\n# substitute the “k” for “s”, substitute the “e” for “i”, and append a “g”.\n#\n# Given two strings, compute the edit distance between them.\n\n# Edit distance between a, b with m, n being the length of those strings\n# respectively\n\n# Levenshtein distance\n\ndef dist(a, m, b, n):\n # base case: empty strings\n if m == 0:\n return n\n\n if n == 0:\n return m\n\n # if the last characters of the strings match\n cost = 0 if (a[m - 1] == b[n - 1]) else 1\n\n return min(dist(a, m - 1, b, n) + 1, # deletion\n dist(a, m, b, n - 1) + 1, # insertion\n dist(a, m - 1, b, n - 1) + cost) # substitution\n\nif __name__ == '__main__':\n a = \"kitten\"\n b = \"sitting\"\n\n print(\"The edit distance is\", dist(a, len(a), b, len(b)))\n","sub_path":"editDistance.py","file_name":"editDistance.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"87859233","text":"# -*- coding=utf-8 -*-\r\n#author:wang\r\n\r\nfrom docx import Document\r\nimport openpyxl\r\n\r\nclass WordOperation:\r\n \"\"\"\r\n word操作类,可以根据不同的输入内容,生成word文件,要求至少完成三类内容的输入,标题,副标题,正文\r\n \"\"\"\r\n def __init__(self, title, subtitle, text):\r\n self.title = title\r\n self.subtitle = subtitle\r\n self.text = text\r\n def write_word(self):\r\n \"写入word并保存\"\r\n try:\r\n doc = Document()\r\n doc.add_heading(self.title)\r\n doc.add_paragraph(self.subtitle,'Subtitle')\r\n doc.add_paragraph(self.text)\r\n name = self.title + '.docx'\r\n doc.save(name)\r\n print('word文件保存完成!')\r\n except Exception as f:\r\n print(f)\r\n \r\nclass ExcelOperation:\r\n '''\r\n 根据第一列的数据,按年进行拆分,放到新的工作表,例:2015年数据,表名为2015\r\n 按年拆分后的数据,在数据最后一行,添加平均价格\r\n from_file_name:数据文件名\r\n sheet_name:数据表名\r\n go_file_name:数据新建表名\r\n '''\r\n def __init__(self, from_file_name, sheet_name, go_file_name):\r\n self.wb = openpyxl.load_workbook(from_file_name)\r\n self.sh1 = self.wb[sheet_name]\r\n self.go_name = go_file_name\r\n\r\n def run(self):\r\n \"完成逻辑控制\"\r\n l = []\r\n for rows in self.sh1.rows:\r\n if rows[0].coordinate != 'A1':\r\n l.append(rows[0].value[:4])\r\n l = list(set(l))\r\n\r\n for x in l:\r\n self.write_data(x)\r\n\r\n self.wb.save(self.go_name)\r\n print('excel保存完成')\r\n\r\n\r\n def write_data(self, sheet_name):\r\n \"\"\"\r\n 写入表格\r\n sheet_name :数据保存的表名\r\n \"\"\"\r\n index = 2\r\n self.wb.create_sheet(sheet_name)\r\n sh2 = self.wb[sheet_name]\r\n sh2['A1'] = self.sh1['A1'].value\r\n sh2['B1'] = self.sh1['B1'].value\r\n for rows in self.sh1.rows:\r\n if rows[0].coordinate != 'A1' and rows[0].value[:4] == sheet_name:\r\n sh2['A' + str(index)] = rows[0].value\r\n sh2['B' + str(index)] = rows[1].value\r\n index += 1\r\n num = sh2.max_row\r\n # 求平均值\r\n l=[]\r\n for row in sh2.rows:\r\n if row[0].coordinate != 'A1':\r\n l.append(int(row[1].value))\r\n average_num = sum(l)/(num-1)\r\n \r\n sh2.cell(row=num+1, column=2).value = average_num\r\n sh2.cell(row=num+1, column=1).value = '平均分'\r\n\r\nclass OfficeOperation:\r\n \"选择需要的操作类,逻辑选择\"\r\n def operation(self):\r\n while True:\r\n print('请选择需要进行的操作')\r\n menu = {\r\n '1':'操作word',\r\n '2':'操作excel', \r\n '0':'quit'\r\n }\r\n for k,v in menu.items():\r\n print(k, v)\r\n try:\r\n choose = input('请输入需要选择的操作编号:') \r\n if choose == '0':\r\n print('退出!' )\r\n break\r\n elif choose == '1':\r\n WO = WordOperation('wangshuai', 'python', 'text')\r\n WO.write_word()\r\n elif choose == '2':\r\n EO = ExcelOperation('btc.xlsx', 'btc', 'btc-1.xlsx')\r\n EO.run()\r\n except Exception as f:\r\n print(\"输入有误请重新输入\")\r\n\r\ndef main():\r\n while True:\r\n print('请选择需要进行的操作')\r\n menu = {\r\n '1':'操作word',\r\n '2':'操作excel', \r\n '0':'quit'\r\n }\r\n for k,v in menu.items():\r\n print(k, v)\r\n try:\r\n choose = input('请输入需要选择的操作编号:') \r\n if choose == '0':\r\n print('退出!' )\r\n break\r\n elif choose == '1':\r\n WO = WordOperation('wangshuai', 'python', 'text')\r\n WO.write_word()\r\n elif choose == '2':\r\n EO = ExcelOperation('btc.xlsx', 'btc', 'btc-1.xlsx')\r\n EO.run()\r\n except Exception as f:\r\n print(\"输入有误请重新输入\")\r\n \r\nif __name__ == \"__main__\":\r\n main()","sub_path":"moude3/数据处理系统/office_process.py","file_name":"office_process.py","file_ext":"py","file_size_in_byte":4451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"62657030","text":"def maxrem(num):\n rem=0\n nu = max(num)\n num.remove(nu)\n\n x=max(num)\n if((x%nu)>rem):\n rem=x%nu\n return rem\n num.remove(x)\n return rem\n\nif __name__ == '__main__':\n N = int(input())\n numstr = input()\n nums = set(map(int, numstr.split()))\n r = maxrem(nums)\n print(r)\n\n\n","sub_path":"code_Chef/maximum_remaining.py","file_name":"maximum_remaining.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"46742022","text":"import psycopg2\nimport argparse\nimport sys\nimport json\n\n\"\"\"\nparser = argparse.ArgumentParser(description='parse key pairs into a dictionary')\n\n\nclass StoreDictKeyPair(argparse.Action):\n def __init__(self, option_strings, dest, nargs=None, **kwargs):\n self._nargs = nargs\n super(StoreDictKeyPair, self).__init__(option_strings, dest, nargs=nargs, **kwargs)\n\n def __call__(self, parser, namespace, values, option_string=None):\n my_dict = {}\n print(\"values: {}\".format(values))\n for kv in values:\n k, v = kv.split(\":\")\n my_dict[k] = v\n setattr(namespace, self.dest, my_dict)\n\n\nparser.add_argument(\"--kv\", dest=\"my_dict\", action=StoreDictKeyPair, nargs=\"+\", metavar=\"KEY:VAL\")\n\nargs = parser.parse_args(sys.argv[1:])\n\n\"\"\"\n\n#\n# parser = argparse.ArgumentParser()\n# parser.add_argument('-i', '--input', help=\"JSON file to be processed\")\n# arguments = parser.parse_args()\n# inp = \"\"\n# if arguments.input:\n# # print(arguments.input, type(arguments.input))\n# inp = arguments.input\n# print(inp,type(inp))\n# else:\n# print(\"usage: program -i \")\n# sys.exit(-1)\n# data_info = {}\n# data = json.loads(inp)\n# print(data)\n# data = inp.replace(\"'{\", '')\n# data_0 = data.replace(\"}'\", '')\n# dict_data = data_0.split(\",\")\n# for kv in dict_data:\n# k, v = kv.split(\":\")\n# data_info[k] = v\n# print(data_info, type(data_info))\n# record_to_insert = (data_info.get(\"user\"), data_info.get(\"state\"), data_info.get(\"status\"))\n# print(record_to_insert, type(record_to_insert))\n\"\"\"\nconnection = None\n\ntry:\n connection = psycopg2.connect(\"dbname='gtaproject' host='10.10.1.10' user='gtauser' password='password'\")\n cur = connection.cursor()\n postgres_insert_query = \"INSERT INTO authentication_executionstatus (user_id, key, value) VALUES (%s,%s,%s)\"\n # record_to_insert = (2, \"status\", \"sucess\")\n record_to_insert = (data_info.get(\"user_id\"), data_info.get(\"state\"), data_info.get(\"status\"))\n cur.execute(postgres_insert_query, record_to_insert)\n connection.commit()\n\nexcept psycopg2.DatabaseError as err:\n print(f\"Error in DB inserting: {err}\")\n sys.exit(1)\nfinally:\n if connection:\n connection.close()\n\"\"\"\n\n# python db_status.py -i '{\"user\": srikanththalla, \"state\":\"GIT\", \"status\":\"Success\"}'\n\n\nimport psycopg2\nimport argparse\nimport sys\n# import pdb\nimport json\n\n\"\"\"\nparser = argparse.ArgumentParser()\nparser.add_argument('-i', '--input', help=\"JSON file to be processed\")\narguments = parser.parse_args()\ninp = \"\"\nif arguments.input:\n inp = arguments.input\n\nelse:\n print(\"usage: program -i \")\n sys.exit(-1)\n#pdb.set_trace()\n\ndata_info = {}\nd=json.loads(inp)\n#print(d)\ndata_info.update(d)\"\"\"\n\n\n# print(data_info)\n\ndef write_db(ds, **kwargs):\n connection = None\n\n try:\n connection = psycopg2.connect(\"dbname='gtaproject1' host='10.10.1.10' user='gtauser' password='password'\")\n cur = connection.cursor()\n user_id = kwargs['dag_run'].conf['user_id']\n state_git = kwargs['dag_run'].conf['state_git']\n status_git = kwargs['dag_run'].conf['status_git']\n # pdb.set_trace()\n insert_query = \"\"\" INSERT INTO authentication_executionstatus (user_id, key, value) VALUES (%s,%s,%s)\"\"\"\n record_to_insert = (user_id, state_git, status_git)\n cur.execute(insert_query, record_to_insert)\n connection.commit()\n except psycopg2.DatabaseError as err:\n print(\"Error in DB inserting:\", err)\n sys.exit(1)\n\n finally:\n if connection:\n 
connection.close()\n\n\n# NOTE: PythonOperator and `dag` are Airflow names that must come from the\n# surrounding DAG definition file; this line is a fragment on its own.\ngit_status = PythonOperator(task_id='git_status', provide_context=True, python_callable=write_db, dag=dag)\n","sub_path":"Core_python/_24_DB_Connection_Python/GTA_db_status.py","file_name":"GTA_db_status.py","file_ext":"py","file_size_in_byte":3691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"71923664","text":"# Import the libraries\nimport math\nimport pandas_datareader as web\nimport time\nfrom datetime import datetime, timedelta, date\nfrom dateutil.relativedelta import relativedelta\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, LSTM\nfrom db_connect import lstm_usd_res, lstm_usd_remove\nfrom helper_connect import DBConnect # 디비 연결\n\n\ndef getDay(year, month, date_v):\n day = ['Mon', 'Tue', 'Wed', 'Thur', 'Fri', 'Sat', 'Sun']\n aday = date(year, month, date_v)\n bday = aday.weekday()\n return day[bday]\n\n\ndef lstm_usd(conn, df):\n data = df\n dataset = data.values\n training_data_len = math.ceil(len(dataset) * .8)\n\n # Scale the data\n scaler = MinMaxScaler(feature_range=(0, 1))\n scaled_data = scaler.fit_transform(dataset)\n\n train_data = scaled_data[0:training_data_len, :]\n x_train = []\n y_train = []\n for i in range(60, len(train_data)):\n x_train.append(train_data[i-60:i, 0])\n y_train.append(train_data[i, 0])\n x_train, y_train = np.array(x_train), np.array(y_train)\n\n x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))\n\n model = Sequential()\n model.add(LSTM(units=50, return_sequences=True,\n input_shape=(x_train.shape[1], 1)))\n model.add(LSTM(units=50, return_sequences=False))\n model.add(Dense(units=25))\n model.add(Dense(units=1))\n\n model.compile(optimizer='adam', loss='mean_squared_error')\n\n model.fit(x_train, y_train, batch_size=1, epochs=1)\n\n test_data = scaled_data[training_data_len - 60:, :]\n x_test = []\n y_test = dataset[training_data_len:, :]\n for i in range(60, len(test_data)):\n x_test.append(test_data[i-60:i, 0])\n x_test = np.array(x_test)\n\n x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))\n\n predictions = model.predict(x_test)\n predictions = scaler.inverse_transform(predictions)\n\n rmse = np.sqrt(np.mean((predictions - y_test)**2))\n print(rmse)\n\n train = data[:training_data_len]\n valid = data[training_data_len:]\n valid['Predictions'] = predictions\n\n df = web.DataReader('KRW=X', data_source='yahoo', start='2003-01-01')\n new_df = df.filter(['Close'])\n last_60_days = new_df[-60:].values\n\n last_60_days_scaled = scaler.transform(last_60_days)\n\n X_test = []\n X_test.append(last_60_days_scaled)\n X_test = np.array(X_test)\n X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))\n print(X_test.shape)\n pred_price = model.predict(X_test)\n pred_price = scaler.inverse_transform(pred_price)\n print(pred_price[0][0], \"함수한 result\")\n return pred_price[0][0]\n\n\nif __name__ == \"__main__\":\n conn = DBConnect()\n df = web.DataReader('KRW=X', data_source='yahoo', start='2003-01-01')\n data = df.filter(['Close'])\n for i in range(1, 31):\n today = date.today() + relativedelta(days=+i)\n if getDay(today.year, today.month, today.day) == 'Sat' or getDay(today.year, today.month, today.day) == 'Sun':\n continue\n else:\n result = lstm_usd(conn, data)\n data = data.reset_index()\n data = data.append({\"Date\": pd.Timestamp(\n today), \"Close\": float(result)}, ignore_index=True)\n data = data.set_index(\"Date\")\n lstm_usd_res(conn, today, result)\n lstm_usd_remove(conn)\n","sub_path":"Crawling/lstm_USD.py","file_name":"lstm_USD.py","file_ext":"py","file_size_in_byte":3448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"138646626","text":"import qnt.data as qndata\nimport qnt.stats as qnstats\nimport qnt.xr_talib as qnxrtalib\nimport qnt.forward_looking as qnfl\nimport time\n\ndata = qndata.load_data(min_date=\"2010-01-01\", max_date=None, forward_order=True, dims=(\"time\", \"field\", \"asset\"))\n\n\ndef strategy(data):\n wma = qnxrtalib.WMA(data.sel(field='close'), 290)\n sroc = qnxrtalib.ROCP(wma, 35)\n\n is_liquid = data.sel(field=\"is_liquid\")\n weights = is_liquid.where(sroc > 0.0125)\n\n weights = weights / weights.sum(\"asset\", skipna=True)\n return weights.fillna(0.0)\n\n\nt0 = time.time()\noutput = qnfl.calc_output_and_check_forward_looking(data, strategy)\nt1 = time.time()\nprint(t1 - t0)\nstat = qnstats.calc_stat(data, output, max_periods=252 * 3)\nt2 = time.time()\nprint(t2 - t1)\nstat2 = qnstats.calc_stat(data, output, max_periods=252 * 3, per_asset=True)\nt3 = time.time()\nprint(t3 - t2)\n\nprint(stat2.sel(field='sharpe_ratio').transpose().to_pandas())\n\nqndata.write_output(output)\n","sub_path":"example/roc_example.py","file_name":"roc_example.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"496922150","text":"from pymongo import InsertOne, MongoClient, UpdateOne\nfrom os import environ\nfrom boto3 import client\nfrom re import compile, escape, IGNORECASE, sub\nfrom flask import abort, request, session\nfrom uuid import uuid4\nfrom functools import wraps\nfrom game_night.game import Game\n\ntry:\n _game_night = MongoClient('mongodb://{}:{}@{}/{}'.format(environ['MONGODB_USER'], environ['MONGODB_PASSWORD'], environ.get('MONGODB_HOST', 'localhost'), environ['MONGODB_DATABASE'])).game_night\nexcept:\n _game_night = MongoClient().game_night\n_api_keys = _game_night.api_keys\n_gamemasters = _game_night.gamemasters\n_games = _game_night.games\n\n_s3 = client('s3', aws_access_key_id = environ['S3_KEY'], aws_secret_access_key = environ['S3_SECRET'])\n\ndef _create_filters():\n filters = {}\n max_players = request.args.get('max_players')\n if max_players:\n try:\n filters['max_players'] = int(max_players)\n except:\n filters['max_players'] = -1\n min_players = request.args.get('min_players')\n if min_players:\n try:\n filters['min_players'] = int(min_players)\n except:\n filters['min_players'] = -1\n name = request.args.get('name')\n if name:\n try:\n filters['name'] = compile(name, IGNORECASE)\n except:\n filters['name'] = compile(escape(name), IGNORECASE)\n owner = request.args.get('owner')\n if owner:\n filters['owner'] = owner\n players = request.args.get('players')\n if players:\n try:\n players = int(players)\n filters['$and'] = [{'min_players': {'$lte': players}}, {'max_players': {'$gte': players}}]\n except:\n filters['$and'] = [{'min_players': {'$lte': -1}}, {'max_players': {'$gte': -1}}]\n submitter = request.args.get('submitter')\n if submitter:\n filters['submitter'] = submitter\n return filters\n\ndef delete_game(name):\n if _games.delete_one({'name': name}).deleted_count:\n try:\n id = list(_games.find().sort([('_id', -1)]).limit(10))[-1]['_id']\n _games.update_many({'_id': {'$gte': id}}, {'$set': {'new': True}})\n except:\n pass\n _s3.delete_object(Bucket = environ['S3_BUCKET'], Key = name + '.jpg')\n return True\n return False\n\ndef game_exists(name):\n return _games.count({'name': compile(f'^{escape(name)}$', IGNORECASE)})\n\ndef generate_api_key(write = False):\n uuid = str(uuid4())\n _api_keys.insert_one({'key': uuid, 'write': write})\n return uuid\n\ndef get_count():\n return _games.count(_create_filters())\n\ndef get_game(name):\n return _games.find_one({'name': name})\n\ndef get_games():\n return _games.find(_create_filters(), {'_id': False}).sort([('sort_name', 1)])\n\ndef get_newest_games():\n filters = _create_filters()\n filters['new'] = True\n return _games.find(filters, {'_id' : False}).sort([('_id', -1)])\n\ndef get_owners(all = False):\n owners = _games.distinct('owner') if all else _games.distinct('owner', _create_filters())\n owners.sort()\n return owners\n\ndef get_players():\n try:\n return _games.aggregate([{'$group': {'_id': False, 'max': {'$max': '$max_players'}, 'min': {'$min': '$min_players'}}}]).next()\n except:\n return None\n\ndef get_random_games(sample_size):\n return _games.aggregate([{'$match': _create_filters()}, {'$sample': {'size': sample_size}}, {'$project': {'_id': False}}])\n\ndef get_submissions():\n filters = _create_filters()\n filters['submitter'] = session['userinfo']['preferred_username']\n return _games.find(filters, {'_id': False}).sort([('sort_name', 1)])\n\ndef _insert_game(game):\n requests = [InsertOne(game)]\n games = list(_games.find().sort([('_id', -1)]).limit(10))\n if len(games) == 10:\n 
requests.append(UpdateOne({'_id': games[-1]['_id']}, {'$unset': {'new': 1}}))\n    _games.bulk_write(requests)\n\ndef is_gamemaster():\n    return _gamemasters.count({'username': session['userinfo']['preferred_username']})\n\ndef _prepare_game(game):\n    del game['image']\n    game['new'] = True\n    # anchored so only a leading article is stripped, not one mid-name\n    game['sort_name'] = sub('^(A|An|The) ', '', game['name'])\n    game['submitter'] = session['userinfo']['preferred_username']\n\ndef require_gamemaster(function):\n    @wraps(function)\n    def wrapper(*args, **kwargs):\n        if is_gamemaster():\n            return function(*args, **kwargs)\n        abort(403)\n    return wrapper\n\ndef require_read_key(function):\n    @wraps(function)\n    def wrapper(*args, **kwargs):\n        try:\n            if not _api_keys.count({'key': request.headers['Authorization'][7:]}):\n                abort(403)\n        except:\n            abort(403)\n        return function(*args, **kwargs)\n    return wrapper\n\ndef require_write_key(function):\n    @wraps(function)\n    def wrapper(*args, **kwargs):\n        try:\n            if not _api_keys.find_one({'key': request.headers['Authorization'][7:]})['write']:\n                abort(403)\n        except:\n            abort(403)\n        return function(*args, **kwargs)\n    return wrapper\n\ndef submit_game():\n    game = Game()\n    if game.validate():\n        game = game.data\n        _s3.upload_fileobj(game['image'], environ['S3_BUCKET'], game['name'] + '.jpg', ExtraArgs = {'ContentType': game['image'].content_type})\n        _prepare_game(game)\n        _insert_game(game)\n        return True\n    return game, next(iter(game.errors.values()))[0]","sub_path":"game_night/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":5380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
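+In the game_night record above, `_create_filters` turns a `players` query parameter into a Mongo range query; for example, `?players=4` yields:
+
+    {'$and': [{'min_players': {'$lte': 4}}, {'max_players': {'$gte': 4}}]}
+
+so a game matches exactly when its supported player range contains 4.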
+{"seq_id":"23247324","text":"import copy\nimport json\nimport logging\nimport os\n\n\nfrom . import uri, utilities\n\nlogger = logging.getLogger(__name__)\n\n\n\n\ndefault_conf = {\n \"database\": {\n \"host\": \"localhost\",\n \"port\": 3004,\n \"db\": \"visinum\"\n },\n \"girder\": {\n \"apiUrl\": \"http://localhost:8080/api/v1\",\n \"apiKey\": \"EdKeaqELS40XIrepHcXZFuLQzrMOGUJOVIeeyR5Z\",\n },\n}\n\n\ndef initialize(conf=None):\n global app_config\n app_config = {}\n load_config(default_conf)\n if conf:\n load_config(conf)\n return app_config\n\n\ndef load_config(filepath):\n global app_config\n _config = {}\n if isinstance(filepath, str) and os.path.isfile(filepath):\n try:\n with open(filepath, 'r') as conf_fh:\n _config = json.load(conf_fh)\n except Exception as err: \n logger.error(\"load_config: cannot load config file «%s», failed with error «%s».\" % (filepath, err) )\n elif isinstance(filepath, dict):\n _config = filepath\n # elif isinstance(filepath, str):\n # try:\n # _config = json.loads(filepath)\n # except Exception as err: \n # logger.error(\"load_config: cannot load config string «%s», failed with error «%s».\" % (filepath, err) )\n else:\n logger.error(\"load_config: cannot find config file «%s» in directory «%s»\" % (filepath, os.getcwd()) )\n utilities.rupdate(app_config, _config)\n return app_config\n\n\n\n\ndef get_config(*keys):\n global app_config\n if not keys:\n return False\n _datadict = copy.deepcopy(app_config)\n for _key in keys:\n _val = _datadict.get(_key, None)\n if isinstance(_val, dict):\n _datadict = _val\n else:\n break\n return _val\n\n\ndef get_db_uri(db=None, collection=None, _id=None, vn_uri=None, port=None, **kwargs):\n _db_uri = get_config(\"database\")\n _db_uri.update(kwargs)\n if db:\n _db_uri[\"db\"] = db\n if collection:\n _db_uri[\"collection\"] = collection\n if port:\n _db_uri[\"port\"] = port\n if not _id and vn_uri:\n _id = uri.string2UUID(vn_uri)\n if _id:\n _db_uri[\"_id\"] = _id\n return _db_uri\n\n\n\ndef make_req_url(*paths):\n apiUrl = app_config[\"girder\"][\"apiUrl\"] \n if apiUrl.endswith(\"/\"):\n apiUrl = apiUrl[:-1]\n return apiUrl + \"/\" + \"/\".join(paths)\n","sub_path":"visinum/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"283626713","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\n\nfrom knack.arguments import enum_choice_list\nfrom .const import (SERVICE_ENDPOINT_AUTHORIZATION_PERSONAL_ACCESS_TOKEN,\n SERVICE_ENDPOINT_TYPE_GITHUB,\n SERVICE_ENDPOINT_AUTHORIZATION_SERVICE_PRINCIPAL,\n SERVICE_ENDPOINT_TYPE_AZURE_RM)\n\n\n# CUSTOM CHOICE LISTS\n_YES_NO_SWITCH_VALUES = ['yes', 'no']\n_SOURCE_CONTROL_VALUES = ['git', 'tfvc']\n_PROJECT_VISIBILITY_VALUES = ['private', 'public']\n_STATE_VALUES = ['invalid', 'unchanged', 'all', 'new', 'wellformed', 'deleting', 'createpending']\n_SERVICE_ENDPOINT_TYPE = [SERVICE_ENDPOINT_TYPE_GITHUB, SERVICE_ENDPOINT_TYPE_AZURE_RM]\n_SERVICE_ENDPOINT_AUTHORIZATION_SCHEME = [SERVICE_ENDPOINT_AUTHORIZATION_PERSONAL_ACCESS_TOKEN,\n SERVICE_ENDPOINT_AUTHORIZATION_SERVICE_PRINCIPAL]\n\n\ndef load_global_args(context):\n from azure.cli.core.commands.parameters import get_enum_type\n context.argument('organization', options_list=('--organization', '--org'),\n help='Azure Devops organization URL. Example: https://dev.azure.com/MyOrganizationName/')\n context.argument('detect', arg_type=get_enum_type(['on', 'off']),\n help='Automatically detect organization. Default is \"on\".')\n context.argument('project', options_list=('--project', '-p'), help='Name or ID of the project.')\n\n\ndef load_team_arguments(self, _):\n with self.argument_context('devops configure') as context:\n context.argument('defaults', options_list=('--defaults', '-d'), nargs='*')\n with self.argument_context('devops project') as context:\n context.argument('process', options_list=('--process', '-p'))\n context.argument('source_control', options_list=('--source-control', '-s'),\n **enum_choice_list(_SOURCE_CONTROL_VALUES))\n context.argument('description', options_list=('--description', '-d'))\n context.argument('state', **enum_choice_list(_STATE_VALUES))\n context.argument('visibility', **enum_choice_list(_PROJECT_VISIBILITY_VALUES))\n with self.argument_context('devops service-endpoint create') as context:\n context.argument('service_endpoint_type', **enum_choice_list(_SERVICE_ENDPOINT_TYPE))\n context.argument('authorization_scheme', **enum_choice_list(_SERVICE_ENDPOINT_AUTHORIZATION_SCHEME))\n with self.argument_context('devops project delete') as context:\n context.argument('yes', options_list=['--yes', '-y'], action='store_true',\n help='Do not prompt for confirmation.')\n with self.argument_context('devops configure') as context:\n context.argument('use_git_aliases', **enum_choice_list(_YES_NO_SWITCH_VALUES))\n context.argument('list_config', options_list=('--list', '-l'))\n\n with self.argument_context('devops') as context:\n load_global_args(context)\n\n with self.argument_context('repos') as context:\n load_global_args(context)\n\n with self.argument_context('artifacts') as context:\n load_global_args(context)\n\n with self.argument_context('boards') as context:\n load_global_args(context)\n\n with self.argument_context('pipelines') as context:\n load_global_args(context)\n","sub_path":"azure-devops/azext_devops/dev/team/arguments.py","file_name":"arguments.py","file_ext":"py","file_size_in_byte":3568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"162445679","text":"\nimport logging\nimport queue\n\nfrom bluepy.btle import Peripheral\nfrom pybluepedal.common.base import BaseDelegate, BaseService\nfrom pybluepedal.common.byte_ops import byte_array_to_int\n\nlogger = logging.getLogger(\"CSCService\")\n\n\nclass CSCService(BaseService):\n UUID = \"00001816\"\n CHARACTERISTIC_MEASUREMENT = \"00002a5b\"\n CHARACTERISTIC_FEATURE = \"00002a5c\"\n CHARACTERISTIC_SENSOR_LOCATION = \"00002a5d\"\n\n ENABLE_NOTIFICATION_VALUE = (0x01, 0x00)\n\n FEATURE_CRANK_DATA = \"FEATURE_CRANK_DATA\"\n FEATURE_WHEEL_DATA = \"FEATURE_CRANK_DATA\"\n FEATURE_MULTIPLE_SENSOR_LOCATIONS = \"FEATURE_MULTIPLE_SENSOR_LOCATIONS\"\n\n CSC_FEATURES_MASK = {\n 0b00000001: FEATURE_CRANK_DATA,\n 0b00000010: FEATURE_WHEEL_DATA,\n 0b00000100: FEATURE_MULTIPLE_SENSOR_LOCATIONS,\n }\n\n CSC_FEATURES = {v: k for k, v in CSC_FEATURES_MASK.items()}\n\n def __init__(self, peripheral: Peripheral):\n super().__init__(peripheral, CSCService.UUID)\n\n def supports_feature(self, name: str) -> bool:\n \"\"\"Returns true if the feature is supported\"\"\"\n\n characteristics = self._service.getCharacteristics(\n forUUID=CSCService.CHARACTERISTIC_FEATURE)\n\n if len(characteristics) < 1:\n return False\n\n characteristic = characteristics[0]\n val = byte_array_to_int(characteristic.read())\n\n return CSCService.CSC_FEATURES[name] & val > 0\n\n def start_notifications(self, delegate: BaseDelegate):\n \"\"\"Starts the notifications for the characteristic measurement\"\"\"\n\n self._peripheral.setDelegate(delegate)\n\n characteristic = self._service.getCharacteristics(\n forUUID=CSCService.CHARACTERISTIC_MEASUREMENT)[0]\n\n resp = self._peripheral.writeCharacteristic(\n characteristic.getHandle() + 1, b\"\\x01\\x00\", True)\n\n logger.debug(f\"notification started: {resp}\")\n\n\nclass CSCDelegate(BaseDelegate):\n def __init__(self, producer_queue: queue.Queue):\n super().__init__(producer_queue)\n\n self._producer_queue = producer_queue\n\n def handleNotification(self, cHandle, data):\n logger.debug(f\"handing notification {cHandle} {data}\")\n\n values = bytearray(data)\n\n cumulative_wheel_revolutions = byte_array_to_int(bytes(values[1:5]))\n last_wheel_event_time = byte_array_to_int(bytes(values[5:7]))\n cumulative_crank_revolutions = byte_array_to_int(bytes(values[7:9]))\n last_crank_event_time = byte_array_to_int(bytes(values[9:]))\n\n data = {\n \"type\": \"CSC\",\n \"handle\": cHandle,\n \"cumulative_wheel_revolutions\": cumulative_wheel_revolutions,\n \"last_wheel_event_time\": last_wheel_event_time,\n \"cumulative_crank_revolutions\": cumulative_crank_revolutions,\n \"last_crank_event_time\": last_crank_event_time,\n }\n\n self._producer_queue.put(data)\n logger.debug(f\"added to queue {data}\")\n","sub_path":"pybluepedal/services/cycling_speed_cadence.py","file_name":"cycling_speed_cadence.py","file_ext":"py","file_size_in_byte":2941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"181226239","text":"# -*- coding:utf-8 -*-\nimport pagerank\nimport OutputData\nfrom numpy import *\nimport InputData\n\n\nmatrixfile = 'gather_new.csv'\nOffsetRow = 1\nOffsetCol = 1\npi = pagerank.pagerankmain(matrixfile, OffsetRow, OffsetCol) #计算网站的pagerank值\n\nurlfile = 'id_url.csv'\nInputUrl = InputData.InputUrl()\nUrlList = InputUrl.ReadUrl(urlfile)\nIdList = InputUrl.ReadId(urlfile)\n\nn1 = len(UrlList)\nn2 = pi.shape[1]\nif n1 == n2:\n for i in range(n2):\n data = {\"_id\": int(IdList[i]), \"url\": UrlList[i], \"pr\": pi[0][i]}\n outputer = OutputData.Outputpi()\n outputer.OutputMongo(data)\n","sub_path":"NjuPagerank/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"578274374","text":"# -*- coding: utf-8 -*-\n\nfrom typing import Union, AnyStr\nfrom decimal import Decimal, InvalidOperation\n\n\ndef num_to_words(n: Union[int, float, AnyStr, Decimal]) -> str:\n \"\"\"Convert a number to Vietnamese words.\n\n Convert a number to its Vietnamese formal spoken form. It supports\n long numbers (both integers and decimals).\n\n Parameters\n ----------\n n : int, float, Decimal or str\n The number to be converted. If `n` is a str, it will be converted\n to a Decimal object.\n\n Returns\n -------\n str\n The spoken form of the number\n \n Raises\n ------\n TypeError\n If the input's type is neither int, float, str nor Decimal\n\n ValueError\n If the input string does not represent a valid number\n \"\"\"\n \n digits = ('không', 'một', 'hai', 'ba', 'bốn', 'năm', 'sáu', 'bảy', 'tám', 'chín', 'mười')\n levels = ('đơn vị', 'nghìn', 'triệu')\n \n def per_digit(n):\n return [digits[int(s)] for s in str(n)]\n \n def per_thousand(n, linh=False):\n tarr = []\n if 100 <= n <= 999:\n n1, n2 = divmod(n, 100)\n tarr.append(digits[n1])\n tarr.append('trăm')\n if 1 <= n2 <= 9:\n tarr.append('linh')\n n = n2\n if 1 <= n <= 9:\n if linh:\n tarr.append('linh')\n tarr.append(digits[n])\n elif n <= 99 and n != 0:\n n1, n2 = divmod(n, 10)\n ele = digits[n2]\n if n1 == 1:\n tarr.append('mười')\n else:\n tarr.append(digits[n1])\n tarr.append('mươi')\n if n2 == 1:\n ele = 'mốt'\n elif n2 == 4:\n ele = 'tư'\n if n2 == 5:\n ele = 'lăm'\n if ele != 'không': tarr.append(ele)\n return tarr\n \n tarr = []\n if isinstance(n, str):\n try:\n n = Decimal(n)\n except InvalidOperation as e:\n raise ValueError(f\"'{n}' is not a valid number.\")\n elif not isinstance(n, (int, float, Decimal)):\n raise TypeError('The first parameter must be an integer or a float.')\n if int(n) == 0:\n tarr.append('không')\n elif int(n) < 0:\n tarr.append('âm')\n n = abs(n)\n ns = str(n)\n if '.' in ns:\n is_decimal = True\n intn, decn = ns.split('.')\n ns = intn\n else:\n is_decimal = False\n \n length = len(ns)\n splited = [ns[0:len(ns) % 3]] + [ns[i:i+3] for i in range(len(ns) % 3, len(ns), 3)]\n splited = list(filter(None, splited))\n \n for part in splited:\n pn = int(part)\n if pn != 0:\n if part[0] == '0' and 1 <= pn <= 99:\n tarr.append('không trăm')\n linh = True\n else:\n linh = False\n tarr.extend(per_thousand(pn, linh))\n bilis, thous = divmod((length - 1) // 3, 3)\n if thous > 0:\n tarr.append(levels[thous])\n tarr.extend(['tỉ'] * bilis)\n length -= 3\n \n if is_decimal:\n tarr.append('phẩy')\n if decn != '0':\n decn = decn.rstrip('0')\n if 2 <= len(decn) <= 3:\n dec_int = int(decn)\n if decn[0] == '0' and 1 <= dec_int <= 99:\n tarr.append('không trăm')\n tarr.extend(per_thousand(dec_int))\n else:\n tarr.extend(per_digit(int(decn)))\n tarr = list(filter(None, tarr))\n return ' '. join(tarr)","sub_path":"ctnx/number.py","file_name":"number.py","file_ext":"py","file_size_in_byte":3559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"381460184","text":"import subprocess\nimport platform\nimport errno\nimport os\nfrom config import config\n\nimport win\n\nclass net():\n \n def __init__(self,card='wlan'):\n self.card = card\n self.pc_type = self._get_platform()\n self.current = self._settings_dir()\n self.config = config()\n\n arg = {'card': self.card,'pc_type':self.pc_type,\n 'direc': self.current,'config':self.config}\n \n systems = {'windows':win.windows(arg)}\n self.system = systems[self.pc_type]\n \n def _get_platform(self):\n try:\n plat = platform.system().lower()\n except Exception as e:\n plat = 0\n print(e)\n return plat\n \n def _settings_dir(self):\n direc = os.path.join(os.getcwd(),\"config\",\"profiles\")\n try:\n os.makedirs(direc) \n except OSError as e:\n if e.errno != errno.EEXIST:\n direc = \".\"\n \n return direc\n \n def sys(self):\n return self.system\n\n \ndef test_scan():\n work = net().sys()\n items = work.scan()\n \n for i in items:\n print('%s:%s------\\n'%(i.ssid,i.authentication))\n #if len(i.bssids) >=1:\n # print(i.bssids[0].id)\ndef test_disconnect():\n work = net().sys().disconnect()\n#test_scan()\n \n#test export\nsetting= {'name':'RGGCN','password':'123456'}\nprint(net().sys()._create_profile_xml(setting))\n\n#print([item.authentication for item in net().sys().scan()])\n\ninput(\"Press any key to exit.\")\n\n# connect(SSID,password)\n\n\n","sub_path":"wifi.py","file_name":"wifi.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"231844579","text":"\"\"\"\n==========================================================================\nJson implementation of :py:class:`~.base.mapper.MapperABC` abstract class\n==========================================================================\n\"\"\"\n\nfrom base.mapper import MapperABC\nimport json\n\n\nclass JsonMapper(MapperABC):\n \"\"\"\n Implementation of :py:class:`~.base.mapper.MapperABC` abstract class.\n For more info see MapperABC\n \"\"\"\n\n def load(self, data) -> 'JsonMapper':\n if data is None:\n self.data = dict()\n elif isinstance(data, str):\n self.data = json.loads(data)\n elif isinstance(data, dict):\n self.data = data\n else:\n raise TypeError(\n 'Supplied data must be json string or dictionary.'\n ' data you supplied was of %s type' % (\n str(type(data)),\n )\n )\n return self\n\n def get_item(self, key: str) -> any:\n return self.data.get(key)\n\n def dump(self):\n return json.dumps(self.dump_data, ensure_ascii=False)\n\n def set_item(self, key: str, value: any) -> None:\n self.dump_data[key] = value\n","sub_path":"jsonparser/jsonmapper.py","file_name":"jsonmapper.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"614127669","text":"# Libraries\nimport argparse\nimport sets\nimport numpy as np\nimport pandas as pd\nimport os, pickle\nimport copy\nimport sklearn.utils as sk\n\n# interact with code with `kill -SIGUSR2 `\nimport code\nimport signal\nsignal.signal(signal.SIGUSR2, lambda sig, frame: code.interact(local=dict(globals(), **locals())))\n\n# Declare the variables to be\n# read from command line\n\n# initial learning rate\nlr = 0\n# momentum\nmomentum = 0\n# number of hidden layers\nnum_hidden = 0\n# sizes of hidden layers\nsizes = [784]\n# activation function\nactivation = 'tanh'\n# loss function\nloss = 'ce'\n# optimization algorithm\nopt = 'gd'\n# batch size\nbatch_size = 20\n# annealing\nanneal = False\n# model save directory\nsave_dir = '/pa1'\n# save all thetas?\nsave_all_thetas = True\n# log save directory\nexpt_dir = '/pa1/exp'\n# path to train dataset\ntrain_path = 'train.csv'\n# path to test dataset\ntest_path = 'test.csv'\n# path to validation dataset\nval_path = ''\n# log frequency (steps)\nlog_frequency = 100\n# max epochs\nmax_epochs = 50\n# anneal type\nanneal_type = \"val\"\n# anneal lr threshold\nanneal_threshold = 4e-8\n# regularization parameter\nregularization = 1e-4\n\n# neural network parameters\nW_layer = [-1]\nB_layer = [-1]\n# neuron inputs and activations\nA_layer = [-1]\nH_layer = [-1]\n# Total number of layers\nL = 0\n# list containing all parameters\ntheta = []\n\ndef main():\n\tglobal theta\n\t# read training data into a pandas dataframe\n\tt_df = pd.read_csv(train_path)\n\tt_df.set_index('id', inplace=True)\n\tnum_features = len(t_df.columns)-1\n\t# import pdb\n\t# pdb.set_trace()\n\t# normalize between 0 and 1 - not required?\n\tt_df.iloc[:, 0:num_features] /= 255.0\n\tX = t_df.iloc[:, 0:num_features].as_matrix()\n\tX = X.reshape([X.shape[0],X.shape[1],1])\n\tY_temp = t_df.iloc[:, num_features].as_matrix()\n\tY = np.zeros([Y_temp.shape[0],10,1])\n\tfor i in range(Y_temp.shape[0]):\n\t\tY[i] = get_output_vector(Y_temp[i])\n\n\n\tglobal X_val, Y_val\n\t# read validation data into a pandas dataframe\n\tt_df_val = pd.read_csv(val_path)\n\tt_df_val.set_index('id', inplace=True)\n\t# normalize between 0 and 1 - not required?\n\tt_df_val.iloc[:, 0:num_features] /= 255.0\n\tX_val = t_df_val.iloc[:, 0:num_features].as_matrix()\n\tX_val = X_val.reshape([X_val.shape[0],X_val.shape[1],1])\n\tY_val_temp = t_df_val.iloc[:, num_features].as_matrix()\n\tY_val = np.zeros([Y_val_temp.shape[0],10,1])\n\tfor i in range(Y_val_temp.shape[0]):\n\t\tY_val[i] = get_output_vector(Y_val_temp[i])\n\n\n\t# # read test data into a pandas dataframe\n\t# t_df_test = pd.read_csv(test_path)\n\t# t_df_test.set_index('id', inplace=True)\n\t# # normalize between 0 and 1\n\t# t_df_test.iloc[:, 0:num_features] /= 255.0\n\t# X_test = t_df_test.iloc[:, 0:num_features].as_matrix()\n\t# X_test = X_test.reshape([X_test.shape[0],X_test.shape[1],1])\n\n\t\n\t# print(X.shape)\n\t# print(Y.shape)\n\n\t# neural network parameters\n\t# Use Xavier Glorot init for Weights - zero mean, suitable variance?\n\tfor i in xrange(1, L+1):\n\t\troot = np.sqrt(6/(sizes[i] + sizes[i-1]))\n\t\tif(opt == 'adam'):\n\t\t\tW_layer.append(np.random.uniform(-1*root, root,[sizes[i],sizes[i-1]]))\n\t\telse:\n\t\t\tW_layer.append(np.random.randn(sizes[i],sizes[i-1]))\n\t# Initialize bias as zeros\n\tfor i in xrange(1, L+1):\n\t\t# B_layer.append(np.zeros([sizes[i],1]))\n\t\t# B_layer.append(np.random.uniform(0,1,[sizes[i],1]))\n\t\tB_layer.append(np.random.randn(sizes[i],1))\n\t# neuron inputs and activations\n\tfor i in 
xrange(1, L+1):\n\t\tA_layer.append(np.zeros([sizes[i],1]))\n\tfor i in xrange(1, L):\n\t\tH_layer.append(np.zeros([sizes[i],1]))\n\t\n\ttheta = init_theta()\n\t\n\tglobal log_train_file, log_val_file, theta_pickle_file\n\tlog_train_file = open(os.path.join(expt_dir, \"log_train.txt\"), \"w\", 1)\n\tlog_val_file = open(os.path.join(expt_dir, \"log_val.txt\"), \"w\", 1)\n\ttheta_pickle_file = open(os.path.join(save_dir, \"theta.pickle\"), \"w\")\n\n\tif(opt == 'gd'):\n\t\tdo_mini_batch_gradient_descent(X, Y)\n\telif(opt == 'momentum'):\n\t\tmomentum_gradient_descent(X, Y)\n\telif(opt == 'nag'):\n\t\tnag_gradient_descent(X, Y)\n\telse:\n\t\tadam_gradient_descent(X, Y)\n\n\t# pickle.dump(theta, theta_pickle_file)\n\tlog_train_file.close()\n\tlog_val_file.close()\n\ttheta_pickle_file.close()\n\n\ndef make_dir(dir_path):\n\tif not os.path.exists(dir_path):\n\t\ttry:\n\t\t\tos.makedirs(dir_path)\n\t\texcept OSError as exc: # Guard against race condition\n\t\t\tif exc.errno != errno.EEXIST:\n\t\t\t\traise\n\nif __name__ == \"__main__\":\n\t# Initialize the parser\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"--lr\", help=\"learning rate\")\n\tparser.add_argument(\"--momentum\", help=\"momentum\")\n\tparser.add_argument(\"--num_hidden\", help=\"num_hidden\")\n\tparser.add_argument(\"--sizes\", help=\"sizes of hidden layers\")\n\tparser.add_argument(\"--activation\", help=\"activation function\")\n\tparser.add_argument(\"--loss\", help=\"loss function\")\n\tparser.add_argument(\"--opt\", help=\"optimization algorithm\")\n\tparser.add_argument(\"--batch_size\", help=\"batch_size\")\n\tparser.add_argument(\"--anneal\", help=\"anneal\")\n\tparser.add_argument(\"--save_dir\", help=\"save directory\")\n\tparser.add_argument(\"--expt_dir\", help=\"log directory\")\n\tparser.add_argument(\"--train\", help=\"train path\")\n\tparser.add_argument(\"--test\", help=\"test path\")\n\tparser.add_argument(\"--val\", help=\"validation path\")\n\tparser.add_argument('--save_all_thetas', dest='save_all_thetas', action='store_true')\n\tparser.add_argument('--no-save_all_thetas', dest='save_all_thetas', action='store_false')\n\tparser.set_defaults(save_all_thetas=True)\n\t\n\targs = parser.parse_args()\n\tsave_all_thetas = args.save_all_thetas\n\n\t# Process the command line arguments\n\tif(args.lr):\n\t\tlr = float(args.lr)\n\t\tif(args.momentum):\n\t\t\tmomentum = float(args.momentum)\n\tif(args.num_hidden):\n\t\tnum_hidden = int(args.num_hidden)\n\tif(args.sizes):\n\t\ttmp = args.sizes.split(',')\n\t\tif(len(tmp)!=num_hidden):\n\t\t\tprint('argument mismatch!')\n\t\t\texit()\n\t\tfor x in tmp:\n\t\t\tsizes.append(int(x))\n\t\tsizes.append(10)\n\tif(args.activation):\n\t\tactivation = str(args.activation)\n\t\toptions = set(['sigmoid','tanh','relu','elu'])\n\t\tif(activation not in options):\n\t\t\tprint('Invalid activation function')\n\t\t\texit()\n\tif(args.loss):\n\t\tloss = str(args.loss)\n\t\toptions = set(['sq','ce'])\n\t\tif(loss not in options):\n\t\t\tprint('Invalid loss function')\n\t\t\texit()\n\tif(args.opt):\n\t\topt = str(args.opt)\n\t\toptions = set(['gd', 'momentum', 'nag', 'adam'])\n\t\tif(opt not in options):\n\t\t\tprint('Invalid optimization algorithm')\n\t\t\texit()\n\tif(args.batch_size):\n\t\tbatch_size = int(args.batch_size)\n\t\tif(batch_size != 1 and batch_size%5 != 0):\n\t\t\tprint('Invalid batch size')\n\t\t\texit()\n\tif(args.anneal):\n\t\tif(args.anneal == 'true' or args.anneal == 'True'):\n\t\t\tanneal = True\n\tif(args.save_dir):\n\t\tsave_dir = 
str(args.save_dir)\n\t\tmake_dir(save_dir)\n\tif(args.expt_dir):\n\t\texpt_dir = str(args.expt_dir)\n\t\tmake_dir(expt_dir)\n\tif(args.train):\n\t\ttrain_path = str(args.train)\n\tif(args.test):\n\t\ttest_path = str(args.test)\n\tif (args.val):\n\t\tval_path = str(args.val)\n# Set the total number of layers\nL = num_hidden + 1\n\ndef print_params():\n\tprint(\"lr: {}\".format(lr))\n\tprint(\"momentum: {}\".format(momentum))\n\tprint(\"num_hidden: {}\".format(num_hidden))\n\tprint(\"sizes: {}\".format(sizes))\n\tprint(\"activation: {}\".format(activation))\n\tprint(\"loss: {}\".format(loss))\n\tprint(\"opt: {}\".format(opt))\n\tprint(\"batch_size: {}\".format(batch_size))\n\tprint(\"anneal: {}\".format(anneal))\n\tprint(\"save_dir: {}\".format(save_dir))\n\tprint(\"expt_dir: {}\".format(expt_dir))\n\tprint(\"train_path: {}\".format(train_path))\n\tprint(\"test_path: {}\".format(test_path))\n\tprint(\"val_path: {}\".format(val_path))\n\tprint(\"save_all_thetas: {}\".format(save_all_thetas))\n\ndef cross_entropy_loss(y_hat, y):\n\treturn -1 * np.log2(y_hat[np.argmax(y)])[0]\n\ndef square_loss(y_hat, y):\n\treturn np.sum(np.square(y - y_hat))\n\ndef get_loss(y_hat, y):\n\tif(loss == 'ce'):\n\t\treturn cross_entropy_loss(y_hat, y)\n\telse:\n\t\treturn square_loss(y_hat, y)\n\ndef get_output_vector(n):\n\tout = np.zeros([10,1])\n\tout[n][0] = 1\n\treturn out\n\ndef sigmoid(x): \n\t# return 1.0 / (1.0 + np.exp(-x))\n\treturn 0.5 * (1 + tanh(0.5*x))\n\ndef sigmoid_der(x): \n\treturn sigmoid(x) * (1 - sigmoid(x))\n\ndef tanh(x):\n\t# return ( np.exp(x) - np.exp(-x) ) / ( np.exp(x) + np.exp(-x) )\n\treturn np.tanh(x)\n\ndef tanh_der(x):\n\treturn (1 - tanh(x) ** 2 )\n\ndef relu(x):\n\treturn (x*(x>0))\n\n# check division by 0?\ndef relu_der(x):\n\treturn ( (x*(x>0)) / (x+(x==0)) )\n\ndef elu(x):\n\treturn x*(x >= 0) + (np.exp(x) - 1)*(x < 0)\n\ndef elu_der(x):\n\treturn (x >= 0) + (x < 0)*(np.exp(x))\n\ndef activation_func(x):\n\tif(activation == 'sigmoid'):\n\t\treturn sigmoid(x)\n\telif(activation == 'relu'):\n\t\treturn relu(x)\n\telif(activation == 'elu'):\n\t\treturn elu(x)\n\telse:\n\t\treturn tanh(x)\n\ndef activation_der(x):\n\tif(activation == 'sigmoid'):\n\t\treturn sigmoid_der(x)\n\telif(activation == 'relu'):\n\t\treturn relu_der(x)\n\telif(activation == 'elu'):\n\t\treturn elu_der(x)\n\telse:\n\t\treturn tanh_der(x)\n\ndef softmax(x):\n # e_x = np.exp(x)\n e_x = np.exp(x - np.max(x))\n out = e_x / e_x.sum()\n return out\n\ndef calc_error_loss(X, Y):\n\tnum_samples, num_correct, loss = X.shape[0], 0, 0\n\ttrue_positive_count = np.zeros(10)\n\tfalse_positive_count = np.zeros(10)\n\tfalse_negative_count = np.zeros(10)\n\tfor x, y in zip(X, Y):\n\t\ty_hat = forward_propagation(x)\n\t\tloss += get_loss(y_hat, y)\n\t\ttrue_class = np.argmax(y)\n\t\tpred_class = np.argmax(y_hat)\n\t\tif (true_class == pred_class):\n\t\t\tnum_correct += 1\n\t\t\ttrue_positive_count[true_class] += 1\n\t\telse:\n\t\t\tfalse_negative_count[true_class] += 1\n\t\t\tfalse_positive_count[pred_class] += 1\n\n\tsum_tp = true_positive_count.sum()\n\tsum_fp = false_positive_count.sum()\n\tsum_fn = false_negative_count.sum()\n\tprecision = sum_tp/(sum_tp + sum_fp)\n\trecall = sum_tp/(sum_tp + sum_fn)\n\tif(precision == 0 or recall == 0):\n\t\t# print(precision, recall)\n\t\tprecision = 1\n\t\trecall = 1\n\n\tmean_f_score = 2.0/((1.0/precision)+(1.0/recall))\n\n\treturn ((num_samples-num_correct) * 1.0/num_samples, loss * 1.0/num_samples, mean_f_score)\n\ndef init_theta():\n\ttheta = 
[]\n\ttheta.append(W_layer)\n\ttheta.append(B_layer)\n\treturn theta\n\ndef init_d_theta():\n\td_theta = [[], []]\n\tfor i in range(2):\n\t\td_theta[i].append(-1)\n\tfor j in xrange(1, L+1):\n\t\td_theta[0].append(np.zeros(W_layer[j].shape))\n\t\td_theta[1].append(np.zeros(B_layer[j].shape))\n\treturn d_theta\n\ndef update_adam_factors(d_theta, d_theta_sq):\n\tfor i in range(2):\n\t\tfor j in xrange(len(d_theta[i])):\n\t\t\td_theta_sq[i][j] = np.square(d_theta[i][j])\n\ndef add_and_set_theta(theta1, theta2):\n\tfor i in range(2):\n\t\tfor j in xrange(len(theta1[i])):\n\t\t\ttheta1[i][j] += theta2[i][j]\n\ndef sub_and_set_theta(theta1, theta2):\n\tfor i in range(2):\n\t\tfor j in xrange(len(theta1[i])):\n\t\t\ttheta1[i][j] -= theta2[i][j]\n\ndef scalar_mul_theta(theta, a):\n\tfor i in range(2):\n\t\tfor j in xrange(len(theta[i])):\n\t\t\ttheta[i][j] *= a;\n\ndef copy_to_theta(theta, theta1):\n\tfor i in range(2):\n\t\tfor j in xrange(len(theta[i])):\n\t\t\ttheta[i][j] = theta1[i][j]\n\ndef adam_decay_scale(m_t, v_t, epsilon):\n\tfor i in range(2):\n\t\tfor j in xrange(1, len(m_t[i])):\n\t\t\tm_t[i][j] *= ( 1.0 / np.sqrt( epsilon + v_t[i][j] ) )\n\ndef do_mini_batch_gradient_descent(X, Y):\n\td_theta = init_d_theta()\n\tprev_epoch_val_loss = float('inf')\n\tprev_epoch_train_loss = float('inf')\n\tprev_epoch_theta = []\n\tval_loss = 1\n\ttrain_loss = 1\n\ti = 0\n\tprev_i = -1\n\tcounter = 1\n\n\tbest_theta = []\n\tbest_val_score = 0\n\n\tglobal lr\n\twhile i < max_epochs:\n\t\tX,Y = sk.shuffle(X,Y)\n\t\tnum_points_seen = 0\n\t\tsteps = 0\n\t\tfor x,y in zip(X,Y):\n\t\t\ty_hat = forward_propagation(x)\n\t\t\tadd_and_set_theta(d_theta, backward_propagation(y, y_hat))\n\t\t\tnum_points_seen += 1\n\t\t\tif(num_points_seen % batch_size == 0):\n\t\t\t\t# seen one mini batch\n\t\t\t\tscalar_mul_theta(d_theta, lr)\n\t\t\t\tsub_and_set_theta(theta, d_theta)\n\t\t\t\tsteps += 1\n\t\t\t\tscalar_mul_theta(d_theta, 0)\n\t\t\t\tif steps % log_frequency == 0:\n\t\t\t\t\tprev_i = i\n\t\t\t\t\ttrain_error, train_loss, train_score = calc_error_loss(X, Y)\n\t\t\t\t\tval_error, val_loss, val_score = calc_error_loss(X_val, Y_val)\n\t\t\t\t\tif val_score > best_val_score:\n\t\t\t\t\t\tbest_theta = theta\n\t\t\t\t\t\tbest_theta_pickle_file = open(os.path.join(save_dir, \"best_theta.pickle\"), \"w\")\n\t\t\t\t\t\tpickle.dump(best_theta, best_theta_pickle_file)\n\t\t\t\t\t\tbest_theta_pickle_file.close()\n\t\t\t\t\t\tbest_val_score = val_score\n\n\t\t\t\t\tlog_train_file.write(\"{}: Epoch {}, Step {}, Loss: {}, Error: {}, lr: {}, score: {}\\n\".format(counter, i, steps, train_loss, train_error, lr, train_score))\n\t\t\t\t\tlog_val_file.write(\"{}: Epoch {}, Step {}, Loss: {}, Error: {}, lr: {}, score: {}\\n\".format(counter, i, steps, val_loss, val_error, lr, val_score))\n\t\t\t\t\tcounter += 1\n\t\tif save_all_thetas:\n\t\t\tpickle.dump(theta, theta_pickle_file)\n\n\t\trepeat_epoch = False\n\t\tif anneal_type == \"val\":\n\t\t\trepeat_epoch = (val_loss > prev_epoch_val_loss)\n\t\telse:\n\t\t\trepeat_epoch = (train_loss > prev_epoch_train_loss)\n\t\t\n\t\tif (anneal == True) and repeat_epoch:\n\t\t\tcopy_to_theta(theta, prev_epoch_theta)\n\t\t\tif (lr < anneal_threshold):\n\t\t\t\treturn;\n\t\t\tlr /= 2\n\t\telse:\n\t\t\tif (anneal == True):\n\t\t\t\tprev_epoch_theta = copy.deepcopy(theta)\n\t\t\t\tprev_epoch_train_loss = train_loss\n\t\t\t\tprev_epoch_val_loss = val_loss\n\t\t\ti += 1\n\ndef momentum_gradient_descent(X, Y):\n\td_theta = init_d_theta()\n\tupdate = init_d_theta()\n\n\tprev_epoch_val_loss = 
float('inf')\n\tprev_epoch_train_loss = float('inf')\n\tprev_epoch_theta = []\n\tprev_update = []\n\tval_loss = 1\n\ttrain_loss = 1\n\ti = 0\n\tprev_i = -1\n\tcounter = 1\n\n\tbest_theta = []\n\tbest_val_score = 0\n\n\tglobal lr\n\twhile i < max_epochs:\n\t\tX,Y = sk.shuffle(X,Y)\n\t\tnum_points_seen = 0\n\t\tsteps = 0\n\t\tfor x,y in zip(X,Y):\n\t\t\ty_hat = forward_propagation(x)\n\t\t\tadd_and_set_theta(d_theta, backward_propagation(y, y_hat))\n\t\t\tnum_points_seen += 1\n\t\t\tif(num_points_seen % batch_size == 0):\n\t\t\t\t# seen one mini batch\n\t\t\t\tscalar_mul_theta(update, momentum)\n\t\t\t\tscalar_mul_theta(d_theta, lr)\n\t\t\t\tadd_and_set_theta(update, d_theta)\n\t\t\t\tsub_and_set_theta(theta, update)\n\t\t\t\tsteps += 1\n\t\t\t\tscalar_mul_theta(d_theta, 0)\n\t\t\t\tif steps % log_frequency == 0:\n\t\t\t\t\tprev_i = i\n\t\t\t\t\ttrain_error, train_loss, train_score = calc_error_loss(X, Y)\n\t\t\t\t\tval_error, val_loss, val_score = calc_error_loss(X_val, Y_val)\n\t\t\t\t\tif val_score > best_val_score:\n\t\t\t\t\t\tbest_theta = theta\n\t\t\t\t\t\tbest_theta_pickle_file = open(os.path.join(save_dir, \"best_theta.pickle\"), \"w\")\n\t\t\t\t\t\tpickle.dump(best_theta, best_theta_pickle_file)\n\t\t\t\t\t\tbest_theta_pickle_file.close()\n\t\t\t\t\t\tbest_val_score = val_score\n\n\t\t\t\t\tlog_train_file.write(\"{}: Epoch {}, Step {}, Loss: {}, Error: {}, lr: {}, score: {}\\n\".format(counter, i, steps, train_loss, train_error, lr, train_score))\n\t\t\t\t\tlog_val_file.write(\"{}: Epoch {}, Step {}, Loss: {}, Error: {}, lr: {}, score: {}\\n\".format(counter, i, steps, val_loss, val_error, lr, val_score))\n\t\t\t\t\tcounter += 1\n\t\tif save_all_thetas:\n\t\t\tpickle.dump(theta, theta_pickle_file)\n\n\t\trepeat_epoch = False\n\t\tif anneal_type == \"val\":\n\t\t\trepeat_epoch = (val_loss > prev_epoch_val_loss)\n\t\telse:\n\t\t\trepeat_epoch = (train_loss > prev_epoch_train_loss)\n\t\t\n\t\tif (anneal == True) and repeat_epoch:\n\t\t\tcopy_to_theta(theta, prev_epoch_theta)\n\t\t\tcopy_to_theta(update, prev_update)\n\t\t\tif (lr < anneal_threshold):\n\t\t\t\treturn;\n\t\t\tlr /= 2\n\t\telse:\n\t\t\tif (anneal == True):\n\t\t\t\tprev_epoch_theta = copy.deepcopy(theta)\n\t\t\t\tprev_update = copy.deepcopy(update)\n\t\t\t\tprev_epoch_train_loss = train_loss\n\t\t\t\tprev_epoch_val_loss = val_loss\n\t\t\ti += 1\n\ndef nag_gradient_descent(X, Y):\n\td_theta = init_d_theta()\n\tupdate = init_d_theta()\n\n\tprev_epoch_val_loss = float('inf')\n\tprev_epoch_train_loss = float('inf')\n\tprev_epoch_theta = []\n\tprev_update = []\n\tval_loss = 1\n\ttrain_loss = 1\n\tcounter = 1\n\n\tbest_theta = []\n\tbest_val_score = 0\n\n\ti = 0\n\tprev_i = -1\n\tglobal lr\n\twhile i < max_epochs:\n\t\tX,Y = sk.shuffle(X,Y)\n\t\tnum_points_seen = 0\n\t\tsteps = 0\n\t\tfor x,y in zip(X,Y):\n\t\t\tif(num_points_seen % batch_size == 0):\n\t\t\t\tscalar_mul_theta(update, momentum)\n\t\t\t\tsub_and_set_theta(theta, update)\t\t\t\t\n\t\t\ty_hat = forward_propagation(x)\n\t\t\tadd_and_set_theta(d_theta, backward_propagation(y, y_hat))\n\t\t\tnum_points_seen += 1\n\t\t\tif(num_points_seen % batch_size == 0):\n\t\t\t\t# seen one mini batch\n\t\t\t\tscalar_mul_theta(d_theta, lr)\n\t\t\t\tadd_and_set_theta(update, d_theta)\n\t\t\t\tsub_and_set_theta(theta, d_theta)\n\t\t\t\tsteps += 1\n\t\t\t\tscalar_mul_theta(d_theta, 0)\n\t\t\t\tif steps % log_frequency == 0:\n\t\t\t\t\tprev_i = i\n\t\t\t\t\ttrain_error, train_loss, train_score = calc_error_loss(X, Y)\n\t\t\t\t\tval_error, val_loss, val_score = calc_error_loss(X_val, 
Y_val)\n\t\t\t\t\tif val_score > best_val_score:\n\t\t\t\t\t\tbest_theta = theta\n\t\t\t\t\t\tbest_theta_pickle_file = open(os.path.join(save_dir, \"best_theta.pickle\"), \"w\")\n\t\t\t\t\t\tpickle.dump(best_theta, best_theta_pickle_file)\n\t\t\t\t\t\tbest_theta_pickle_file.close()\n\t\t\t\t\t\tbest_val_score = val_score\n\n\t\t\t\t\tlog_train_file.write(\"{}: Epoch {}, Step {}, Loss: {}, Error: {}, lr: {}, score: {}\\n\".format(counter, i, steps, train_loss, train_error, lr, train_score))\n\t\t\t\t\tlog_val_file.write(\"{}: Epoch {}, Step {}, Loss: {}, Error: {}, lr: {}, score: {}\\n\".format(counter, i, steps, val_loss, val_error, lr, val_score))\n\t\t\t\t\tcounter += 1\n\t\tif save_all_thetas:\n\t\t\tpickle.dump(theta, theta_pickle_file)\n\n\t\trepeat_epoch = False\n\t\tif anneal_type == \"val\":\n\t\t\trepeat_epoch = (val_loss > prev_epoch_val_loss)\n\t\telse:\n\t\t\trepeat_epoch = (train_loss > prev_epoch_train_loss)\n\t\t\n\t\tif (anneal == True) and repeat_epoch:\n\t\t\tcopy_to_theta(theta, prev_epoch_theta)\n\t\t\tcopy_to_theta(update, prev_update)\n\t\t\tif (lr < anneal_threshold):\n\t\t\t\treturn;\n\t\t\tlr /= 2\n\t\telse:\n\t\t\tif (anneal == True):\n\t\t\t\tprev_epoch_theta = copy.deepcopy(theta)\n\t\t\t\tprev_update = copy.deepcopy(update)\n\t\t\t\tprev_epoch_train_loss = train_loss\n\t\t\t\tprev_epoch_val_loss = val_loss\n\t\t\ti += 1\n\ndef adam_gradient_descent(X, Y):\n\tbeta_1, beta_2, epsilon = 0.9, 0.999, 1e-8\n\td_theta = init_d_theta()\n\tm_t = init_d_theta()\n\tv_t = init_d_theta()\n\td_theta_sq = init_d_theta()\n\n\tprev_epoch_val_loss = float('inf')\n\tprev_epoch_train_loss = float('inf')\n\tprev_epoch_theta = []\n\tprev_m_t = []\n\tprev_v_t = []\n\tval_loss = 1\n\ttrain_loss = 1\n\n\tbest_theta = []\n\tbest_val_score = 0\n\n\tcounter = 1\n\n\ti = 0\n\tprev_i = -1\n\tglobal lr\n\tsteps = 0\n\tprev_steps = 0\n\twhile i < max_epochs:\n\t\tsteps_print = 0\n\t\tX,Y = sk.shuffle(X,Y)\n\t\tnum_points_seen = 0\n\t\tfor x,y in zip(X,Y):\n\t\t\tmasks = [-1]\n\t\t\ty_hat = forward_propagation_with_dropouts(x, masks)\n\t\t\tadd_and_set_theta(d_theta, backward_propagation_with_dropouts(y, y_hat, masks))\n\t\t\tnum_points_seen += 1\n\t\t\tif(num_points_seen % batch_size == 0):\n\t\t\t\t# seen one mini batch\n\t\t\t\tscalar_mul_theta(v_t, beta_2)\n\t\t\t\tupdate_adam_factors(d_theta, d_theta_sq)\n\t\t\t\tscalar_mul_theta(d_theta_sq, 1 - beta_2)\n\t\t\t\tadd_and_set_theta(v_t, d_theta_sq)\n\t\t\t\t\n\t\t\t\tscalar_mul_theta(m_t, beta_1)\n\t\t\t\tscalar_mul_theta(d_theta, 1 - beta_1)\n\t\t\t\tadd_and_set_theta(m_t, d_theta)\n\n\t\t\t\tsteps += 1\n\t\t\t\tsteps_print += 1\n\t\t\t\ttemp_m_t = copy.deepcopy(m_t)\n\t\t\t\ttemp_v_t = copy.deepcopy(v_t)\n\n\t\t\t\tscalar_mul_theta(temp_m_t, (1.0 / (1.0 - np.power(beta_1, steps))))\n\t\t\t\tscalar_mul_theta(temp_v_t, (1.0 / (1.0 - np.power(beta_2, steps))))\n\n\t\t\t\tadam_decay_scale(temp_m_t, temp_v_t, epsilon)\n\t\t\t\tscalar_mul_theta(temp_m_t, lr)\n\t\t\t\tsub_and_set_theta(theta, temp_m_t)\n\t\t\t\t\n\t\t\t\tscalar_mul_theta(d_theta, 0)\n\t\t\t\tif steps % log_frequency == 0:\n\t\t\t\t\tprev_i = i\n\t\t\t\t\ttrain_error, train_loss, train_score = calc_error_loss(X, Y)\n\t\t\t\t\tval_error, val_loss, val_score = calc_error_loss(X_val, Y_val)\n\n\t\t\t\t\tif val_score > best_val_score:\n\t\t\t\t\t\tbest_theta = theta\n\t\t\t\t\t\tbest_theta_pickle_file = open(os.path.join(save_dir, \"best_theta.pickle\"), \"w\")\n\t\t\t\t\t\tpickle.dump(best_theta, 
best_theta_pickle_file)\n\t\t\t\t\t\tbest_theta_pickle_file.close()\n\t\t\t\t\t\tbest_val_score = val_score\n\n\t\t\t\t\tlog_train_file.write(\"{}: Epoch {}, Step {}, Loss: {}, Error: {}, lr: {}, score: {}\\n\".format(counter, i, steps_print, train_loss, train_error, lr, train_score))\n\t\t\t\t\tlog_val_file.write(\"{}: Epoch {}, Step {}, Loss: {}, Error: {}, lr: {}, score: {}\\n\".format(counter, i, steps_print, val_loss, val_error, lr, val_score))\n\t\t\t\t\tcounter += 1\n\t\t\n\t\tif save_all_thetas:\n\t\t\tpickle.dump(theta, theta_pickle_file)\n\n\t\trepeat_epoch = False\n\t\tif anneal_type == \"val\":\n\t\t\trepeat_epoch = (val_loss > prev_epoch_val_loss)\n\t\telse:\n\t\t\trepeat_epoch = (train_loss > prev_epoch_train_loss)\n\t\t\n\t\tif (anneal == True) and repeat_epoch:\n\t\t\tcopy_to_theta(theta, prev_epoch_theta)\n\t\t\tcopy_to_theta(m_t, prev_m_t)\n\t\t\tcopy_to_theta(v_t, prev_v_t)\n\t\t\tsteps = prev_steps\n\t\t\tif (lr < anneal_threshold):\n\t\t\t\treturn;\n\t\t\tlr /= 2\n\t\telse:\n\t\t\tif (anneal == True):\n\t\t\t\tprev_epoch_theta = copy.deepcopy(theta)\n\t\t\t\tprev_m_t = copy.deepcopy(m_t)\n\t\t\t\tprev_v_t = copy.deepcopy(v_t)\n\t\t\t\tprev_epoch_train_loss = train_loss\n\t\t\t\tprev_epoch_val_loss = val_loss\n\t\t\t\tprev_steps = steps\n\t\t\ti += 1\n\n\ndef forward_propagation(x):\n\tH_layer[0] = x\n\tfor i in xrange(1, L):\n\t\tA_layer[i] = B_layer[i] + np.matmul(W_layer[i], H_layer[i-1])\n\t\tH_layer[i] = activation_func(A_layer[i])\n\tA_layer[L] = B_layer[L] + np.matmul(W_layer[L], H_layer[L-1])\n\ty_hat = softmax(A_layer[L]) \n\treturn y_hat\n\ndef forward_propagation_with_dropouts(x, masks):\n\tp = 0.92\n\tH_layer[0] = x\n\tfor i in xrange(1, L):\n\t\tA_layer[i] = B_layer[i] + np.matmul(W_layer[i], H_layer[i-1])\n\t\tH_layer[i] = activation_func(A_layer[i])\n\t\tU = (np.random.rand(*H_layer[i].shape) < p) / p\n\t\tH_layer[i] *= U\n\t\tmasks.append(U)\n\tA_layer[L] = B_layer[L] + np.matmul(W_layer[L], H_layer[L-1])\n\ty_hat = softmax(A_layer[L]) \n\treturn y_hat\n\n# TODO: find gradients for squared error - Done. 
Verify.\ndef backward_propagation(y, y_hat):\n\td_H = [-1]\n\td_A = [-1]\n\td_B = [-1]\n\td_W = [-1]\n\tfor i in xrange(1, L):\n\t\td_H.append(np.zeros(H_layer[i].shape))\n\tfor i in xrange(1, L+1):\n\t\td_A.append(np.zeros(A_layer[i].shape))\n\tfor i in xrange(1, L+1):\n\t\td_B.append(np.zeros(B_layer[i].shape))\n\tfor i in xrange(1, L+1):\n\t\td_W.append(np.zeros(W_layer[i].shape))\n\t\n\t# Output gradient computation\n\tif(loss == 'ce'):\n\t\td_A[L] = -(y - y_hat)\n\telse:\n\t\ttemp_1 = 2 * y_hat\n\t\ttemp_2 = y_hat - y + ( ( y_hat[np.argmax(y)] - np.square(y_hat).sum() ) * np.ones([10,1]) )\n\t\td_A[L] = temp_1 * temp_2\n\n\tfor k in xrange(L, 0, -1):\n\t\t# Parameter gradient computation\n\t\td_W[k] = np.matmul(d_A[k], H_layer[k-1].transpose())\n\t\td_B[k] = d_A[k]\n\n\t\t# Means we have already computed till W[1] and B[1]\n\t\tif(k == 1):\n\t\t\tbreak\n\n\t\t# Compute gradient wrt layer below\n\t\td_H[k-1] = np.matmul(W_layer[k].transpose(),d_A[k])\n\n\t\t# Compute gradient wrt layer below (pre-activation)\n\t\ttemp = activation_der(A_layer[k-1])\n\t\td_A[k-1] = d_H[k-1] * temp\n\n\td_theta = []\n\td_theta.append(d_W)\n\td_theta.append(d_B)\n\tscalar_mul_theta(d_theta, 1.0/batch_size)\n\treturn d_theta\n\ndef backward_propagation_with_dropouts(y, y_hat, masks):\n\td_H = [-1]\n\td_A = [-1]\n\td_B = [-1]\n\td_W = [-1]\n\tfor i in xrange(1, L):\n\t\td_H.append(np.zeros(H_layer[i].shape))\n\tfor i in xrange(1, L+1):\n\t\td_A.append(np.zeros(A_layer[i].shape))\n\tfor i in xrange(1, L+1):\n\t\td_B.append(np.zeros(B_layer[i].shape))\n\tfor i in xrange(1, L+1):\n\t\td_W.append(np.zeros(W_layer[i].shape))\n\t\n\t# Output gradient computation\n\tif(loss == 'ce'):\n\t\td_A[L] = -(y - y_hat)\n\telse:\n\t\ttemp_1 = 2 * y_hat\n\t\ttemp_2 = y_hat - y + ( ( y_hat[np.argmax(y)] - np.square(y_hat).sum() ) * np.ones([10,1]) )\n\t\td_A[L] = temp_1 * temp_2\n\n\tfor k in xrange(L, 0, -1):\n\t\t# Parameter gradient computation\n\t\td_W[k] = np.matmul(d_A[k], H_layer[k-1].transpose()) + 2*regularization*W_layer[k]\n\n\t\t# no need to regularize bias parameters: http://cs231n.github.io/neural-networks-2/#reg\n\t\td_B[k] = d_A[k]\n\n\t\t# Means we have already computed till W[1] and B[1]\n\t\tif(k == 1):\n\t\t\tbreak\n\n\t\t# Compute gradient wrt layer below\n\t\td_H[k-1] = np.matmul(W_layer[k].transpose(),d_A[k]) * masks[k-1] \n\n\t\t# Compute gradient wrt layer below (pre-activation)\n\t\ttemp = activation_der(A_layer[k-1])\n\t\td_A[k-1] = d_H[k-1] * temp\n\n\td_theta = []\n\td_theta.append(d_W)\n\td_theta.append(d_B)\n\tscalar_mul_theta(d_theta, 1.0/batch_size)\n\treturn d_theta\n\nif __name__ == '__main__':\n\tnp.random.seed(1234)\n\tprint_params()\n\tmain()\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":23561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"99954146","text":"import json, os, sys, time, pickle, h5py, cv2, numpy as np\nsys.path.append('/usr/local/lib/')\nimport pyrealsense2 as rs\n\ndef start_pipes(pipelines,configs,serial_numbers):\n\tprofiles = {}\n\tfor n in serial_numbers[::-1]:\n\t\tprofile = pipelines[n].start(configs[n])\n\t\tprofiles[n] = profile\n\treturn profiles\n\ndef stop_pipes(pipelines):\n\tfor n in pipelines:\n\t\tpipelines[n].stop() \n \ndef get_metadata(serial_numbers,start_time,stop_time,num_frames,intrinsics, timestamps, PARAMS):\n\tmetadata = {'parameters':PARAMS,\n 'serial_numbers':serial_numbers,\n 'start_time':start_time,\n 'stop_time':stop_time,\n 'num_frames':num_frames,\n\t\t 'intrinsics':intrinsics,\n 'timestamps': timestamps}\n\treturn metadata\n\ndef get_pipelines(serial_numbers, PARAMS):\n\tpipelines = {}\n\tconfigs = {}\n\tfor n in serial_numbers:\n\t\ttry:\n\t\t\tpipeline = rs.pipeline()\n\t\t\tconfig = rs.config()\n\t\t\tconfig.enable_device(n)\n\t\t\tconfig.enable_stream(rs.stream.depth, PARAMS['frame_width'], PARAMS['frame_height'], rs.format.z16, PARAMS['fps'])\n\t\t\tconfig.enable_stream(rs.stream.color, PARAMS['frame_width'], PARAMS['frame_height'], rs.format.bgr8, PARAMS['fps'])\n\t\t\tconfig.enable_record_to_file(PARAMS['working_directory'] + '/data/' + PARAMS['session_name']+'_'+n+'.bag')\n\t\t\tpipelines[n] = pipeline\n\t\t\tconfigs[n] = config\n\t\texcept: \n\t\t\tprint('Error connecting to camera '+n)\n\treturn pipelines, configs\n\ndef get_connected_devices():\n\tctx = rs.context()\n\tds5_dev = rs.device()\n\tdevices = ctx.query_devices();\n\tserial_numbers = []\n\tfor d in devices:\n\t\tprint('Found device ',d)\n\t\tserial_numbers.append(str(d).split('S/N: ')[1].split(')')[0])\n\tif len(devices)==0:\n\t\tprint('No devices found')\n\treturn serial_numbers\n\ndef get_intrinsics(pipelines):\n\tintrinsics = {k:{} for k in pipelines.keys()}\n\tfor n in pipelines:\n\t\tins = pipelines[n].wait_for_frames().get_depth_frame().profile.as_video_stream_profile().intrinsics\n\t\tintrinsics[n]['ppx'] = ins.ppx\n\t\tintrinsics[n]['ppy'] = ins.ppy\n\t\tintrinsics[n]['fx'] = ins.fx\n\t\tintrinsics[n]['fy'] = ins.fy\n\treturn intrinsics\n\n","sub_path":"MoseqMulti_acquire.py","file_name":"MoseqMulti_acquire.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"26181468","text":"import dash\nfrom dash.dependencies import Input, Output\nimport dash_core_components as dcc\nimport dash_html_components as html\n\n\nstyles = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\napp = dash.Dash(__name__, external_stylesheets=styles)\n\napp.layout = html.Div([\n dcc.Input(id='n-multi', type='number', value=5),\n html.Table([html.Tr([html.Td(['x', html.Sup(2)]), html.Td(id='square')]),\n html.Tr([html.Td(['x', html.Sup(3)]), html.Td(id='cube')]),\n html.Tr([html.Td([2, html.Sup('x')]), html.Td(id='twos')]),\n html.Tr([html.Td([3, html.Sup('x')]), html.Td(id='threes')]),\n html.Tr([html.Td(['x', html.Sup('x')]), html.Td(id='x^x')])])])\n\n\n@app.callback(Output('square', 'children'),\n Output('cube', 'children'),\n Output('twos', 'children'),\n Output('threes', 'children'),\n Output('x^x', 'children'),\n Input('n-multi', 'value'))\ndef do_math(x):\n return x ** 2, x ** 3, 2 ** x, 3 ** x, x ** x\n\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n \n \n","sub_path":"plotly/tutorial/docs/03_basic_callbacks/multi_output.py","file_name":"multi_output.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"654046195","text":"# COMPILADO DE FUNÇÕES SIMPLES E ÚTEIS DE FORMA GERAL \n\n# Funções matemáticas\n\ndef maximo_divisor_comum(num1, num2):\n resto = num1 % num2\n while resto:\n num1 = num2\n num2 = resto\n resto = num1 % num2\n return num2 \n\n\ndef fatorial(valor):\n fatorial = 1\n while valor > 0:\n fatorial *= valor\n valor -=1\n return fatorial\n\n\ndef valida_opc(opc, msg):\n while opc not in range(1, 3):\n print(msg)\n opc = int(input())\n return opc\n\n\ndef get_inteiro(mensagem):\n try:\n return int(input(mensagem))\n except:\n print('Valor inválido.')\n return get_inteiro(mensagem)\n\n\ndef get_number(mensagem):\n try:\n return float(input(mensagem))\n except:\n print('Valor inválido.')\n return get_number(mensagem)\n\n\ndef get_int_positivo(msg):\n valor = get_inteiro(msg)\n while valor < 0:\n print('Valor digitado menor que 0.')\n valor = get_inteiro(msg)\n return valor\n\n\ndef apaga_lista(lista):\n for c in range(0, len(lista)):\n lista.pop()\n return lista\n\ndef fibonacci(parada): # escreve fibonacci até a 'parada'.\n penultimo = atual = 0\n ultimo = 1\n contador = 3\n print(0, 1, end = ' ')\n while contador <= parada:\n atual = penultimo + ultimo\n print(atual, end = ' ')\n penultimo = ultimo\n ultimo = atual\n contador += 1\n print()\n\ndef lista_fibonacci(parada): # escreve fibonacci até a 'parada'.\n fibonacci = [0, 1]\n for c in range(0, parada):\n fibonacci.append(fibonacci[c]+fibonacci[c+1])\n for e in range(0, parada):\n if e != parada-1:\n print(fibonacci[e], end = ' ')\n else:\n print(fibonacci[e])\n\n\ndef maior_valor(num1, num2):\n return num1 if num1 >= num2 else num2\n\n\ndef primo(valor): # checa se um valor é primo ou não\n\ttotal = 0\n\tfor count in range(1, valor+1):\n\t\tif valor % count == 0:\n\t\t\ttotal += 1\n\treturn True if total == 2 else False","sub_path":"Fabio03_For/utilidades.py","file_name":"utilidades.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"111095382","text":"#!/usr/bin/python3\nlist = ['172.30.8.0/23/172.30.5.3', '172.30.12.0/22/172.30.5.3', '192.168.128.0/18/172.30.5.3', '172.30.80.20/32/172.30.5.3']\n\ndef removezeros(ipaddr):\n\ti = 3 # 4 bytes in IPv4 address\n\tstrippedip = ''\n\twhile i >= 0 :\n\t\tif ipaddr.split(sep=\".\")[i] != '0':\n\t\t\tbreak\n\t\ti -= 1\n\tj = 0\n\twhile j <= i :\n\t\tstrippedip += ipaddr.split(sep=\".\")[j] #Rewriting the shorter IP address\n\t\tif i != j :\n\t\t\tstrippedip += '.' #Do not add separator . if it's the last byte\n\t\tj += 1\n\treturn strippedip\n\ndef dec2hex(number):\n\t\tfullhex=hex(int(number))\n\t\tif int(number) < 16 :\n\t\t\treturn ('0' + str(fullhex).split(sep=\"x\")[1])\n\t\treturn (fullhex.split(sep=\"x\")[1])\n\t\ndef ipblock(netip, netmask, gw):\n\tconcatblock = netmask + '.' + removezeros(netip) + '.' + gw\n\tfinalblock = ''\n\tfor byte in concatblock.split(sep=\".\"):\n\t\tfinalblock += dec2hex(byte)\n\t\tfinalblock += ':'\n\treturn (finalblock)\n\ndef option121(list):\n\tstring = ''\n\tfor route in list:\n\t\tnetip = route.split(sep=\"/\")[0]\n\t\tnetmask = route.split(sep=\"/\")[1]\n\t\tgw = route.split(sep=\"/\")[2]\n\t\tstring += ipblock(netip, netmask, gw)\n\treturn (string.strip(':'))\n\t\nprint(option121(list))\n","sub_path":"option121/option121.py","file_name":"option121.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"480000339","text":"# Request get and post\nimport requests\nr=requests.get('https://xkcd.com/1906/')\nprint(\"Status code is {0}\",r.status_code)\nprint(\"Header is {0}\",r.headers)\n\n\n#Download an image using Request Response\nimport requests\nreceive=requests.get('https://imgs.xkcd.com/comics/making_progress.png')\nwith open(r'C:\\Prosenjit\\Nupur\\image.png','wb') as f :\n f.write(receive.content)\n\n","sub_path":"HttpRequest.py","file_name":"HttpRequest.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"306113515","text":"# Author: eunsunlee\n# partial source: https://github.com/chanlhock/IMU/blob/master/imu.py\n# integration of rk4: https://www.dis.uniroma1.it/~pretto/papers/tpm_icra2014.pdf\n# integration of rk4: https://github.com/scomup/imu_tools/blob/master/src/imu_tracker/common.h\n\nimport socket, traceback\nimport csv\nimport struct\nimport sys, time, string, pygame\nfrom pygame.locals import *\nfrom ponycube import *\nfrom madgwickahrs import *\nimport quaternion\nfrom quaternion import QuaternionClass\nfrom a3 import IntegrationRK4,computeOmegaskew, QuatToRotMat, RotMatToQuat\n\n\naccel = [[0.0190, -0.0522, -0.9780],\n [0.0269, -0.0327, 0.9897],\n [0,-0.0093,1.0205],\n [0.0112, -0.0327, 1.0015],\n [0.0151, -0.0327, 1.0015]]\n\ngyro = [[-0.9375, -1.25, 0.875],\n [-1.3125, -2, 0.125],\n [-1.5625, -2.3125, -0.1875],\n [-1, -1.9375, -0.0625],\n [-0.6250, -1.3125, 0]]\nmag = [[0.20996090, 0.03125, -0.4487305],\n [0.2148438, 0.04101563, -0.4536133],\n [0.2148438, 0.04101563, -0.4536133],\n [0.2148438, 0.04101563, -0.4536133],\n [0.2148438, 0.04101563, -0.4536133]]\n\n\n\n\n\npygame.init()\nscreen = Screen(480,400,scale=1.5)\ncube = Cube(40,30,60)\nq = Quaternion(1,0,0,0)\nincr = Quaternion(0.96,0.01,0.01,0).normalized()\ncube.erase(screen)\ncube.draw(screen,q)\n\nprevious_timestamp = 0\n\nquat = QuaternionClass(1, 0, 0, 0)\nomega0 = [0,0,0]\n\n# a3quat = QuaternionClass(1, 0, 0, 0)\n\ndt = 1/256\n\nwhile 1:\n for i in range(len(mag)):\n ax = accel[i][0]\n ay = accel[i][1]\n az = accel[i][2]\n gx = gyro[i][0]\n gy = gyro[i][1]\n gz = gyro[i][2]\n mx = mag[i][0]\n my = mag[i][1]\n mz = mag[i][2]\n\n\n # A3 Algorithm gyroscope calibration\n omega1 = [gx, gy, gz]\n quat = IntegrationRK4(omega0, omega1, quat, dt)\n omega0 = omega1\n\n\n # Madgwick Algorithm\n # Imupredict = MadgwickAHRS();\n # Imupredict.quaternion = quat\n # Imupredict.sampleperiod = dt\n # Imupredict.update(gyro,accel,mag)\n # quat = Imupredict.quaternion\n\n qw = quat[0]\n qx = quat[1]\n qy = quat[3]\n qz = -quat[2]\n\n q.w = qw\n q.x = qx\n q.y = qy\n q.z = qz\n q = q.normalized()\n \n print(\"quat\")\n print(q.w, q.x, q.y, q.z)\n\n\n cube.erase(screen)\n cube.draw(screen,q)\n pygame.display.flip()\n pygame.time.delay(0)\n event = pygame.event.poll()\n if event.type == pygame.QUIT \\\n or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):\n break\n if i == len(mag):\n i = 0\n\n\n\n\n\n\n\n\n","sub_path":"IMU_algorithms/sensorapp.py","file_name":"sensorapp.py","file_ext":"py","file_size_in_byte":2652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"366208395","text":"# -*-coding:utf-8-*-\nimport getopt\nimport os\nimport sys\nimport re\ncurrent_path = os.path.abspath(os.path.dirname(__file__))\nproject_path = os.path.abspath(\"{}/../..\".format(current_path))\nsys.path.append(project_path)\nfrom tools.usage import usage_help\n__author__ = 'Allen Woo'\n\nclass Transations():\n\n def main(self):\n\n self.cfg_path = \"{}/babel.cfg\".format(current_path)\n self.extract_path = \"{}/apps\".format(project_path)\n s_ops = \"hq\"\n l_ops = [\"init\", \"update\", \"compile\", \"cfg=\", \"extract=\", \"output=\", \"lan=\", \"all-lan\",\n \"get-msgid=\", \"re-msgstr=\"]\n s_opexplain = [\"help\",\"quiet:A small amount of output\"]\n l_opexplain = [\"init translation\",\n \"update: extract and update\",\n \"compile\",\n \", The default:{}.\\n\\t\\tOptional: {}/babel_py.cfg\".format(self.cfg_path, current_path),\n \",The default: {}\".format(self.extract_path),\n \"
\n \n \n \"\"\")\n POSITIONS = dedent(\"\"\"\n \n \n | Trades Portfolios | \n Prime_Expiries | \n
\n {}
\n
\n \"\"\")\n\n def __init__(self, portfolios, business_days, show_positions, ignore_trades=()):\n self.instruments = set()\n self.portfolios = portfolios\n self.business_days = business_days\n self.show_positions = show_positions\n self.ignore_trade_statuses = ignore_trades\n\n def _sort_instruments_by(self, method):\n return sorted(self.instruments, key=methodcaller(method))\n\n def _positions(self, instrument):\n ratio = 1.0 / instrument.ContractSize()\n portfolio_to_position = defaultdict(float)\n for trade in instrument.Trades():\n if trade.Status() in self.ignore_trade_statuses:\n continue\n portfolio = trade.Portfolio()\n if any(is_child_portf(portfolio, parent) for parent in self.portfolios):\n portfolio_to_position[portfolio.Name()] += trade.Position() * ratio\n\n if any(portfolio_to_position.values()):\n return self.POSITIONS.format(''.join(\n '| {} | {:,} | '.format(portfolio, pos)\n for portfolio, pos in sorted(portfolio_to_position.items()) if pos))\n else:\n return ''\n\n def _instrument_line(self, instrument):\n line = 'Expiring: {} - {}'.format(\n instrument.ExpiryDateOnly(),\n instrument.Name())\n if self.show_positions:\n line += self._positions(instrument)\n return line\n\n def add_instrument(self, instrument):\n self.instruments.add(instrument)\n\n def get_body(self):\n return self.REPORT_BODY.format(\n date_today=DATE_TODAY,\n business_days=self.business_days,\n portfolios='
'.join(portfolio.Name() for portfolio in self.portfolios),\n instruments='
'.join(\n self._instrument_line(instrument) for instrument in\n self._sort_instruments_by('ExpiryDateOnly')))\n\n def write_to_file(self, full_path):\n with open(full_path, \"w+\") as output_file:\n output_file.write(self.get_body())\n\n def send_mail(self, email_to):\n message = EmailHelper(\n body=self.get_body(),\n subject=self.SUBJECT.format(date_today=DATE_TODAY, env=get_env_name()),\n mail_to=email_to.split(','),\n sender_type=EmailHelper.SENDER_TYPE_SMTP,\n host=EmailHelper.get_acm_host())\n message.send()\n\n\ndef get_env_name():\n return acm.FInstallationData.Select('').At(0).Name()\n\n\ndef expires_within(instrument, business_days):\n expiry_date = instrument.ExpiryDate()\n if expiry_date and DATE_TODAY <= expiry_date:\n timediff = bankingday_timediff(\n instrument.Currency().Calendar(), DATE_TODAY, instrument.ExpiryDate())\n return timediff.days <= business_days\n else:\n return False\n\n\ndef is_valid(instrument, invalid_instypes):\n \"\"\"Check if the instrument type is valid given the list of invalid types.\n\n Always exclude call account expires - it does not\n make sense since they will show up daily.\n \"\"\"\n return not (\n instrument.InsType() in invalid_instypes or\n (instrument.InsType() == INST_DEPOSIT and\n instrument.OpenEnd() == OPEN_END_STATUS_TEXT)\n )\n\n\ndef collect_instruments(portfolios, exclude_trade_statuses, exclude_ins_types):\n instrument_collection = set()\n\n for portfolio in portfolios:\n LOGGER.info('Checking portfolio: {}'.format(portfolio.Name()))\n for trade in portfolio.Trades():\n if (trade.Status() not in exclude_trade_statuses and\n is_valid(trade.Instrument(), exclude_ins_types)):\n instrument_collection.add(trade.Instrument())\n\n return instrument_collection\n\n\ndef ael_main(ael_dict):\n LOGGER.msg_tracker.reset()\n LOGGER.info('Collecting Instruments In Portfolios')\n instruments = collect_instruments(\n ael_dict['portfolio'],\n ael_dict['excl_trade_status'],\n ael_dict['excl_ins_type']\n )\n expiry_report = ExpiryReport(\n ael_dict['portfolio'],\n ael_dict['days_to_expiry'],\n ael_dict['show_positions'],\n ael_dict['excl_trade_status'],\n )\n\n LOGGER.info('Checking Expiry Dates')\n for instrument in instruments:\n if expires_within(instrument, ael_dict['days_to_expiry']):\n LOGGER.info('{} expires on {}'.format(\n instrument.Name(), instrument.ExpiryDateOnly()))\n expiry_report.add_instrument(instrument)\n\n if not expiry_report.instruments:\n LOGGER.info(\n 'There are no instruments expiring within '\n '{} business day(s)'.format(ael_dict['days_to_expiry']))\n return\n\n if ael_dict['output_location']:\n LOGGER.info('Writing to file')\n try:\n expiry_report.write_to_file(ael_dict['output_location'])\n except (IOError, OSError) as err:\n LOGGER.error('Failed to write to file: {}'.format(err))\n else:\n LOGGER.info('Wrote to {}'.format(ael_dict['output_location']))\n\n if ael_dict['send_email']:\n LOGGER.info('Sending email')\n try:\n expiry_report.send_mail(ael_dict['email_destinations'])\n except Exception as err:\n LOGGER.error('Failed to send email notification: {}'.format(err))\n else:\n LOGGER.info('Email sent')\n\n if LOGGER.msg_tracker.errors_counter:\n raise RuntimeError(\"ERRORS occurred. Please check the log.\")\n\n LOGGER.info(\"Completed successfully.\")\n","sub_path":"Python modules/Ops_InsExpiryNotification.py","file_name":"Ops_InsExpiryNotification.py","file_ext":"py","file_size_in_byte":10070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"222523528","text":"#!/usr/bin/env python3\nn = int(input())\ndef is_prime_like(n):\n if n == 1:\n return False\n if n in [2, 3, 5]:\n return True\n if int(list(str(n)).pop()) in [0, 2, 4, 5, 6, 8]:\n return False\n if sum(map(int,list(str(n)))) % 3 == 0:\n return False\n return True\nif not is_prime_like(n):\n print('Not ', end='')\nprint('Prime')\n","sub_path":"arc/044/a/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"575991731","text":"import torch\nimport pickle \nimport copy\nimport sys\nimport pdb\nimport time\n\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn import preprocessing\nfrom sklearn.metrics import (confusion_matrix, precision_score, recall_score, f1_score,\\\n fbeta_score, roc_auc_score, precision_recall_curve, auc,\\\n brier_score_loss)\nfrom collections import Counter\n\nsys.path.insert(1, '../')\nfrom focal_loss import FocalLoss\n\nfrom load_data import *\n\n# network related constants \nITERATION = 200\nNUM_CLASSES = 2\nETA = 1e-4\n\n# two running environment options below:\n# device = torch.device(\"cpu\")\ndevice = torch.device(\"cuda:0\")\ndtype = torch.float\n\nclass model_ca(nn.Module):\n def __init__(self):\n super(model_ca, self).__init__()\n self.class_count = NUM_CLASSES\n # self.hidden = [1977, 1000, 200]\n self.hidden = [8112,4000, 800] # previously more previously 799 \n\n self.mp4 = nn.MaxPool1d(4)\n self.mp2 = nn.MaxPool1d(2)\n\n # self.conv1 = nn.Conv1d(1,1,256, stride=1, dilation=1)\n # self.conv2 = nn.Conv1d(1,1,8, stride=1, dilation=1)\n self.conv3 = nn.Conv1d(1,1,16, stride=1, dilation=1)\n self.conv4 = nn.Conv1d(1,1,32, stride=1, dilation=1)\n self.conv5 = nn.Conv1d(1,1,64, stride=1, dilation=1)\n self.conv6 = nn.Conv1d(1,1,128, stride=1, dilation=1)\n\n \n\n self.fc1 = nn.Linear(self.hidden[0], self.hidden[1])\n self.fc2 = nn.Linear(self.hidden[1], self.hidden[2])\n self.fc3 = nn.Linear(self.hidden[2], self.class_count)\n\n def forward(self, x_):\n x = x_.data.unsqueeze(1)\n\n # x1 = self.mp4(F.relu(self.conv1(x))).squeeze()\n # x2 = self.mp4(F.relu(self.conv2(x))).squeeze()\n x3 = self.mp4(F.relu(self.conv3(x))).squeeze()\n x4 = self.mp4(F.relu(self.conv4(x))).squeeze()\n x5 = self.mp4(F.relu(self.conv5(x))).squeeze()\n x6 = self.mp4(F.relu(self.conv6(x))).squeeze()\n \n x = torch.cat((x3,x4,x5,x6), dim= 1).data.unsqueeze(1)\n\n # x = self.dropout(x)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n\n return F.softmax(x.squeeze(), dim=1)\n\n# custom weight initialization\ndef init_weights(m):\n if type(m) == nn.Linear:\n torch.nn.init.xavier_uniform_(m.weight)\n m.bias.data.fill_(0.01)\n elif type(m) == nn.Conv1d:\n torch.nn.init.xavier_uniform_(m.weight)\n m.bias.data.fill_(0.01)\n\n\nprint()\nprint(\"********** Classifier Model Training **********\")\nprint()\n\nauroc_folds = []\naupr_folds = []\n\nseed = 2294\nprint(\"Seed: \", seed)\n\n# valid_index = np.random.randint(low=1, high=9)\n# print(\"Validation fold is: \", valid_index)\ntest_indices = [1,2,3,4,5,6,7,8]\n# test_indices.remove(valid_index)\n\nfor test_index in test_indices:\n\n torch.cuda.manual_seed_all(seed)\n\n train_indices = copy.deepcopy(test_indices)\n train_indices.remove(test_index)\n\n print()\n print(\"************ Test Fold \" + str(test_index) + \" ************\")\n print()\n\n test_hydrogens = eval(\"fold_\" + str(test_index) + \"_hydrogens\")\n test_labels = eval(\"fold_\" + str(test_index) + \"_labels\")\n\n # valid_hydrogens = eval(\"fold_\" + str(valid_index) + \"_hydrogens\")\n # valid_labels = eval(\"fold_\" + str(valid_index) + \"_labels\")\n\n train_hydrogens_tuple = tuple([eval('fold_' + str(x) + '_hydrogens') for x in train_indices])\n train_labels_tuple = tuple([eval('fold_' + str(x) + '_labels') for x in train_indices])\n train_hydrogens = np.vstack(train_hydrogens_tuple)\n train_labels = np.vstack(train_labels_tuple)\n\n test_fold_data = 
torch.from_numpy(test_hydrogens).float()\n train_fold_data = torch.from_numpy(train_hydrogens).float()\n # valid_fold_data = torch.from_numpy(valid_hydrogens).float()\n test_fold_labels = torch.from_numpy(test_labels).long()\n train_fold_labels = torch.from_numpy(train_labels).long()\n # valid_fold_labels = torch.from_numpy(valid_labels).long()\n\n model = model_ca()\n model.apply(init_weights)\n model = model.to(device)\n\n # optimizers\n adam = torch.optim.Adam(model.parameters(), lr=ETA, weight_decay=0.001)\n sgd = torch.optim.SGD(model.parameters(), lr=ETA, weight_decay=0.001, momentum=0.85, nesterov=True)\n optimizer = adam # selected optimizer\n\n # learning rate scheduler\n # scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.4, patience=20, verbose=True,min_lr=1e-5)\n\n # weighted cross entropy loss function for training \n counter = Counter(train_fold_labels.numpy().T.reshape(1,-1)[0,:].tolist())\n mw = max([counter[x] for x in range(NUM_CLASSES)]) \n weight = torch.tensor([mw/counter[x] for x in range(NUM_CLASSES)]).to(device)\n # print (\"Weights: \", [mw/counter[x] for x in range(NUM_CLASSES)])\n loss_fn = torch.nn.CrossEntropyLoss(weight=weight)\n # loss_fn = FocalLoss(class_num=2, gamma=1.2, alpha=weight)\n\n # # weighted cross entropy loss for validation dataset\n # counter = Counter(valid_fold_labels.numpy().T.reshape(1,-1)[0,:].tolist())\n # mw = max([counter[x] for x in range(NUM_CLASSES)]) \n # weight = torch.tensor([mw/counter[x] for x in range(NUM_CLASSES)]).to(device)\n # # valid_loss_fn = torch.nn.CrossEntropyLoss(weight=weight)\n # valid_loss_fn = FocalLoss(class_num=2, gamma=1, alpha=weight)\n\n # scale all samples according to training set\n scaler = preprocessing.MinMaxScaler().fit(train_fold_data.numpy())\n train_fold_data_normalized = torch.from_numpy(scaler.transform(train_fold_data.numpy())).float().to(device)\n test_fold_data_normalized = torch.from_numpy(scaler.transform(test_fold_data.numpy())).float().to(device)\n # valid_fold_data_normalized = torch.from_numpy(scaler.transform(valid_fold_data.numpy())).float().to(device)\n\n # convert to test set to torch variables\n test_data_torch = test_fold_data_normalized\n test_labels_torch = test_fold_labels\n test_labels_torch = torch.transpose(test_labels_torch, 0, 1)\n test_labels = test_labels_torch.to(device)\n\n # convert to test set to torch variables\n # valid_data_torch = valid_fold_data_normalized\n # valid_labels_torch = valid_fold_labels\n # valid_labels_torch = torch.transpose(valid_labels_torch, 0, 1)\n # valid_labels = valid_labels_torch.to(device)\n\n train_labels = train_fold_labels.to(device)\n\n # training and validation log related\n train_loss_history = []\n train_acc_history = []\n valid_loss_history = []\n aupr_history = []\n auc_history = []\n max_accuracy = 0\n\n # in order to find the best model based on validation loss\n best_model = model\n min_validation_loss = 1e1\n\n model.train()\n for epoch in range(ITERATION):\n # Forward pass\n train_fold_pred = model(train_fold_data_normalized)\n # Compute and save loss.\n loss = loss_fn(train_fold_pred, train_labels.squeeze())\n train_loss_history.append(loss.item())\n # compute training accuracy\n train_acc = (torch.transpose(train_labels, 0, 1) == torch.max(train_fold_pred,1)[1]).sum().cpu().numpy()/float(len(train_fold_labels))\n train_acc_history.append(train_acc)\n # try model on test set\n with torch.no_grad():\n model.eval()\n\n # predict test dataset\n test_pred = model(test_data_torch)\n 
test_labels_pred = torch.max(test_pred,1)[1]\n test_labels_pred = test_labels_pred.cpu().numpy()\n # calculate auroc\n auc_ = roc_auc_score(test_labels.cpu()[0], test_pred.cpu().numpy()[:,1])\n auc_history.append(auc_)\n # calculate aupr\n precision, recall, thresh = precision_recall_curve(test_labels.cpu().numpy().T, test_pred.cpu().numpy()[:,1])\n aupr = auc(recall, precision)\n aupr_history.append(aupr)\n # calculate validation set loss\n # valid_fold_pred = model(valid_fold_data_normalized)\n # valid_pred = model(valid_data_torch)\n # valid_labels_pred = torch.max(valid_pred,1)[1]\n # valid_labels_pred = valid_labels_pred.cpu().numpy()\n # valid_loss = valid_loss_fn(valid_fold_pred, valid_labels.squeeze())\n\n model.train()\n\n # print(\"Epoch: \", epoch, \"\\tTraining Loss: \", loss.item())\n # print(\"Epoch: \", epoch, \"\\tTraining Loss: \", loss.item(), \"Validation Loss: \", valid_loss.item())\n # clear gradient history\n optimizer.zero_grad()\n # Backward pass\n loss.backward()\n # parameter update\n optimizer.step()\n # scheduler.step(valid_loss)\n # find best model based on validation loss\n # if epoch == 0:\n # min_validation_loss = valid_loss.item()\n # elif valid_loss.item() < min_validation_loss:\n # min_validation_loss = valid_loss.item()\n # best_model = copy.deepcopy(model)\n \n\n\n # clear gradient history\n optimizer.zero_grad()\n\n # use best model\n model = best_model.eval()\n\n # predict test set \n test_labels_pred_ = model(test_data_torch)\n test_labels_pred = torch.max(test_labels_pred_,1)[1].detach().cpu()\n test_labels_pred = test_labels_pred.numpy()\n\n test_labels = test_labels.cpu().numpy()\n\n # calculate various metrics\n # all metrics are calculated by taking aggressive as positive class\n auroc = auc_history[len(auc_history) - 1]\n aupr = aupr_history[len(aupr_history) - 1]\n\n # record the calculated metrics \n auroc_folds.append(auroc)\n aupr_folds.append(aupr)\n\n\n # class classification rate from confusion matrix\n print(\"AUROC: \", auroc)\n print(\"AUPR: \", aupr)\n\n model = None\n optimizer = None\n loss_fn = None\n loss_fn_2 = None\n valid_fold_data = None\n valid_fold_labels = None\n valid_labels_pred = None\n valid_pred = None\n train_fold_pred = None\n adam = None\n rmsprop = None\n scaler = None\n\nwith open(\"./logs/CNN/auroc_scores.txt\", \"w\") as f:\n for auroc in auroc_folds:\n f.write(\"%f\\n\" % (auroc))\nwith open(\"./logs/CNN/aupr_scores.txt\", \"w\") as f:\n for aupr in aupr_folds:\n f.write(\"%f\\n\" % (aupr))","sub_path":"reproduce/figure_2/benign_aggressive/seed_2251/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":10236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"288436455","text":"class Solution(object):\n def largestRectangleArea(self, heights):\n \"\"\"\n :type heights: List[int]\n :rtype: int\n \"\"\"\n stack = []\n maxarea=0\n i=0\n while(i<=len(heights)):\n if len(stack)==0 or (iheights[stack[0]]):\n stack.insert(0, i)\n i+=1\n else:\n popped=stack.pop(0)\n if(len(stack)==0):\n maxarea=max(maxarea, heights[popped]*i)\n else:\n maxarea=max(maxarea, heights[popped]*(i-stack[0]-1))\n return maxarea\n \n","sub_path":"Largest Rectangle in Histogram.py","file_name":"Largest Rectangle in Histogram.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"607062329","text":"n = int(input(\"Enter the length of the sequence: \")) # Do not change this line\n#næsta tala í röðinni er\n\nfirst_number = 0\nsecond_number = 1\nthird_number = 2\nmain_number = 0\nnum = 1\n\nwhile num <= n:\n if num == 1:\n print(1)\n num += 1\n elif num == 2:\n print(2)\n num += 1\n elif num > 2 and num <= n:\n main_number = first_number + second_number + third_number\n print(main_number)\n first_number = second_number\n second_number = third_number\n third_number = main_number\n num += 1\n\n\n ","sub_path":"sequence.py","file_name":"sequence.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"223558748","text":"#!/usr/bin/python\n# ----------------------------------------------------------------\n# File: add_gateways.py\n#\n# This script downloads the list of TTN gateways around a certain central point.\n\nimport os\n\nfilePath = '/var/www/html/coverage/js/gateways.js';\n \n# As file at filePath is deleted now, so we should check if file exists or not not before deleting them\nif os.path.exists(filePath):\n os.remove(filePath)\n\nimport urllib, json\nurl = \"https://www.thethingsnetwork.org/gateway-data/location?latitude=55.6599740&longitude=12.5912461&distance=2000000\"\nresponse = urllib.urlopen(url)\ndata = json.loads(response.read())\n#print data\nprint(\"The following Gateways are added to the coverage map:\")\n\nfor key in data:\n if 'description' in data[key].keys(): gateway_name=data[key]['description']\n else: gateway_name = \"unknown\"\n gtw_id=data[key]['id']\n print(gtw_id) \n print(gateway_name)\n lat = data[key]['location']['latitude'] \n lon = data[key]['location']['longitude']\n alt = data[key]['location']['altitude'] \n file = open(\"/var/www/html/coverage/js/gateways.js\",\"a\")\n file.write('markers.addLayer(L.marker([')\n file.write(\"%f,\" % lat)\n file.write(\"%f,\" % lon)\n file.write(\"]));\")\n file.close()\n\n\n\n","sub_path":"home/add_gateways.py","file_name":"add_gateways.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"317329902","text":"from aws_cdk import (\n core,\n aws_iam as iam,\n aws_s3 as s3,\n)\n\nfrom infra.build_pipeline_construct import BuildPipelineConstruct\nfrom infra.batch_pipeline_construct import BatchPipelineConstruct\nfrom infra.deploy_pipeline_construct import DeployPipelineConstruct\n\n\nclass PipelineStack(core.Stack):\n def __init__(\n self,\n scope: core.Construct,\n construct_id: str,\n build_pipeline: bool,\n batch_pipeline: bool,\n deply_pipeline: bool,\n **kwargs,\n ) -> None:\n super().__init__(scope, construct_id, **kwargs)\n\n # Define required parmeters\n project_name = core.CfnParameter(\n self,\n \"SageMakerProjectName\",\n type=\"String\",\n description=\"The name of the SageMaker project.\",\n min_length=1,\n max_length=32,\n )\n project_id = core.CfnParameter(\n self,\n \"SageMakerProjectId\",\n type=\"String\",\n min_length=1,\n max_length=16,\n description=\"Service generated Id of the project.\",\n )\n\n # Get drift-pipeline parameters\n seed_bucket = self.resolve_ssm_parameter(\"CodeCommitSeedBucket\")\n seed_build_key = self.resolve_ssm_parameter(\"CodeCommitBuildKey\")\n seed_batch_key = self.resolve_ssm_parameter(\"CodeCommitBatchKey\")\n seed_deploy_key = self.resolve_ssm_parameter(\"CodeCommitDeployKey\")\n\n # Create the s3 artifact (name must be < 63 chars)\n artifact_bucket_name = (\n f\"sagemaker-project-{project_id.value_as_string}-{self.region}\"\n )\n s3_artifact = s3.Bucket(\n self,\n \"S3Artifact\",\n bucket_name=artifact_bucket_name,\n removal_policy=core.RemovalPolicy.DESTROY,\n )\n\n core.CfnOutput(self, \"ArtifactBucket\", value=s3_artifact.bucket_name)\n\n # Get the service catalog role for all permssions (if None CDK will create new roles)\n # CodeBuild and CodePipeline resources need to start with \"sagemaker-\" to be within default policy\n products_use_role_name = self.node.try_get_context(\"drift:ProductsUseRoleName\")\n if products_use_role_name:\n service_catalog_role = iam.Role.from_role_arn(\n self,\n \"ProductsUseRole\",\n f\"arn:{self.partition}:iam::{self.account}:role/{products_use_role_name}\",\n )\n # Use the service catalog role for all roles\n sagemaker_execution_role = service_catalog_role\n code_pipeline_role = service_catalog_role\n code_build_role = service_catalog_role\n cloudformation_role = service_catalog_role\n lambda_role = service_catalog_role\n event_role = service_catalog_role\n else:\n # Create unique scope roles per service, so that permissions can be added in build/deploy stacks\n sagemaker_execution_role = iam.Role(\n self,\n \"SageMakerExecutionRole\",\n assumed_by=iam.ServicePrincipal(\"sagemaker.amazonaws.com\"),\n path=\"/service-role/\",\n )\n code_pipeline_role = iam.Role(\n self,\n \"CodePipelineRole\",\n assumed_by=iam.ServicePrincipal(\"codepipeline.amazonaws.com\"),\n path=\"/service-role/\",\n )\n code_build_role = iam.Role(\n self,\n \"CodeBuildRole\",\n assumed_by=iam.ServicePrincipal(\"codebuild.amazonaws.com\"),\n path=\"/service-role/\",\n )\n cloudformation_role = iam.Role(\n self,\n \"CloudFormationRole\",\n assumed_by=iam.ServicePrincipal(\"cloudformation.amazonaws.com\"),\n path=\"/service-role/\",\n )\n lambda_role = iam.Role(\n self,\n \"LambdaRole\",\n assumed_by=iam.ServicePrincipal(\"lambda.amazonaws.com\"),\n path=\"/service-role/\",\n )\n event_role = iam.Role(\n self,\n \"EventRole\",\n assumed_by=iam.ServicePrincipal(\"events.amazonaws.com\"),\n path=\"/service-role/\",\n )\n\n # Add cloudformation to allow creating CW rules for re-training, and passing event role\n 
cloudformation_role.add_to_policy(\n iam.PolicyStatement(\n actions=[\n \"events:DeleteRule\",\n \"events:DescribeRule\",\n \"events:PutRule\",\n \"events:PutTargets\",\n \"events:RemoveTargets\",\n ],\n resources=[\"arn:aws:events:*:*:rule/sagemaker-*\"],\n )\n )\n cloudformation_role.add_to_policy(\n iam.PolicyStatement(\n actions=[\n \"iam:PassRole\",\n ],\n resources=[event_role.role_arn],\n )\n )\n\n # Add cloudwatch logs\n logs_policy = iam.PolicyStatement(\n actions=[\n \"logs:CreateLogGroup\",\n \"logs:CreateLogStream\",\n \"logs:PutLogEvents\",\n ],\n resources=[\"*\"],\n )\n lambda_role.add_to_policy(logs_policy)\n\n # Create a policy statement for SM and ECR pull\n sagemaker_policy = iam.Policy(\n self,\n \"SageMakerPolicy\",\n document=iam.PolicyDocument(\n statements=[\n logs_policy,\n iam.PolicyStatement(\n actions=[\"sagemaker:*\"],\n not_resources=[\n \"arn:aws:sagemaker:*:*:domain/*\",\n \"arn:aws:sagemaker:*:*:user-profile/*\",\n \"arn:aws:sagemaker:*:*:app/*\",\n \"arn:aws:sagemaker:*:*:flow-definition/*\",\n ],\n ),\n iam.PolicyStatement(\n actions=[\n \"ecr:BatchCheckLayerAvailability\",\n \"ecr:BatchGetImage\",\n \"ecr:Describe*\",\n \"ecr:GetAuthorizationToken\",\n \"ecr:GetDownloadUrlForLayer\",\n ],\n resources=[\"*\"],\n ),\n iam.PolicyStatement(\n actions=[\n \"cloudwatch:PutMetricData\",\n ],\n resources=[\"*\"],\n ),\n iam.PolicyStatement(\n actions=[\n \"s3:AbortMultipartUpload\",\n \"s3:DeleteObject\",\n \"s3:GetBucket*\",\n \"s3:GetObject*\",\n \"s3:List*\",\n \"s3:PutObject*\",\n ],\n resources=[\n s3_artifact.bucket_arn,\n f\"{s3_artifact.bucket_arn}/*\",\n ],\n ),\n iam.PolicyStatement(\n actions=[\"iam:PassRole\"],\n resources=[sagemaker_execution_role.role_arn],\n ),\n ]\n ),\n )\n # # SageMaker needs to manage pipelines, model package groups\n sagemaker_policy.attach_to_role(sagemaker_execution_role)\n # Code build needs to query model package groups and artifacts\n sagemaker_policy.attach_to_role(code_build_role)\n # CloudFormation creates models and endpoints\n sagemaker_policy.attach_to_role(cloudformation_role)\n # Lambda needs to describe SM and put metrics\n sagemaker_policy.attach_to_role(lambda_role)\n\n # Define an environment object to pass to build\n env = core.Environment(account=self.account, region=self.region)\n\n # Define the repository name and branch\n branch_name = \"main\"\n\n if build_pipeline:\n # Require a schedule parameter (must be cron, otherwise will trigger every time rate is enabled/disabled)\n # https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html\n retrain_schedule = core.CfnParameter(\n self,\n \"RetrainSchedule\",\n type=\"String\",\n description=\"The expression to retrain schedule. Defaults to first day of the month.\",\n default=\"cron(0 12 1 * ? *)\", # 1st of the month at 12am\n min_length=1,\n )\n BuildPipelineConstruct(\n self,\n \"build\",\n env=env,\n sagemaker_execution_role=sagemaker_execution_role,\n code_pipeline_role=code_pipeline_role,\n code_build_role=code_build_role,\n cloudformation_role=cloudformation_role,\n event_role=event_role,\n lambda_role=lambda_role,\n s3_artifact=s3_artifact,\n branch_name=branch_name,\n project_id=project_id.value_as_string,\n project_name=project_name.value_as_string,\n seed_bucket=seed_bucket,\n seed_key=seed_build_key,\n retrain_schedule=retrain_schedule.value_as_string,\n )\n\n if batch_pipeline:\n batch_schedule = core.CfnParameter(\n self,\n \"BatchSchedule\",\n type=\"String\",\n description=\"The expression to batch schedule. 
Defaults to every day.\",\n default=\"cron(0 12 * * ? *)\", # Every day at 12am\n min_length=1,\n )\n BatchPipelineConstruct(\n self,\n \"batch\",\n env=env,\n sagemaker_execution_role=sagemaker_execution_role,\n code_pipeline_role=code_pipeline_role,\n code_build_role=code_build_role,\n cloudformation_role=cloudformation_role,\n event_role=event_role,\n lambda_role=lambda_role,\n s3_artifact=s3_artifact,\n branch_name=branch_name,\n project_id=project_id.value_as_string,\n project_name=project_name.value_as_string,\n seed_bucket=seed_bucket,\n seed_key=seed_batch_key,\n batch_schedule=batch_schedule.value_as_string,\n )\n\n if deply_pipeline:\n DeployPipelineConstruct(\n self,\n \"deploy\",\n sagemaker_execution_role=sagemaker_execution_role,\n code_pipeline_role=code_pipeline_role,\n code_build_role=code_build_role,\n cloudformation_role=cloudformation_role,\n event_role=event_role,\n s3_artifact=s3_artifact,\n branch_name=branch_name,\n project_id=project_id.value_as_string,\n project_name=project_name.value_as_string,\n seed_bucket=seed_bucket,\n seed_key=seed_deploy_key,\n )\n\n def resolve_ssm_parameter(self, key: str):\n parameter_name = self.node.try_get_context(f\"drift:{key}\")\n return core.CfnDynamicReference(\n core.CfnDynamicReferenceService.SSM, parameter_name\n ).to_string()\n\n\nclass BatchPipelineStack(PipelineStack):\n \"\"\"Creates a Pipeline for batch deployment\"\"\"\n\n def __init__(\n self,\n scope: core.Construct,\n construct_id: str,\n **kwargs,\n ) -> None:\n super().__init__(scope, construct_id, True, True, False, **kwargs)\n\n\nclass DeployPipelineStack(PipelineStack):\n \"\"\"Creates a Pipelinfe for real-time deployment\"\"\"\n\n def __init__(\n self,\n scope: core.Construct,\n construct_id: str,\n **kwargs,\n ) -> None:\n super().__init__(scope, construct_id, True, False, True, **kwargs)\n","sub_path":"infra/pipeline_stack.py","file_name":"pipeline_stack.py","file_ext":"py","file_size_in_byte":12580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"373193249","text":"# coding=utf-8\n# Copyright 2014 Foursquare Labs Inc. All Rights Reserved.\n\nfrom __future__ import (\n absolute_import,\n division,\n generators,\n nested_scopes,\n print_function,\n unicode_literals,\n with_statement,\n)\n\nfrom functools import total_ordering\nimport logging\nimport re\n\n\nlogger = logging.getLogger(__name__)\n\n\n@total_ordering\nclass MavenVersion(object):\n MAVEN_VERSION_REGEX = re.compile(\n r'(?P\\d+)\\.'\n r'(?P\\d+)'\n r'(?P\\.\\d+)?'\n r'(?P-\\w+)?'\n )\n\n def __init__(self, version_str):\n self._version_str = version_str\n match = self.MAVEN_VERSION_REGEX.match(self._version_str)\n if not match:\n raise Exception('Invalid Maven version string: {}'.format(self._version_str))\n self.major = int(match.group('major'))\n self.minor = int(match.group('minor'))\n rev = match.group('rev')\n if rev:\n self.rev = int(rev[1:])\n else:\n self.rev = 0\n qualifier_or_build = match.group('qualifier_or_build')\n if qualifier_or_build:\n qualifier_or_build = qualifier_or_build[1:]\n if re.match(r'\\d+', qualifier_or_build):\n self.build = int(qualifier_or_build)\n self.qualifier = None\n else:\n self.build = None\n self.qualifier = qualifier_or_build\n else:\n self.build = 0\n self.qualifier = None\n\n def __lt__(self, rhs):\n if (self.major, self.minor, self.rev) != (rhs.major, rhs.minor, rhs.rev):\n return (self.major, self.minor, self.rev) < (rhs.major, rhs.minor, rhs.rev)\n elif self.qualifier is None and rhs.qualifier is not None:\n return True\n elif rhs.qualifier is None and self.qualifier is not None:\n return False\n elif self.qualifier is not None and rhs.qualifier is not None:\n return self.qualifier.lower() < rhs.qualifier.lower()\n else:\n return self.build < rhs.build\n\n def __eq__(self, rhs):\n self_tuple = (self.major, self.minor, self.rev, self.qualifier, self.build)\n rhs_tuple = (rhs.major, rhs.minor, rhs.rev, rhs.qualifier, rhs.build)\n return self_tuple == rhs_tuple\n\n def __repr__(self):\n return 'MavenVersion{}'.format((self.major, self.minor, self.rev, self.qualifier, self.build))\n\n def __str__(self):\n return self._version_str\n\n\nclass MavenVersionRangeRef(object):\n \"\"\"A container and parser for Maven Version Range specs.\n\n See: http://docs.oracle.com/middleware/1212/core/MAVEN/maven_version.htm#MAVEN402\n or http://docs.codehaus.org/display/MAVEN/Dependency+Mediation+and+Conflict+Resolution#DependencyMediationandConflictResolution-DependencyVersionRanges\n or any of the other variously undated or outdated or ignored documentations of this spec.\n \"\"\"\n\n # e.g. '1.0'. \"Suggested\" because someone thought it was a good idea to make '1.0' the\n # \"eh, whatever\" spec and '[1.0]' the \"no, seriously\" spec.\n SUGGESTED_VERSION_REGEX = re.compile(r'^([^\\[\\](),<>=]+)$')\n\n # e.g. '[1.0]'. See above.\n EXACT_RANGE_SPEC_REGEX = re.compile(r'^\\[{}\\]$'.format(SUGGESTED_VERSION_REGEX.pattern))\n\n # Just for code cleanliness. 
Matches '[foo]', '(foo)', '[foo)', '(foo]'.\n RANGE_REF_PATTERN = r'[\\[(].*?[\\]|)]'\n\n # A comma delimited list of the above pattern, with some generous whitespace guards.\n RANGES_REGEX = re.compile(\n r'^\\s*{range_ref_pattern}'\n r'(?:\\s*,{range_ref_pattern}\\s*)*$'\n .format(range_ref_pattern=RANGE_REF_PATTERN)\n )\n\n # The same as `RANGE_REF_PATTERN`, but pattern matched into component parts.\n RANGE_REF_REGEX = re.compile(\n r'(?P\\[|\\()'\n r'(?P.*?)'\n r'(?P\\]|\\))'\n )\n\n # Very liberally whitespace guarded pattern match of \"range_content\" in the above pattern.\n # Matches ',foo', 'foo,', 'foo,bar', etc.\n RANGE_CONTENT_REGEX = re.compile(\n r'^\\s*'\n r'(?P.*?)'\n r'\\s*,\\s*'\n r'(?P.*?)'\n r'\\s*$'\n )\n\n def __init__(self, ref_str):\n self._ref_str = ref_str.strip()\n self._parse_ref_to_matchers()\n\n def _parse_ref_to_matchers(self):\n # Special case: This is just a version with no extra bells and whistles.\n # Also the most common case.\n match = self.SUGGESTED_VERSION_REGEX.match(self._ref_str)\n if match:\n self._matchers = [lambda candidate: candidate == match.groups(0)]\n return\n\n # The \"exact, no really\" spec. We handle this and the loose \"this one, I guess\" spec\n # identically.\n match = self.EXACT_RANGE_SPEC_REGEX.match(self._ref_str)\n if match:\n self._matchers = [lambda candidate: candidate == match.groups(0)]\n return\n\n # A sequence of range specs\n matches = self.RANGES_REGEX.findall(self._ref_str)\n if not matches:\n raise Exception('Invalid Maven Version Range ref: {0}'.format(self._ref_str))\n\n self._matchers = []\n for matched_substr in matches:\n range_match = self.RANGE_REF_REGEX.match(matched_substr)\n begin_range_token = range_match.group('begin_range')\n end_range_token = range_match.group('end_range')\n range_content_match = self.RANGE_CONTENT_REGEX.match(range_match.group('range_content'))\n\n left_version = range_content_match.group('left_version')\n left_matcher = None\n if left_version:\n left_maven_version = MavenVersion(left_version)\n if begin_range_token == '[':\n left_matcher = lambda candidate: left_maven_version <= candidate\n else:\n left_matcher = lambda candidate: left_maven_version < candidate\n else:\n left_matcher = lambda _: True\n\n right_version = range_content_match.group('right_version')\n right_matcher = None\n if right_version:\n right_maven_version = MavenVersion(right_version)\n if begin_range_token == ']':\n right_matcher = lambda candidate: candidate <= right_maven_version\n else:\n right_matcher = lambda candidate: candidate < right_maven_version\n else:\n right_matcher = lambda _: True\n self._matchers.append(lambda mv: left_matcher(mv) and right_matcher(mv))\n\n def matches(self, maven_version):\n for matcher in self._matchers:\n if matcher(maven_version):\n return True\n return False\n","sub_path":"src/python/fsqio/pants/pom/maven_version.py","file_name":"maven_version.py","file_ext":"py","file_size_in_byte":6128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"581235759","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ## Neural Networks\n\n# Yujue Wang wangy66@rpi.edu\n\n# In[550]:\n\n\nimport numpy as np\nimport math\nimport sys\nimport pandas as pd\nfrom random import uniform, randrange, sample\n\n\n# In[569]:\n\n\nTrain = pd.read_csv(sys.argv[1], sep=\",\", header=None)#sys.argv[1]\nTest = pd.read_csv(sys.argv[2], sep=\",\", header=None)#sys.argv[2]\nm = int(sys.argv[3]) #sys.argv[3]\nn = float(sys.argv[4]) #sys.argv[4]\nepochs = int(sys.argv[5]) #sys.argv[5], maxtier\n\n\n# Function Module\n\n# In[552]:\n\n\n# function for construct D and class array\ndef initializer(rawData):\n col = rawData.shape[1]\n row = rawData.shape[0]\n dataClass = rawData[col - 1].values.reshape(row, 1)\n D = D = {'X1':rawData[0],'X2': rawData[1],'X3': rawData[2],'X4': rawData[3], 'X5': rawData[4], 'X6': rawData[5], 'X7': rawData[6], 'X8': rawData[7], 'X9': rawData[8]}\n D = pd.DataFrame(D).values\n numClass = np.amax(dataClass) - np.amin(dataClass) + 1 # p\n return D, dataClass, numClass\n\n\n# In[553]:\n\n\n# initialize bias vectors\ndef biasVector(m, p):\n bh = []\n bo = []\n for i in range(m):\n bh.append(uniform(0.01, 1))# possible modified\n for i in range(p):\n bo.append(uniform(0.01, 1))\n return np.array(bh), np.array(bo)\n\n\n# In[554]:\n\n\n# initialize weight matrices\ndef weightMatrice(d, m, p):\n wh = np.zeros((d, m))\n wo = np.zeros((m, p))\n for i in range(d):\n for j in range(m):\n wh[i][j] = uniform(-0.01, 0.01)\n if (wh[i][j] == 0):\n j -= 1\n \n for i in range(m):\n for j in range(p):\n wo[i][j] = uniform(-0.01, 0.01)\n if (wo[i][j] == 0):\n j -= 1\n return wh, wo\n\n\n# In[555]:\n\n\n# generate true response vector\ndef generateY(dataClass, row, numClass):\n y = []\n for i in range(row):\n yi = np.zeros(numClass)\n if (dataClass[i] == 1):\n yi[0] = 1\n elif (dataClass[i] == 2):\n yi[1] = 1\n else:\n yi[2] = 1\n y.append(np.array(yi))\n return np.array(y)\n\n\n# In[556]:\n\n\ndef softmax(x):\n net_sum = 0\n for i in range(x.shape[0]):\n net_sum += np.exp(x[i]) #get denominator of softmax function\n o = np.zeros([x.shape[0],1])\n for j in range(x.shape[0]):\n o[j] = np.exp(x[j])/net_sum #neuron vec given hidden neuron vector z\n return o\n\n\n# In[557]:\n\n\n# Feed-forward phase\ndef feedForwardZ(b, w, xi):\n netk = b + w.T.dot(xi)\n return netk, np.maximum(0, netk)\n\ndef feedForwardO(b, w, zi):\n netj = b + np.dot(w.T,zi) #output layer neuron \n o = softmax(netj) #neuron vec given hidden neuron vector z\n return o\n\n\n# In[558]:\n\n\n# Backpropagation phase\n# output layer, softmax\ndef calNetGradientO(oi, yi):\n res = oi.T - yi\n return res[0]\n\n\n# In[559]:\n\n\n# hidden layer, ReLU activation\ndef calNetGradientH(wo, neto, netk):\n partials = np.zeros_like(netk)\n for i in range(netk.shape[0]):\n if netk[i]<=0:\n partials[i] = 0\n else:\n partials[i] = 1\n return np.multiply(np.dot(wo, neto),partials)[0]\n\n\n# In[560]:\n\n\n# MLP Training\ndef MLPTraining(D,dataClass, m, n, epochs, numClass):\n # implementation\n bh, bo = biasVector(m, numClass)\n wh, wo = weightMatrice(D.shape[1], m, numClass)\n row = D.shape[0]\n y = generateY(dataClass, row, numClass)\n t = 0 #iteration counter\n randomList = sample(range(row), row)\n while (t < epochs):\n for i in randomList:\n # Feed-forward phase\n netk, zi = feedForwardZ(bh, wh, D[i])\n #print(\"zi is\", zi)\n oi = feedForwardO(bo, wo, zi)\n #print(\"oi is \", oi)\n \n # Backpropagation phase\n neto = calNetGradientO(oi, y[i])\n #print(\"neto is \", neto[0])\n neth = 
calNetGradientH(wo, neto, netk.reshape(m, 1))\n #print(\"neth is \", neth)\n\n # Gradient descent for bias vectors\n graDescentBo = neto\n #print(graDescentBo.shape)\n #print(n)\n bo = bo - n * graDescentBo\n \n graDescentBh = neth\n #print(graDescentBh)\n #print(bh)\n bh = bh - n * graDescentBh\n \n # Gradient descent for weight matrices\n #print(neth.shape)\n graDescentWo = zi.reshape(1, len(zi)) * neto.reshape(len(neto), 1)\n wo = wo - n * graDescentWo.T\n \n graDescentWh = D[i].reshape(1, len(D[i])) * neth.reshape(len(neth), 1)\n wh = wh - n * graDescentWh.T\n \n t += 1\n\n return bh, bo, wo, wh\n\n\n# ### Train\n\n# In[561]:\n\n\nD, dataClass, numClass = initializer(Train)\n\n\n# In[562]:\n\n\nbh, bo, wo, wh = MLPTraining(D,dataClass, m, n, epochs, numClass)\n\n\n# In[563]:\n\n\nprint(\"bh is \",bh, \"\\n\")\nprint(\"bo is \",bo, \"\\n\")\nprint(\"wo is \",wo, \"\\n\")\nprint(\"wh is \",wh, \"\\n\")\n\n\n# ### Test\n\n# In[564]:\n\n\nDT, dataClassT, numClassT = initializer(Test)\n\n\n# In[565]:\n\n\npredict = []\nfor i in DT:\n netk, zi = feedForwardZ(bh, wh, i)\n oi = feedForwardO(bo, wo, zi)\n predict.append(oi)\n\n\n# In[566]:\n\n\ndef findIndex(predict):\n res = []\n for i in predict:\n target = np.amax(i)\n for j in range(len(i)):\n if (i[j] == target):\n res.append(j + 1)\n return res\n\n\n# In[567]:\n\n\npredictY = findIndex(predict)\n\n\n# In[568]:\n\n\ncnt = 0\nfor i in range(len(predictY)):\n if (predictY[i] == dataClassT[i]):\n cnt += 1\nprint(\"accuracy is \", cnt / len(predictY))\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Neural Networks/assign4.py","file_name":"assign4.py","file_ext":"py","file_size_in_byte":5546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"573702865","text":"from django.core.management.base import BaseCommand, CommandError\r\nfrom quest.models import *\r\nimport os\r\nimport server.settings\r\nimport quest.google_api.api\r\nimport random\r\n\r\nclass Command(BaseCommand):\r\n help = 'Update views count for quests'\r\n\r\n SCOPE = ['https://www.googleapis.com/auth/analytics.readonly']\r\n SERVICE_ACCOUNT_EMAIL = 'service@ageless-granite-156009.iam.gserviceaccount.com'\r\n KEY_FILE_LOCATION = os.path.join(server.settings.BASE_DIR, 'quest', 'google_api', 'Analytics-691f0dc58d05.p12')\r\n\r\n def updateDB(self, results):\r\n if results and isinstance(results, dict) and 'rows' in results:\r\n rows = results['rows']\r\n Quest.objects.all().update(view_count=0)\r\n for path, v in {r[0]: {'views': r[1], 'uviews': r[2]} for r in rows}.items():\r\n url = filter(None, path.rsplit('/'))[-1]\r\n # print url, v['views'], v['uviews']\r\n try:\r\n q = Quest.objects.get(seo_url=url)\r\n q.view_count = int(v['views'])\r\n q.save()\r\n except:\r\n pass\r\n\r\n visitors, created = Settings.objects.get_or_create(name='visitors_count')\r\n min, min_created = Settings.objects.get_or_create(name='visitors_min_count')\r\n max, max_created = Settings.objects.get_or_create(name='visitors_max_count')\r\n dx, dx_created = Settings.objects.get_or_create(name='visitors_dx_count')\r\n\r\n if min_created:\r\n min.value = 7\r\n min.save()\r\n if max_created:\r\n max.value = 38\r\n max.save()\r\n if dx_created:\r\n dx.value = 3\r\n dx.save()\r\n\r\n val = 0\r\n\r\n if created:\r\n val = random.randint(int(min.value), int(max.value))\r\n else:\r\n val = random.randint(int(visitors.value) - int(dx.value), int(visitors.value) + int(dx.value))\r\n\r\n if val > int(max.value):\r\n val = max.value\r\n elif val < int(min.value):\r\n val = min.value\r\n\r\n visitors.value = val\r\n\r\n visitors.save()\r\n\r\n\r\n\r\n\r\n def add_arguments(self, parser):\r\n pass\r\n # parser.add_argument('poll_id', nargs='+', type=int)\r\n\r\n def handle(self, *args, **options):\r\n # Authenticate and construct service.\r\n service = quest.google_api.api.get_service('analytics', 'v3', Command.SCOPE,\r\n Command.KEY_FILE_LOCATION,\r\n Command.SERVICE_ACCOUNT_EMAIL)\r\n profile = quest.google_api.api.get_first_profile_id(service)\r\n results = quest.google_api.api.get_pageviews(service, profile)\r\n self.updateDB(results)","sub_path":"quest/management/commands/updateQuestViews.py","file_name":"updateQuestViews.py","file_ext":"py","file_size_in_byte":2752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"323999322","text":"#! /usr/bin/python\n# To change this license header, choose License Headers in Project Properties.\n# To change this template file, choose Tools | Templates\n# and open the template in the editor.\n# -*- coding: utf-8 -*-\n\nfrom gi.repository import Gtk\n\nfrom conexion import bd\n\nimport clientes\n\nimport factura\n\nimport impresion\n\nclass main:\n\n def __init__(self):\n\n#declaracion de widgets\n b = Gtk.Builder()\n b.add_from_file(\"taller.glade\")\n self.ventanaPrincipal = b.get_object(\"ventanaPrincipal\")\n self.ventanaNeocli = b.get_object(\"ventanaNeocli\")\n self.ventanaTaller = b.get_object(\"ventanaTaller\")\n self.ventanaVentas = b.get_object(\"ventanaVentas\")\n self.btnSalircli = b.get_object(\"btnSalircli\")\n self.btnNeocli = b.get_object(\"btnNeocli\")\n self.btnGrabcli = b.get_object(\"btnGrabcli\")\n self.btnSalir = b.get_object(\"btnSalir\")\n self.listCliente = b.get_object(\"listCliente\")\n self.dnicli = b.get_object(\"entDni\")\n self.apelcli = b.get_object(\"entApel\")\n self.nomcli = b.get_object(\"entNom\")\n self.dircli = b.get_object(\"entDir\")\n self.loccli = b.get_object(\"entLoc\")\n self.provcli = b.get_object(\"cbProv\")\n self.cpcli = b.get_object(\"entCp\")\n self.movcli = b.get_object(\"entMov\")\n self.telcli = b.get_object(\"entTel\")\n self.mailcli = b.get_object(\"entMail\")\n self.sicli = b.get_object(\"rtbSi\")\n self.nocli = b.get_object(\"rtbNo\")\n self.entMatrifac = b.get_object(\"entMatri\")\n self.entModelfac = b.get_object(\"entModel\")\n self.entFechafac = b.get_object(\"entFecha\")\n self.entMarcafac = b.get_object(\"entMarca\")\n self.lbldnifac = b.get_object(\"lbldnifac\")\n self.lblidfac = b.get_object(\"lblidfac\")\n self.avisodni = b.get_object(\"dlgAvis\")\n self.btnAceptar = b.get_object(\"btnAceptar\")\n self.listCliente = b.get_object(\"listCiente\")\n self.trewCliente = b.get_object(\"trwCliente\")\n self.listFactura = b.get_object(\"listFactura\")\n self.trewFactura = b.get_object(\"trewFactura\") \n self.btnBorrarcli = b.get_object(\"btnBorrarcli\")\n self.btnGrabarfac = b.get_object(\"btnGrabarfac\")\n self.btnVentas = b.get_object(\"btnVentas\")\n self.btnSalirven = b.get_object(\"btnSalirven\")\n self.lblMatriven = b.get_object(\"lblMatriven\")\n self.lblFacturav = b.get_object(\"lblFacturav\")\n self.entConce = b.get_object(\"entConce\")\n self.entPrecio = b.get_object(\"entPrecio\")\n self.trewVentas = b.get_object(\"trewVentas\")\n self.listaVentas = b.get_object(\"listVentas\")\n self.btnImprimir = b.get_object(\"btnImprimir\")\n self.menubar = b.get_object(\"menubar\")\n self.listaProv=b.get_object(\"listaProv\")\n \n self.ventanaPrincipal.maximize()\n self.ventanaNeocli.maximize()\n self.ventanaTaller.maximize()\n self.ventanaVentas.maximize()\n self.ventanaPrincipal.show()\n clientes.mostrar(self.listCliente, self.trewCliente)\n \n\n dic = {\"on_btnNeocli_clicked\": self.on_btnNeocli_clicked,\n \"on_btnSalir_clicked\": self.on_btnSalir_clicked,\n \"on_btnSalircli_clicked\": self.on_btnSalircli_clicked,\n \"on_btnGrabcli_clicked\": self.on_btnGrabcli_clicked,\n \"on_ventanaPrincipal_destroy\": self.on_ventanaPrincipal_destroy,\n \"on_ventanaNeocli_delete_event\": self.on_ventanaNeocli_delete_event,\n \"on_entDni_focus_out_event\": self.on_entDni_focus_out_event,\n \"on_btnAceptar_clicked\": self.on_btnAceptar_clicked,\n \"on_rbtNo_toggled\": self.on_rbtNo_toggled,\n \"on_btnBorrarcli_clicked\": self.on_btnBorrarcli_clicked,\n \"on_trwCliente_cursor_changed\": 
self.on_trwCliente_cursor_changed,\n \"on_trewFactura_cursor_changed\": self.on_trewFactura_cursor_changed,\n \"on_btnTaller_clicked\": self.on_btnTaller_clicked,\n \"on_btnSalirtaller_clicked\": self.on_btnSalirtaller_clicked,\n \"on_btnGrabarfac_clicked\": self.on_btnGrabarfac_clicked,\n \"on_ventanaTaller_destroy\": self.on_ventanaTaller_destroy,\n \"on_ventanaTaller_delete_event\": self.on_ventanaTaller_delete_event,\n \"on_btnVentas_clicked\": self.on_btnVentas_clicked,\n \"on_btnSalirven_clicked\": self.on_btnSalirven_clicked,\n \"on_ventanaVentas_destroy\": self.on_ventanaVentas_destroy,\n \"on_btnSalirven_delete_event\": self.on_btnSalirven_delete_event,\n \"on_btnGrabarven_clicked\": self.on_btnGrabarven_clicked,\n \"on_imagemenuitem5_activate\": self.on_imagemenuitem5_activate,\n \"on_btnImprimir_clicked\": self.on_btnImprimir_clicked\n }\n\n b.connect_signals(dic)\n\n#declaracion y codificacion de funciones\n def on_btnImprimir_clicked(self, widget):\n impresion.imprimir(self.dataf, self.datam, self.data)\n \n def on_imagemenuitem5_activate(self, widget):\n Gtk.main_quit()\n\n def on_btnGrabarven_clicked(self, widget, Data=None):\n self.Conce = self.entConce.get_text()\n self.Precio = self.entPrecio.get_text()\n if factura.Grabarven(self.dataf, self.Conce, self.Precio) == False:\n self.avisodni.show()\n factura.limpiarven(self.entConce, self.entPrecio)\n factura.mostrarven(self.listaVentas, self.trewVentas, self.dataf)\n \n def on_btnVentas_clicked(self, widget):\n self.lblMatriven.set_text(self.datam)\n self.lblFacturav.set_text(self.dataf)\n factura.mostrarven(self.listaVentas, self.trewVentas, self.dataf)\n self.ventanaVentas.show()\n \n def on_ventanaVentas_destroy(self, widget):\n self.ventanaVentas.hide()\n return True\n \n def on_btnTaller_clicked(self, widget):\n self.lbldnifac.set_text(self.data)\n factura.mostrar(self.listFactura, self.trewFactura, self.data)\n self.ventanaTaller.show()\n \n def on_btnSalirven_clicked(self, widget):\n self.ventanaVentas.hide()\n return True\n \n def on_btnSalirven_delete_event(self, widget):\n self.ventanaVentas.hide()\n return True \n \n def on_ventanaTaller_destroy(self, widget):\n self.ventanaTaller.hide()\n return True\n \n def on_ventanaTaller_delete_event(self, widget, Data=None):\n self.ventanaTaller.hide()\n return True\n \n def on_btnSalirtaller_clicked(self, widget):\n self.ventanaTaller.hide()\n return True\n \n def on_btnGrabarfac_clicked(self, widget):\n self.dnifac = self.data\n self.matrifac = self.entMatrifac.get_text()\n self.matrifac.upper()\n self.marcafac = self.entMarcafac.get_text()\n self.marca.capitalize()\n self.modelfac = self.entModelfac.get_text()\n self.modelfac.capitalize()\n self.fechafac = self.entFechafac.get_text()\n if factura.Grabarfac(self.dnifac, self.matrifac, self.marcafac, self.modelfac, self.fechafac) == False:\n self.aviso.show()\n factura.limpiarfac(self.lbldnifac, self.entMatrifac, self.entMarcafac, self.entModelfac, self.entFechafac, self.lblidfac)\n factura.mostrar(self.listFactura, self.trewFactura, self.data)\n \n def on_trewFactura_cursor_changed(self, widget):\n self.seleccion = self.trewFactura.get_selection()\n model, iter = self.seleccion.get_selected()\n self.dataf = model[iter][0]\n self.dataf = str(self.dataf)\n self.datam = model[iter][2]\n self.datam = str(self.datam)\n \n \n def on_trwCliente_cursor_changed(self, widget, Data=None):\n self.seleccion = self.trewCliente.get_selection()\n model, iter = self.seleccion.get_selected()\n self.data = model[iter][0]\n self.data = 
str(self.data)\n \n def on_btnBorrarcli_clicked(self, widget):\n clientes.Borrarcli(self.data)\n clientes.mostrar(self.listCliente, self.trewCliente) \n \n def on_btnNeocli_clicked(self, widget, data=None):\n self.llenarCombo()\n self.ventanaNeocli.show()\n self.pub = \"no\"\n\n def on_btnSalir_clicked(self, widget):\n Gtk.main_quit()\n\n def on_ventanaPrincipal_destroy(self, widget):\n Gtk.main_quit()\n\n def on_ventanaNeocli_delete_event(self, widget, data=None):\n self.ventanaNeocli.hide()\n return True\n\n def on_btnAceptar_clicked(self, widget):\n self.avisodni.hide()\n return True\n\n def on_entDni_focus_out_event(self, widget, Data=None):\n self.dni = self.dnicli.get_text()\n self.dni = self.dni.upper()\n self.dnicli.set_text(self.dni)\n if (clientes.validoDNI(self.dni) is False and self.dni != \"\"):\n self.avisodni.show()\n self.dnicli.set_text(\"\")\n\n def on_btnSalircli_clicked(self, widget, Data=None):\n self.ventanaNeocli.hide()\n return True\n\n def on_rbtNo_toggled(self, widget, Data=None):\n if widget.get_active():\n self.pub = \"no\"\n else:\n self.pub = \"si\"\n \n def llenarCombo(self):\n cursor = bd.cursor()\n rows=cursor.execute(\"SELECT provincia FROM provincias\")\n for row in rows:\n self.listaProv.append([row[0]])\n \n def on_btnGrabcli_clicked(self, widget):\n self.dni = self.dnicli.get_text()\n self.apel = self.apelcli.get_text()\n self.nom = self.nomcli.get_text()\n self.dir = self.dircli.get_text()\n self.loc = self.loccli.get_text()\n \n tree_iter = self.provcli.get_active_iter()\n if tree_iter != None:\n model = self.provcli.get_model()\n self.prov = model[tree_iter][0]\n \n self.cp = self.cpcli.get_text()\n self.mov = self.movcli.get_text()\n self.tel = self.telcli.get_text()\n self.mail = self.mailcli.get_text()\n \n if clientes.Grabarcli(self.dni, self.apel, self.nom, self.dir, self.loc, self.prov, self.cp, self.mov, self.tel, self.mail, self.pub) == False:\n self.avisodni.show()\n \n clientes.limpiarcli(self.dnicli, self.apelcli, self.nomcli, self.dircli, self.loccli, self.provcli, self.cpcli, self.movcli, self.telcli, self.mailcli)\n clientes.mostrar(self.listCliente, self.trewCliente)\n\nif __name__ == \"__main__\":\n main = main()\n Gtk.main()","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"268764275","text":"\n\n\n\n\nbtag_veto_soft_bjet = '(nBSoftJet == 0 )'\nbtag_one_soft_bjet = '(nBSoftJet == 1 )'\nbtag_one_or_more_soft_bjet = '(nBSoftJet >= 1 )'\nbtag_veto_hard_bjet = '(nBHardJet == 0 )'\nbtag_one_hard_bjet = '(nBHardJet == 1 )'\nbtag_one_or_more_hard_bjet = '(nBHardJet >= 1 )'\nbtag_veto_bjet = '((nBHardJet + nBSoftJet)== 0 )'\nbtag_one_bjet = '((nBHardJet + nBSoftJet)== 1 )'\nbtag_one_or_more_bjet = '((nBHardJet + nBSoftJet)>= 1 )'\nbtag_two_or_more_bjet = '((nBHardJet + nBSoftJet)>= 2 )'\nbtag_sr1_bjet = btag_veto_bjet\nbtag_sr2_bjet = \"( (nBSoftJet>=1) && (nBHardJet==0) )\"\nbtag_cr1_bjet = btag_veto_bjet\nbtag_cr2_bjet = \"( (nBSoftJet>=1) && (nBHardJet==0) )\"\nbtag_crtt1_bjet = \"( (nBSoftJet==0) && (nBHardJet==1) )\"\nbtag_crtt2_bjet = \"( (nBJet>=2) && (nBHardJet>=1) )\"\n\n\n\n\nsf_veto_soft_bjet = '(weightSBTag0_SF)'\nsf_one_soft_bjet = '(weightSBTag1_SF)'\nsf_one_or_more_soft_bjet = '(weightSBTag1p_SF)'\nsf_veto_hard_bjet = '(weightHBTag0_SF)'\nsf_one_hard_bjet = '(weightHBTag1_SF)'\nsf_one_or_more_hard_bjet = '(weightHBTag1p_SF)'\nsf_veto_bjet = '(weightBTag0_SF)'\nsf_one_bjet = '(weightBTag1_SF)'\nsf_one_or_more_bjet = '(weightBTag1p_SF)'\nsf_two_or_more_bjet = '(weightBTag2p_SF)'\n\nsf_sr1_bjet = sf_veto_bjet\nsf_sr2_bjet = \"(weightSBTag1p_SF * weightHBTag0_SF)\"\nsf_cr1_bjet = sf_veto_bjet\nsf_cr2_bjet = \"(weightSBTag1p_SF * weightHBTag0_SF)\" #\"( (nBSoftJet>=1) && (nBHardJet==0) )\"\nsf_crtt1_bjet = \"(weightSBTag0_SF * weightHBTag1_SF)\" #\"( (nBSoftJet==0) && (nBHardJet==1) )\"\nsf_crtt2_bjet = \"(weightBTag2p_SF * weightHBTag1p_SF)\"#\"( (nBJet>=2) && (nBHardJet>=1) )\"\n\n\n\nbtag_to_sf = {\n btag_veto_soft_bjet : sf_veto_soft_bjet , \n btag_one_soft_bjet : sf_one_soft_bjet , \n btag_one_or_more_soft_bjet : sf_one_or_more_soft_bjet , \n btag_veto_hard_bjet : sf_veto_hard_bjet , \n btag_one_hard_bjet : sf_one_hard_bjet , \n btag_one_or_more_hard_bjet : sf_one_or_more_hard_bjet , \n btag_veto_bjet : sf_veto_bjet , \n btag_one_bjet : sf_one_bjet , \n btag_one_or_more_bjet : sf_one_or_more_bjet ,\n btag_two_or_more_bjet : sf_two_or_more_bjet , \n\n btag_sr1_bjet : sf_sr1_bjet , \n btag_sr2_bjet : sf_sr2_bjet , \n btag_cr1_bjet : sf_cr1_bjet , \n btag_cr2_bjet : sf_cr2_bjet , \n btag_crtt1_bjet : sf_crtt1_bjet , \n btag_crtt2_bjet : sf_crtt2_bjet , \n }\n \nsf_to_btag = dict( (reversed(item) for item in btag_to_sf.items() ) )\n\n\n\nbtag_to_weight_vars ={\n 'nBJet' : 'weightBTag%s_SF' , \n 'nBSoftJet' : 'weightSBTag%s_SF' , \n 'nBHardJet' : 'weightHBTag%s_SF' , \n }\n\nweight_to_btag_vars = dict( (reversed(item) for item in btag_to_weight_vars.items() ) )\n\n","sub_path":"DegenerateStopAnalysis/python/tools/btag_sf_map.py","file_name":"btag_sf_map.py","file_ext":"py","file_size_in_byte":3652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"471767995","text":"import re\nimport operator\n\nfile_default=\"log.txt\"\n\ntry:\n print(\"Leave empty for default\")\n file=input()\n if not file:\n raise ValueError()\nexcept ValueError:\n file=file_default\n\ndef ips(file):\n pattern=re.compile(r\"^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\")\n ip_dict={}\n for line in open(file, \"r\"):\n ipis=pattern.findall(line)\n for ip in ipis:\n if ip not in ip_dict:\n ip_dict[ip]=1\n else:\n ip_dict[ip]+=1\n sorted_ip = sorted(ip_dict.items(), key=operator.itemgetter(1))\n print(sorted_ip[:-11:-1])\n\nips(file)","sub_path":"task2/part4.py","file_name":"part4.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"465885073","text":"import numpy\nfrom array import array\nfrom random import random\n\n'''\na = numpy.arange(12) # Build and inspect a numpy.ndarray with integers 0 to 11.\nprint(\"a:\", a)\nprint(\"type(a):\", type(a))\nprint(a.shape) # Inspect the dimensions of the array: this is a one-dimensional, 12-element array.\na.shape = 3, 4 # Change the shape of the array, adding one dimension, then inspecting the result.\nprint(\"a:\", a)\nprint(\"a[2]\", a[2]) # Get row at index 2.\nprint(\"a[2, 1]\", a[2, 1]) # Get element at index 2, 1.\nprint\nprint(\"a[:, 1]\",a[:, 1]) # Get column at index 1.\nprint()\nprint(\"a.transpose():\", a.transpose()) # Create a new array by transposing (swapping columns with rows).\n'''\n\nfloats = array('f', (random() for i in range(10**7)))\nnumpy_array = numpy.array(floats)\nnumpy.savetxt('floats-10M-lines.txt', numpy_array)\n\nfloats = numpy.loadtxt('floats-10M-lines.txt') # Load 10 million floating-point numbers from a text file.\nprint(\"floats[-3:]\", floats[-3:]) # Use sequence slicing notation to inspect the last three numbers.\n\nfloats *= .5 # Multiply every element in the floats array by .5 and inspect the last three elements again.\nprint(\"floats[-3:]\", floats[-3:])\n\nfrom time import perf_counter as pc # Import the high-resolution performance measurement timer (available since Python 3.3).\nt0 = pc(); floats /= 3; print(\"pc() - t0:\", pc() - t0) # Divide every element by 3; the elapsed time for 10 million floats is less than 40 milliseconds\n\nnumpy.save('floats-10M', floats) # Save the array in a .npy binary file.\nfloats2 = numpy.load('floats-10M.npy', 'r+') # Load the data as a memory-mapped file into another array; this allows efficient\n # processing of slices of the array even if it does not fit entirely in memory.\nfloats2 *= 6\n\nprint(\"floats2[-3:]\", floats2[-3:]) # Inspect the last three elements after multiplying every element by 6.\n","sub_path":"DataStructures/NumPyStuff.py","file_name":"NumPyStuff.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"126480649","text":"import torchvision.models as models\nimport torch.nn as nn\nimport torch\nfrom torchvision.models.inception import Inception3\n\n# class InceptionV3(nn.Module):\n# def __init__(self, num_classes, aux_logits=True, transform_input=False):\n# super(InceptionV3, self).__init__()\n# model = Inception3(num_classes=num_classes, aux_logits=aux_logits,transform_input=transform_input)\n# self.model = model\n#\n# def forward(self, x):\n# x = self.model(x)\n# return x\n\nclass InceptionV3(nn.Module):\n def __init__(self, num_classes, aux_logits=False, transform_input=False):\n super(InceptionV3, self).__init__()\n model = models.inception_v3(pretrained=True)\n num_ftrs = model.AuxLogits.fc.in_features\n model.AuxLogits.fc = nn.Linear(num_ftrs,num_classes)\n num_ftrs = model.fc.in_features\n model.fc = nn.Linear(num_ftrs,num_classes)\n# model = Inception3(num_classes=num_classes, aux_logits=aux_logits,transform_input=transform_input)\n self.model = model\n\n def forward(self, x):\n x = self.model(x)\n return x\n","sub_path":"Messidor/networks.py","file_name":"networks.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"27780749","text":"import argparse\nimport numpy as np \nimport matplotlib.pyplot as plt\nimport networkx as nx\nfrom numpy import *\nimport grnular.data.gen_data as gen_data\nfrom grnular.utils.metrics import report_metrics\nimport sys, copy, pickle\nfrom arboreto.algo import grnboost2\nfrom arboreto.utils import load_tf_names\nimport pandas as pd\nimport sklearn \nTRAIN=True\n\nparser = argparse.ArgumentParser(description='Classification of different cell types as well as recovering the gene regulatory network of RNA seq data: using SERGIO simulator for training')\n#****************** general parameters\nparser.add_argument('--K_train', type=int, default=5, #2, #1000,\n help='Num of training examples for a fixed D')\nparser.add_argument('--K_valid', type=int, default=5, #5, #2, #1000,\n help='Number of valid examples for a fixed D ')\nparser.add_argument('--K_test', type=int, default=100, #100, #10,\n help='Number of testing examples for a fixed D')\nparser.add_argument('--D', type=int, default=100, #1000,\n help='Number of genes ')\nparser.add_argument('--C', type=int, default=9,\n help='different cell types, target variable')\nparser.add_argument('--sparsity', type=float, default=0.3, #0.2,\n help='sparsity of erdos-renyi graph')\nparser.add_argument('--DATA_METHOD', type=str, default='sim_expt', #'syn_expt2', #'sim_expt1', \n help='expt details in draft: random/syn_same_precision, sim_expt1=DS1 and sim_expt2=Custom, sim_expt3=GRN')\nparser.add_argument('--DATA_TYPE', type=str, default='clean', #'case2', \n help='expt details in draft: sim_exp1 clean/noisy')\n\nparser.add_argument('--USE_TF_NAMES', type=str, default='no',# 'yes' \n help='use transcription factors for grnboost2: will use in general')\n\n# SERGIO simulator parameters\nparser.add_argument('--DATA_NAME', type=str, default='CUSTOM', #'DS1', \n help='expt details in draft: DS1, DS2, DS3, CUSTOM')\nparser.add_argument('--POINTS_PER_CLASS', type=int, default=2000,# NOTE: try 2000\n help='cells per class type')\nparser.add_argument('--SAMPLING_STATE', type=int, default=1, #1,\n help='num of simulations')\nparser.add_argument('--NOISE_PARAMS', type=float, default=0.1, #1,\n help='num of noise params')\nparser.add_argument('--DECAYS', type=float, default=5, #0.8,#0.8,\n help='decay params')\nparser.add_argument('--NOISE_TYPE', type=str, default='dpd', #'dpd', \n help='different noise types: \"dpd\", “sp”, “spd”')\nparser.add_argument('--SHARED_COOP_STATE', type=int, default=2, #1,\n help='shared coop state')\nparser.add_argument('--pcr_low_min', type=float, default=0.2, #1,\n help='production cell rate: low expression range')\nparser.add_argument('--pcr_low_max', type=float, default=0.5, #1,\n help='production cell rate: low expression range')\nparser.add_argument('--pcr_high_min', type=float, default=0.7, #1,\n help='production cell rate: high expression range')\nparser.add_argument('--pcr_high_max', type=float, default=1, #1,\n help='production cell rate: high expression range')\nparser.add_argument('--Kij_min', type=float, default=1, #1,\n help='Interaction strengths Kij min')\nparser.add_argument('--Kij_max', type=float, default=5, #1,\n help='Interaction strengths Kij max')\nparser.add_argument('--ratio_MR', type=float, default=0.1, #1,\n help='number of master regulators ~ ratio_MR * D')\nparser.add_argument('--connect_TF_prob', type=float, default=0.2, #1,\n help='probability of connecting master regulators')\n# SERGIO technical noise parameters\nparser.add_argument('--ADD_TECHNICAL_NOISE', type=str, 
default='yes',# 'no' \n help='add technical noise on the saved clean data')\nparser.add_argument('--dropout_shape', type=float, default=6.5, #1,\n help='SERGIO dropout param: shape, higher -> less dropout')\nparser.add_argument('--dropout_percentile', type=float, default=82, #1,\n help='SERGIO dropout param: percentile, lower -> less dropout')\n\nargs = parser.parse_args()\n\ndef get_args_str(dict1):\n args_str = ''\n for i, (k, v) in enumerate(dict1.items()):\n# print(k , item)\n if k in ['C', 'GLAD_LOSS', 'MODEL_SELECT', 'SUB_METHOD']:\n args_str = args_str+str(k)+str(v)+'_'\n return args_str\n\nargs_str = get_args_str(vars(args))\n\ndef get_res_filepath(name):\n FILE_NUM = str(np.random.randint(10000))\n savepath = 'simulator/BEELINE-data/my_pred_networks/'\n filepath = savepath +name+'_beeline_pred_tag'+str(FILE_NUM)+'.pickle'\n return filepath\n\n\ndef fit_grnboost2(data, PREDICT_TF=False, BEELINE=False):\n EARLY_BREAK = 9 \n print('FITTING GRNBOOST2')\n # #############################################################################\n res = []\n typeS = 'mean'\n print('Using ', typeS, ' scaling')\n for i, d in enumerate(data):\n X, y, theta_true, master_regulators = d\n Xc = normalizing_data(X, typeS)\n print('\\n grnboost2: TRAIN data batch : ', i, ' total points = ', X.shape[0])\n if args.USE_TF_NAMES=='yes' and PREDICT_TF:\n res.append(helper_grnboost2(Xc, theta_true, tf_names = master_regulators))\n\n else:\n # NOTE: breaking early as tf=None takes lot of time\n if i > EARLY_BREAK:\n print('Breaking at i = ', i, ' as tf=None case takes a lot of time')\n break\n res.append(helper_grnboost2(Xc, theta_true))\n\n\n res_mean = np.mean(np.array(res).astype(np.float64), 0)\n res_std = np.std(np.array(res).astype(np.float64), 0)\n res_mean = [\"%.3f\" %x for x in res_mean]\n res_std = [\"%.3f\" %x for x in res_std]\n res_dict = {} # name: [mean, std]\n for i, _name in enumerate(['FDR', 'TPR', 'FPR', 'SHD', 'nnz_true', 'nnz_pred', 'precision', 'recall', 'Fb', 'aupr', 'auc']): # dictionary\n res_dict[_name]= [res_mean[i], res_std[i]]#mean std\n if PREDICT_TF:\n print('\\nAvg GRNBOOST2-TF: FDR, ,TPR, ,FPR, ,SHD, ,nnz_true, ,nnz_pred, ,precision, ,recall, ,Fb, ,aupr, ,auc, ')\n else:\n print('\\nAvg GRNBOOST2: FDR, ,TPR, ,FPR, ,SHD, ,nnz_true, ,nnz_pred, ,precision, ,recall, ,Fb, ,aupr, ,auc, ')\n mean_std = [[rm, rs] for rm, rs in zip(res_mean, res_std)]\n flat_list = [item for ms in mean_std for item in ms]\n print('%s' % ', '.join(map(str, flat_list))) \n return\n\n\ndef normalizing_data(X, typeS='log'):\n if typeS == 'mean':\n #print('Centering and scaling the input data...')\n scaledX = X - X.mean(axis=0)\n scaledX = scaledX/X.std(axis=0)\n # NOTE: replacing all nan's by 0, as sometimes in dropout the complete column\n # goes to zero\n scaledX = convert_nans_to_zeros(scaledX)\n elif typeS == 'log':\n scaledX = np.log(X+1)\n else:\n print('Check the valid scaling')\n return scaledX\n\n\ndef convert_nans_to_zeros(X):\n where_are_nans = isnan(X)\n X[where_are_nans] = 0\n return X\n\n\ndef helper_grnboost2(X, theta_true, tf_names=[], BEELINE=False):#_string\n print('Running GRNBoost2 method', X.shape)\n theta_true = theta_true.real\n ex_matrix = pd.DataFrame(X)\n if args.USE_TF_NAMES == 'yes' and len(tf_names)!=0:\n tf_names = ['G'+str(n) for n in tf_names]\n else:\n tf_names = None\n \n gene_names = ['G'+str(c) for c in ex_matrix.columns]\n ex_matrix.columns = gene_names\n network = grnboost2(expression_data=ex_matrix, gene_names=gene_names, tf_names=tf_names)#, verbose=True)\n pred_edges = 
np.array(network[['TF', 'target', 'importance']])\n G_pred = nx.Graph()\n# G_pred.add_nodes_from(['G'+str(n) for n in range(args.D)])\n G_pred.add_nodes_from(['G'+str(n) for n in range(len(gene_names))])\n G_pred.add_weighted_edges_from(pred_edges)\n# pred_theta = nx.adj_matrix(G_pred).todense() + np.eye(args.D)\n pred_theta = nx.adj_matrix(G_pred).todense() + np.eye(len(gene_names))\n recovery_metrics = report_metrics(np.array(theta_true), np.array(pred_theta))\n print('GRNBOOST2: FDR, TPR, FPR, SHD, nnz_true, nnz_pred, precision, recall, Fb, aupr, auc')\n print('GRNBOOST2: Recovery of true theta: ', *np.around(recovery_metrics, 3))\n \n res = list(recovery_metrics)\n return res\n\n\ndef get_filepath():\n dict1 = vars(args)\n filename = ''\n abbrv_dict = {'K_train': 'KTr', 'K_valid': 'KVa', 'K_test': 'KTe', 'D': 'D', 'C':'C',\n 'sparsity': 'Sp', 'DATA_TYPE':'Dt', 'POINTS_PER_CLASS': 'ppc',\n 'SAMPLING_STATE': 'SS', 'NOISE_PARAMS': 'NP', 'DECAYS': 'De',\n 'NOISE_TYPE': 'NT', 'SHARED_COOP_STATE': 'SCS', 'pcr_low_min': 'pcrln',\n 'pcr_low_max': 'pcrlx', 'pcr_high_min': 'pcrhn', 'pcr_high_max': 'pcrhx',\n 'Kij_min': 'kmin', 'Kij_max': 'kmax', 'ratio_MR': 'rMR', \n 'connect_TF_prob': 'TFp'}\n for k in abbrv_dict.keys():\n v = dict1[k]\n filename = filename+str(abbrv_dict[k])+str(v)+'_'\n\n SAVEPATH = 'grnular/data/saved_data/'\n FILEPATH = SAVEPATH + filename + '.pickle'\n print('Filepath: ', FILEPATH)\n return FILEPATH\n\n\ndef load_saved_data():\n FILEPATH = get_filepath()\n with open(FILEPATH, 'rb') as handle:\n data = pickle.load(handle)\n return data\n\n\ndef main():\n print(args)\n print('\\nReading the input data: Single cell RNA: M(samples) x D(genes) & corresponding C(targets)')\n train_data, valid_data, test_data = load_saved_data()\n if args.ADD_TECHNICAL_NOISE == 'yes':\n print('adding technical noise')\n# train_data = gen_data.add_technical_noise(args, train_data)\n# valid_data = gen_data.add_technical_noise(args, valid_data)\n test_data = gen_data.add_technical_noise(args, test_data)\n\n # Fitting a grnboost2 \n fit_grnboost2(test_data)\n print('Using TF NAMES')\n fit_grnboost2(test_data, PREDICT_TF=True)\n print('\\nExpt Done')\n return \n\nif __name__==\"__main__\":\n main()\n","sub_path":"baselines/grnboost2/main_grnboost2.py","file_name":"main_grnboost2.py","file_ext":"py","file_size_in_byte":10195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"16909865","text":"import re\n\ndef initDictCounter():\n\tslovar = {};\n\tcount = 0;\n\tfilePath = input(\"Enter result file: \");\n\tfile = open(filePath, 'r');\n\tfor line in file:\n\t\tbytesTuple = re.findall(r'\\d+', line);\n\t\tslovar.update({(bytesTuple[0], bytesTuple[1]): bytesTuple[2]});\n\t\tcount += int(bytesTuple[2]);\n\tfile.close();\n\treturn (slovar, count);\n\ndef splitString():\n\tstrings = [];\n\tfilePath = input(\"Enter file to check: \");\n\tl = input(\"Enter l: \");\n\tfile = open(filePath, 'rb');\n\tstring = file.read();\n\tfor index in range(0,len(string)):\n\t\tstrings.append(string[index:index + int(l)]);\n\tfile.close;\n\treturn strings;\n\ndef p1(x0, table, count):\n count_x0=0;\n for i in range(0, 256):\n count_x0 += int(table[tuple([str(x0), str(i)])]);\n return count_x0 / count;\n\ndef p2(x0,x1, table, count):\n count_x0=0;\n for i in range(0, 256):\n count_x0 += int(table[tuple([str(x0),str(i)])]);\n return int(table[tuple([str(x0), str(i)])]) / count_x0;\n\ndef calculations(table, count, strings):\n\topt = [];\n\tresult = 1;\n\tfor string in strings:\n\t\tfor i in range(len(string)):\n\t\t\tif i == 0:\n\t\t\t\tresult *= p1(string[i], table, count);\n\t\t\telse:\n\t\t\t\tresult *= p2(string[i-1],strings[i], table, count);\n\t\topt.append(result);\n\t\tresult = 1;\n\treturn opt;\n\ndef main():\n\tresultHex = \"\";\n\tresultChar = \"\";\n\tslovar = initDictCounter();\n\ttable = slovar[0];\n\tcount = slovar[1];\n\tstrings = splitString();\n\topt = calculations(table, count, strings);\n\testimate = min(opt);\n\tresultBytes = strings[opt.index(estimate)];\n\tprint(\"The upper estimate: \" + str(estimate));\n\ttemp = list(resultBytes)\n\tfor i in range(len(temp)):\n\t\tif temp[i] != 0:\n\t\t\tresultChar += chr(temp[i]);\n\t\telse:\n\t\t\tresultChar += \".\";\n\tprint(\"String: \" + resultChar);\n\tprint(\"Hex: \" + resultBytes.hex());\n\n\nif __name__ == \"__main__\":\n\tmain()","sub_path":"definition_signature.py","file_name":"definition_signature.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"300886277","text":"# -*- coding: utf-8 -*-\n\nfrom django.core.exceptions import ObjectDoesNotExist\n\ndef model_to_dict(obj, exclude=[]):\n '''\n serialize model object to dict with related objects\n\n '''\n tree = {}\n for field_name in obj._meta.get_all_field_names():\n try:\n field = getattr(obj, field_name)\n except (ObjectDoesNotExist, AttributeError):\n continue\n \n if field_name in exclude:\n continue\n \n field = obj._meta.get_field_by_name(field_name)[0]\n if field.__class__.__name__ in exclude:\n continue\n \n value = getattr(obj, field_name)\n \n if value:\n tree[field_name] = value\n return tree\n\n\nfrom django.db.models.loading import get_model\n\ndef copy_model_object(obj, \\\n copy_related=True, copy_self_refs=False, \\\n copy_rels=[],\\\n exclude=None, defaults=None, values=None, \\\n parent_obj=None, parent_field=None):\n \"\"\"\n exclude:\n {\n '_':[], # self fields list\n 'somesubmodel_set':{'_': []}, # named as 'related_set'\n 'anothersubmodel_set':{'_': []}\n }\n \n values: # ovverides existing values\n as exclude\n defaults:\n as values\n \"\"\"\n # Инстанцируем новую модель\n model = get_model(obj._meta.app_label, obj._meta.module_name)\n new_model = model()\n\n # Получаем список полей, исключаем ненужные\n exclude = exclude or { '_': [] }\n\n # Автоматически исключаем поле с ключем\n pk_field = obj._meta.pk.name\n if pk_field not in exclude['_']:\n exclude['_'].append(pk_field)\n all_fields = [f.name for f in model._meta.fields]\n fields = list(set(all_fields) ^ set(exclude['_']))\n \n # Задаем аттрибуты полей и сохраняем\n defaults = defaults or { '_': {} }\n values = values or { '_': {} }\n \n # Принудительное заполнение родительского аттрибута для связанных моделей\n if parent_field and parent_obj:\n values['_'][parent_field] = parent_obj\n \n for field_name in fields:\n v = getattr(obj, field_name)\n \n if field_name in values['_']:\n v = values['_'][field_name]\n elif field_name in defaults['_'] and not v:\n v = defaults['_'][field_name]\n \n setattr(new_model, field_name, v)\n \n new_model.save()\n\n m2m_fields = model._meta.get_m2m_with_model()\n for field, fl in m2m_fields:\n field_name = field.name\n v = getattr(obj, field_name)\n v = v.all()\n if field_name in values['_']:\n v = values['_'][field_name]\n elif field_name in defaults['_'] and not v:\n v = defaults['_'][field_name]\n \n m2m_field = getattr(new_model, field_name)\n m2m_field.clear()\n m2m_field.add(*v)\n \n # Если стоит опция копирования дочерних объектов, запускаем обработку связей\n if copy_related:\n related_sets = model._meta.get_all_related_objects()\n for rel in related_sets:\n \n # пропускаем ссылки на собственную модель, если такая опция выключена в настройках\n if not copy_self_refs and rel.model==model:\n continue\n \n rel_name = rel.get_accessor_name()\n \n # если задан список связанных объектов и имя текущего сэта не находится в нём, то пропускаем\n if copy_rels and rel_name not in copy_rels:\n continue\n \n related_set = getattr(obj, rel_name)\n for related_obj in related_set.all():\n params = {\n 'obj':related_obj,\n 'copy_related':copy_related,\n 'copy_self_refs':copy_self_refs,\n 'exclude':rel_name in exclude and exclude[rel_name] or None,\n 'defaults':rel_name in defaults and defaults[rel_name] or None,\n 'values':rel_name in values and values[rel_name] or None,\n 'parent_obj':new_model,\n 'parent_field':rel.field.name\n }\n copy_model_object(**params)\n \n return 
new_model","sub_path":"apps/core/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"601125383","text":"# 先建立图,然后递归解决。注意queries中的元素不在dict中的情况,例如[x,x] 返回-1,而不是1\n\nfrom typing import List\nimport collections\nclass Solution:\n def calcEquation(self, equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]:\n def dfs(start, end, visited):\n if start == end and start in graph:\n return 1.0\n \n if start in visited:\n return -1.0\n \n visited.add(start)\n for node, val in graph[start]:\n tmp = dfs(node, end, visited)\n if tmp > 0:\n return val * tmp\n \n return -1.0\n \n graph = collections.defaultdict(list)\n for ops, val in zip(equations, values):\n op1, op2 = ops\n graph[op1].append([op2, val])\n graph[op2].append([op1, 1.0 / val])\n \n res = []\n for first, second in queries:\n res.append(dfs(first, second, set()))\n \n return res","sub_path":"leetcode/399-Evaluate-Division.py","file_name":"399-Evaluate-Division.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"528123792","text":"from django import forms\nfrom .models import curso\n\nclass tipocursoForm(forms.Form):\n at = {'class':'form-control', 'placeholder':'ingresar tipo de curso'}\n descripciontipocurso = forms.CharField(label='Descripción tipo curso', max_length=15, widget=forms.TextInput(attrs=at))\n\nclass cursoForm(forms.ModelForm):\n class Meta:\n model = curso\n fields = '__all__'\n dc = {'class':'form-control', 'placeholder':'ingresar descripción de curso'}\n tp = {'class':'form-control'}\n cr = {'class':'form-control', 'placeholder': 'ingrese número de créditos'}\n c = ciclos = (\n ('I', 'I'), ('II', 'II'), ('III', 'III'), ('IV','IV'),\n ('V', 'V'), ('VI', 'VI'), ('VII', 'VII'), ('VIII','VIII'),\n ('IX', 'IX'), ('X', 'X'), ('E', 'E'),\n )\n\n exclude = {'estadocurso'}\n\n labels = {\n 'descripcioncurso' : ('Descripcion del curso'),\n 'tipocurso' : ('Tipo curso'),\n 'creditos' : ('Créditos'),\n }\n widgets = {\n 'descripcioncurso' : forms.TextInput(attrs=dc),\n 'tipocurso' : forms.Select(attrs=tp),\n 'creditos' : forms.NumberInput(attrs=cr),\n 'ciclo' : forms.Select(attrs=tp, choices=c)\n }\n help_texts = {\n 'creditos': ('** No escribir mas de 5 créditos.
'),\n 'ciclo' : ('** Seleccione un ciclo.
'),\n }\n error_messages = {\n 'creditos': {\n 'max_length': (\"Crédito Muy largo.\"),\n },\n }\n","sub_path":"app/sistema/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"25251883","text":"desired = {\n 'deviceName': 'Android Emulator',\n 'automationName': 'appium',\n 'platformName': 'Android',\n 'platformVersion': '6.0.1',\n 'appPackage': 'com.zjhz.erpt',\n 'appActivity': '.logic.launch.LoginActivity',\n 'unid': '127.0.0.1:7555',\n 'noReset': 'True',\n 'unicodeKeyboary':'True',\n 'resetKeyboard':'True'\n}\n\n\nfind_ID = \"3\" #搜索的陌集号\n\nimport yaml\nimport log_config\n\nlogger = log_config.handle()\n\nwith open('devices_config') as f:\n data = yaml.load(f) #读取\n\n logger.info('开始查询。。。。。')\n print(data['appPackage']) #查询\n\n data['unid'] = \"46fe7912\" #修改\n logger.info('结束,,,,,,')\n applictions = {'app':'C:\\\\Users\\\\admin\\\\Documents\\\\app-release(15).apk'}\n yaml.dump(applictions) #数据转化","sub_path":"appium_project/Public/devices_config.py","file_name":"devices_config.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"324012589","text":"import cv2\nfrom os import environ\n\ndetect_face = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\nrecognizer = cv2.face.EigenFaceRecognizer_create()\nrecognizer.read('classifierEigen.yml')\n# recognizer.read('classifierEigenYale.yml')\nwidth, height = 220, 220\nfont = cv2.FONT_HERSHEY_COMPLEX_SMALL\n\ncamera = cv2.VideoCapture(environ['URL']) # First webcam available\n\nwhile True:\n conect, image = camera.read()\n grey_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n detected_faces = detect_face.detectMultiScale(\n grey_image, scaleFactor=1.5, minSize=(30, 30))\n\n for (x, y, w, h) in detected_faces:\n face_image = cv2.resize(grey_image[y:y + h, x:x + w], (width, height))\n cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)\n\n id, reliability = recognizer.predict(face_image)\n\n name = ''\n\n if id == 1:\n name = 'Juan'\n elif id == 2:\n name = 'Tamiris'\n else:\n name = 'Desconhecido'\n\n cv2.putText(image, name, (x, y + (h + 30)), font, 2, (0, 0, 255))\n cv2.putText(image, str(reliability),\n (x, y + (h + 50)), font, 1, (0, 0, 255))\n\n cv2.imshow('Face', image)\n if cv2.waitKey(1) == ord('q'):\n break\n\ncamera.release()\ncv2.destroyAllWindows()\n","sub_path":"recognition_eigenfaces.py","file_name":"recognition_eigenfaces.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"245602514","text":"from __future__ import print_function\r\n\r\nimport sys\r\nfrom operator import add\r\nfrom pyspark import SparkContext\r\nfrom csv import reader\r\n\r\nif __name__ == \"__main__\":\r\n\tsc = SparkContext()\r\n\t\r\n\tdef toCSVLine(data):\r\n\t\treturn ','.join(str(d) for d in data)\r\n \r\n\tlines = sc.textFile(sys.argv[1], 1)\r\n\r\n\tlines = lines.mapPartitions(lambda x: reader(x))\r\n\t\r\n\tlines = lines.map(lambda x: (x[1], x[2]))\r\n\t\r\n\theader = lines.first()\r\n\t\r\n\tcounts = lines.filter(lambda x: x != header) \\\r\n\t\t\t.map(lambda x: (x,1)).reduceByKey(add).sortByKey()\r\n\t\r\n\ttotal_offenses_borough = counts.map(toCSVLine)\r\n\t\r\n\ttotal_offenses_borough.saveAsTextFile('total_offenses_borough.csv')\t","sub_path":"total_offenses_borough.py","file_name":"total_offenses_borough.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"349196818","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/4/23 19:27\n# @Author : Alan\n# @Email : xiezhengwen2013@163.com\n# @File : model2.py\n# @Software: PyCharm\n\n\nimport tensorflow as tf\nfrom model_utils import *\nfrom tensorflow.contrib.layers import l2_regularizer, xavier_initializer\nfrom tensorflow.contrib.rnn import LSTMCell, GRUCell, DropoutWrapper\n\n\nclass CAM(object):\n def __init__(self, config):\n self.ques_len = config.ques_length\n self.ans_len = config.ans_length\n self.hidden_size = config.hidden_size\n self.output_size = config.output_size\n self.pos_weight = config.pos_weight\n self.learning_rate = config.learning_rate\n self.optimizer = config.optimizer\n self.l2_lambda = config.l2_lambda\n self.clip_value = config.clip_value\n self.embeddings = config.embeddings\n self.window_sizes = config.window_sizes\n self.n_filters = config.n_filters\n self.rnn_size = config.rnn_size\n\n self._placeholder_init_pointwise()\n self.initialize_weights()\n pred = self._build(self.embeddings)\n # 损失和精确度\n self.y_hat, self.total_loss= self._add_loss_op(pred)\n # 训练节点\n self.train_op = self._add_train_op(self.total_loss)\n\n def _placeholder_init_pointwise(self):\n self._ques = tf.placeholder(tf.int32, [None, self.ques_len], name='ques_point')\n self._ans = tf.placeholder(tf.int32, [None, self.ans_len], name='ans_point')\n self._ques_mask = tf.placeholder(tf.int32, [None], 'ques_mask')\n self._ans_mask = tf.placeholder(tf.int32, [None], 'ans_mask')\n self._y = tf.placeholder(tf.int32, [None])\n self.dropout_keep_prob = tf.placeholder(tf.float32, name=\"dropout_keep_prob\")\n self.batch_size, self.list_size = tf.shape(self._ans)[0], tf.shape(self._ans)[1]\n\n def initialize_weights(self):\n \"\"\"Global initialization of weights for the representation layer\n\n \"\"\"\n # preprocessing\n self.W_i = weight_variable('W_i', [self.hidden_size, self.hidden_size])\n self.W_l1 = weight_variable('W_l1', [5 * self.hidden_size, self.hidden_size])\n self.W_l2 = weight_variable('W_l2', [self.hidden_size, 2])\n\n self.b_i = bias_variable('b_i', [self.hidden_size])\n self.b_l1 = bias_variable('b_l1', [self.hidden_size])\n self.b_l2 = bias_variable('b_l2', [2])\n\n\n def proj_layer(self, seq, out_size, name, reuse=None):\n \"\"\"\n 投影层\n \"\"\"\n assert len(seq.get_shape()) == 3\n out1 = self.mlp(seq, out_size, 1,\n tf.nn.sigmoid, name + '_sigmoid', reuse=reuse)\n out2 = self.mlp(seq, out_size, 1,\n tf.nn.tanh, name + '_tanh', reuse=reuse)\n out = out1 * out2\n return out\n\n def biLSTMBlock(self, inputs, num_units, scope, rnn_type, dropout_keep_prob, seq_len=None, isReuse=None):\n with tf.variable_scope(scope, reuse=isReuse):\n if rnn_type == 'LSTM':\n lstmCell = LSTMCell(num_units=num_units)\n elif rnn_type == 'GRU':\n lstmCell = GRUCell(num_units=num_units)\n dropLSTMCell = lambda: DropoutWrapper(lstmCell, output_keep_prob=dropout_keep_prob)\n fwLSTMCell, bwLSTMCell = dropLSTMCell(), dropLSTMCell()\n output = tf.nn.bidirectional_dynamic_rnn(cell_fw=fwLSTMCell,\n cell_bw=bwLSTMCell,\n inputs=inputs,\n sequence_length=seq_len,\n dtype=tf.float32)\n return output\n\n def _preprocess_layer(self, question, answer, out_size):\n # 对应于原文公式(1),得到Q,A\n with tf.variable_scope('context_encoding') as scope:\n q_encode = self.proj_layer(question, out_size, 'proj_layer', reuse=None)\n a_encode = self.proj_layer(answer, out_size, 'proj_layer', reuse=True)\n return q_encode, a_encode\n\n def _preprocess_layer2(self, question, answer, out_size, reuse=None):\n # 不共享参数\n out1 = self.mlp(question, 
out_size, 1,\n tf.nn.relu, 'proj_layer' + 'ques_relu', reuse=reuse)\n out2 = self.mlp(answer, out_size, 1,\n tf.nn.relu, 'proj_layer' + 'ans_relu', reuse=reuse)\n return out1, out2\n\n def _preprocess_layer3(self, question, answer, out_size):\n # 共享参数\n out1 = self.mlp(question, out_size, 1,\n tf.nn.relu, 'proj_layer' + 'relu', reuse=None)\n out2 = self.mlp(answer, out_size, 1,\n tf.nn.relu, 'proj_layer' + 'relu', reuse=True)\n return out1, out2\n\n def _preprocess_layer4(self, question, answer, out_size):\n # 共享参数\n # bilstm\n rnn_outputs_left, final_state_left = self.biLSTMBlock(question, out_size, 'R', 'LSTM',\n self.dropout_keep_prob, self._ques_mask)\n rnn_outputs_right, final_state_right =self.biLSTMBlock(answer, out_size, 'R', 'LSTM',\n self.dropout_keep_prob, self._ans_mask, isReuse=True)\n rnn_q = tf.concat(rnn_outputs_left, axis=2) # (bz*ls, q_len, 2rz)\n rnn_a = tf.concat(rnn_outputs_right, axis=2) # (bz*ls, a_len, 2rz)\n print_shape('rnn_q', rnn_q)\n print_shape('rnn_a', rnn_a)\n return rnn_q, rnn_a\n\n def _attention_layer(self, question, answer, question_mask, answer_mask):\n \"\"\"\n q: [batch_size, q_length, represent_dim]\n a: [batch_size, a_length, represent_dim]\n q_mask : [bz, q_len] -> 3d\n a_mask : [bz, a_len] -> 3d\n \"\"\"\n question = tf.reshape(question, [-1, self.hidden_size])\n question = tf.nn.xw_plus_b(question, self.W_i, self.b_i)\n question = tf.reshape(question, [-1, self.ques_len, self.hidden_size])\n att_inner_product = tf.matmul(question, tf.transpose(answer, (0, 2, 1))) # [batch_size, q_length, a_length]\n question_mask = tf.expand_dims(question_mask, axis=-1)\n answer_mask = tf.expand_dims(answer_mask, axis=1)\n q_softmax = attention_softmax_3d_align(att_inner_product, question_mask, dim=1)\n # we set all items to zero that correspond to zero-padded positions of the answer\n G_zero = tf.multiply(q_softmax, answer_mask)\n output_a = tf.matmul(tf.transpose(G_zero, [0, 2, 1]), question)\n\n a_softmax = attention_softmax_3d_align(att_inner_product, answer_mask, dim=-1)\n G_zero_ = tf.multiply(a_softmax, question_mask)\n output_q = tf.matmul(G_zero_, answer) # [batch_size, q_length, 2hz]\n return output_a, output_q\n\n def _attention_layer2(self, question, answer, question_mask, answer_mask):\n \"\"\"\n q: [batch_size, q_length, represent_dim]\n a: [batch_size, a_length, represent_dim]\n q_mask : [bz, q_len] -> 3d\n a_mask : [bz, a_len] -> 3d\n \"\"\"\n question = tf.reshape(question, [-1, self.hidden_size])\n question = tf.nn.xw_plus_b(question, self.W_i, self.b_i)\n question = tf.reshape(question, [-1, self.ques_len, self.hidden_size])\n question = tf.multiply(question, tf.expand_dims(question_mask, axis=-1))\n answer = tf.multiply(answer, tf.expand_dims(answer_mask, axis=-1))\n\n matrix = tf.matmul(question, tf.transpose(answer, (0, 2, 1))) # [batch_size, q_length, a_length]\n matrix = tf.multiply(matrix, tf.expand_dims(question_mask, -1))\n matrix = tf.multiply(matrix, tf.expand_dims(answer_mask, 1))\n\n q_softmax = tf.nn.softmax(matrix, axis=1)\n a_softmax = tf.nn.softmax(matrix, axis=-1)\n q_softmax = tf.multiply(q_softmax, tf.expand_dims(question_mask, axis=-1))\n a_softmax = tf.multiply(a_softmax, tf.expand_dims(answer_mask, axis=1))\n\n a_align = tf.matmul(tf.transpose(q_softmax, [0, 2, 1]), question)\n q_align = tf.matmul(a_softmax, answer)\n return a_align, q_align\n\n def _compare_layer(self, q, q_align, a, a_align, comp_type):\n \"\"\"\n a: [batch_size, a_length, 2hz]\n a_att: [batch_size, a_length, 2hz]\n fuse_A: [batch_size, a_length, 2hz]\n 
fuse_Q: [batch_size, q_length, 2hz]\n \"\"\"\n size = q.get_shape()[-1]\n if comp_type == 'Gate_fuse':\n fuse_a = tf.concat([a, a_align, a * a_align, a - a_align], axis=2)\n fuse_q = tf.concat([q, q_align, q * q_align, q - q_align], axis=2)\n fuse_a_sigmoid = self.mlp(fuse_a, size, 1, tf.nn.sigmoid, 'fuse_a_sigmoid',\n use_dropout=False, bias=True)\n fuse_q_sigmoid = self.mlp(fuse_q, size, 1, tf.nn.sigmoid, 'fuse_q_sigmoid',\n use_dropout=False, bias=True)\n fuse_a_tanh = self.mlp(fuse_a, size, 1, tf.nn.tanh, 'fuse_a_tanh',\n use_dropout=False, bias=True)\n fuse_q_tanh = self.mlp(fuse_q, size, 1, tf.nn.tanh, 'fuse_q_tanh',\n use_dropout=False, bias=True)\n fuse_A = fuse_a_sigmoid * fuse_a_tanh + a - fuse_a_sigmoid*a\n fuse_Q = fuse_q_sigmoid * fuse_q_tanh + q - fuse_q_sigmoid*q\n elif comp_type == 'simple_fuse':\n fuse_A = tf.concat([a, a_align, a * a_align, a - a_align], axis=2)\n fuse_Q = tf.concat([q, q_align, q * q_align, q - q_align], axis=2)\n elif comp_type == 'mul':\n fuse_A = a * a_align\n fuse_Q = q * q_align\n else:\n raise ValueError('{} method is not implemented!'.format(comp_type))\n return fuse_A, fuse_Q\n\n def _cnn_layer(self, input, mask, name, isreuse=False, dim = -1):\n \"\"\"\n :param\n :return:\n \"\"\"\n # tf.layers.Conv1D(inputs, filters, kernel_size, strides=1)\n # self.n_filters一般指词嵌入的维度\n with tf.variable_scope(name, reuse=isreuse) as scope1:\n all = []\n for i in range(len(self.window_sizes)):\n cnn_out = tf.layers.conv1d(input, self.n_filters, self.window_sizes[i], padding='same',\n activation=tf.nn.relu, name='q_conv_' + str(i))\n all.append(cnn_out)\n cnn_outs = tf.concat(all, axis=-1)\n cnn_outs_padded = tf.multiply(cnn_outs, tf.expand_dims(mask, axis=dim))\n R_max = maxpool(cnn_outs_padded)\n R_men = meanpool(cnn_outs_padded)\n print_shape('R', R_max)\n return R_max, R_men\n\n def _build(self, embeddings, encoder_type = 'sigmoid'):\n self.Embedding = tf.Variable(tf.to_float(embeddings), trainable=False, name='Embedding')\n self.q_embed = tf.nn.dropout(tf.nn.embedding_lookup(self.Embedding, self._ques),keep_prob=self.dropout_keep_prob)\n self.a_embed = tf.nn.dropout(tf.nn.embedding_lookup(self.Embedding, self._ans),keep_prob=self.dropout_keep_prob)\n q_mask = tf.sequence_mask(self._ques_mask, self.ques_len, dtype=tf.float32)\n a_mask = tf.sequence_mask(self._ans_mask, self.ans_len, dtype=tf.float32)\n\n # ~~ Preprocessing\n if encoder_type == 'lstm':\n question, answer = self._preprocess_layer4(self.q_embed, self.a_embed, self.rnn_size) # (bz, len, 2hz)\n elif encoder_type == 'relu_share':\n question, answer = self._preprocess_layer3(self.q_embed, self.a_embed, self.rnn_size) # (bz, len, hz)\n elif encoder_type == 'relu_noshare':\n question, answer = self._preprocess_layer2(self.q_embed, self.a_embed, self.rnn_size) # (bz, len, hz)\n elif encoder_type == 'sigmoid':\n question, answer = self._preprocess_layer(self.q_embed, self.a_embed, self.hidden_size) # (bz, len, hz)\n\n # ~~ Attention and Comparison\n algin_a, algin_q = self._attention_layer2(question, answer, q_mask, a_mask)\n fuse_A, fuse_Q = self._compare_layer(question, algin_q, answer, algin_a, comp_type='mul')\n\n # ~~ Aggregation\n # self, input, mask, name, isreuse=False, dim = 1\n R_max_a, R_men_a = self._cnn_layer(fuse_A, a_mask, 'cnn_answer')\n # R_max_q, R_men_q = self._cnn_layer(fuse_Q, q_mask, 'cnn_question')\n\n # ~~ Predict\n # M = tf.concat([R_men_q, R_max_q, R_men_a, R_max_a], axis=-1) # (?, 1, 16*2hz)\n M = R_max_a\n\n fc1 = tf.nn.xw_plus_b(M, self.W_l1, self.b_l1)\n fc1 = 
tf.nn.tanh(fc1)\n fc2 = tf.nn.xw_plus_b(fc1, self.W_l2, self.b_l2)\n predict = fc2\n print_shape('predict:', predict)\n return predict\n\n def _add_loss_op(self, pred, l2_lambda=0.00001):\n \"\"\"\n 损失节点\n \"\"\"\n y_hat = tf.nn.softmax(pred, dim=-1)\n loss = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(self._y, pred))\n tf.add_to_collection('total_loss', loss)\n total_loss = tf.add_n(tf.get_collection('total_loss'))\n reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n l2_loss = sum(reg_losses) * l2_lambda\n pointwise_loss = total_loss + l2_loss\n tf.summary.scalar('pointwise_loss', pointwise_loss)\n self.summary_op = tf.summary.merge_all()\n return y_hat, pointwise_loss\n\n def _add_train_op(self, loss):\n \"\"\"\n 训练节点\n \"\"\"\n with tf.name_scope('train_op'):\n # 记录训练步骤\n self.global_step = tf.Variable(0, name='global_step', trainable=False)\n opt = tf.train.AdamOptimizer(self.learning_rate)\n # train_op = opt.minimize(loss, self.global_step)\n gradients, v = zip(*opt.compute_gradients(loss))\n clip_gradients = gradients\n if self.clip_value is not None:\n clip_gradients, _ = tf.clip_by_global_norm(gradients, self.clip_value)\n train_op = opt.apply_gradients(zip(clip_gradients, v), global_step=self.global_step)\n return train_op\n\n def mlp(self, bottom, size, layer_num, activation, name, use_dropout=True, reuse=None, bias = True):\n \"\"\"\n bottom: 上层输入\n size: 神经元大小\n layer_num: 神经网络层数\n name: mlp的名称\n reuse: 是否复用层\n initializer: w和b的初始化均采用xavier_initializer()\n \"\"\"\n\n now = bottom\n if use_dropout:\n now = tf.nn.dropout(now, keep_prob=self.dropout_keep_prob)\n for i in range(layer_num):\n now = tf.layers.dense(now, size,\n activation=activation,\n name=name + '_{}'.format(i),\n reuse=reuse,\n use_bias=bias,\n kernel_initializer=xavier_initializer(),\n bias_initializer=xavier_initializer()\n )\n return now","sub_path":"Demo2/model2.py","file_name":"model2.py","file_ext":"py","file_size_in_byte":15058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"61336303","text":"from sklearn.metrics import mean_squared_error\r\nfrom sklearn.pipeline import make_pipeline\r\nimport plotly.express as px\r\nimport plotly.graph_objects as go\r\nfrom plotly.offline import iplot,plot\r\nimport dataSource as ds\r\nimport dataVisualization as dv\r\nfrom copy import deepcopy\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\nimport seaborn as sns\r\n\r\n# =============================================================================\r\n# Class Traitement\r\n# =============================================================================\r\n\r\nclass Traitement:\r\n \"\"\"\r\n La classe Traitement permet de construire les données X,y \r\n d'apprentissage et de test.\r\n \"\"\"\r\n \r\n def __init__(self, df, l_attrs_x, labels, freq_train=1000, freq_test=400, preprocessor=None):\r\n #DataFrame\r\n self.df = df\r\n #features\r\n self.l_attrs = l_attrs_x\r\n #targets\r\n self.labels = labels\r\n #preprocessor\r\n self.preprocessor = preprocessor\r\n #fréquences\r\n self.freq_train = freq_train\r\n self.freq_test = freq_test\r\n\r\n #Données d'apprentissage/test pour chaque modèle\r\n self.l_Xtrain = []\r\n self.l_Ytrain = []\r\n self.l_Xtest = []\r\n self.l_Ytest = []\r\n \r\n \r\n # Fonction de construction des données de train/test\r\n def set_data_train_test(self, train_size=0.8):\r\n X_train, X_test, y_train, y_test = ds.create_data_xy(self.df, train_size, self.freq_train, self.freq_test)\r\n \r\n #On vide les anciennes données s'il y en a\r\n self.l_Xtrain = []\r\n self.l_Ytrain = []\r\n self.l_Xtest = []\r\n self.l_Ytest = []\r\n \r\n for attrs in self.l_attrs:\r\n self.l_Xtrain.append(X_train[attrs])\r\n self.l_Xtest.append(X_test[attrs])\r\n self.l_Ytrain.append(y_train[self.labels])\r\n self.l_Ytest.append(y_test[self.labels])\r\n\r\n\r\n# =============================================================================\r\n# Class Evaluation \r\n# =============================================================================\r\n\r\nclass Evaluation :\r\n \"\"\"\r\n La classe Evaluation permet d'entrainer des modèles à partir de la\r\n classe Traitement et d'en afficher des résultats.\r\n \"\"\"\r\n \r\n def __init__(self, models, traitement):\r\n self.models = models\r\n self.traitement = traitement\r\n self.preprocessor = self.traitement.preprocessor\r\n\r\n self.l_Xtrain = self.traitement.l_Xtrain\r\n self.l_Ytrain = self.traitement.l_Ytrain\r\n self.l_Xtest = self.traitement.l_Xtest\r\n self.l_Ytest = self.traitement.l_Ytest\r\n self.labels = self.traitement.labels\r\n \r\n #Ajout du preprocessor à la pipeline s'il y en a un\r\n if self.preprocessor is not None :\r\n self.models_pip = [make_pipeline(self.preprocessor[i], self.models[i]) for i in range(len(self.models))]\r\n else: \r\n self.models_pip = self.models\r\n \r\n # for mi in range(len(models)):\r\n # if type(models[mi]).__name__ == 'model_physique1':\r\n # temp = self.l_Xtest[mi].copy()\r\n # temp['index'] = temp.index\r\n # f = temp.groupby(['Trip']).nth(1).reset_index()['index'].values\r\n # self.l_Ytest[mi].drop(f, inplace=True)\r\n \r\n \r\n \r\n def fit(self):\r\n \"\"\"\r\n Fonction qui entraine tous nos modèles.\r\n \"\"\"\r\n for i in range(len(self.models)):\r\n self.models_pip[i].fit(self.l_Xtrain[i], self.l_Ytrain[i])\r\n \r\n def score(self):\r\n \"\"\"\r\n Fonction retournant une liste de scores sur les données de test\r\n pour chaque modèle.\r\n \"\"\"\r\n return [self.models_pip[i].score(self.l_Xtest[i], self.l_Ytest[i]) for i in 
range(len(self.models))]\r\n \r\n def predict(self, X):\r\n \"\"\"\r\n Fonction retournant une liste de prédiction sur X pour chaque\r\n modèle.\r\n \"\"\"\r\n return [self.models_pip[i].predict(X[i]) for i in range(len(self.models))]\r\n \r\n def getCoef(self):\r\n \"\"\"\r\n Fonction retournant les paramètres appris pour chaque modèle.\r\n \"\"\"\r\n return [self.models[i].coef_ for i in range(len(self.models))]\r\n \r\n def calculMse(self):\r\n ypred = self.predict(self.l_Xtest)\r\n return [mean_squared_error(self.l_Ytest[i],ypred[i]) for i in range(len(self.models))]\r\n \r\n \r\n # ------------------------- Fonctions d'affichage -------------------------\r\n \r\n def afficher_score(self):\r\n \"\"\"\r\n Fonction affichant les scores pour chaque modèle.\r\n \"\"\"\r\n scores = self.score()\r\n for i in range(len(self.models)):\r\n print(f\"Score obtenu pour le modèle {type(self.models[i]).__name__ : <10} : {scores[i]}\")\r\n \r\n def afficher_coef(self):\r\n \"\"\"\r\n Fonction affichant les coefficients pour chaque modèle.\r\n \"\"\"\r\n coefs = self.getCoef()\r\n for i in range(len(self.models)):\r\n print(f\"Coefficients obtenu pour le modèle {i : <10} : {coefs[i]}\")\r\n \r\n def afficher_mse(self):\r\n ypred = self.predict(self.l_Xtest)\r\n print(\"MSE sur les données de test:\\n\")\r\n for i in range(len(self.models)):\r\n print(f\"MSE obtenue pour {type(self.models[i]).__name__ : <10} : {mean_squared_error(self.l_Ytest[i],ypred[i])}\")\r\n #print(f\"MSE obtenue pour {type(self.models[i]).__name__ : <10} : {np.mean((self.l_Ytest[i]-ypred[i])**2)}\")\r\n \r\n def afficher_resultats(self):\r\n \"\"\"\r\n Fonction appelant les autres fonctions d'affichage.\r\n \"\"\"\r\n #self.afficher_score()\r\n print()\r\n self.afficher_mse()\r\n print()\r\n #self.afficher_coef()\r\n \r\n #def afficher_pred(self):\r\n \r\n \r\n # ----------------------------- Fonctions MSE -----------------------------\r\n \r\n def tabMSEFreq(self, liste_freq, freq_train,train_size=0.8):\r\n tab_mse = []\r\n models = [deepcopy(m) for m in self.models]\r\n \r\n for freq in liste_freq:\r\n traitement = Traitement(self.traitement.df, self.traitement.l_attrs, self.traitement.labels,\r\n freq_train, freq, self.traitement.preprocessor)\r\n traitement.set_data_train_test(train_size)\r\n \r\n evaluateur = Evaluation(models,traitement)\r\n evaluateur.fit()\r\n \r\n tab_mse.append(evaluateur.calculMse())\r\n \r\n tab_mse = np.array(tab_mse)\r\n \"\"\" \r\n #Affichage MSE pour le premier modèle\r\n plt.figure(figsize=(15,5))\r\n plt.title(\"Erreur MSE en fonction de la fréquence\")\r\n plt.plot(liste_freq, tab_mse[:,0], label=type(models[0]).__name__)\r\n plt.xlabel(\"Temps entre deux points\")\r\n plt.ylabel(\"MSE\")\r\n plt.legend()\r\n plt.show()\r\n \"\"\"\r\n #Affichage des erreurs MSE des modèles en fonction de la fréquence \r\n\r\n \r\n for i in range(len(models)):\r\n plt.figure(figsize=(15,5))\r\n plt.plot(tab_mse[:,i], label=type(models[i]).__name__)\r\n\r\n plt.xticks(np.arange(len(liste_freq)), np.array(liste_freq))\r\n plt.xlabel(\"Fréquences\")\r\n plt.xlabel(\"Temps entre deux points\")\r\n plt.ylabel(\"MSE\")\r\n plt.legend()\r\n plt.show()\r\n\r\n plt.figure(figsize=(10,5))\r\n for i in range(len(models)):\r\n plt.plot(tab_mse[:,i], label=type(models[i]).__name__)\r\n plt.xticks(np.arange(len(liste_freq)), np.array(liste_freq))\r\n plt.xlabel(\"Fréquences\")\r\n plt.xlabel(\"Temps entre deux points\")\r\n plt.ylabel(\"MSE\")\r\n plt.legend()\r\n plt.show()\r\n #Tableau des erreurs MSE en DataFrame\r\n 
columns = [type(m).__name__ for m in models]\r\n errMSE = pd.DataFrame(tab_mse, columns=columns, index=liste_freq)\r\n \r\n return errMSE\r\n \r\n \r\n def matMSECase(self, freq_train, freq_test, lat_min, long_min, e_x, e_y, min_datapts=20, train_size=0.8, n_interval=10): \r\n #Copie des modèles\r\n models = [deepcopy(m) for m in self.models]\r\n # liste matrices erreurs des cases\r\n l_mat_err= [np.zeros((n_interval, n_interval)) for i in range(len(models))]\r\n \r\n df = self.traitement.df\r\n \r\n #Opérations pour stocker les MSE par effectif et par case\r\n eff = np.unique(df[\"Effectif_case\"])\r\n ind_eff = {eff[i]:i for i in range(len(eff))}\r\n \r\n vit = np.unique(df[\"Vitesse_moy_case\"])\r\n ind_vit = {vit[i]:i for i in range(len(vit))}\r\n \r\n var = np.unique(df[\"Vitesse_var_case\"])\r\n ind_var = {var[i]:i for i in range(len(var))}\r\n \r\n l_mse_eff = [np.zeros(len(eff)) for _ in range(len(models))]\r\n l_mse_vit = [np.zeros(len(vit)) for _ in range(len(models))]\r\n l_mse_var = [np.zeros(len(var)) for _ in range(len(models))]\r\n \r\n eff_count = [np.zeros(len(eff)) for _ in range(len(models))]\r\n vit_count = [np.zeros(len(vit)) for _ in range(len(models))]\r\n var_count = [np.zeros(len(var)) for _ in range(len(models))]\r\n \r\n # parcours de toutes les cases\r\n for i in range(n_interval):\r\n for j in range(n_interval):\r\n # récupération des données de la case\r\n case_df=ds.trouve_data_case(df, (i, j), lat_min, long_min, e_x, e_y)\r\n\r\n #On prend les Trips qui ont au moins $min_datapoints$ points\r\n #c'est pas au moins 2 points car tu splits en train et en test, ca aura moins d'un point \r\n ctrips, ccounts = np.unique(case_df[\"Trip\"], return_counts=True)\r\n ctrips = ctrips[ccounts>min_datapts]\r\n case_df = case_df[case_df['Trip'].isin(ctrips)]\r\n\r\n #Cases qui ont au moins 2 trips\r\n if len(pd.unique(case_df[\"Trip\"])) > 1 :\r\n traitement = Traitement(case_df, self.traitement.l_attrs, self.traitement.labels, \r\n freq_train, freq_test, self.traitement.preprocessor)\r\n traitement.set_data_train_test(train_size)\r\n \r\n l_ypred = self.predict(traitement.l_Xtest)\r\n \r\n for mi in range(len(models)):\r\n\r\n mse_ij = mean_squared_error(traitement.l_Ytest[mi],l_ypred[mi])\r\n l_mat_err[mi][n_interval-1-i, j] = mse_ij\r\n \r\n ei = ind_eff[pd.unique(df['Effectif_case'].loc[case_df.index])[0]]\r\n vi = ind_vit[pd.unique(df['Vitesse_moy_case'].loc[case_df.index])[0]]\r\n vi2 = ind_var[pd.unique(df['Vitesse_var_case'].loc[case_df.index])[0]]\r\n l_mse_eff[mi][ei] += mse_ij\r\n l_mse_vit[mi][vi] += mse_ij\r\n l_mse_var[mi][vi2] += mse_ij\r\n eff_count[mi][ei] += 1\r\n vit_count[mi][vi] += 1\r\n var_count[mi][vi2] += 1\r\n \r\n \r\n for mi in range(len(models)):\r\n tmp = np.where(eff_count[mi] != 0)[0]\r\n l_mse_eff[mi][tmp] /= eff_count[mi][tmp]\r\n tmp = np.where(vit_count[mi] != 0)[0]\r\n l_mse_vit[mi][tmp] /= vit_count[mi][tmp]\r\n tmp = np.where(var_count[mi] != 0)[0]\r\n l_mse_var[mi][tmp] /= var_count[mi][tmp]\r\n \r\n fig, ax = plt.subplots(2,2, figsize=(15,13)) \r\n for m in range(len(l_mat_err)):\r\n # fig, ax = plt.subplots(3,2, figsize=(13,13))\r\n #fig.suptitle(f'{type(models[m]).__name__}', fontsize=16)\r\n \r\n ax[m//2][m%2].set_title(f\"Erreur MSE par case : {type(models[m]).__name__}\")\r\n # sns.heatmap(l_mat_err[m], linewidths=.5,annot=True, cmap=\"YlGnBu\", yticklabels=np.arange(n_interval-1, -1, -1), ax=ax[0][0])\r\n sns.heatmap(l_mat_err[m], linewidths=.5,annot=True, cmap=\"YlGnBu\", yticklabels=np.arange(n_interval-1, -1, -1), 
ax=ax[m//2][m%2])\r\n # ax[0][1].set_title(\"Histogramme des valeurs MSE\")\r\n # val = l_mat_err[m].ravel()[l_mat_err[m].ravel() != 0]\r\n # sns.histplot(val, ax=ax[0][1])\r\n \r\n # ax[1][0].set_title(\"Histplot MSE moy par effectif\")\r\n # h1 = sns.histplot(x=ind_eff.keys(), y=l_mse_eff[m], ax=ax[1][0], cmap=\"RdPu\", cbar=True)\r\n # h1.set(xlabel='Effectif', ylabel='MSE')\r\n \r\n # ax[1][1].set_title(\"Histplot MSE moy par vitesse moy\")\r\n # h2 = sns.histplot(x=ind_vit.keys(), y=l_mse_vit[m], ax=ax[1][1], cmap=\"YlOrRd\", cbar=True)\r\n # h2.set(xlabel='Vitesse_moy', ylabel='MSE')\r\n \r\n # ax[2][0].set_title(\"Histplot MSE moy par variance vitesse\")\r\n # h3 = sns.histplot(x=ind_var.keys(), y=l_mse_var[m], ax=ax[2][0], cmap=\"YlOrRd\", cbar=True)\r\n # h3.set(xlabel='Variance_vit', ylabel='MSE')\r\n \r\n # fig.delaxes(ax[2][1])\r\n \r\n plt.show()\r\n \r\n def scatterPred(self, begin_point, end_point):\r\n models = [deepcopy(m) for m in self.models]\r\n \r\n txt = [f\"Point n°{t}\" for t in range(end_point-begin_point)]\r\n trace_0 = go.Scatter(x=self.l_Xtest[0]['Latitude'].iloc[begin_point:end_point], y=self.l_Xtest[0]['Longitude'].iloc[begin_point:end_point], mode=\"lines\",name=\"Xtest\", text=txt)\r\n trace_1 = go.Scatter(x=self.l_Ytest[0].iloc[begin_point:end_point,0], y=self.l_Ytest[0].iloc[begin_point:end_point,1], mode=\"lines+markers\", name=\"Target\", text=txt)\r\n data = [trace_0,trace_1]\r\n l_mse = []\r\n \r\n for mi in range(len(models)):\r\n ypred = models[mi].predict(self.l_Xtest[mi])[begin_point:end_point]\r\n y = self.l_Ytest[mi].iloc[begin_point:end_point].to_numpy()\r\n # mse = (ypred-y)**2\r\n mse = [mean_squared_error(y[i], ypred[i]) for i in range(len(y))]\r\n #txt = [f\"Point n°{i}<br>MSE_Lat = {mse[i,0]}<br>MSE_Long = {mse[i,1]}\" for i in range(len(mse))]\r\n txt = [f\"Point n°{i}<br>MSE = {mse[i]}\" for i in range(len(mse))]\r\n data.append(go.Scatter(x=ypred[:,0], y=ypred[:,1], mode=\"lines+markers\", name=type(models[mi]).__name__, text=txt))\r\n l_mse.append(np.sum(mse))\r\n \r\n layout = go.Layout(\r\n title='Targets and Predictions',\r\n xaxis = dict(\r\n title='Latitude',\r\n ticklen = 5,\r\n showgrid = True,\r\n zeroline = False\r\n ),\r\n yaxis = dict(\r\n title='Longitude',\r\n ticklen=5,\r\n showgrid=True,\r\n zeroline=False,\r\n )\r\n )\r\n\r\n fig = go.Figure(data=data, layout=layout)\r\n iplot(fig, filename=\"ScatterPred\")\r\n \r\n return l_mse","sub_path":"Eval.py","file_name":"Eval.py","file_ext":"py","file_size_in_byte":15241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"540045336","text":"import asyncio\nimport time\n\nnow = lambda : time.time()\n\nasync def do_some_work(x):\n print('Waiting: ', x)\n return \"Done\"\n\n\n\nstart = now()\n\nloop = asyncio.get_event_loop()\ntasks = [loop.create_task(do_some_work(i)) for i in range(4)]\nloop.run_until_complete(asyncio.wait(tasks))\n\n\n\n\nprint('TIME: ', now() - start)\n","sub_path":"groutine/async_ascompleted_2.py","file_name":"async_ascompleted_2.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"39897701","text":"# x만큼 간격이 있는 n개의 숫자\n# 문제 설명\n# 함수 solution은 정수 x와 자연수 n을 입력 받아, x부터 시작해 x씩 증가하는 숫자를 n개 지니는 리스트를 리턴해야 합니다. 다음 제한 조건을 보고, 조건을 만족하는 함수, solution을 완성해주세요.\n\n# 제한 조건\n# x는 -10000000 이상, 10000000 이하인 정수입니다.\n# n은 1000 이하인 자연수입니다.\n\n\ndef solution(x, n):\n answer = []\n if x == 0 and n == 0:\n return answer\n elif x == 0 and n != 0:\n answer = [0 for i in range(n)]\n elif x > 0:\n for i in range(x, (x*n)+1, x):\n answer.append(i)\n else:\n for i in range(x, (x*n)-1, x):\n answer.append(i)\n return answer\n","sub_path":"programmers/step_by_X_num.py","file_name":"step_by_X_num.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"378426744","text":"from . import views\nfrom django.urls import path\n\n\napp_name = 'superheroes'\nurlpatterns = [\n path('', views.index, name='index'),\n path('/', views.detail, name='detail'),\n path('new/', views.create, name='create_new_superhero')\n]\n","sub_path":"superhero_database/superheroes/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"281919891","text":"##########################################################\n## configuration for XZZ2l2nu \n##########################################################\n\nimport CMGTools.XZZ2l2nu.fwlite.Config as cfg\nfrom CMGTools.XZZ2l2nu.fwlite.Config import printComps\nfrom CMGTools.XZZ2l2nu.RootTools import *\nfrom PhysicsTools.HeppyCore.framework.heppy_loop import getHeppyOption\n\n\n#Load all common analyzers\nfrom CMGTools.XZZ2l2nu.analyzers.coreXZZ_cff import *\n\n#-------- SAMPLES AND TRIGGERS -----------\nfrom CMGTools.XZZ2l2nu.samples.loadSamples80x import *\nselectedComponents = mcSamples+dataSamples\n\ntriggerFlagsAna.triggerBits ={\n \"ISOMU\":triggers_1mu_iso,\n \"MU\":triggers_1mu_noniso,\n \"MUv2\":triggers_1mu_noniso_v2,\n \"MU50\":triggers_1mu_noniso_M50,\n \"ISOELE\":triggers_1e,\n \"ELE\":triggers_1e_noniso,\n \"ELEv2\":triggers_1e_noniso_v2,\n \"ELE115\":triggers_1e_noniso_E115,\n \"MUMU\": triggers_mumu,\n \"MUMUNOISO\":triggers_mumu_noniso,\n \"ELEL\": triggers_ee,\n \"HT800\":triggers_HT800,\n \"HT900\":triggers_HT900,\n \"JJ\":triggers_dijet_fat,\n \"MET90\":triggers_met90_mht90+triggers_metNoMu90_mhtNoMu90,\n \"MET120\":triggers_metNoMu120_mhtNoMu120,\n \"PHOTONHZZ\": triggers_photon_unbias,\n \"ALLPHOTON\": triggers_all_photons\n}\n\n#-------- Analyzer\nfrom CMGTools.XZZ2l2nu.analyzers.treeXZZ_cff import *\n\nmultiStateAna.processTypes = ['PhotonJets']\nmultiStateAna.selectPhotonJets = (lambda x: x.leg1.pt()>20.0 and x.leg2.pt()>-0.0)\nvvSkimmer.required = ['PhotonJets']\n\nvvTreeProducer.globalVariables = [\n NTupleVariable(\"nVert\", lambda ev: len(ev.goodVertices), int, help=\"Number of good vertices\"), \n NTupleVariable(\"nVertAll\", lambda ev: len(ev.vertices), int, help=\"Number of good vertices\"), \n NTupleVariable(\"rho\", lambda ev: ev.rho , float),\n ]\nvvTreeProducer.globalObjects = { }\n\nvvTreeProducer.collections = {\n\t \"jets\" : NTupleCollection(\"jet\",JetType,100, help=\"all jets in miniaod\"),\n \"selectedPhotons\" : NTupleCollection(\"photon\",photonType,100, help=\"selected photons in miniaod\"),\n \"PhotonJets\" : NTupleCollection(\"gjet\",PhotonJetType ,100, help=\"photon and MET\"),\n }\n\n\n\n\n#-------- SEQUENCE\ncoreSequence = [\n skimAnalyzer,\n genAna,\n jsonAna,\n triggerAna,\n pileUpAna,\n vertexAna,\n #lepAna,\n photonAna, \n jetAna,\n metAna,\n multiStateAna,\n eventFlagsAna,\n triggerFlagsAna,\n]\n \n#sequence = cfg.Sequence(coreSequence)\nsequence = cfg.Sequence(coreSequence+[vvSkimmer,vvTreeProducer])\n#sequence = cfg.Sequence(coreSequence+[vvSkimmer,multtrg,vvTreeProducer])\n#sequence = cfg.Sequence(coreSequence+[vvSkimmer,fullTreeProducer])\n \n\n#-------- HOW TO RUN\ntest = 1\nif test==1:\n # test a single component, using a single thread.\n #selectedComponents = dataSamples\n #selectedComponents = mcSamples\n #selectedComponents = SinglePhoton\n #selectedComponents = [SinglePhoton_Run2016D_PromptReco_v2]\n #selectedComponents = [GJet_Pt_20toInf_DoubleEMEnriched]\n #selectedComponents = [GJet_Pt_20to40_DoubleEMEnriched, GJet_Pt_40toInf_DoubleEMEnriched]\n selectedComponents = [GJet_Pt_20toInf_DoubleEMEnriched, GJet_Pt_20to40_DoubleEMEnriched, GJet_Pt_40toInf_DoubleEMEnriched]\n #selectedComponents = [SingleMuon_Run2015D_Promptv4,SingleElectron_Run2015D_Promptv4]\n #selectedComponents = [SingleMuon_Run2015C_25ns_16Dec]\n #selectedComponents = [SingleMuon_Run2016B_PromptReco_v2] \n #selectedComponents = SingleMuon+SingleElectron\n #selectedComponents = 
[SingleMuon_Run2016B_PromptReco_v2,SingleElectron_Run2016B_PromptReco_v2] \n #selectedComponents = [SingleMuon_Run2016D_PromptReco_v2,SingleElectron_Run2016D_PromptReco_v2] \n #selectedComponents = [MuonEG_Run2015D_16Dec] #MuEG\n #selectedComponents = [RSGravToZZToZZinv_narrow_800]\n #selectedComponents = [DYJetsToLL_M50]\n #selectedComponents = [DYJetsToLL_M50_MGMLM_Ext1]\n #selectedComponents = [BulkGravToZZToZlepZinv_narrow_600] \n #selectedComponents = signalSamples\n #selectedComponents = [TTTo2L2Nu]\n #selectedComponents = [BulkGravToZZ_narrow_800]\n #selectedComponents = [BulkGravToZZToZlepZhad_narrow_800]\n for c in selectedComponents:\n #c.files = c.files[3:10]\n c.splitFactor = (len(c.files)/5 if len(c.files)>5 else 1)\n #c.splitFactor = 7\n #c.triggers=triggers_1mu_noniso\n #c.triggers=triggers_1e_noniso\n\n## output histogram\noutputService=[]\nfrom PhysicsTools.HeppyCore.framework.services.tfile import TFileService\noutput_service = cfg.Service(\n TFileService,\n 'outputfile',\n name=\"outputfile\",\n fname='vvTreeProducer/tree.root',\n option='recreate'\n )\noutputService.append(output_service)\n\nfrom PhysicsTools.HeppyCore.framework.eventsfwlite import Events\nfrom CMGTools.TTHAnalysis.tools.EOSEventsWithDownload import EOSEventsWithDownload\nevent_class = EOSEventsWithDownload\nevent_class = Events\nif getHeppyOption(\"nofetch\"):\n event_class = Events\nconfig = cfg.Config( components = selectedComponents,\n sequence = sequence,\n services = [],\n events_class = event_class)\n\n\n\n\n","sub_path":"XZZ2l2nu/cfg/mc80x/run_xzz2l2nu_80x_cfg_photon_mc.py","file_name":"run_xzz2l2nu_80x_cfg_photon_mc.py","file_ext":"py","file_size_in_byte":5139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"44179572","text":"# -*- coding: utf-8 -*-\n'''\nCreated on Fri Aug 2 11:57:41 2019\nUpdated 20220904 22:42WER\n\n@authors: wrosing, mfitz\n'''\n\n#import json\nimport sys\n#import time\nimport os\nimport pathlib\nimport socket\nimport glob\n\n# This routine here removes all mention of previous configs from the path... for safety and my local computer got clogged with all manner of configs in the path (MTF)\npathRemovals=[]\nfor q in range(len(sys.path)):\n #print (sys.path[q])\n if 'ptr-observatory' in sys.path[q] and 'configs' in sys.path[q]:\n print ('Removing old config path: ' + str(sys.path[q]))\n pathRemovals.append(sys.path[q])\n\nfor remover in pathRemovals:\n sys.path.remove(remover)\n\npathdone=0\n\n## First try to get the hostname from a file in the directory above (..) ptr-observatory\ncwd=str(pathlib.Path().resolve())\nhwd=cwd.replace('ptr-observatory','')\nhostnamefile=glob.glob(hwd+'hostname*')\ntry:\n site_name=hostnamefile[0].split('hostname')[1]\n print(site_name)\n print ('Adding new config path: ' + str(os.path.join(pathlib.Path().resolve(),\"configs\", site_name)))\n sys.path.append(os.path.join(pathlib.Path().resolve(),\"configs\", site_name))\n pathdone=1\nexcept:\n print (\"Could not find a hostname* file in the directory above ptr-observatory e.g. hostnamesro\")\n print (\"trying another method\")\n\n\n#try:\n\n\nif pathdone==0:\n print (\"Attempting hostname approach to config file\")\n\n host_site = socket.gethostname()[:3].lower() # NB May be better to split on\n # '-' and use first part of hostname.\n if host_site =='saf':\n host_site == 'aro' # NB NB THIS is a blatant hack.\n print ('Adding new config path: ' + str(os.path.join(pathlib.Path().resolve(),\"configs\", host_site)))\n sys.path.append(os.path.join(pathlib.Path().resolve(),\"configs\", host_site))\n\ntry:\n from site_config import *\n\nexcept:\n\n print (\"Failed the hostname approach to config file\")\n print (str(host_site) + \" isn't a real place or there isn't a config file\\\n that I can find!|n\")\n\n try:\n site_name = input('What site am I running at?\\n')\n sys.path.append(os.path.join(pathlib.Path().resolve(),\"configs\", \\\n site_name))\n try:\n from site_config import *\n except:\n print (str(site_name) + \" isn't a real place, or there isn't a \\\n config file that I can find!\")\n sys.exit()\n except:\n print('You need to supply a correct site name.')\n sys.exit()","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"110191679","text":"import os\nfrom django.shortcuts import render\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.template import loader\nfrom django.http import HttpResponse\n\nfrom django_generic_plus.views import GenericModelDetailView\n\nfrom .models import HueBridge, HueLight, MQTTBroker, MQTTSwitch\n\n\ndef index(request):\n # check if a bridge is available yet\n try:\n bridge = HueBridge.objects.get(name=\"bridgeAC\", ip=\"192.168.0.16\")\n except ObjectDoesNotExist:\n bridge = HueBridge.objects.create(name=\"bridgeAC\", ip=\"192.168.0.16\")\n # bridge.save()\n\n if request.method == \"POST\":\n print(request.POST)\n return HttpResponse(\"sucess\")\n lights = []\n for api_id, api_light in bridge.api.get_light_objects('id').items():\n try:\n light = HueLight.objects.get(name=api_light.name, bridge=bridge)\n except:\n light = HueLight.objects.create(name=api_light.name,\n api_id=api_id,\n bridge=bridge)\n # light.save()\n lights.append(light)\n # print( bridge.api.get_light(api_id) )\n print( light.api._get().get(\"modelid\") )\n template = loader.get_template('django_hai_hue/index.html')\n context = {\"bridge\": bridge, \"lights\": lights}\n return HttpResponse(template.render(context, request))\n\ndef mqtt(request):\n port = 1883\n url = \"192.168.0.192\"\n name = \"mosquitto\"\n try:\n # client.connect(, 1883, 60)\n\n broker = MQTTBroker.objects.get(name=name, url=url)\n except ObjectDoesNotExist:\n broker = MQTTBroker.objects.create(name=name, url=url, port = port)\n name = \"arduino\"\n feed = \"/house/light1\"\n try:\n switch = MQTTSwitch.objects.get(name=name, feed=feed)\n except ObjectDoesNotExist:\n switch = MQTTSwitch.objects.create(name=name, feed=feed, broker=broker, state=False)\n template = loader.get_template('django_hai_hue/mqtt.html')\n context = {\"broker\": broker, \"switch\": switch}\n return HttpResponse(template.render(context, request))\n\ndef mqtt_get(request):\n name = \"arduino\"\n feed = \"/house/light1\"\n switch = MQTTSwitch.objects.get(name=name, feed=feed)\n switch.switch_state()\n switch.save()\n return mqtt(request)\n\ndef change_switch_state(request,**kwargs):\n api_info = \"\"\" This route may be used with a POST requests to set the\n hsv value of a Hue light.\n Format: {\n light-id: $ID\n }\"\"\"\n api_action = os.path.basename(os.path.normpath(request.path_info))\n\n if request.method == \"POST\":\n print(request.POST)\n try:\n api_id = int(request.POST[\"light-id\"][0])\n except:\n return HttpResponse(\"input format malformed\")\n try:\n l = HueLight.objects.get(api_id=api_id)\n except:\n return HttpResponse(\"light notfound\")\n l.api.on = not l.api.on\n return HttpResponse(\"success\")\n template = loader.get_template('django_hai_hue/api_info.html')\n context = {\"api_info\" : api_info,\"api_action\" : api_action}\n return HttpResponse(template.render(context, request))\n\n\ndef sethsv(request, **kwargs):\n api_info = \"\"\" This route may be used with a POST requests to set the\n hsv value of a Hue light.\n Format: {\n light-id: $ID\n hue: $hue\n saturation: $saturation\n brightness: $brightness\n }\"\"\"\n api_action = os.path.basename(os.path.normpath(request.path_info))\n\n if request.method == \"POST\":\n hue_norm = 65535\n brightness_norm = 255\n saturation_norm = 255\n try:\n api_id = int(request.POST[\"light-id\"][0] )\n hue = int(float(request.POST[\"hue\"]) * hue_norm / 360)\n saturation = int(float(request.POST[\"saturation\"]) * saturation_norm)\n brightness = int(float(request.POST[\"brightness\"]) * 
brightness_norm)\n except:\n return HttpResponse(\"input format malformed\")\n try:\n l = HueLight.objects.get(api_id=api_id)\n except:\n return HttpResponse(\"light notfound\")\n if not l.api.on:\n return HttpResponse(\"light off\")\n l.api.hue = hue\n l.api.saturation = saturation\n l.api.brightness = brightness\n return HttpResponse(\"success\")\n\n template = loader.get_template('django_hai_hue/api_info.html')\n context = {\"api_info\" : api_info,\"api_action\" : api_action}\n return HttpResponse(template.render(context, request))\n\nclass HueLightView(GenericModelDetailView):\n ''' Base class for Hue Light action views '''\n","sub_path":"django_hai_hue/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"286708","text":"import pickle as pickle\nimport logging\nimport os\n\nfrom freenasUI.system.alert import alertPlugins, Alert, BaseAlert\nfrom lockfile import LockFile, LockTimeout\n\nlog = logging.getLogger('system.alertmods.collectd')\n\nCOLLECTD_FILE = '/tmp/.collectdalert'\n\n\nclass CollectdAlert(BaseAlert):\n\n def run(self):\n alerts = []\n\n if not os.path.exists(COLLECTD_FILE):\n return alerts\n\n lock = LockFile(COLLECTD_FILE)\n\n while not lock.i_am_locking():\n try:\n lock.acquire(timeout=5)\n except LockTimeout:\n return alerts\n\n with open(COLLECTD_FILE, 'rb') as f:\n try:\n data = pickle.loads(f.read())\n except:\n data = {}\n\n lock.release()\n\n for k, v in list(data.items()):\n if v['Severity'] == 'WARNING':\n l = Alert.WARN\n else:\n l = Alert.CRIT\n if k == 'ctl-ha/disk_octets':\n msg = \"CTL HA link is actively used, check initiators connectivity\"\n else:\n msg = k\n alerts.append(Alert(l, msg))\n\n return alerts\n\nalertPlugins.register(CollectdAlert)\n","sub_path":"gui/system/alertmods/collectd.py","file_name":"collectd.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"147378244","text":"'''\n处理《续资治通鉴》电子书\n'''\n\n\nimport os\nimport re\n\ndirname = r'e:\\temp\\续资治通鉴'\n\n\ndef search_index(path):\n all_index = []\n for (thisdir, subs, files) in os.walk(dirname):\n for f in files:\n if f.endswith('.htm') and f.startswith('xtj'):\n all_index.append(os.path.join(thisdir, f))\n return all_index\n\n\ndef read_htm(file):\n with open(file, 'r', errors='ignore') as f:\n htm_lines = f.readlines()\n return htm_lines\n\n\ndef proc_line(lines):\n txt = ''\n s_vol = 'style=\"font-size: 28pt\">(.*?) 0:\n txt += result + '\\n'\n result = get_useful(l, s_title)\n if len(result) > 0:\n txt += result + '\\n'\n result = get_useful(l, s_discription)\n if len(result) > 0:\n txt += result + '\\n'\n result = get_useful(l, s_king)\n if len(result) > 0:\n txt += '\\n' + result + '\\n'\n result = get_useful(l, s_year)\n if len(result) > 0:\n txt += result + '\\n'\n result = get_useful(l, s_main)\n if len(result) > 0:\n txt += ' ' + result + '\\n'\n return txt\n\n\ndef get_useful(line, pattern):\n p = re.compile(pattern, re.S)\n m = re.search(p, line)\n if m is None:\n return ''\n else:\n return m.group(1).strip()\n\n\ndef main():\n l_all_txt = search_index(dirname)\n # print(len(l_all_txt), l_all_txt)\n\n # htm_lines = read_htm(r'e:\\temp\\资治通鉴\\zztj_001.htm')\n # print(proc_line(htm_lines))\n # print(extract_txt(htm))\n\n # for l in l_all_txt:\n # htm = read_htm(l)\n # print(extract_txt(htm), l)\n\n out_file = os.path.join(dirname, '续资治通鉴.txt')\n with open(out_file, 'w+') as f:\n for l in l_all_txt:\n htm_lines = read_htm(l)\n f.write(proc_line(htm_lines))\n f.write('\\n' * 2)\n f.write('* ' * 15)\n f.write('\\n' * 2)\n print('[Read]', l)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"xtj.py","file_name":"xtj.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"177992486","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 14 08:07:36 2020\n\nClasses representing different kind of Ordinary Differential Equations (ODEs).\n\n@author: Carollo Andrea - Tomasi Matteo\n\"\"\"\n\nimport numpy as np\nimport pinocchio as pin\nfrom numpy.linalg import norm\n\n\nclass ContactPoint_ODE:\n ''' A point on the robot surface that can make contact with surfaces.\n '''\n def __init__(self, model, data, frame_name):\n self.model = model # robot model\n self.data = data # robot data\n self.frame_name = frame_name # name of reference frame associated to this contact point\n self.frame_id = model.getFrameId(frame_name) # id of the reference frame\n self.active = False # True if this contact point is in contact\n self.p0 = np.zeros(3)\n \n def get_position(self):\n ''' Get the current position of this contact point \n '''\n M = self.data.oMf[self.frame_id]\n return M.translation\n \n def get_velocity(self):\n M = self.data.oMf[self.frame_id]\n R = pin.SE3(M.rotation, 0*M.translation) # same as M but with translation set to zero\n v_local = pin.getFrameVelocity(self.model, self.data, self.frame_id)\n v_world = (R.act(v_local)).linear # convert velocity from local frame to world frame\n return v_world\n \n def get_jacobian(self):\n J6 = pin.getFrameJacobian(self.model, self.data, self.frame_id, pin.ReferenceFrame.LOCAL_WORLD_ALIGNED)\n return J6[:3,:]\n \n \nclass ContactSurface_ODE:\n ''' A visco-elastic planar surface\n '''\n def __init__(self, name, pos, normal, K, B, mu):\n self.name = name # name of this contact surface\n self.x0 = pos # position of a point of the surface\n self.normal = normal # direction of the normal to the surface\n self.K = K # stiffness of the surface material\n self.B = B # damping of the surface material\n self.mu = mu # friction coefficient of the surface\n self.bias = self.x0.dot(self.normal)\n \n def check_collision(self, p):\n ''' Check the collision of the given point\n with this contact surface. 
If the point is not\n inside this surface, then return False.\n '''\n normal_penetration = self.bias - p.dot(self.normal)\n if(normal_penetration < 0.0):\n return False # no penetration\n return True\n\n def compute_force(self, contact_point, q, vq, robot):\n cp = contact_point\n # cp.p0 anchor_point\n \n # get position of the contact point\n H = robot.framePlacement(q, cp.frame_id, False)\n p = H.translation\n\n # get velocity of the contact point\n R = pin.SE3(H.rotation, 0*H.translation) # same as M but with translation set to zero\n v_local = robot.frameVelocity(q, vq, cp.frame_id, False)\n v = (R.act(v_local)).linear \n\n # compute contact force using spring-damper law\n f = self.K.dot(cp.p0 - p) - self.B.dot(v)\n \n # check whether contact force is outside friction cone\n f_N = f.dot(self.normal) # norm of normal force\n f_T = f - f_N*self.normal # tangential force (3d)\n f_T_norm = norm(f_T) # norm of tangential force\n \n if(f_T_norm > self.mu*f_N):\n # contact is slipping \n # f_T_norm = self.mu*f_N\n t_dir = f_T/np.amax(np.absolute(f_T))\n t_dir = t_dir/norm(t_dir)\n \n # saturate force at the friction cone boundary\n f = f_N*self.normal + self.mu*f_N*t_dir\n # update anchor point so that f is inside friction cone\n if (f_T_norm >= 8.9e999):\n f_T_norm = 8.9e999\n delta_p0 = (f_T_norm - self.mu*f_N) / self.K[0,0]\n # print( \"Delta_p0 : \", delta_p0, \" - f_t_norm : \", f_T_norm.T, \" - f_N_norm : \", norm(f_N))\n # print( \"Delta_p0 : \", delta_p0, \" - t_dir : \", t_dir.T)\n cp.p0 -= delta_p0*t_dir\n\n \n return f\n \n\nclass ODE:\n def __init__(self, name):\n self.name = name\n\n def f(self, x, u, t):\n return np.zeros(x.shape)\n\n\nclass ODESin:\n ''' ODE defining a sinusoidal trajectory '''\n\n def __init__(self, name, A, f, phi):\n self.name = name\n self.A = A\n self.two_pi_f = 2*np.pi*f\n self.phi = phi\n\n def f(self, x, u, t):\n return self.two_pi_f*self.A*np.cos(self.two_pi_f*t + self.phi)\n\n\nclass ODELinear:\n ''' A linear ODE: dx = A*x + b\n '''\n\n def __init__(self, name, A, B, b):\n self.name = name\n self.A = A\n self.B = B\n self.b = b\n self.nx = A.shape[0]\n self.nu = B.shape[1]\n\n def f(self, x, u, t, jacobian=False):\n dx = self.A.dot(x) + self.b + self.B.dot(u)\n if(jacobian):\n return (np.copy(dx), np.copy(self.A), np.copy(self.B))\n return np.copy(dx)\n\n\nclass ODEStiffDiehl:\n def f(self, x, u, t):\n return -50.0*(x - np.cos(t))\n\n\nclass ODEPendulum:\n def __init__(self):\n self.g = -9.81\n\n def f(self, x, u, t):\n dx = np.zeros(2)\n dx[0] = x[1]\n dx[1] = self.g*np.sin(x[0])\n return dx\n\n\nclass ODERobot:\n ''' An ordinary differential equation representing a robotic system\n '''\n\n def __init__(self, name, robot):\n ''' robot: instance of RobotWrapper\n '''\n self.name = name\n self.robot = robot\n self.nu = robot.na\n nq, nv = self.robot.nq, self.robot.nv\n self.nx = nq+nv\n self.nu = self.robot.na\n self.Fx = np.zeros((self.nx, self.nx))\n self.Fx[:nv, nv:] = np.identity(nv)\n self.Fu = np.zeros((self.nx, self.nu))\n self.dx = np.zeros(2*nv)\n\n ''' System dynamics '''\n\n def f(self, x, u, t, jacobian=False):\n nq = self.robot.nq\n nv = self.robot.nv\n model = self.robot.model\n data = self.robot.data\n q = x[:nq]\n v = x[nq:]\n\n if(nv == 1):\n # for 1 DoF systems pin.aba does not work (I don't know why)\n pin.computeAllTerms(model, data, q, v)\n ddq = (u-data.nle) / data.M[0]\n else:\n ddq = pin.aba(model, data, q, v, u)\n\n self.dx[:nv] = v\n self.dx[nv:] = ddq\n\n if(jacobian):\n pin.computeABADerivatives(model, data, q, v, u)\n 
self.Fx[:nv, :nv] = 0.0\n self.Fx[:nv, nv:] = np.identity(nv)\n self.Fx[nv:, :nv] = data.ddq_dq\n self.Fx[nv:, nv:] = data.ddq_dv\n self.Fu[nv:, :] = data.Minv\n\n return (np.copy(self.dx), np.copy(self.Fx), np.copy(self.Fu))\n\n return np.copy(self.dx)\n\n def f_x_fin_diff(self, x, u, t, delta=1e-8):\n ''' Partial derivatives of system dynamics w.r.t. x computed via finite differences '''\n f0 = self.f(x, u, t)\n Fx = np.zeros((self.nx, self.nx))\n for i in range(self.nx):\n xp = np.copy(x)\n xp[i] += delta\n fp = self.f(xp, u, t)\n Fx[:, i] = (fp-f0)/delta\n return Fx\n\n def f_u_fin_diff(self, x, u, t, delta=1e-8):\n ''' Partial derivatives of system dynamics w.r.t. u computed via finite differences '''\n f0 = self.f(x, u, t)\n Fu = np.zeros((self.nx, self.nu))\n for i in range(self.nu):\n up = np.copy(u)\n up[i] += delta\n fp = self.f(x, up, t)\n Fu[:, i] = (fp-f0)/delta\n return Fu\n\n\nclass ODERobot_wc:\n ''' An ordinary differential equation representing a robotic system\n '''\n\n def __init__(self, name, robot, contact_points, contact_surfaces):\n ''' robot: instance of RobotWrapper\n '''\n self.name = name\n self.robot = robot\n self.nu = robot.na\n nq, nv = self.robot.nq, self.robot.nv\n self.nx = nq+nv\n self.nu = self.robot.na\n self.Fx = np.zeros((self.nx, self.nx))\n self.Fx[:nv, nv:] = np.identity(nv)\n self.Fu = np.zeros((self.nx, self.nu))\n self.dx = np.zeros(2*nv)\n self.contact_points = contact_points\n self.contact_surfaces = contact_surfaces\n\n nk = 3*len(self.contact_points)*len(self.contact_surfaces) # size of contact force vector\n self.fc = np.zeros(nk) # contact forces\n self.Jc = np.zeros((nk, self.robot.model.nv)) # contact Jacobian\n\n ''' System dynamics '''\n\n def f(self, x, u, t, jacobian=False):\n nq = self.robot.nq\n nv = self.robot.nv\n model = self.robot.model\n data = self.robot.data\n q = x[:nq]\n v = x[nq:]\n\n i = 0\n self.robot.computeAllTerms(q,v)\n for cs in self.contact_surfaces: # for each candidate contact surface\n for cp in self.contact_points: # for each candidate contact point\n # Contact point placement\n H = self.robot.framePlacement(q, cp.frame_id, False)\n p_c = H.translation\n\n if(cs.check_collision(p_c)): # check whether the point is colliding with the surface\n if(cp.active == False): # if the contact was not already active\n cp.active = True\n cp.p0 = np.copy(p_c) # anchor point\n\n # Compute the contact force\n self.fc[i:i+3] = cs.compute_force(cp, q, v, self.robot)\n # compute the jacobian\n self.Jc[i:i+3, :] = cp.get_jacobian()\n i += 3\n\n else: # if the point is not colliding more\n if(cp.active): # if the contact was already active\n cp.active = False\n\n # Contact force equal to 0\n self.fc[i:i+3] = np.zeros(3)\n # jacobian equl to zero\n self.Jc[i:i+3, :] = np.zeros((3, self.robot.model.nv))\n i += 3\n # compute JT*force from contact point\n u_con = u + self.Jc.T.dot(self.fc)\n\n if(nv == 1):\n # for 1 DoF systems pin.aba does not work (I don't know why)\n ddq = (u_con-data.nle) / data.M[0]\n else:\n ddq = pin.aba(model, data, q, v, u_con)\n\n self.dx[:nv] = v\n self.dx[nv:] = ddq\n\n if(jacobian):\n pin.computeABADerivatives(model, data, q, v, u_con)\n self.Fx[:nv, :nv] = 0.0\n self.Fx[:nv, nv:] = np.identity(nv)\n self.Fx[nv:, :nv] = data.ddq_dq\n self.Fx[nv:, nv:] = data.ddq_dv\n self.Fu[nv:, :] = data.Minv\n\n return (np.copy(self.dx), np.copy(self.Fx), np.copy(self.Fu))\n\n return np.copy(self.dx)\n\n def f_x_fin_diff(self, x, u, t, delta=1e-8):\n ''' Partial derivatives of system dynamics w.r.t. 
x computed via finite differences '''\n f0 = self.f(x, u, t)\n Fx = np.zeros((self.nx, self.nx))\n for i in range(self.nx):\n xp = np.copy(x)\n xp[i] += delta\n fp = self.f(xp, u, t)\n Fx[:, i] = (fp-f0)/delta\n return Fx\n\n def f_u_fin_diff(self, x, u, t, delta=1e-8):\n ''' Partial derivatives of system dynamics w.r.t. u computed via finite differences '''\n f0 = self.f(x, u, t)\n Fu = np.zeros((self.nx, self.nu))\n for i in range(self.nu):\n up = np.copy(u)\n up[i] += delta\n fp = self.f(x, up, t)\n Fu[:, i] = (fp-f0)/delta\n return Fu","sub_path":"Course_Proj/Assignment_3/Code/OCP/ode.py","file_name":"ode.py","file_ext":"py","file_size_in_byte":11692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"106917631","text":"\nimport torch \nfrom torch.autograd import Variable \nimport cv2 \nfrom data import BaseTransform, VOC_CLASSES as labelmap\nfrom ssd import build_ssd \nimport imageio \n \n# Detection\ndef detect (image, net, transform):\n h, w = image.shape[:2] \n #perform transformations\n image_t = transform(image)[0] #transform the image and get the first index to get the right dimension and colors\n image_t2 = torch.from_numpy(image_t).permute(2,0,1) #turning numpy array into torch tensor and then convert RBG to GRB since that is how the NN was changed\n image_t3 = Variable(image_t2.unsqueeze(0)) #add fake dimension then turn into torch variable\n y = net(image_t3) #putting the image through the neural network and put it into var y\n detections = y.data # gives the torch tensor of the y (since otherwise we would get gradient and tensor) \n scale = torch.Tensor([w,h,w,h]) # we create a tensor object of dimensions [width, height, width, height] to normalize the position of the image between 0-1\n # elements of detections tensor : detections = [batch, number of classes, number of occurences of class, (score, x0,y0,x1,y1) ]\n # where batch is from the fake dimension, classes are objects (planes, boats, dogs), and the score rates how likely it is that image and effects occurance, 0.6>+1 occurence, <0.6 occurence is not changed and then we get the upper left and bottom right of the detected image\n for i in range(detections.size(1)): #detections.size(1) is number of classes\n j = 0 # We initialize the loop variable j that will correspond to the occurrences of the class\n while detections[0,i,j,0]>=0.6: # We take into account all the occurrences j of the class i that have a matching score larger than 0.6.\n point = (detections[0,i,j,1:]*scale).numpy() #grabs coordinates as torch tensor so convert to numpy\n cv2.rectangle( image, ((point[0]), int(point[1])), (int(point[2]), int(point[3])), (255,0,0), 2) #draw rectangle\n cv2.putText(image, labelmap[i-1], ((point[0]), int(point[1])), cv2.FONT_HERSHEY_SIMPLEX,2,(255,255,255),2, cv2.LINE_AA) #writes text on upper left corner of detected image\n j+=1\n return image\n\n# SSD Neural Network\nnet = build_ssd('test') #train/test phase\nnet.load_state_dict(torch.load('ssd300_mAP_77.43_v2.pth', map_location = lambda storage, loc: storage)) #load pre-trained weights and open it with a torch tensor \n\n# Perform transformations so it is compatible with the NN\ntransform = BaseTransform(net.size, (104/256.0, 117/256.0, 123/256.0)) # We create an object of the BaseTransform class, a class that will do the required transformations so that the image can be the input of the neural network \n #the argumenets above are the target size and the scale under which the net was trained\n\n# Object Detection and Video Creation\nmy_video = 'Hockey.mp4'\n\nreader = imageio.get_reader(my_video) #open video\nframesps = reader.get_meta_data()['fps'] #get fps of the frames\nwriter = imageio.get_writer('output_video.mp4', fps=framesps) #create the detected video file with stated fps\nvid = cv2.VideoCapture(my_video)\nlength = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))\nfor i, image in enumerate(reader): \n image = detect(image,net.eval(),transform) #net.eval() targets the detections \n writer.append_data(image) #add frame to video\n print('Rendering Frame ', i, 'of', length) #print frame\nprint('Finished Rendering Video')\nwriter.close() 
","sub_path":"player-tracking.py","file_name":"player-tracking.py","file_ext":"py","file_size_in_byte":3453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"281837489","text":"# -*- coding: utf-8 -*-\nfrom django.conf.urls import patterns, url, include\nfrom core.ridge.views import *\n\nurlpatterns = patterns('',\n url(r'^$', home, name='home'),\n\n url(r'^tournament/add/$', tournament_add, name='tournament_add'),\n url(r'^tournament/(?P\\d+)/$',tournament_view, name='tournament_view'),\n url(r'^tournament/delete/(?P\\d+)/$',tournament_delete, name='tournament_delete'),\n\n url(r'^set_result$', set_result, name='rate'),\n url(r'^proceed$', proceed, name='proceed'),\n\n url(r'^user/$', user_auth, name='user_auth'),\n url(r'^user/logout$', user_logout, name='user_logout'),\n\n url(r'^chessplayer/add/$', chessplayer_add, name='chessplayer_add'),\n url(r'^test/$', for_test, name='test'),\n)","sub_path":"core/ridge/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"363405895","text":"import random\nnots = (\"ZERO\", \"ONE\", \"TWO\", \"THREE\", \"FOUR\", \"FIVE\", \"SIX\", \"SEVEN\", \"EIGHT\", \"NINE\")\n\ndef isIn(str1, str2):\n i = False\n str2 = list(str2)\n \n c = 0\n for char in str1:\n if char in str2:\n str2.pop(str2.index(char))\n c+=1\n\n if(c == len(str1)):\n i = True\n\n return (i, \"\".join(str2))\n\n\n\n\ntests = int(input(\"\"))\ni = 0\n\nwhile i < tests:\n raw = input(\"\")\n\n ori = raw\n final = []\n loop = 0\n\n while raw:\n loop+=1\n j = random.randint(0,9)\n #print(\"trying \", j)\n\n t = isIn(nots[j], raw)\n if(t[0]):\n #print(\"Found \", j)\n final.append(j)\n raw = t[1]\n #print(raw)\n\n if(loop >= 10):\n final = []\n raw = ori\n loop = 0\n\n print(\"CASE #{}:\".format(i+1),\"\".join([str(x) for x in sorted(final)]))\n\n i+=1\n","sub_path":"codes/CodeJamCrawler/16_2_1/bermuda.ut/digits.py","file_name":"digits.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"185272594","text":"import os\nimport sys\nimport torch\nimport numpy as np\nimport random\nimport time\n\nfrom .utils_for_robust import linf_loss, l0_loss, l1_loss, l2_loss, _auto_name, load, normalize_invert, same_length_str, \\\n tensor_detach\nfrom .models import factory\nfrom .utils import device\nfrom .datasets import Dataset\nimport foolbox\nimport pandas as pd\n\nMOMENTS = {\n 'imagenet': ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n 'cifar10': ([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),\n 'cifar100': ([0.5071, 0.4867, 0.4408], [0.2675, 0.2565, 0.2761]),\n}\n\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-d\", \"--dataset\", choices=[\"cifar10\", \"imagenet\"], default=\"imagenet\")\n parser.add_argument(\"-s\", \"--save\", default=\"./saved_results\")\n\n parser.add_argument(\"-a\", \"--attack\", choices=[\"FGSM\"], default=\"FGSM\")\n parser.add_argument(\"-u\", \"--untargeted\", action='store_true')\n parser.add_argument(\"-e\", \"--epsilon\", type=float, default=0.03)\n\n parser.add_argument(\"--num_valid_test_imgs\", type=int, default=10000)\n\n parser.add_argument(\"--attack_batch_size\", type=int, default=1)\n\n parser.add_argument(\"--model_name\", type=str, help='Name of the model.') #\n parser.add_argument(\"--load_name\", default=None, type=str, help='Checkpoint name to load')\n\n parser.add_argument(\"-z\", \"--use_zvalue\", action='store_true')\n parser.add_argument(\"--seed\", type=int, default=1216)\n\n args = vars(parser.parse_args())\n print('args = ', args)\n\n # setup random seed\n random.seed(args['seed'])\n np.random.seed(args['seed'])\n torch.manual_seed(args[\"seed\"])\n print(\"seed = \", args[\"seed\"])\n\n overall_timestart = time.time()\n\n use_log = not args['use_zvalue']\n print(\"use_log = \", use_log)\n\n model_name = args['model_name']\n dataset_name = args['dataset']\n\n model = factory(model_name, dataset_name).to(device)\n model = load(model_name, dataset_name, model, args['load_name'])\n #######################\n normMean, normStd = MOMENTS[dataset_name]\n mean = np.array(normMean, dtype='float').reshape((3, 1, 1))\n std = np.array(normStd, dtype='float').reshape((3, 1, 1))\n # --------------------------------------------------------------------------------------#\n if args['attack'] == \"FGSM\": #\n fmodel = foolbox.models.PyTorchModel(model.eval(), bounds=(0, 1), num_classes=10,\n preprocessing=(mean, std))\n attack = foolbox.attacks.FGSM(fmodel)\n else:\n print(\"Invalid attack name, exit 1\")\n return\n #####################\n attack_batch_size = args['attack_batch_size']\n # >>>>>>>>>>>>>>>>>>>>>\n data = Dataset(args['dataset'], False, attack_batch_size) # >>>>\n\n targeted_flag = not args['untargeted']\n print(\"targeted_flag = \", targeted_flag)\n\n img_no = 0\n img_nan = 0 # #########\n pred_right = 0 # ############\n total_success = 0\n l0_list = []\n l1_list = []\n l2_list = []\n linf_list = []\n time_list = []\n\n verbose_f = open(args['save'] + \"/\" + \"_\".join([args['dataset'], args['attack'], str(targeted_flag),\n str(args['epsilon']), \"verbose.txt\"]), \"w\")\n aggre_f = open(args['save'] + \"/\" + \"_\".join([args['dataset'], args['attack'], str(targeted_flag),\n str(args['epsilon']), \"aggre.txt\"]), \"w\")\n robust_f = open(args['save'] + \"/\" + \"_\".join([args['dataset'], args['attack'], str(targeted_flag),\n str(args['epsilon']), \"robust.txt\"]), \"a\")\n if targeted_flag is True:\n verbose_head_str_raw = ['total', 'pre_rig', 'adv_suc', 'time', 
'success', 'pred', 'target',\n 'adv', 'l0_dist', 'l1_dist', 'l2_dist',\n 'linf_distortion']\n else:\n verbose_head_str_raw = ['total', 'pre_rig', 'adv_suc', 'time', 'success', 'pred', 'adv',\n 'l0_distor', 'l1_dist', 'l2_dist', 'linf_dist']\n\n verbose_head_str = same_length_str(verbose_head_str_raw)\n aggre_head_str_raw = ['total_count', 'pred_rate', 'suc_rate_a', 'suc_rate_r',\n 'l0_avg', 'l0_std', 'l1_avg', 'l1_std', 'l2_avg', 'l2_std',\n 'linf_avg', 'linf_std', 'time_avg', 'time_std']\n aggre_head_str = same_length_str(aggre_head_str_raw)\n\n verbose_f.write(verbose_head_str + '\\n')\n aggre_f.write(aggre_head_str + '\\n')\n\n robust_f.write(str(args['load_name']) + '\\n')\n robust_f.write(aggre_head_str + '\\n')\n\n sys.stdout.flush()\n\n for i, (images, labels) in enumerate(data):\n for j in range(labels.size()[0]):\n timestart = time.time()\n print(\"=\" * 10, \"i = \", i, \"=\" * 10, \"j=\", j, \"=\" * 10)\n \"\"\"perform the attack\"\"\"\n image = np.array(normalize_invert(images[j].cpu().clone(), normMean, normStd)).clip(0, 1)\n label = int(labels[j].item())\n adv = attack(image, label, epsilons=[float(args['epsilon'])]) # default unpack=True returns a numpy array, or None if no adversarial is found\n\n timeend = time.time()\n time_used = timeend - timestart\n time_used_per_image = time_used\n\n original_predict = tensor_detach(model(torch.unsqueeze(images[j], 0))) # detach before any numpy conversion\n #original_predict = np.squeeze(fmodel.forwards(images[j].numpy()))\n original_predict = np.squeeze(original_predict)\n original_class = np.argsort(original_predict)\n\n sys.stdout.flush()\n\n predict_label = np.argmax(original_predict)\n target_label = labels[j].data.cpu().numpy()\n success = False\n ####################################\n if predict_label == target_label:\n pred_right += 1\n\n img_no += 1\n print('img_nan:', img_nan)\n # if the array contains NaN, the solver did not return a solution\n if np.any(pd.isnull(adv)):\n img_nan += 1 # ########################\n print('Attack failed. 
(solver returned NaN)')\n l0_distortion = l1_distortion = l2_distortion = linf_distortion = np.nan\n else:\n\n l0_distortion = l0_loss(np.array(adv), np.array(images[j].cpu())) # adv is already a numpy array\n l1_distortion = l1_loss(adv, np.array(images[j].cpu()))\n l2_distortion = l2_loss(adv, np.array(images[j].cpu()))\n linf_distortion = linf_loss(adv, np.array(images[j].cpu()))\n\n adversarial_predict = tensor_detach(model(torch.tensor([adv]).to(device))) # detach before any numpy conversion\n adversarial_predict = np.squeeze(adversarial_predict)\n\n adversarial_prob = np.sort(adversarial_predict)\n adversarial_class = np.argsort(adversarial_predict)\n attack_label = np.argmax(adversarial_predict)\n\n sys.stdout.flush()\n\n success = False\n if targeted_flag:\n success = np.argsort(adversarial_predict)[-1] == target_label\n\n # dealing with the tie issue in the adversarial_predict vector\n candidates = np.array([i for i in range(len(adversarial_predict) - 1)\n if abs(adversarial_predict[i] - adversarial_prob[-1]) < 0.001])\n if len(candidates) > 1 and target_label in candidates:\n success = True\n\n else:\n success = np.argsort(adversarial_predict)[-1] != target_label\n if success:\n print(\"Attack succeeded.\")\n else:\n print(\"Attack failed.\")\n\n if success:\n total_success += 1\n l0_list.append(l0_distortion)\n l1_list.append(l1_distortion)\n l2_list.append(l2_distortion)\n linf_list.append(linf_distortion)\n time_list.append(time_used_per_image)\n print('total success', total_success)\n suffix = \"seq={0}_prev={1}_adv={2}_res={3}\".format(i, original_class[-1], adversarial_class[-1], success)\n print(\"Saving to\", suffix)\n sys.stdout.flush()\n\n L1_debug_str = \"[STATS][L1] total = {}, seq = {}, time = {:.3f}, success = {}, \" \\\n \"prev_class = {}, new_class = {}, distortion = {:.5f}, success_rate = {:.3f}, \" \\\n \"l2_avg = {:.5f}\".format(img_no, i * attack_batch_size + j,\n time_used_per_image, success, original_class[-1],\n adversarial_class[-1], l2_distortion,\n total_success / float(img_no),\n 0 if total_success == 0 else np.mean(l2_list))\n\n print(L1_debug_str)\n sys.stdout.flush()\n\n if targeted_flag is True:\n verbose_str_raw = [str(img_no), str(pred_right), str(total_success),\n format(time_used_per_image, '<6.3f'), str(success),\n str(original_class[-1]), str(target_label), str(adversarial_class[-1]),\n format(l0_distortion, '<9.3f'), format(l1_distortion, '<9.3f'),\n format(l2_distortion, '<9.3f'), format(linf_distortion, '<9.3f')]\n else:\n verbose_str_raw = [str(img_no), str(pred_right), str(total_success), str(time_used_per_image),\n str(success),\n str(original_class[-1]), str(adversarial_class[-1]), str(l0_distortion),\n str(l1_distortion),\n str(l2_distortion), str(linf_distortion)]\n verbose_str = same_length_str(verbose_str_raw)\n\n verbose_f.write(verbose_str + \"\\n\")\n verbose_f.flush()\n print(verbose_head_str)\n print(verbose_str)\n\n sys.stdout.flush()\n\n overall_timeend_sofar = time.time()\n\n overall_time_used_sofar = overall_timeend_sofar - overall_timestart\n\n print(\"overall_time_used_sofar = \", overall_time_used_sofar)\n sys.stdout.flush()\n if img_no >= args[\"num_valid_test_imgs\"]:\n break\n verbose_f.close()\n\n if img_no == 0:\n success_rate = 0.0\n else:\n # success_rate = total_success / float(img_no)\n success_rate = total_success / float(pred_right) if pred_right else 0.0 # guard against division by zero\n\n if total_success == 0:\n aggre_str_raw = [str(img_no), str(success_rate), str(0.0), str(0.0), str(0.0), str(0.0),\n str(0.0), str(0.0), str(0.0), str(0.0), str(0.0), str(0.0),\n str(0.0), str(0.0)] # pad to the 14 columns of aggre_head_str_raw\n else:\n aggre_str_raw = [str(img_no), format(pred_right / 
float(img_no), '<6.4f'),\n format(total_success / float(img_no), '<6.4f'), format(success_rate, '<6.4f'),\n format(np.mean(l0_list), '<6.3f'), format(np.std(l0_list), '<6.3f'),\n format(np.mean(l1_list), '<6.3f'), format(np.std(l1_list), '<6.3f'),\n format(np.mean(l2_list), '<6.3f'), format(np.std(l2_list), '<6.3f'),\n format(np.mean(linf_list), '<6.3f'), format(np.std(linf_list), '<6.3f'),\n format(np.mean(time_list), '<6.3f'), format(np.std(time_list), '<6.3f')]\n aggre_str = same_length_str(aggre_str_raw)\n\n aggre_f.write(aggre_str + \"\\n\")\n robust_f.write(aggre_str + '\\n') # #############\n print(aggre_head_str)\n print(aggre_str)\n sys.stdout.flush()\n robust_f.close() # ########\n aggre_f.close()\n\n overall_timeend = time.time()\n\n overall_time_used = overall_timeend - overall_timestart\n\n print(\"overall_time_used = \", overall_time_used)\n sys.stdout.flush()\n\n print(\"ALL DONE!!!\")\n return\n\n\nif __name__ == \"__main__\":\n main()\n\n print(\"Experiment Done!!!\")\n","sub_path":"umbreon/test_robust_new.py","file_name":"test_robust_new.py","file_ext":"py","file_size_in_byte":12246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"415358103","text":"import glob\nimport os\nimport pathlib\nimport stat\nimport time\nfrom collections import OrderedDict, defaultdict\nfrom pathlib import Path\nfrom textwrap import dedent\nfrom typing import Optional\n\nfrom loguru import logger\n\nfrom .action import ActionForBuild\nfrom .uninstall import uninstall\nfrom .util import run_user_script\nfrom ..exceptions import (\n BinaryArchiveNotFoundException,\n InternalCommandException,\n InternalSubprocessException,\n UserException,\n)\nfrom ..gitutils import lfs\nfrom ..gitutils import get_worktree_root\nfrom ..model.install_metadata import (\n load_metadata,\n init_metadata_from_build,\n save_metadata,\n save_file_list,\n is_installed,\n installed_component_license_path,\n installed_component_file_list_path,\n installed_component_metadata_path,\n)\n\n\nclass InstallAction(ActionForBuild):\n def __init__(\n self,\n build,\n script,\n config,\n allow_build=True,\n allow_binary_archive=True,\n create_binary_archive=False,\n no_merge=False,\n keep_tmproot=False,\n run_tests=False,\n ):\n assert (\n allow_build or allow_binary_archive\n ), f\"You must allow at least one option between building and installing from binary archives for {build.name}\"\n sources = []\n if allow_build:\n sources.append(\"build\")\n if allow_binary_archive:\n sources.append(\"binary archives\")\n sources_str = \" or \".join(sources)\n\n super().__init__(f\"install ({sources_str})\", build, script, config)\n self.allow_build = allow_build\n self.allow_binary_archive = allow_binary_archive\n self.create_binary_archive = create_binary_archive\n self.no_merge = no_merge\n self.keep_tmproot = keep_tmproot\n self.run_tests = run_tests\n\n def _run(self, explicitly_requested=False):\n tmp_root = self.environment[\"TMP_ROOT\"]\n orchestra_root = self.environment[\"ORCHESTRA_ROOT\"]\n\n logger.debug(\"Preparing temporary root directory\")\n self._prepare_tmproot()\n\n pre_file_list = self._index_directory(tmp_root + orchestra_root, relative_to=tmp_root + orchestra_root)\n\n install_start_time = time.time()\n if self.allow_binary_archive and self.binary_archive_exists():\n self._install_from_binary_archive()\n source = \"binary archives\"\n elif self.allow_build:\n self._build_and_install()\n if self.create_binary_archive:\n self._create_binary_archive()\n source = \"build\"\n else:\n raise UserException(f\"Could not find binary archive nor build: {self.build.qualified_name}\")\n install_end_time = time.time()\n\n # Binary archive symlinks always need to be updated, not only when the binary archive is rebuilt\n self.update_binary_archive_symlink()\n\n post_file_list = self._index_directory(tmp_root + orchestra_root, relative_to=tmp_root + orchestra_root)\n post_file_list.append(\n os.path.relpath(installed_component_file_list_path(self.component.name, self.config), orchestra_root)\n )\n post_file_list.append(\n os.path.relpath(installed_component_metadata_path(self.component.name, self.config), orchestra_root)\n )\n new_files = [f for f in post_file_list if f not in pre_file_list]\n\n if not self.no_merge:\n if is_installed(self.config, self.build.component.name):\n logger.debug(\"Uninstalling previously installed build\")\n uninstall(self.build.component.name, self.config)\n\n logger.debug(\"Merging installed files into orchestra root directory\")\n self._merge()\n\n self._update_metadata(\n new_files,\n install_end_time - install_start_time,\n source,\n explicitly_requested,\n )\n\n if not self.keep_tmproot:\n logger.debug(\"Cleaning up tmproot\")\n 
self._cleanup_tmproot()\n\n def _update_metadata(self, file_list, install_time, source, set_manually_installed):\n # Save installed file list (.idx)\n save_file_list(self.component.name, file_list, self.config)\n\n # Save metadata\n metadata = load_metadata(self.component.name, self.config)\n if metadata is None:\n metadata = init_metadata_from_build(self.build)\n\n metadata.recursive_hash = self.component.recursive_hash\n metadata.source = source\n metadata.manually_installed = metadata.manually_installed or set_manually_installed\n metadata.install_time = install_time\n metadata.binary_archive_path = self.binary_archive_relative_path\n\n save_metadata(metadata, self.config)\n\n def _prepare_tmproot(self):\n script = dedent(\n \"\"\"\n rm -rf \"$TMP_ROOT\"\n mkdir -p \"$TMP_ROOT\"\n mkdir -p \"${TMP_ROOT}${ORCHESTRA_ROOT}/include\"\n mkdir -p \"${TMP_ROOT}${ORCHESTRA_ROOT}/lib64\"{,/include,/pkgconfig}\n test -e \"${TMP_ROOT}${ORCHESTRA_ROOT}/lib\" || ln -s lib64 \"${TMP_ROOT}${ORCHESTRA_ROOT}/lib\"\n test -L \"${TMP_ROOT}${ORCHESTRA_ROOT}/lib\"\n mkdir -p \"${TMP_ROOT}${ORCHESTRA_ROOT}/bin\"\n mkdir -p \"${TMP_ROOT}${ORCHESTRA_ROOT}/usr/\"{lib,include}\n mkdir -p \"${TMP_ROOT}${ORCHESTRA_ROOT}/share/\"{info,doc,man,orchestra}\n touch \"${TMP_ROOT}${ORCHESTRA_ROOT}/share/info/dir\"\n mkdir -p \"${TMP_ROOT}${ORCHESTRA_ROOT}/libexec\"\n \"\"\"\n )\n self._run_internal_script(script)\n\n def _install_from_binary_archive(self):\n # TODO: handle nonexistent binary archives\n logger.debug(\"Fetching binary archive\")\n self._fetch_binary_archive()\n logger.debug(\"Extracting binary archive\")\n self._extract_binary_archive()\n\n logger.debug(\"Removing conflicting files\")\n self._remove_conflicting_files()\n\n def _fetch_binary_archive(self):\n binary_archive_path = self.locate_binary_archive()\n assert binary_archive_path is not None\n binary_archive_path = pathlib.Path(binary_archive_path)\n binary_archive_root = get_worktree_root(binary_archive_path)\n binary_archive_relative_path = binary_archive_path.relative_to(binary_archive_root)\n failures = 0\n while True:\n try:\n lfs.fetch(binary_archive_root, include=[binary_archive_relative_path])\n break\n except InternalSubprocessException as e:\n failures += 1\n if failures >= self.config.max_lfs_retries:\n raise e\n\n def _extract_binary_archive(self):\n if not self.binary_archive_exists():\n raise UserException(\"Binary archive not found!\")\n\n archive_filepath = self.locate_binary_archive()\n script = dedent(\n f\"\"\"\n mkdir -p \"$TMP_ROOT$ORCHESTRA_ROOT\"\n cd \"$TMP_ROOT$ORCHESTRA_ROOT\"\n tar xaf \"{archive_filepath}\"\n \"\"\"\n )\n self._run_internal_script(script)\n\n def _implicit_dependencies(self):\n if self.allow_binary_archive and self.binary_archive_exists() or not self.allow_build:\n return set()\n else:\n return {self.build.configure}\n\n def _implicit_dependencies_for_hash(self):\n return {self.build.configure}\n\n def _build_and_install(self):\n env = self.environment\n env[\"RUN_TESTS\"] = \"1\" if self.run_tests else \"0\"\n\n logger.debug(\"Executing install script\")\n run_user_script(self.script, environment=env)\n\n logger.debug(\"Removing conflicting files\")\n self._remove_conflicting_files()\n\n if self.build.component.skip_post_install:\n logger.debug(\"Skipping post install\")\n else:\n self._post_install()\n\n def _post_install(self):\n logger.debug(\"Dropping absolute paths from pkg-config\")\n self._drop_absolute_pkgconfig_paths()\n\n logger.debug(\"Purging libtools' files\")\n self._purge_libtools_files()\n\n 
# TODO: maybe this should be put into the configuration and not in orchestra itself\n logger.debug(\"Converting hardlinks to symbolic\")\n self._hard_to_symbolic()\n\n # TODO: maybe this should be put into the configuration and not in orchestra itself\n logger.debug(\"Fixing RPATHs\")\n self._fix_rpath()\n\n # TODO: this should be put into the configuration and not in orchestra itself\n logger.debug(\"Replacing NDEBUG preprocessor statements\")\n self._replace_ndebug(self.build.ndebug)\n\n # TODO: this should be put into the configuration and not in orchestra itself\n logger.debug(\"Replacing ASAN preprocessor statements\")\n self._replace_asan(self.build.asan)\n\n if self.build.component.license:\n logger.debug(\"Copying license file\")\n source = self.build.component.license\n destination = installed_component_license_path(self.build.component.name, self.config)\n script = dedent(\n f\"\"\"\n DESTINATION_DIR=\"$TMP_ROOT$(dirname \"{destination}\")\"\n mkdir -p \"$DESTINATION_DIR\"\n for DIR in \"$BUILD_DIR\" \"$SOURCE_DIR\"; do\n if test -e \"$DIR/{source}\"; then\n cp \"$DIR/{source}\" \"$TMP_ROOT/{destination}\"\n exit 0\n fi\n done\n echo \"Couldn't find {source}\"\n exit 1\n \"\"\"\n )\n self._run_internal_script(script)\n\n def _remove_conflicting_files(self):\n script = dedent(\n \"\"\"\n if test -d \"$TMP_ROOT/$ORCHESTRA_ROOT/share/info\"; then\n rm -rf \"$TMP_ROOT/$ORCHESTRA_ROOT/share/info\";\n fi\n if test -d \"$TMP_ROOT/$ORCHESTRA_ROOT/share/locale\"; then\n rm -rf \"$TMP_ROOT/$ORCHESTRA_ROOT/share/locale\";\n fi\n \"\"\"\n )\n self._run_internal_script(script)\n\n def _drop_absolute_pkgconfig_paths(self):\n script = dedent(\n \"\"\"\n cd \"${TMP_ROOT}${ORCHESTRA_ROOT}\"\n if [ -e lib/pkgconfig ]; then\n find lib/pkgconfig \\\\\n -name \"*.pc\" \\\\\n -exec sed -i 's|/*'\"$ORCHESTRA_ROOT\"'/*|${pcfiledir}/../..|g' {} ';'\n fi\n \"\"\"\n )\n self._run_internal_script(script)\n\n def _purge_libtools_files(self):\n script = dedent(\n \"\"\"\n find \"${TMP_ROOT}${ORCHESTRA_ROOT}\" -name \"*.la\" -type f -delete\n \"\"\"\n )\n self._run_internal_script(script)\n\n def _hard_to_symbolic(self):\n duplicates = defaultdict(list)\n for root, dirnames, filenames in os.walk(f'{self.environment[\"TMP_ROOT\"]}{self.environment[\"ORCHESTRA_ROOT\"]}'):\n for path in filenames:\n path = os.path.join(root, path)\n info = os.lstat(path)\n inode = info.st_ino\n if inode == 0 or info.st_nlink < 2 or not stat.S_ISREG(info.st_mode):\n continue\n\n duplicates[inode].append(path)\n\n for _, equivalent in duplicates.items():\n base = equivalent.pop()\n for alternative in equivalent:\n os.unlink(alternative)\n os.symlink(os.path.relpath(base, os.path.dirname(alternative)), alternative)\n\n def _fix_rpath(self):\n replace_dynstr = os.path.join(os.path.dirname(__file__), \"..\", \"support\", \"elf-replace-dynstr.py\")\n fix_rpath_script = dedent(\n f\"\"\"\n cd \"$TMP_ROOT$ORCHESTRA_ROOT\"\n # Fix rpath\n find . 
-type f -executable | while read EXECUTABLE; do\n if head -c 4 \"$EXECUTABLE\" | grep '^.ELF' > /dev/null &&\n file \"$EXECUTABLE\" | grep x86-64 | grep -E '(shared|dynamic)' > /dev/null;\n then\n REPLACE='$'ORIGIN/$(realpath --relative-to=\"$(dirname \"$EXECUTABLE\")\" \".\")\n echo \"Setting rpath of $EXECUTABLE to $REPLACE\"\n \"{replace_dynstr}\" \"$EXECUTABLE\" \"$RPATH_PLACEHOLDER\" \"$REPLACE\" /\n \"{replace_dynstr}\" \"$EXECUTABLE\" \"$ORCHESTRA_ROOT\" \"$REPLACE\" /\n fi\n done\n \"\"\"\n )\n self._run_internal_script(fix_rpath_script)\n\n def _replace_ndebug(self, disable_debugging):\n debug, ndebug = (\"0\", \"1\") if disable_debugging else (\"1\", \"0\")\n patch_ndebug_script = dedent(\n rf\"\"\"\n cd \"$TMP_ROOT$ORCHESTRA_ROOT\"\n find include/ -name \"*.h\" \\\n -exec \\\n sed -i \\\n -e 's|^\\s*#\\s*ifndef\\s\\+NDEBUG|#if {debug}|' \\\n -e 's|^\\s*#\\s*ifdef\\s\\+NDEBUG|#if {ndebug}|' \\\n -e 's|^\\(\\s*#\\s*if\\s\\+.*\\)!defined(NDEBUG)|\\1{debug}|' \\\n -e 's|^\\(\\s*#\\s*if\\s\\+.*\\)defined(NDEBUG)|\\1{ndebug}|' \\\n {{}} ';'\n \"\"\"\n )\n self._run_internal_script(patch_ndebug_script)\n\n def _replace_asan(self, asan_enabled):\n replace_with = \"1\" if asan_enabled else \"0\"\n # fmt: off\n patch_asan_script = dedent(rf\"\"\"\n cd \"$TMP_ROOT$ORCHESTRA_ROOT\"\n find include/ -name \"*.h\" \\\n -exec \\\n sed -i \\\n -e 's|__has_feature\\(address_sanitizer\\)|{replace_with}|' \\\n -e 's|defined\\(__SANITIZE_ADDRESS__\\)|{replace_with}|' \\\n {{}} ';'\n \"\"\")\n # fmt: on\n self._run_internal_script(patch_asan_script)\n\n def _merge(self):\n copy_command = f'cp -far --reflink=auto \"$TMP_ROOT/$ORCHESTRA_ROOT/.\" \"$ORCHESTRA_ROOT\"'\n self._run_internal_script(copy_command)\n\n def _create_binary_archive(self):\n if self.binary_archive_exists():\n logger.debug(f\"Binary archive for {self.component.name} already exists, skipping its creation\")\n return\n logger.debug(\"Creating binary archive\")\n binary_archive_path = self._binary_archive_path()\n binary_archive_parent_dir = os.path.dirname(binary_archive_path)\n binary_archive_repo_name = self._binary_archive_repo_name\n absolute_binary_archive_tmp_path = os.path.join(\n self.config.binary_archives_local_paths[binary_archive_repo_name],\n f\"_tmp_{self.binary_archive_filename}\",\n )\n script = dedent(\n f\"\"\"\n mkdir -p \"$BINARY_ARCHIVES\"\n cd \"$TMP_ROOT$ORCHESTRA_ROOT\"\n rm -f '{absolute_binary_archive_tmp_path}'\n tar cvaf '{absolute_binary_archive_tmp_path}' --owner=0 --group=0 *\n mkdir -p '{binary_archive_parent_dir}'\n mv '{absolute_binary_archive_tmp_path}' '{binary_archive_path}'\n \"\"\"\n )\n self._run_internal_script(script)\n self._save_hash_material()\n\n def _save_hash_material(self):\n logger.debug(\"Saving hash material\")\n hash_material_path = Path(self._hash_material_path())\n hash_material_path.write_text(self.component.recursive_hash_material())\n\n def update_binary_archive_symlink(self):\n \"\"\"Creates/updates convenience symlinks to the binary archives.\n Symlinks named <component_branch>_<orchestra_config_branch>.tar.xz point to the binary archives built for the\n corresponding component and orchestra branches.\n Example: fix-something_master.tar.xz -> abcdef_fedcba.tar.xz would be created if the binary archive\n for component branch fix-something with orchestra configuration on the `master` branch is available.\n \"\"\"\n logger.debug(\"Updating binary archive symlink\")\n\n binary_archive_repo_name = self._binary_archive_repo_name\n if binary_archive_repo_name is None:\n logger.warning(\"No binary archive configured\")\n 
return\n\n try:\n orchestra_config_branch = self._get_script_output('git -C \"$ORCHESTRA_DOTDIR\" rev-parse --abbrev-ref HEAD')\n orchestra_config_branch = orchestra_config_branch.strip().replace(\"/\", \"-\")\n except InternalCommandException:\n logger.warning(\n \"Orchestra configuration is not inside a git repository. Defaulting to `master` as branch name\"\n )\n orchestra_config_branch = \"master\"\n\n archive_dir_path = os.path.dirname(self._binary_archive_path())\n\n def create_symlink(branch, commit):\n branch = branch.replace(\"/\", \"-\")\n target_name = self._binary_archive_filename(commit, self.component.recursive_hash)\n target_absolute_path = os.path.join(archive_dir_path, target_name)\n symlink_absolute_path = os.path.join(archive_dir_path, f\"{branch}_{orchestra_config_branch}.tar.xz\")\n if os.path.exists(target_absolute_path):\n if os.path.exists(symlink_absolute_path):\n os.unlink(symlink_absolute_path)\n os.symlink(target_name, symlink_absolute_path)\n\n if self.component.clone:\n for branch, commit in self.component.clone.heads().items():\n create_symlink(branch, commit)\n else:\n create_symlink(\"none\", \"none\")\n\n @staticmethod\n def _index_directory(root_dir_path, relative_to=None):\n paths = []\n for current_dir_path, child_dir_names, child_file_names in os.walk(root_dir_path):\n for child_filename in child_file_names:\n child_file_path = os.path.join(current_dir_path, child_filename)\n if relative_to:\n child_file_path = os.path.relpath(child_file_path, relative_to)\n paths.append(child_file_path)\n\n for child_dir in child_dir_names:\n child_dir_path = os.path.join(current_dir_path, child_dir)\n if os.path.islink(child_dir_path):\n if relative_to:\n child_dir_path = os.path.relpath(child_dir_path, relative_to)\n paths.append(child_dir_path)\n\n return paths\n\n def _cleanup_tmproot(self):\n self._run_internal_script('rm -rf \"$TMP_ROOT\"')\n\n @property\n def _binary_archive_repo_name(self):\n \"\"\"Returns the name of the binary archives repository where new archives should be created\"\"\"\n if self.component.binary_archives:\n binary_archive_repo_name = self.component.binary_archives\n if binary_archive_repo_name not in self.config.binary_archives_remotes.keys():\n raise UserException(\n f\"Component {self.component.name} wants to push to an unknown binary-archives \"\n f\"repository ({binary_archive_repo_name})\"\n )\n return binary_archive_repo_name\n elif self.config.binary_archives_remotes:\n return list(self.config.binary_archives_remotes.keys())[0]\n else:\n return None\n\n @property\n def binary_archive_relative_path(self) -> str:\n \"\"\"Returns the path to the binary archive, relative to the binary archive repository\"\"\"\n return os.path.join(\n self.binary_archive_relative_dir,\n self.binary_archive_filename,\n )\n\n @property\n def hash_material_relative_path(self) -> str:\n \"\"\"Returns the path to the hash material, relative to the binary archive repository\"\"\"\n return os.path.join(\n self.binary_archive_relative_dir,\n self.hash_material_filename,\n )\n\n @property\n def binary_archive_filename(self) -> str:\n \"\"\"Returns the filename of the binary archive for the target build.\n *Warning*: the filename is the same for all the builds of the same component. 
Use `binary_archive_relative_path`\n to get a path which is unique to a single build\n \"\"\"\n component_commit = self.component.commit() or \"none\"\n return self._binary_archive_filename(component_commit, self.component.recursive_hash)\n\n @property\n def binary_archive_relative_dir(self) -> str:\n \"\"\"Returns the path to the directory containing the binary archives for the associated build, relative to the\n binary archive repository\"\"\"\n return os.path.join(\n self.architecture,\n self.component.name,\n self.build.name,\n )\n\n @property\n def hash_material_filename(self) -> str:\n \"\"\"Returns the filename of the hash material for the target build.\n *Warning*: the filename is the same for all the builds of the same component. Use `hash_material_relative_path`\n to get a path which is unique to a single build\n \"\"\"\n component_commit = self.component.commit() or \"none\"\n return self._hash_material_filename(component_commit, self.component.recursive_hash)\n\n @staticmethod\n def _binary_archive_filename(component_commit, component_recursive_hash) -> str:\n return f\"{component_commit}_{component_recursive_hash}.tar.xz\"\n\n @staticmethod\n def _hash_material_filename(component_commit, component_recursive_hash) -> str:\n return f\"{component_commit}_{component_recursive_hash}.hash-material.yml\"\n\n def _binary_archive_path(self) -> str:\n \"\"\"Returns the absolute path where the binary archive should be created.\n Note: Use `locate_binary_archive` to locate the binary archive to extract when installing.\n \"\"\"\n return os.path.join(\n self.config.binary_archives_local_paths[self._binary_archive_repo_name],\n self.binary_archive_relative_path,\n )\n\n def available_binary_archives(self):\n \"\"\"Returns all available binary archives related to this build\"\"\"\n available_binary_archives = set()\n for binary_archive_repo in self.config.binary_archives_local_paths.values():\n binary_archives_glob = os.path.join(\n binary_archive_repo, f\"{self.build.install.binary_archive_relative_dir}/*.tar.*\"\n )\n for binary_archive in glob.glob(binary_archives_glob):\n binary_archive_path = Path(binary_archive)\n if not binary_archive_path.exists() or binary_archive_path.is_symlink():\n continue\n available_binary_archives.add(binary_archive)\n return available_binary_archives\n\n def _hash_material_path(self) -> str:\n \"\"\"Returns the absolute path where the material used to compute the component hash should be created\"\"\"\n return os.path.join(\n self.config.binary_archives_local_paths[self._binary_archive_repo_name],\n self.hash_material_relative_path,\n )\n\n def locate_binary_archive(self) -> Optional[str]:\n \"\"\"Returns the absolute path to the binary archive that can be extracted to install the target build.\n *Note*: the path may be pointing to a git LFS pointer which needs to be downloaded and checked out (smudged)\"\"\"\n binary_archives_path = self.config.binary_archives_dir\n for name in self.config.binary_archives_remotes:\n relative_path_without_extension = os.path.splitext(self.binary_archive_relative_path)[0]\n extensions = [\".xz\", \".gz\", \"\"]\n for extension in extensions:\n try_path = os.path.join(binary_archives_path, name, relative_path_without_extension + extension)\n if os.path.exists(try_path):\n return try_path\n return None\n\n def binary_archive_exists(self) -> bool:\n \"\"\"Returns True if the binary archive for the target build exists (cached or downloadable)\"\"\"\n return self.locate_binary_archive() is not None\n\n @property\n def 
environment(self) -> \"OrderedDict[str, str]\":\n env = super().environment\n env[\"DESTDIR\"] = self.tmp_root\n return env\n\n @property\n def architecture(self):\n return \"linux-x86-64\"\n\n def is_satisfied(self):\n return is_installed(\n self.config,\n self.build.component.name,\n wanted_build=self.build.name,\n wanted_recursive_hash=self.build.component.recursive_hash,\n )\n\n def assert_prerequisites_are_met(self):\n super().assert_prerequisites_are_met()\n # Verify either sources or ls-remote info are available\n if self.component.clone is not None and self.component.commit() is None:\n raise UserException(f\"HEAD commit for {self.component.name} not available. Run `orc update`.\")\n\n # Verify binary archive is available\n if not self.allow_build and not self.binary_archive_exists():\n raise BinaryArchiveNotFoundException(self)\n","sub_path":"orchestra/actions/install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":24858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"125310470","text":"import trading_system.domain.domain as dom\nfrom store.models import BaseItemRule as m_BaseItemRule\n\n\nclass BaseItemRule:\n\tdef __init__(self, model=None):\n\t\tif model != None:\n\t\t\tself._model = model\n\t\t\treturn\n\n\t@property\n\tdef pk(self):\n\t\treturn self._model.pk\n\n\t@property\n\tdef id(self):\n\t\treturn self._model.pk\n\n\t@property\n\tdef type(self):\n\t\treturn self._model.type\n\n\t@property\n\tdef parameter(self):\n\t\treturn self._model.parameter\n\n\tdef check(self, amount):\n\t\tif self.type == 'MAX' and amount > int(self.parameter):\n\t\t\treturn False\n\t\telif self.type == 'MIN' and amount < int(self.parameter):\n\t\t\treturn False\n\t\treturn True\n\n\tdef update(self, item_dict):\n\t\tfor field in self._model._meta.fields:\n\t\t\tif field.attname in item_dict.keys():\n\t\t\t\tsetattr(self._model, field.attname, item_dict[field.attname])\n\n\t\ttry:\n\t\t\tself._model.save()\n\t\texcept Exception:\n\t\t\traise dom.DBFailedExceptionDomainToService(msg='DB Failed')\n\n\tdef delete(self):\n\n\t\ttry:\n\t\t\tself._model.delete()\n\t\texcept Exception:\n\t\t\traise dom.DBFailedExceptionDomainToService(msg='DB Failed')\n\n\t@staticmethod\n\tdef get_b_rule(rule_id):\n\n\t\ttry:\n\t\t\treturn BaseItemRule(model=m_BaseItemRule.objects.get(id=rule_id))\n\t\texcept Exception:\n\t\t\traise dom.DBFailedExceptionDomainToService(msg='DB Failed')\n\n\t@staticmethod\n\tdef get_item_bi_rules(item_id):\n\n\t\ttry:\n\t\t\tcir_models = m_BaseItemRule.objects.filter(item_id=item_id)\n\t\t\treturn list(map(lambda cir_model: BaseItemRule(model=cir_model), list(cir_models)))\n\t\texcept Exception:\n\t\t\traise dom.DBFailedExceptionDomainToService(msg='DB Failed')\n","sub_path":"dev/trading_system/domain/base_item_rule.py","file_name":"base_item_rule.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"443570271","text":"\"\"\"Given a rod of length n and a table of prices\np_i, i = 1, 2, ... , n, write an algorithm to find the max revenue r_n\nobtainable by cutting up the rod and selling the pieces.\n\"\"\"\n\ndef naive(values, length):\n if length <= 0:\n return 0\n r_n = -1\n for i in range(length):\n r_n = max(r_n, values[i] + naive(values, length - (i + 1)))\n return r_n\n","sub_path":"interview_prep/python/rod.py","file_name":"rod.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"372716189","text":"import csv\nfrom random import (\n sample,\n randint,\n random,\n choice\n )\nfrom string import (\n ascii_letters,\n digits\n )\n\nimport names\n\ndef create_mockup_txt(filename, size=14):\n school_ids = sample(range(1,8000), size)\n school_class = \"{c}{n:02d}\".format(\n c=choice(ascii_letters),\n n=int(choice(digits))\n )\n with open(filename, 'w') as f:\n f.write('\"Interne ID-Nummer\";\"Nachname\";\"Vorname\";\"Klasse\"\\n')\n for i in school_ids:\n f.write('{};\"{}\";\"{}\";\"{}\"\\n'.format(\n i,\n names.get_last_name(),\n names.get_first_name(),\n school_class\n )\n )\n\ncreate_mockup_txt('Test.TXT', size=10)\n","sub_path":"generate_mockup_txt.py","file_name":"generate_mockup_txt.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"598670824","text":"#\n# Emails configuration and templates used when an task order is created, CUSTOMIZE...\n#\n\nimport os\n\n\nEMAIL_TIMEOUT = 3 # seconds\nEMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'\nEMAIL_HOST = os.getenv('EMAIL_HOST', 'smtp.gmail.com')\nEMAIL_USE_TLS = os.getenv('EMAIL_USE_TLS', 'true') == 'true'\nEMAIL_PORT = os.getenv('EMAIL_PORT', 587)\nEMAIL_HOST_USER = os.getenv('EMAIL_HOST_USER', 'YOUREMAIL@gmail.com')\nEMAIL_HOST_PASSWORD = os.getenv('EMAIL_HOST_PASSWORD', 'PASS')\n\n\nTASKS_SEND_EMAILS_TO_ASSIGNED = os.getenv('TASKS_SEND_EMAILS_TO_ASSIGNED', 'false') == 'true'\nTASKS_SEND_EMAILS_TO_PARTNERS = os.getenv('TASKS_SEND_EMAILS_TO_PARTNERS', 'false') == 'true'\n\n\n# Enables the Tornado Django Coleman Viewer (it will send emails with the order URL)\n# Check: https://github.com/mrsarm/tornado-dcoleman-mtasks-viewer\nTASKS_VIEWER_ENABLED = os.getenv('TASKS_VIEWER_ENABLED', 'false') == 'true'\nTASKS_VIEWER_HASH_SALT = os.getenv('TASKS_VIEWER_HASH_SALT', '1two3') # REPLACE in production !!!\nTASKS_VIEWER_ENDPOINT = os.getenv('TASKS_VIEWER_ENDPOINT', 'http://localhost:8888/{number}?t={token}')\n\nTASKS_EMAIL_WITHOUT_URL = '''\\\nNew task #{id} created.\n\nTitle:\n{title}\n\nAssigned:\n{user}\n\nDescription:\n{description}\n\nPlease note: Do NOT reply to this email. This email is sent from an unattended mailbox.\nReplies will not be read.\n\n---\n{sign}\n'''\n\n\nTASKS_EMAIL_WITH_URL = '''\\\nNew task #{id} created.\n\nTitle:\n{title}\n\nAssigned:\n{user}\n\nDescription:\n{description}\n\nOrder URL:\n{url}\n\nPlease note: Do NOT reply to this email. This email is sent from an unattended mailbox.\nReplies will not be read.\n\n---\n{sign}\n'''","sub_path":"employee_task/settings_emails.py","file_name":"settings_emails.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"2189641","text":"import threading\nfrom flask import jsonify, request, json, abort\nfrom OpinionOrderClassify.model_predict import opinion_order_clasify\nfrom app import log,app\nfrom api.opinion_classify_fun import transcode,save_opinion_result,validate_opinion_classify\nfrom api.opinion_classify_fun import PRE_RESULY_CODE_TEST,PRE_RESULT_TEST\nimport os\nfrom TASK_AND_CODE_GENERATE.TaskAllocation import task_master,logger\n\n\n@app.route('/opinionclassify', methods=['post'])\ndef predition():\n # 验证数据\n data = validate_opinion_classify(request)\n log.info(data)\n data.pop('tk')#不存储tk\n try:\n model_directory_path = os.path.join('.','OpinionOrderClassify','model_save')\n pre_result = opinion_order_clasify(data,model_directory_path=model_directory_path)\n log.info('Success to run model')\n except:\n log.error(\"Fail to run model:\\n\" , exc_info=True)\n pre_result = PRE_RESULT_TEST\n try:\n pre_result_code = transcode(pre_result) # 将中文结果编码成英文输出\n # thrd_savedata = threading.Thread(target=save_opinion_result, kwargs={'req': data, 'resp': pre_result})\n # thrd_savedata.start()\n save_opinion_result(req=data,resp=pre_result)\n log.info('Transcoding success')\n except:\n log.error(\"Transcoding fail:\\n\", exc_info=True)\n pre_result_code= PRE_RESULY_CODE_TEST\n resp = jsonify(pre_result_code)\n log.info(pre_result_code)\n return resp\n\n@app.route('/infoextract', methods=['post'])\ndef extract():\n data = request.get_json()\n if not isinstance(data,dict):\n data = json.loads(data) # 转成字典\n logger.info(data)\n result = task_master(data)\n resp = jsonify(result)\n logger.info(result)\n\n return resp","sub_path":"api/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"304966293","text":"from file_importer import FileImporter\nfrom intcode_computer import IntcodeComputer\n\ndef get_input(st):\n return list(map(ord, list(st))) + [10]\n\nprog = list(map(int, FileImporter.get_input(\"/../input/21.txt\").split(\",\")))\ncode = \"\"\"\\\nOR A J\nNOT B T\nAND T J\nNOT C T\nAND T J\nAND D J\nNOT A T\nOR T J\nNOT C T\nAND A T\nAND B T\nAND D T\nOR T J\nRUN\"\"\"\n\ncomputer = IntcodeComputer(prog, get_input(code))\n\nwhile not computer.halted:\n out = computer.get_output()\n if out is not None:\n if out < 0x110000:\n print(chr(out), end=\"\")\n else: print(out)","sub_path":"src/_21b.py","file_name":"_21b.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"38763664","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 9 17:53:20 2017\n\n@author: Master Chief\n\"\"\"\n\nimport scipy\nimport scipy.optimize\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as font_manager\n\nfont_path = 'C:\\Windows\\Fonts\\consola.ttf'\nfont_prop = font_manager.FontProperties(fname=font_path, size=10)\n\ntitle_font = {'fontname':'Arial', 'size':'16', 'color':'black', 'weight':'normal',\n 'verticalalignment':'bottom'}\n\nfont = {'fontname':'Comic Sans MS','fontsize':14}\n\ndef f(x):\n y = x + 2*scipy.cos(x)\n return y\n \nraiz= scipy.optimize.newton(f, 2)\n\nprint(raiz)\n\nx = np.linspace(-5,5)\n\nplt.plot(raiz, 0,'bo', label='Raíz calculada')\nplt.plot(x,f(x), label='$x + \\cos(x)$')\nplt.axhline(y=0, lw=0.7, ls='dashed')\nplt.axvline(x=0, lw=0.7, ls='dashed')\nplt.title('Función a la que se desea calcular la raíz',**font_prop)\nplt.legend(loc=1)\nplt.show()","sub_path":"Tema 2 - Operaciones matematicas basicas/Codigos python/03 Raices/raices_python_scipy_03.py","file_name":"raices_python_scipy_03.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"233151612","text":"# -*- coding: utf-8 -*-\n# Copyright (c) 2021, Gopi and contributors\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nfrom frappe.website.website_generator import WebsiteGenerator\nfrom frappe.utils import flt, comma_or, nowdate, getdate\n\nclass Payments(WebsiteGenerator):\n\tdef validate(self):\n\t\tfor d in self.get('ref'):\n\t\t\tout =flt(d.total)- flt(self.paid_amount)\n\t\t\td.outstanding = out\n\t\t\td.allocated = self.paid_amount\n\ndef on_submit(self):\n\n\tfor d in self.get(\"ref\"):\n\t\tre = frappe.get_value(\"Orderss\",d.namee,\"paid_amount\")\n\t\tfrappe.db.set_value(\"Orderss\",d.namee,{\n\t\t\"paid_amount\":flt(re)+flt(self.paid_amount)\n\t\t})\n\n\t\t\n\t\t\n\t\tfrappe.db.set_value(\"Orderss\",d.namee,{\n\t\t\"outstanding\":d.outstanding\n\t\t})\n\t\t\n\n\t\tif flt(self.paid_amount)100) :\n continue\n y=g[y]\n if(dist[y]==-1 or dist[y]>dist[x]+1) :\n dist[y]=dist[x]+1\n q.append(y)\n\nprint(dist[-1])","sub_path":"algorithm_202105/baek16928.py","file_name":"baek16928.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"195878652","text":"#!/usr/bin/env python3\n\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-i\",\"--islands\", help=\"Nombre del fichero con datos de coordenadas de islas CpG\")\nparser.add_argument(\"-p\",\"--promoters\", help=\"Nombre del fichero con coordenadas de promotores\")\nargs = parser.parse_args()\n\n\nislands=list(open(args.islands))\npromoters=list(open(args.promoters))\nout=open('%s.promoters' % (args.islands),'wt')\n\nfor i in range(0, len(islands)):\n\tstart_island = int(islands[i].split('\\t')[1])\n\tend_island = int(islands[i].split('\\t')[2])\n\tfor k in range(0, len(promoters)):\n\t\tstart_promoter=int(promoters[k].split('\\t')[1])\n\t\tend_promoter=int(promoters[k].split('\\t')[2])\n\t\tif start_island in range(start_promoter, end_promoter) or end_island in range(start_promoter, end_promoter):\n\t\t\tout.write('%s\\t%s\\t%s\\t%s\\t%s\\n' % (islands[i].strip(), promoters[k].split('\\t')[3].split('#')[0], promoters[k].split('\\t')[1], promoters[k].split('\\t')[2], promoters[k].split('\\t')[4]))\n\n\n\n","sub_path":"IslandsinPromoters.py","file_name":"IslandsinPromoters.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"224647199","text":"from xml.etree.ElementTree import parse\nfrom hyper.utils.process_scan import scan_all\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.core.checks.messages import Critical\nfrom django.urls import reverse\nfrom django.views.generic import TemplateView, View\nfrom .forms import ScanForm, RenameScanForm, CreateAssetGroup, AddAssetForm, DeleteAssetForm, AssetScanForm\nfrom hyper.utils.general import *\nfrom .tasks import go_to_sleep\nfrom django.shortcuts import redirect\nfrom django.views.static import serve\nimport os\nfrom django.http import HttpResponse, Http404\nfrom wsgiref.util import FileWrapper\n\n\nUser = get_user_model()\nclass LoginRequiredView(LoginRequiredMixin, TemplateView):\n pass\n\n\nclass DashboardMainView(LoginRequiredMixin, TemplateView):\n template_name = \"dashboard/main.html\"\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n scans = select_scans(self.request.user.id)\n context['scans'] = scans\n return context\n\n\nclass ScanDetailsView(LoginRequiredMixin, TemplateView):\n template_name = \"dashboard/scan/scan_details.html\"\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['slug'] = self.kwargs['slug']\n selected_scan = select_slug(self.kwargs['slug'][5:],self.request.user.id)\n context['scan'] = selected_scan\n if len(selected_scan) >= 1:\n context['data'] = get_scan_data(self.kwargs['slug'][5:], self.request.user.id)\n return context\nclass CveDetailsView(LoginRequiredMixin, TemplateView):\n template_name = \"dashboard/scan/cve_details.html\"\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n scan_id = self.kwargs['slug']\n cve_id = self.kwargs['cveid']\n context['cve'] = get_cve(scan_id[5:],cve_id,self.request.user.id)\n return context\n\nclass ScanView(LoginRequiredMixin, TemplateView):\n template_name = \"dashboard/scan/scan.html\"\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['scan_form'] = ScanForm(self.request.user.id, initial={'ports':'top'})\n context['is_asset_group'] = False\n return context\n def post(self, request, **kwargs):\n context = self.get_context_data(**kwargs)\n form = ScanForm(self.request.user.id, request.POST)\n print(form.errors)\n if form.is_valid():\n context['address'] = parse_scan_addresses(form.cleaned_data['address'])[1]\n context['scan_name'] = form.cleaned_data['scan_name']\n all_scan_ips = parse_scan_addresses(form.cleaned_data['address'])[0]\n for group_name in form.cleaned_data['asset_groups']:\n gid = get_group_id(self.request.user.id, group_name)\n group_ips = get_assets(self.request.user.id, gid)\n for ip in group_ips:\n all_scan_ips.append(ip)\n \"\"\"\n Check to see if there are old scan results that have the same addresses\n and delete them, there could be other possible solutions to this\n as this will remove results from older scans\n \"\"\"\n delete_old_addresses(all_scan_ips)\n slug = add_scan(request.user.id, form.cleaned_data['scan_name'],all_scan_ips, generate_scan_display_address(all_scan_ips))\n\n \n \n \"\"\"\n Im not sure how to calculate the percent of the work done so for now we\n print a message after they submit the scan saying its running in the background\n \"\"\"\n #context['task_id'] = convert_scan_to_model(form.cleaned_data['name'], slug[5:])\n scan_all(all_scan_ips,slug[5:],form.cleaned_data['ports'], 
form.cleaned_data['custom_range'], self.request.user.id)\n context['scan_status'] = \"scanning\"\n \n\n return self.render_to_response(context)\n\nclass ScanManageView(LoginRequiredMixin, TemplateView):\n template_name = \"dashboard/scan/scan_manage.html\"\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['slug'] = self.kwargs['slug']\n context['rename_form'] = RenameScanForm()\n return context\n\n def post(self, request, **kwargs):\n context = self.get_context_data(**kwargs)\n form = RenameScanForm(request.POST)\n if form.is_valid():\n context['new_name'] = form.cleaned_data['name']\n scan = select_slug(self.kwargs['slug'][5:], request.user.id)\n scan.update(name=form.cleaned_data['name'])\n \n return redirect(f\"/scan/{self.kwargs['slug']}\")\n if request.POST.get('delete'):\n clear_ports(request.user.id, self.kwargs['slug'][5:])\n clear_scans(request.user.id, self.kwargs['slug'][5:])\n return redirect('/')\n return self.render_to_response(context)\n\n\nclass AddressDashboardView(LoginRequiredMixin, TemplateView):\n template_name = \"dashboard/address/address_dashboard.html\"\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n ip_list = get_ips(user=self.request.user.id)\n context['ip_list'] = []\n for ip in ip_list:\n context['ip_list'].append([ip, ip.replace('.', '-')])\n return context\nclass AddressDetailsView(LoginRequiredMixin, TemplateView):\n template_name = \"dashboard/address/address_details.html\"\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n slug = self.kwargs['slug'].replace('-', '.')\n context['data'] = get_address_data(self.request.user.id, slug)\n return context\nclass AddressCveDetailsView(LoginRequiredMixin, TemplateView):\n template_name = \"dashboard/scan/cve_details.html\"\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n address = self.kwargs['slug'].replace('-', '.')\n cve_id = self.kwargs['cveid']\n context['cve'] = get_address_cve(address, self.request.user.id, cve_id)\n return context\nclass AssetGroupDashboardView(LoginRequiredMixin, TemplateView):\n template_name = 'dashboard/assets/assets.html'\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['data'] = []\n groups = get_asset_groups(user=self.request.user.id)\n for group in groups:\n context['data'].append([group, get_assets(self.request.user.id, group)])\n return context\n\n\nclass CreateAssetGroupView(LoginRequiredMixin, TemplateView):\n template_name =\"dashboard/assets/create.html\"\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['create_form'] = CreateAssetGroup(self.request.user.id)\n return context\n def post(self, request, **kwargs):\n context = self.get_context_data(**kwargs)\n form = CreateAssetGroup(self.request.user.id, request.POST)\n if form.is_valid():\n gid = create_asset_group(request.user.id, form.cleaned_data['name'])\n addresses = form.cleaned_data['Add_Addresses']\n for ip in addresses:\n add_asset_to_group(ip, self.request.user.id, gid)\n return redirect('/assets/')\n return self.render_to_response(context)\n\nclass ManageAssetGroupView(LoginRequiredMixin, TemplateView):\n template_name = \"dashboard/assets/manage.html\"\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n #TODO give rename its own form\n context['change_name_form'] = RenameScanForm()\n context['add_asset_form'] = 
AddAssetForm(self.request.user.id)\n context['del_asset_form'] = DeleteAssetForm(self.request.user.id, self.kwargs['groupid'])\n context['gid'] = self.kwargs['groupid']\n return context\n def post(self, request, **kwargs):\n context = self.get_context_data(**kwargs)\n if request.POST.get('change_name'):\n form = RenameScanForm(request.POST)\n if form.is_valid():\n groupid = self.kwargs['groupid']\n change_group_name(groupid, form.cleaned_data['name'])\n return redirect(\"/assets/\")\n if request.POST.get('add'):\n form = AddAssetForm(request.user.id, request.POST)\n if form.is_valid():\n for x in form.cleaned_data['Add_Addresses']:\n if x != \"None\":\n add_asset_to_group(x, request.user.id, self.kwargs['groupid'])\n return redirect(\"/assets/\")\n if request.POST.get('delete') == \"Submit\":\n form = DeleteAssetForm(request.user.id, self.kwargs['groupid'], request.POST)\n \n if form.is_valid():\n print(form.cleaned_data)\n for x in form.cleaned_data['Remove_Addresses']:\n if x != \"None\":\n del_asset_from_group(request.user.id, self.kwargs['groupid'], x)\n return redirect(\"/assets/\")\n if request.POST.get('remove'):\n delete_asset_group(self.request.user.id, self.kwargs['groupid'])\n return redirect('/')\n return self.render_to_response(context)\n \nclass AssetGroupAddressView(LoginRequiredMixin, TemplateView):\n template_name = \"dashboard/address/address_dashboard.html\"\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n ip_list = get_assets(self.request.user.id, self.kwargs['groupid'])\n context['ip_list'] = []\n for ip in ip_list:\n context['ip_list'].append([ip, ip.replace('.', '-')])\n return context\n\nclass DashboardInfoView(LoginRequiredMixin, TemplateView):\n template_name = 'dashboard/info.html'\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n ips = get_ips(self.request.user.id)\n context['ip_num'] = len(ips)\n context['cve_nums'] = len(num_cves(self.request.user.id))\n context['critical_num'] = len(num_cves(self.request.user.id).filter(score__gte=9))\n context['high_num'] = len(num_cves(self.request.user.id).filter(score__gte=7).filter(score__lt=9))\n context['medium_num'] = len(num_cves(self.request.user.id).filter(score__gte=4).filter(score__lt=7))\n context['top_ten'] = get_top_ten(self.request.user.id)\n context['data'] = num_cves(self.request.user.id)\n return context\n\nclass DashboardScoreView(LoginRequiredMixin, TemplateView):\n template_name = \"dashboard/scan/scan_details.html\"\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['scan'] = 1\n level = self.kwargs['score']\n if level == 'critical':\n context['level'] = 'Critical Level Vulnerabilities'\n context['data'] = num_cves(self.request.user.id).filter(score__gte=9)\n elif level == 'high':\n context['level'] = 'High Level Vulnerabilities'\n context['data'] = num_cves(self.request.user.id).filter(score__gte=7).filter(score__lt=9)\n elif level == 'medium':\n context['level'] = 'Medium Level Vulnerabilities'\n context['data'] = num_cves(self.request.user.id).filter(score__gte=4).filter(score__lt=7)\n return context\n\nclass AssetScanView(LoginRequiredMixin, TemplateView):\n template_name = \"dashboard/scan/scan.html\"\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['scan_form'] = AssetScanForm(initial={'ports':'top'})\n context['is_asset_group'] = True\n context['groupid'] = self.kwargs['groupid']\n return context\n def post(self, request, 
**kwargs):\n context = self.get_context_data(**kwargs)\n form = AssetScanForm(request.POST)\n \n if form.is_valid():\n #context['name'] = form.cleaned_data['name']\n context['scan_name'] = form.cleaned_data['scan_name']\n \"\"\"\n Check to see if there are old scan results that have the same addresses\n and delete them, there could be other possible solutions to this\n as this will remove results from older scans\n \"\"\"\n addresses = get_assets(self.request.user.id, self.kwargs['groupid'])\n delete_old_addresses(addresses)\n slug = add_scan(request.user.id, form.cleaned_data['scan_name'],addresses)\n\n \"\"\"\n Im not sure how to calculate the percent of the work done so for now we\n print a message after they submit the scan saying its running in the background\n \"\"\"\n #context['task_id'] = convert_scan_to_model(form.cleaned_data['name'], slug[5:])\n \n scan_all(addresses,slug[5:],form.cleaned_data['ports'], form.cleaned_data['custom_range'], self.request.user.id)\n \n\n return self.render_to_response(context)\n\nclass TempDownloadView(LoginRequiredMixin, View):\n template_name = 'dashboard/download/download.html'\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['download_type'] = self.kwargs['downloadtype']\n file_path = '/tmp/test.csv'\n \n \n return context\n\nclass FileDownloadView(View):\n # Set FILE_STORAGE_PATH value in settings.py\n \n # Here set the name of the file with extension\n file_name = '/tmp/report.csv'\n # Set the content type value\n content_type_value = 'text/csv'\n\n def get(self, request, downloadtype, downloadvalue):\n if downloadtype == 'address':\n data = num_cves(request.user.id).filter(ip=downloadvalue.replace('-', '.'))\n write_data_to_csv([data])\n elif downloadtype == 'scan':\n data = get_scan_data(downloadvalue[5:], request.user.id)\n write_data_to_csv([data])\n elif downloadtype == 'group':\n members = get_assets(self.request.user.id, downloadvalue)\n data = get_cve_for_multiple_address(request.user.id, members)\n write_data_to_csv(data)\n\n file_path = self.file_name\n if os.path.exists(file_path):\n with open(file_path, 'rb') as fh:\n response = HttpResponse(\n fh.read(),\n content_type=\"text/csv\"\n )\n response['Content-Disposition'] = 'attachment; filename=' + os.path.basename(file_path)\n return response\n else:\n raise Http404\n\nclass DownloadView(FileDownloadView):\n file_name = '/tmp/report.csv'\n\ndashboard_info_view = DashboardInfoView.as_view()\ndashboard_manage_scan_view = ScanManageView.as_view()\ndashboard_scan_view = ScanView.as_view()\ndashboard_cve_details = CveDetailsView.as_view()\ndashboard_main_view = DashboardMainView.as_view()\ndashboard_scan_details = ScanDetailsView.as_view()\ndashboard_address_view = AddressDashboardView.as_view()\naddress_details_view = AddressDetailsView.as_view()\naddress_cve_details_view = AddressCveDetailsView.as_view()\nasset_group_view = AssetGroupDashboardView.as_view()\nasset_group_create_view = CreateAssetGroupView.as_view()\nasset_group_manage_view = ManageAssetGroupView.as_view()\nasset_group_address_view = AssetGroupAddressView.as_view()\ndashboard_score_view = DashboardScoreView.as_view()\nasset_group_scan_view = AssetScanView.as_view()\ndownload_view = FileDownloadView.as_view()","sub_path":"hyper/dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":15668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"89208054","text":"def validate_email(email: str) -> bool:\n from re import search\n # regex = r\"^ (?=[A-Z0-9][A-Z0-9@._ % +-]{5, 253}$)[A-Z0-9._ % +-]{1, 64}@(?: (?=[A-Z0-9-]{1, 63}\\.)[A-Z0-9]+(?: -[A-Z0-9]+) *\\.){1, 8}[A-Z]{2, 63}$\"\n regex = r\"[^@]+@[^@]+\\.[^@]+\"\n return True if search(regex, email) else False\n\n\ndef validate_text(text: str, sizemin=0, sizemax=80, alnum=False) -> bool:\n return True if type(text) == str and sizemin <= len(text) <= sizemax and (not alnum or text.isalnum()) else False\n\n\ndef check_valid(type: str, val) -> bool:\n return validators[type](val)\n\n\nvalidators = {\n \"Name\": validate_text,\n \"First_name\": validate_text,\n \"Family_name\": validate_text,\n \"Email_id\": validate_email,\n \"Password\": lambda val: validate_text(val, sizemin=8, sizemax=15, alnum=True),\n \"Mobile\": lambda x: len(x) == 10,\n \"Sex\": lambda x: x in {'M', 'F', 'O'},\n \"Address\": validate_text,\n \"Degree\": validate_text,\n \"Roll_no\": lambda x: type(x) == int and len(str(x)) == 10,\n \"Batch\": lambda val: validate_text(val, sizemax=10),\n \"cgpa\": lambda x: 0 <= x <= 10,\n \"Team_name\": validate_text,\n \"Course_name\": lambda x: validate_text(x, sizemax=30),\n \"Details\": lambda x: validate_text(x, sizemax=100),\n \"Textbook\": lambda x: validate_text(x, sizemax=30),\n \"Channel_name\": validate_text,\n \"Time\": lambda x: True,\n \"Quiz_no\": lambda x: type(x) == int and x < 1e9,\n \"No_of_qn\": lambda x: type(x) == int and x < 1e9,\n \"Qn_text\": lambda x: validate_text(x, sizemax=1000),\n \"Q_id\": lambda x: type(x) == int and x < 1e9,\n \"Answer\": lambda x: validate_text(x, sizemax=1000),\n \"Marks\": lambda x: 0 <= x <= 10,\n \"Percentage_marks\": lambda x: 0 <= x <= 100,\n}\n\nvalidators[\"SRoll_no\"] = validators[\"Roll_no\"]\nvalidators[\"Sup_id\"] = validators[\"Email_id\"]\nvalidators[\"Org_id\"] = validators[\"Email_id\"]\nvalidators[\"Member_id\"] = validators[\"Email_id\"]\nvalidators[\"Admin_id\"] = validators[\"Email_id\"]\nvalidators[\"Inst_Email_id\"] = validators[\"Email_id\"]\nvalidators[\"Start_time\"] = validators[\"Time\"]\nvalidators[\"End_time\"] = validators[\"Time\"]\n","sub_path":"code/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"491234608","text":"from plugin import load\nimport pickle\n\nclass Agent:\n def __init__(self):\n pass\n \n def echo(self, sock):\n while 1:\n data = sock.recv(1024)\n if data.strip() == 'break' : break\n sock.send(data)\n sock.close()\n\n def agent(self, sock):\n while 1:\n data = sock.recv(1024)\n if data.strip() == 'break' : break\n sock.send(\"agent: \" + data)\n sock.close()\n\n def load(self, sock):\n sock.send (pickle.dumps (load.get_data(), 2))\n sock.close()\n \n\n\n","sub_path":"old/connect/old/server/chandler.py","file_name":"chandler.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"116102505","text":"import sys\nsys.path.append(\"..\")\n\nfrom __main__ import settings, database\nimport discord, asyncio, json\nfrom discord.ext import commands\n\n# import mixer api and mixer chatbot from bots.mixer module\nfrom bots.mixer import api as api\nfrom bots.mixer import chat as mixer_chat\nfrom bots.mixer import channel as channel\n\n# initialize discord bot\nbot = commands.Bot(command_prefix = '>')\n\n@bot.command()\nasync def link(ctx):\n await ctx.send(\"https://mixer.com/\" + channel.token)\n\n@bot.command()\nasync def leaderboard(ctx):\n leaderboard = channel.get_leaderboard('sparks-weekly')\n message = \"\"\n for i in range(len(leaderboard)):\n leader = leaderboard[i]\n user_id = leader[\"userId\"]\n username = leader[\"username\"]\n sparks = leader[\"statValue\"]\n place = i + 1\n mixcord_user = database.user_from_mixer(user_id)\n if mixcord_user is not None:\n member = bot.get_user(mixcord_user[\"discord_id\"])\n username = member.mention\n else:\n username = \"**{}**\".format(username)\n message += \"{} is in {} place w/ {} sparks\\n\".format(username, place, sparks)\n await ctx.send(message)\n\n@bot.command()\nasync def uptime(ctx):\n\n # get uptime and check if online\n uptime = channel.get_uptime()\n if uptime is None:\n await ctx.send(channel.token + \" is not currently online.\")\n return\n\n # return formatted uptime\n await ctx.send(channel.token + \" has been live for: \" + str(uptime))\n\n# triggered when the discord bot is connected + authenticated\n@bot.event\nasync def on_ready():\n print('discord logged in:', bot.user)\n\n# triggered when !mixcord command is executed in discord\n@bot.command()\nasync def mixcord(ctx):\n\n # make sure discord id isn't already in database\n discord_id = ctx.author.id\n if database.user_from_discord(discord_id) is not None:\n await ctx.author.send(\"You've already linked your Mixer account via mixcord.\")\n return\n\n # get shortcode stuff from mixer\n shortcode = api.get_shortcode()\n code = shortcode[\"code\"]\n handle = shortcode[\"handle\"]\n\n # tell the user what to do to link their mixer account\n await ctx.author.send(\"Visit the following page to link your Mixer: \".format(code))\n\n # poll shortcode checking endpoint with handle until we can move on with authorization_code\n while True:\n await asyncio.sleep(10)\n response = api.check_shortcode(handle)\n status_code = response.status_code\n if status_code == 200:\n authorization_code = response.json()[\"code\"]\n break\n elif status_code == 403:\n await ctx.author.send(\"Failed: user denied permissions.\")\n return\n elif status_code == 404:\n await ctx.author.send(\"Failed: verification timed out.\")\n return\n\n tokens = api.get_token(authorization_code)\n token_data = api.check_token(tokens[\"access_token\"])\n user = api.get_user(token_data[\"sub\"])\n\n database.insert_user(user.id, user.channel.id, discord_id)\n database.update_tokens(discord_id, tokens[\"access_token\"], tokens[\"refresh_token\"], token_data[\"exp\"])\n\n await ctx.author.send(\"Your Mixer account has been linked: \" + user.username)\n await mixer_chat.send_message(\"@{} has linked their discord account: {}\".format(user.username, ctx.author))\n\nasync def send_announcement(message):\n guild = bot.get_guild(settings[\"discord\"][\"guild\"])\n channel = discord.utils.get(guild.text_channels, name = \"announcements\")\n await channel.send(\"@everyone \" + 
message)\n","sub_path":"mixcord/bots/discord.py","file_name":"discord.py","file_ext":"py","file_size_in_byte":3603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"82924131","text":"#import logger as log\nimport global_var\nimport logging\nimport os\n\n\nclass createTestReport(object):\n\n def __init__(self):\n self.html = global_var.log_path + '/Test_Results.html'\n \n def create_test_report_header(self):\n \n logpath = global_var.log_path\n logpath = os.path.expanduser(logpath)\n self.html = logpath + '/Test_Results.html'\n print('Setting up Result File at %s'%self.html)\n if not os.path.isfile(self.html):\n self.testreport = open(self.html, 'w+')\n title = '\\n\\n' \n self.testreport.write(title)\n self.testreport.flush()\n else:\n self.testreport = open(self.html, 'a+')\n \n def create_test_report_table(self):\n table_header = ''\n table_header = table_header + '
suite: %s |
' % global_var.suite_name\n table_header = table_header + ''' \n \n | Test | \n Test Case Summary | \n Result | \n Failure Reason | \n
\\n'''\n self.testreport.write(table_header)\n self.testreport.flush()\n return True\n\n def write_to_test_report(self, data):\n self.testreport.write('\\n')\n col_val = '| %s | \\n' % data['name']\n self.testreport.write(col_val)\n col_val=''\n for summary in data['summary'].split('\\n'):\n col_val += '%s ' % summary\n col_val += ' | \\n' \n self.testreport.write(col_val)\n if not data['result'] or data['result'] == 'FAIL':\n col_val = 'FAIL | \\n'\n self.testreport.write(col_val)\n else:\n col_val = 'PASS | \\n'\n self.testreport.write(col_val)\n if data['comment'] != '':\n col_val=''\n for comment in data['comment'].split('\\n'):\n col_val += '%s ' % comment\n col_val += ' | \\n' \n self.testreport.write(col_val)\n else:\n col_val = 'NA | \\n'\n self.testreport.write(col_val)\n self.testreport.write('
\\n')\n self.testreport.flush()\n\n\n def close_test_table_tag(self):\n self.testreport.write('
\\n')\n self.testreport.flush()\n return True\n\n def close_test_report(self):\n footer = '''\n '''\n self.testreport.write(footer)\n self.testreport.close()\n return True\n","sub_path":"testsetup/create_test_report.py","file_name":"create_test_report.py","file_ext":"py","file_size_in_byte":2892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"551560371","text":"import subprocess\nimport sys\nimport csv\nfrom urllib.parse import urlparse\nimport urllib.request\nimport os\n\n#database name\ncustomCsv = \"results/phantomas.csv\"\n#phantomas command\ncommand = os.environ.get(\"PHANTOMAS_PATH\", \"phantomas\")\n\ninteresting_metrics = [\n 'Domain',\n 'requests',\n 'httpsRequests',\n 'timeToFirstByte',\n 'timeToLastByte',\n 'httpTrafficCompleted',\n 'domContentLoaded',\n 'domComplete',\n 'timeBackend',\n 'timeFrontend',\n]\n\ndef scan(site):\n #choose the protocol for the connection\n protocol = \"\"\n if not(site.startswith(\"https://\") or site.startswith(\"http://\")):\n try:\n urllib.request.urlopen(\"https://\"+site, None)\n protocol=\"https://\"\n except:\n urllib.request.urlopen(\"http://\"+site,None)\n protocol=\"http://\"\n #create subprocess that execute phantomas\n site= protocol+site\n print(site)\n proc = subprocess.Popen([command, site],stdout=subprocess.PIPE, shell=True)\n #get the output of phantomas\n out = proc.stdout.read()\n #splits the output to get only the wanted data\n out = str(out).split(\"\\\\n\\\\n\")[1][1:].split(\"\\\\n*\")\n #get the domain name\n domain = urlparse(site).hostname\n if domain.startswith(\"www\"):\n domain = domain[4:]\n #create the full dictionary\n data = {'Domain':domain}\n for line in out:\n parts = line.lstrip().split(\":\")\n data[parts[0]]=parts[1].lstrip()\n #create a smaller dictionary with only necessary parts\n newData = {}\n for metric in interesting_metrics:\n newData[metric] = data[metric]\n return newData\n return \"\"\n\ndef multiple_scan(csvFile):\n domains = \"\"\n with open(csvFile,'r') as file:\n domains = file.read().split(\"\\n\")[:-1]\n temp_data = []\n for domain in domains:\n temp_data.append(scan(domain.split(\",\")[0]))\n #write data to a csv file \n with open(customCsv, 'w') as output_file:\n writer = csv.writer(output_file, lineterminator=\"\\n\")\n writer.writerow(interesting_metrics)\n for data in temp_data:\n if data!=\"\":\n writer.writerow(data.values())\n\ndef single_scan(url):\n temp_data = scan(url)\n with open(customCsv, 'w') as output_file:\n writer = csv.writer(output_file, lineterminator=\"\\n\")\n writer.writerow(interesting_metrics)\n writer.writerow(temp_data.values())\n\n\nif __name__ == \"__main__\":\n arg = sys.argv[1]\n if str(arg).endswith(\".csv\"):\n multiple_scan(arg)\n else:\n \tsingle_scan(arg)\n print(\"Ended scan\")","sub_path":"phantomas.py","file_name":"phantomas.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"227706676","text":"from asyncio import FIRST_COMPLETED, IncompleteReadError, ensure_future, sleep, wait\nfrom asyncio.subprocess import DEVNULL, PIPE, create_subprocess_exec\nfrom contextlib import suppress\nfrom datetime import datetime\nfrom os import sep\nfrom pathlib import Path\nfrom shlex import quote\nfrom sys import stderr\nfrom textwrap import dedent\nfrom typing import Sequence\n\nfrom .consts import BIN, NUL, TIME_FMT\nfrom .copy import copy\nfrom .logging import log\nfrom .shared import join, kill_children\n\n\ndef _tunneling_prog() -> str:\n canonical = BIN / \"csshd\"\n\n try:\n rel_path = canonical.relative_to(Path.home())\n except ValueError:\n return quote(str(canonical))\n else:\n return 'exec \"$HOME\"' + quote(str(Path(sep) / rel_path))\n\n\ndef _tunnel_cmd(name: str, args: Sequence[str]) -> Sequence[str]:\n sh = _tunneling_prog()\n if name == \"cssh\":\n return (\"ssh\", \"-T\", *args, sh)\n elif name == \"cdocker\":\n return (\"docker\", \"exec\", *args, \"sh\", \"-c\", sh)\n else:\n assert False\n\n\nasync def _daemon(local: bool, name: str, args: Sequence[str]) -> int:\n cmds = _tunnel_cmd(name, args=args)\n proc = await create_subprocess_exec(\n *cmds, start_new_session=True, stdin=DEVNULL, stdout=PIPE\n )\n p_done = ensure_future(proc.wait())\n time = datetime.now().strftime(TIME_FMT)\n\n msg = f\"\"\"\n {time} | Establishing link via:\n {join(cmds)}\n \"\"\"\n log.info(\"%s\", dedent(msg))\n\n try:\n assert proc.stdout\n while True:\n p_data = ensure_future(proc.stdout.readuntil(NUL))\n await wait((p_done, p_data), return_when=FIRST_COMPLETED)\n\n if p_data.done():\n with suppress(IncompleteReadError):\n data = await p_data\n await copy(local, args=args, data=data[:-1])\n\n time = datetime.now().strftime(TIME_FMT)\n msg = f\"\"\"\n -- RECV --\n {time}\n \"\"\"\n log.info(\"%s\", dedent(msg))\n\n if p_done.done():\n return await proc.wait()\n\n finally:\n with suppress(ProcessLookupError):\n kill_children(proc.pid)\n await proc.wait()\n\n\nasync def l_daemon(local: bool, name: str, args: Sequence[str]) -> int:\n while True:\n code = await _daemon(local, name=name, args=args)\n log.warn(\"%s\", f\"Exited - $? {code}\")\n print(\"\\a\", end=\"\", file=stderr, flush=True)\n await sleep(1)\n","sub_path":"iso_cp/local_daemon.py","file_name":"local_daemon.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"96919996","text":"#helper function\r\nfrom django.contrib.gis.geoip2 import GeoIP2\r\nfrom geopy.geocoders import Nominatim\r\nfrom ipware import get_client_ip\r\n\r\ndef get_geo(ip):\r\n g = GeoIP2()\r\n country = g.country(ip)\r\n city = g.city(ip)\r\n lat, lon = g.lat_lon(ip)\r\n return country, city, lat, lon\r\n\r\ndef get_destination(request):\r\n\r\n ip, is_routable = get_client_ip(request)\r\n if ip is None:\r\n ip_ = '2401:4900:36bf:e76b:b5bf:e254:883:ccac'\r\n else:\r\n if is_routable:\r\n ip_ = ip\r\n else:\r\n ip_ = '2401:4900:36bf:e76b:b5bf:e254:883:ccac'\r\n country, city, lat1, lon1= get_geo(ip_)\r\n lat1 = int(lat1)\r\n lon1 = int(lon1)\r\n #print( city )\r\n for key,value in city.items():\r\n if key == \"postal_code\":\r\n try:\r\n postal_code=value\r\n break\r\n except:\r\n break\r\n \r\n #print(postal_code)\r\n try:\r\n geolocator = Nominatim(user_agent= 'app' )\r\n\r\n destination = geolocator.geocode(postal_code)\r\n #print(destination)\r\n except:\r\n destination=\"Not Found\"\r\n\r\n return destination, lat1, lon1\r\n\r\n\r\ndef unique_key_generator(instance):\r\n \r\n size = random.randint(30, 45)\r\n key = random_string_generator(size=size)\r\n\r\n Klass = instance.__class__\r\n qs_exists = Klass.objects.filter(key=key).exists()\r\n if qs_exists:\r\n return unique_key_generator(instance)\r\n return key\r\n\r\n\r\ndef unique_otp_generator(instance):\r\n\r\n key = random.randint(1, 999999)\r\n print(key)\r\n\r\n Klass = instance.__class__\r\n qs_exists = Klass.objects.filter(key=key).exists()\r\n if qs_exists:\r\n return unique_otp_generator(instance)\r\n return key\r\n\r\nimport re\r\nimport random\r\n\r\ndef phone_validator(phone_number):\r\n \"\"\"\r\n Returns true if phone number is correct else false\r\n \"\"\"\r\n regix = r'^\\+?1?\\d{10}$'\r\n com = re.compile(regix)\r\n find = len(com.findall(phone_number))\r\n if find == 1:\r\n return True\r\n else:\r\n return False\r\n\r\ndef otp_generator():\r\n otp = random.randint(999, 9999)\r\n return otp\r\n","sub_path":"nearbyshops/app/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"44199389","text":"\"\"\"--------------------------------------------------------------------------------------------------------------------------------------\nMODULE\n RerunConfirmationsTask\n\nDESCRIPTION\n This module contains an AEL main script used for re-running confirmation\n processing for any eligible entities updated on or after a specified time.\n\n This task is not intended for normal use and exists only as a support\n tool to be used to resolve any missed processing without replaying AMB\n messages.\n\n Examples of situations in which this tool may prove useful are:\n\n - Recovery after missed processing caused by the Confirmation ATS\n not being restarted after the deployment of a new confirmation hook.\n\n - Recovery after failed processing caused by a coding error in a\n confirmation hook.\n\n-----------------------------------------------------------------------------------------------------------------------------------------\nHISTORY\n=========================================================================================================================================\nDate Change no Developer Requester Description\n-----------------------------------------------------------------------------------------------------------------------------------------\n2019-06-18 FAOPS-536 Stuart Wilson Kgomotso Gumbo Initial implementation.\n2020-06-11 FAOPS-814 Cuen Edwards Kgomotso Gumbo Improvements to prevent unnecessary event generation.\n2020-09-06 FAOPS-920 Cuen Edwards Kgomotso Gumbo Improvements to allow for specifying the to time, event\n tables to examine, and the option to exclude touching\n entities already updated by the current user.\n-----------------------------------------------------------------------------------------------------------------------------------------\n\"\"\"\n\nimport datetime\n\nimport acm\nimport FConfirmationMain\nfrom FConfirmationEventFactory import FConfirmationEventFactory\nfrom FConfirmationProcess import GetConfirmationGeneratingObjects\n\nfrom at_ael_variables import AelVariableHandler\nfrom at_logging import getLogger\nimport FValidation_settings\nimport SessionFunctions\n\n\nLOGGER = getLogger(__name__)\n\n\ndef _create_ael_variable_handler():\n \"\"\"\n Create an AelVariableHandler for this script.\n \"\"\"\n ael_variable_handler = AelVariableHandler()\n # From Time.\n ael_variable_handler.add(\n name='from_time',\n label='From Time',\n cls='string',\n default=acm.Time.DateToday() + ' 00:00:00',\n mandatory=True,\n multiple=False,\n alt=\"The time from which to rerun confirmations processing. Any eligible \" +\n \"entity updated on or after this time will be touched in order to \" +\n \"trigger confirmations processing.\"\n )\n # To Time.\n ael_variable_handler.add(\n name='to_time',\n label='To Time',\n cls='string',\n default=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n mandatory=True,\n multiple=False,\n alt=\"The time up until which to rerun confirmations processing. Any eligible \" +\n \"entity updated on or before this time will be touched in order to \" +\n \"trigger confirmations processing.\"\n )\n # Event Tables.\n ael_variable_handler.add(\n name='event_table_names',\n label='Event Tables',\n cls='string',\n collection=FConfirmationMain.dbTables,\n default=','.join(FConfirmationMain.dbTables),\n mandatory=True,\n multiple=True,\n alt=\"The confirmation event tables to examine for updates between the from and \" +\n \"to times. 
This option allows one to avoid touching unnecessary entities \" +\n \"when it is known which types of entity need to be touched.\"\n )\n # Exclude Updates By Current User.\n ael_variable_handler.add_bool(\n name='exclude_updates_by_current_user',\n label='Exclude Updates by Current User',\n default=True,\n mandatory=True,\n multiple=False,\n alt=\"Prevent touching entities already updated by the current user. This option \" +\n \"allows one to avoid touching the same entities multiple times in the event \" +\n \"of multiple executions of this tool.\"\n )\n return ael_variable_handler\n\n\nael_variables = _create_ael_variable_handler()\n\nael_gui_parameters = {\n 'windowCaption': 'Rerun Confirmations',\n 'runButtonLabel': '&&Rerun',\n 'runButtonTooltip': 'Rerun Confirmations',\n 'hideExtraControls': True,\n 'closeWhenFinished': False\n}\n\n\ndef ael_main(ael_parameters):\n \"\"\"\n AEL Main Function.\n \"\"\"\n try:\n start_date_time = datetime.datetime.today()\n LOGGER.info('Starting at {start_date_time}'.format(start_date_time=start_date_time))\n from_time = ael_parameters['from_time']\n to_time = ael_parameters['to_time']\n event_table_names = ael_parameters['event_table_names']\n exclude_updates_by_current_user = ael_parameters['exclude_updates_by_current_user']\n _validate_from_and_to_time(from_time, to_time)\n _validate_running_as_fvalidation_exempt_user()\n _trigger_confirmation_processing(from_time, to_time, event_table_names, exclude_updates_by_current_user)\n end_date_time = datetime.datetime.today()\n LOGGER.info('Completed successfully at {end_date_time}'.format(end_date_time=end_date_time))\n duration = end_date_time - start_date_time\n LOGGER.info('Duration: {duration}'.format(duration=duration))\n except Exception as exception:\n if SessionFunctions.is_prime():\n _show_error_dialog(exception)\n LOGGER.exception(exception)\n else:\n raise\n\n\ndef _validate_from_and_to_time(from_time, to_time):\n \"\"\"\n Validate the from_time and to_time AEL parameters.\n \"\"\"\n # Validate From Time.\n from_datetime = datetime.datetime.strptime(from_time, '%Y-%m-%d %H:%M:%S')\n datetime_today = datetime.datetime.today()\n from_datetime_limit = datetime_today - datetime.timedelta(days=7)\n if from_datetime < from_datetime_limit:\n raise ValueError(\"The from time may not be earlier than '{from_datetime_limit}'.\".format(\n from_datetime_limit=from_datetime_limit\n ))\n if from_datetime > datetime_today:\n raise ValueError(\"The from time may not be in the future.\")\n # Validate To Time.\n to_datetime = datetime.datetime.strptime(to_time, '%Y-%m-%d %H:%M:%S')\n if to_datetime > datetime_today:\n raise ValueError(\"The to time may not be in the future.\")\n if to_datetime <= from_datetime:\n raise ValueError(\"The to time must after the from time.\")\n\n\ndef _validate_running_as_fvalidation_exempt_user():\n \"\"\"\n Validate that the current user is exempt from FValidation.\n \"\"\"\n if acm.UserName() not in FValidation_settings.SUPERUSERS:\n # Ensure that tool is run as a user exempt from FValidation\n # in order to avoid GUI pop-ups when touching entities.\n raise ValueError(\"This tool must be run by a user that is exempt from FValidation.\")\n\n\ndef _trigger_confirmation_processing(from_time, to_time, event_table_names, exclude_updates_by_current_user):\n \"\"\"\n Trigger confirmation processing for any eligible objects updated\n between the specified from time and to time.\n \"\"\"\n entities = _get_entities_updated_between_times(from_time, to_time, event_table_names)\n for entity 
in entities:\n if exclude_updates_by_current_user and entity.UpdateUser() == acm.User():\n info_message = \"{entity_class} {entity_oid}, already updated by the current user \"\n info_message += \"at '{update_time}', skipping...\"\n LOGGER.info(info_message.format(\n entity_class=entity.ClassName(),\n entity_oid=entity.Oid(),\n update_time=acm.Time.DateTimeFromTime(entity.UpdateTime())\n ))\n continue\n if not _entity_triggers_confirmation_processing(entity):\n info_message = \"{entity_class} {entity_oid}, updated '{update_time}', would \"\n info_message += \"not trigger confirmation processing, skipping...\"\n LOGGER.info(info_message.format(\n entity_class=entity.ClassName(),\n entity_oid=entity.Oid(),\n update_time=acm.Time.DateTimeFromTime(entity.UpdateTime())\n ))\n continue\n info_message = \"{entity_class} {entity_oid}, updated '{update_time}', would \"\n info_message += \"trigger confirmation processing, touching...\"\n LOGGER.info(info_message.format(\n entity_class=entity.ClassName(),\n entity_oid=entity.Oid(),\n update_time=acm.Time.DateTimeFromTime(entity.UpdateTime())\n ))\n try:\n entity.Touch()\n entity.Commit()\n except Exception as exception:\n LOGGER.exception(exception)\n\n\ndef _get_entities_updated_between_times(from_time, to_time, table_names):\n \"\"\"\n Get any ACM entities updated between the specified from time and\n to time.\n \"\"\"\n LOGGER.info(\"Finding entities updated between '{from_time}' and '{to_time}'...\".format(\n from_time=from_time,\n to_time=to_time\n ))\n select_expression = \"updat_time >= '{from_time}' and updat_time <= '{to_time}'\".format(\n from_time=acm.Time.LocalToUtc(from_time),\n to_time=acm.Time.LocalToUtc(to_time)\n )\n updated_entities = acm.FArray()\n for table_name in table_names:\n table = acm.FTable['ADM.{table_name}'.format(\n table_name=table_name\n )]\n entities = table.Select(select_expression).AsArray()\n LOGGER.info(\"Found {number_of_entities} {table_name} entities updated since '{from_time}'.\".format(\n number_of_entities=len(entities),\n table_name=table_name.lower(),\n from_time=from_time\n ))\n updated_entities.AddAll(entities)\n updated_entities.SortByProperty('UpdateTime')\n LOGGER.info(\"Found {number_of_entities} total entities updated since '{from_time}'.\".format(\n number_of_entities=len(updated_entities),\n from_time=from_time\n ))\n return updated_entities\n\n\ndef _entity_triggers_confirmation_processing(entity):\n \"\"\"\n Determine whether or not an entity would trigger any confirmation\n processing.\n \"\"\"\n if entity.IsKindOf(acm.FOperationsDocument):\n return True\n if entity.IsKindOf(acm.FParty):\n return True\n trades = GetConfirmationGeneratingObjects(entity)\n confirmation_events = FConfirmationEventFactory.GetConfirmationEvents()\n for trade in trades:\n for confirmation_event in confirmation_events:\n if confirmation_event.baseRule.IsSatisfiedBy(trade):\n return True\n return False\n\n\ndef _show_error_dialog(exception):\n \"\"\"\n Display an error dialog to the user.\n \"\"\"\n message_box = acm.GetFunction('msgBox', 3)\n ok_button = 0\n error_icon = 16\n message_box('Error', str(exception), ok_button | error_icon)\n","sub_path":"Extensions/ABSA Documentation/FPythonCode/RerunConfirmationsTask.py","file_name":"RerunConfirmationsTask.py","file_ext":"py","file_size_in_byte":11347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"393591444","text":"#!/usr/bin/python3\n\"\"\" Create .tgz file based in content of web_static \"\"\"\nfrom fabric.api import *\nfrom datetime import datetime\nimport os\n\nenv.hosts = ['35.231.235.242', '35.175.190.73']\n\n\ndef do_pack():\n try:\n local(\"mkdir -p versions/\")\n date = datetime.now()\n actual_date = date.strftime(\"%Y%m%d%H%M%S\")\n local(\"tar -cvzf versions/web_static_{}.tgz web_static/\".\n format(actual_date))\n return(\"versions/web_static_{}.tgz\".format(actual_date))\n except:\n return (None)\n\n\ndef do_deploy(archive_path):\n if not(os.path.exists(archive_path)):\n return False\n try:\n put(archive_path, \"/tmp/\")\n base_name = os.path.basename(archive_path)\n file_name = os.path.splitext(base_name)[0]\n run(\"mkdir -p /data/web_static/releases/{}\".format(file_name))\n run(\"tar -xzf /tmp/{} -C /data/web_static/releases/{}\".\n format(base_name, file_name))\n run(\"rm /tmp/{}\".format(base_name))\n dir_rel = \"/data/web_static/releases/\"\n run(\"mv {}{}/web_static/* {}{}\".\n format(dir_rel, file_name, dir_rel, file_name))\n run(\"rm -rf /data/web_static/releases/{}/web_static\".format(file_name))\n run(\"rm -rf /data/web_static/current\")\n run(\"ln -s /data/web_static/releases/{} /data/web_static/current\"\n .format(file_name))\n print(\"New version deployed!\")\n return(True)\n except:\n return(False)\n","sub_path":"2-do_deploy_web_static.py","file_name":"2-do_deploy_web_static.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"323170640","text":"######################################################################\n#\n# File: b2sdk/bucket.py\n#\n# Copyright 2019 Backblaze Inc. All Rights Reserved.\n#\n# License https://www.backblaze.com/using_b2_code.html\n#\n######################################################################\n\nimport logging\n\nfrom .exception import FileNotPresent, FileOrBucketNotFound, UnrecognizedBucketType\nfrom .file_version import FileVersionInfo, FileVersionInfoFactory\nfrom .progress import DoNothingProgressListener\nfrom .transfer.emerge.executor import AUTO_CONTENT_TYPE\nfrom .transfer.emerge.write_intent import WriteIntent\nfrom .transfer.outbound.copy_source import CopySource\nfrom .transfer.outbound.upload_source import UploadSourceBytes, UploadSourceLocalFile\nfrom .utils import B2TraceMeta, disable_trace, limit_trace_arguments\nfrom .utils import b2_url_encode, validate_b2_file_name\n\nlogger = logging.getLogger(__name__)\n\n\nclass Bucket(metaclass=B2TraceMeta):\n \"\"\"\n Provide access to a bucket in B2: listing files, uploading and downloading.\n \"\"\"\n\n DEFAULT_CONTENT_TYPE = AUTO_CONTENT_TYPE\n\n def __init__(\n self,\n api,\n id_,\n name=None,\n type_=None,\n bucket_info=None,\n cors_rules=None,\n lifecycle_rules=None,\n revision=None,\n bucket_dict=None,\n options_set=None,\n ):\n \"\"\"\n :param b2sdk.v1.B2Api api: an API object\n :param str id_: a bucket id\n :param str name: a bucket name\n :param str type_: a bucket type\n :param dict bucket_info: an info to store with a bucket\n :param dict cors_rules: CORS rules to store with a bucket\n :param dict lifecycle_rules: lifecycle rules to store with a bucket\n :param int revision: a bucket revision number\n :param dict bucket_dict: a dictionary which contains bucket parameters\n :param set options_set: set of bucket options strings\n \"\"\"\n self.api = api\n self.id_ = id_\n self.name = name\n self.type_ = type_\n self.bucket_info = bucket_info or {}\n self.cors_rules = cors_rules or []\n self.lifecycle_rules = lifecycle_rules or []\n self.revision = revision\n self.bucket_dict = bucket_dict or {}\n self.options_set = options_set or set()\n\n def get_id(self):\n \"\"\"\n Return bucket ID.\n\n :rtype: str\n \"\"\"\n return self.id_\n\n def set_info(self, new_bucket_info, if_revision_is=None):\n \"\"\"\n Update bucket info.\n\n :param dict new_bucket_info: new bucket info dictionary\n :param int if_revision_is: revision number, update the info **only if** *revision* equals to *if_revision_is*\n \"\"\"\n return self.update(bucket_info=new_bucket_info, if_revision_is=if_revision_is)\n\n def set_type(self, bucket_type):\n \"\"\"\n Update bucket type.\n\n :param str bucket_type: a bucket type (\"allPublic\" or \"allPrivate\")\n \"\"\"\n return self.update(bucket_type=bucket_type)\n\n def update(\n self,\n bucket_type=None,\n bucket_info=None,\n cors_rules=None,\n lifecycle_rules=None,\n if_revision_is=None,\n ):\n \"\"\"\n Update various bucket parameters.\n\n :param str bucket_type: a bucket type\n :param dict bucket_info: an info to store with a bucket\n :param dict cors_rules: CORS rules to store with a bucket\n :param dict lifecycle_rules: lifecycle rules to store with a bucket\n :param int if_revision_is: revision number, update the info **only if** *revision* equals to *if_revision_is*\n \"\"\"\n account_id = self.api.account_info.get_account_id()\n return self.api.session.update_bucket(\n account_id,\n self.id_,\n bucket_type=bucket_type,\n bucket_info=bucket_info,\n cors_rules=cors_rules,\n 
lifecycle_rules=lifecycle_rules,\n if_revision_is=if_revision_is\n )\n\n def cancel_large_file(self, file_id):\n \"\"\"\n Cancel a large file transfer.\n\n :param str file_id: a file ID\n \"\"\"\n return self.api.cancel_large_file(file_id)\n\n def download_file_by_id(self, file_id, download_dest, progress_listener=None, range_=None):\n \"\"\"\n Download a file by ID.\n\n .. note::\n download_file_by_id actually belongs in :py:class:`b2sdk.v1.B2Api`, not in :py:class:`b2sdk.v1.Bucket`; we just provide a convenient redirect here\n\n :param str file_id: a file ID\n :param download_dest: an instance of the one of the following classes: \\\n :class:`~b2sdk.v1.DownloadDestLocalFile`,\\\n :class:`~b2sdk.v1.DownloadDestBytes`,\\\n :class:`~b2sdk.v1.DownloadDestProgressWrapper`,\\\n :class:`~b2sdk.v1.PreSeekedDownloadDest`,\\\n or any sub class of :class:`~b2sdk.v1.AbstractDownloadDestination`\n :param b2sdk.v1.AbstractProgressListener, None progress_listener: a progress listener object to use, or ``None`` to not report progress\n :param tuple[int, int] range_: two integer values, start and end offsets\n \"\"\"\n return self.api.download_file_by_id(\n file_id, download_dest, progress_listener, range_=range_\n )\n\n def download_file_by_name(self, file_name, download_dest, progress_listener=None, range_=None):\n \"\"\"\n Download a file by name.\n\n .. seealso::\n\n :ref:`Synchronizer `, a *high-performance* utility that synchronizes a local folder with a Bucket.\n\n :param str file_name: a file name\n :param download_dest: an instance of the one of the following classes: \\\n :class:`~b2sdk.v1.DownloadDestLocalFile`,\\\n :class:`~b2sdk.v1.DownloadDestBytes`,\\\n :class:`~b2sdk.v1.DownloadDestProgressWrapper`,\\\n :class:`~b2sdk.v1.PreSeekedDownloadDest`,\\\n or any sub class of :class:`~b2sdk.v1.AbstractDownloadDestination`\n :param b2sdk.v1.AbstractProgressListener, None progress_listener: a progress listener object to use, or ``None`` to not track progress\n :param tuple[int, int] range_: two integer values, start and end offsets\n \"\"\"\n url = self.api.session.get_download_url_by_name(self.name, file_name)\n return self.api.services.download_manager.download_file_from_url(\n url, download_dest, progress_listener, range_\n )\n\n def get_file_info_by_id(self, file_id: str) -> FileVersionInfo:\n \"\"\"\n Gets a file version's info by ID.\n\n :param str file_id: the id of the file who's info will be retrieved.\n :rtype: generator[b2sdk.v1.FileVersionInfo]\n \"\"\"\n return FileVersionInfoFactory.from_api_response(self.api.get_file_info(file_id))\n\n def get_file_info_by_name(self, file_name: str) -> FileVersionInfo:\n \"\"\"\n Gets a file version's info by its name.\n\n :param str file_name: the name of the file who's info will be retrieved.\n :rtype: generator[b2sdk.v1.FileVersionInfo]\n \"\"\"\n try:\n return FileVersionInfoFactory.from_response_headers(\n self.api.session.get_file_info_by_name(self.name, file_name)\n )\n except FileOrBucketNotFound:\n raise FileNotPresent(bucket_name=self.name, file_id_or_name=file_name)\n\n def get_download_authorization(self, file_name_prefix, valid_duration_in_seconds):\n \"\"\"\n Return an authorization token that is valid only for downloading\n files from the given bucket.\n\n :param str file_name_prefix: a file name prefix, only files that match it could be downloaded\n :param int valid_duration_in_seconds: a token is valid only during this amount of seconds\n \"\"\"\n response = self.api.session.get_download_authorization(\n self.id_, file_name_prefix, 
valid_duration_in_seconds\n )\n return response['authorizationToken']\n\n def list_parts(self, file_id, start_part_number=None, batch_size=None):\n \"\"\"\n Get a list of all parts that have been uploaded for a given file.\n\n :param str file_id: a file ID\n :param int start_part_number: the first part number to return. defaults to the first part.\n :param int batch_size: the number of parts to fetch at a time from the server\n \"\"\"\n return self.api.list_parts(file_id, start_part_number, batch_size)\n\n def list_file_versions(self, file_name, fetch_count=None):\n \"\"\"\n Lists all of the versions for a single file.\n\n :param str file_name: the name of the file to list.\n :param int,None fetch_count: how many entries to list per API call or ``None`` to use the default. Acceptable values: 1 - 10000\n :rtype: generator[b2sdk.v1.FileVersionInfo]\n \"\"\"\n if fetch_count is not None and fetch_count <= 0:\n # fetch_count equal to 0 means \"use API default\", which we don't want to support here\n raise ValueError(\"unsupported fetch_count value\")\n start_file_name = file_name\n start_file_id = None\n session = self.api.session\n while 1:\n response = session.list_file_versions(\n self.id_, start_file_name, start_file_id, fetch_count, file_name\n )\n\n for entry in response['files']:\n file_version_info = FileVersionInfoFactory.from_api_response(entry)\n if file_version_info.file_name != file_name:\n # All versions for the requested file name have been listed.\n return\n yield file_version_info\n start_file_name = response['nextFileName']\n start_file_id = response['nextFileId']\n if start_file_name is None:\n return\n\n def ls(self, folder_to_list='', show_versions=False, recursive=False, fetch_count=10000):\n \"\"\"\n Pretend that folders exist and yields the information about the files in a folder.\n\n B2 has a flat namespace for the files in a bucket, but there is a convention\n of using \"/\" as if there were folders. This method searches through the\n flat namespace to find the files and \"folders\" that live within a given\n folder.\n\n When the `recursive` flag is set, lists all of the files in the given\n folder, and all of its sub-folders.\n\n :param str folder_to_list: the name of the folder to list; must not start with \"/\".\n Empty string means top-level folder\n :param bool show_versions: when ``True`` returns info about all versions of a file,\n when ``False``, just returns info about the most recent versions\n :param bool recursive: if ``True``, list folders recursively\n :param int,None fetch_count: how many entries to return or ``None`` to use the default. Acceptable values: 1 - 10000\n :rtype: generator[tuple[b2sdk.v1.FileVersionInfo, str]]\n :returns: generator of (file_version_info, folder_name) tuples\n\n .. note::\n In case of `recursive=True`, folder_name is returned only for first file in the folder.\n \"\"\"\n # Every file returned must have a name that starts with the\n # folder name and a \"/\".\n prefix = folder_to_list\n if prefix != '' and not prefix.endswith('/'):\n prefix += '/'\n\n # Loop until all files in the named directory have been listed.\n # The starting point of the first list_file_names request is the\n # prefix we're looking for. The prefix ends with '/', which is\n # now allowed for file names, so no file name will match exactly,\n # but the first one after that point is the first file in that\n # \"folder\". 
If the first search doesn't produce enough results,\n # then we keep calling list_file_names until we get all of the\n # names in this \"folder\".\n current_dir = None\n start_file_name = prefix\n start_file_id = None\n session = self.api.session\n while True:\n if show_versions:\n response = session.list_file_versions(\n self.id_, start_file_name, start_file_id, fetch_count, prefix\n )\n else:\n response = session.list_file_names(self.id_, start_file_name, fetch_count, prefix)\n for entry in response['files']:\n file_version_info = FileVersionInfoFactory.from_api_response(entry)\n if not file_version_info.file_name.startswith(prefix):\n # We're past the files we care about\n return\n after_prefix = file_version_info.file_name[len(prefix):]\n if '/' not in after_prefix or recursive:\n # This is not a folder, so we'll print it out and\n # continue on.\n yield file_version_info, None\n current_dir = None\n else:\n # This is a folder. If it's different than the folder\n # we're already in, then we can print it. This check\n # is needed, because all of the files in the folder\n # will be in the list.\n folder_with_slash = after_prefix.split('/')[0] + '/'\n if folder_with_slash != current_dir:\n folder_name = prefix + folder_with_slash\n yield file_version_info, folder_name\n current_dir = folder_with_slash\n if response['nextFileName'] is None:\n # The response says there are no more files in the bucket,\n # so we can stop.\n return\n\n # Now we need to set up the next search. The response from\n # B2 has the starting point to continue with the next file,\n # but if we're in the middle of a \"folder\", we can skip ahead\n # to the end of the folder. The character after '/' is '0',\n # so we'll replace the '/' with a '0' and start there.\n #\n # When recursive is True, current_dir is always None.\n if current_dir is None:\n start_file_name = response.get('nextFileName')\n start_file_id = response.get('nextFileId')\n else:\n start_file_name = max(\n response['nextFileName'],\n prefix + current_dir[:-1] + '0',\n )\n\n def list_unfinished_large_files(self, start_file_id=None, batch_size=None, prefix=None):\n \"\"\"\n A generator that yields an :py:class:`b2sdk.v1.UnfinishedLargeFile` for each\n unfinished large file in the bucket, starting at the given file, filtering by prefix.\n\n :param str,None start_file_id: a file ID to start from or None to start from the beginning\n :param int,None batch_size: max file count\n :param str,None prefix: file name prefix filter\n :rtype: generator[b2sdk.v1.UnfinishedLargeFile]\n \"\"\"\n return self.api.services.large_file.list_unfinished_large_files(\n self.id_,\n start_file_id=start_file_id,\n batch_size=batch_size,\n prefix=prefix,\n )\n\n def start_large_file(self, file_name, content_type=None, file_info=None):\n \"\"\"\n Start a large file transfer.\n\n :param str file_name: a file name\n :param str,None content_type: the MIME type, or ``None`` to accept the default based on file extension of the B2 file name\n :param dict,None file_info: a file info to store with the file or ``None`` to not store anything\n \"\"\"\n validate_b2_file_name(file_name)\n return self.api.services.large_file.start_large_file(\n self.id_, file_name, content_type=content_type, file_info=file_info\n )\n\n @limit_trace_arguments(skip=('data_bytes',))\n def upload_bytes(\n self,\n data_bytes,\n file_name,\n content_type=None,\n file_infos=None,\n progress_listener=None,\n ):\n \"\"\"\n Upload bytes in memory to a B2 file.\n\n :param bytes data_bytes: a byte array to upload\n :param 
str file_name: a file name to upload bytes to\n :param str,None content_type: the MIME type, or ``None`` to accept the default based on file extension of the B2 file name\n :param dict,None file_infos: a file info to store with the file or ``None`` to not store anything\n :param b2sdk.v1.AbstractProgressListener,None progress_listener: a progress listener object to use, or ``None`` to not track progress\n :rtype: generator[b2sdk.v1.FileVersion]\n \"\"\"\n upload_source = UploadSourceBytes(data_bytes)\n return self.upload(\n upload_source,\n file_name,\n content_type=content_type,\n file_info=file_infos,\n progress_listener=progress_listener,\n )\n\n def upload_local_file(\n self,\n local_file,\n file_name,\n content_type=None,\n file_infos=None,\n sha1_sum=None,\n min_part_size=None,\n progress_listener=None,\n ):\n \"\"\"\n Upload a file on local disk to a B2 file.\n\n .. seealso::\n\n :ref:`Synchronizer `, a *high-performance* utility that synchronizes a local folder with a :term:`bucket`.\n\n :param str local_file: a path to a file on local disk\n :param str file_name: a file name of the new B2 file\n :param str,None content_type: the MIME type, or ``None`` to accept the default based on file extension of the B2 file name\n :param dict,None file_infos: a file info to store with the file or ``None`` to not store anything\n :param str,None sha1_sum: file SHA1 hash or ``None`` to compute it automatically\n :param int min_part_size: a minimum size of a part\n :param b2sdk.v1.AbstractProgressListener,None progress_listener: a progress listener object to use, or ``None`` to not report progress\n :rtype: generator[b2sdk.v1.FileVersion]\n \"\"\"\n upload_source = UploadSourceLocalFile(local_path=local_file, content_sha1=sha1_sum)\n return self.upload(\n upload_source,\n file_name,\n content_type=content_type,\n file_info=file_infos,\n min_part_size=min_part_size,\n progress_listener=progress_listener,\n )\n\n def upload(\n self,\n upload_source,\n file_name,\n content_type=None,\n file_info=None,\n min_part_size=None,\n progress_listener=None\n ):\n \"\"\"\n Upload a file to B2, retrying as needed.\n\n The source of the upload is an UploadSource object that can be used to\n open (and re-open) the file. 
The result of opening should be a binary\n file whose read() method returns bytes.\n\n :param b2sdk.v1.UploadSource upload_source: an object that opens the source of the upload\n :param str file_name: the file name of the new B2 file\n :param str,None content_type: the MIME type, or ``None`` to accept the default based on file extension of the B2 file name\n :param dict,None file_info: a file info to store with the file or ``None`` to not store anything\n :param int,None min_part_size: the smallest part size to use or ``None`` to determine automatically\n :param b2sdk.v1.AbstractProgressListener,None progress_listener: a progress listener object to use, or ``None`` to not report progress\n :rtype: generator[b2sdk.v1.FileVersion]\n\n The function `opener` should return a file-like object, and it\n must be possible to call it more than once in case the upload\n is retried.\n \"\"\"\n return self.create_file(\n [WriteIntent(upload_source)],\n file_name,\n content_type=content_type,\n file_info=file_info,\n progress_listener=progress_listener,\n # FIXME: Bucket.upload documents wrong logic\n recommended_upload_part_size=min_part_size,\n )\n\n def create_file(\n self,\n write_intents,\n file_name,\n content_type=None,\n file_info=None,\n progress_listener=None,\n recommended_upload_part_size=None,\n continue_large_file_id=None,\n ):\n \"\"\"\n Creates a new file in this bucket using an iterable (list, tuple etc) of remote or local sources.\n\n Source ranges can overlap and remote sources will be prioritized over local sources (when possible).\n For more information and usage examples please see :ref:`Advanced usage patterns `.\n\n :param list[b2sdk.v1.WriteIntent] write_intents: list of write intents (remote or local sources)\n :param str new_file_name: file name of the new file\n :param str,None content_type: content_type for the new file, if ``None`` content_type would be\n automatically determined or it may be copied if it resolves\n as single part remote source copy\n :param dict,None file_info: file_info for the new file, if ``None`` it will be set to empty dict\n or it may be copied if it resolves as single part remote source copy\n :param b2sdk.v1.AbstractProgressListener,None progress_listener: a progress listener object to use,\n or ``None`` to not report progress\n :param int,None recommended_upload_part_size: the recommended part size to use for uploading local sources\n or ``None`` to determine automatically, but remote sources would be copied with\n maximum possible part size\n :param str,None continue_large_file_id: large file id that should be selected to resume file creation\n for multipart upload/copy, ``None`` for automatic search for this id\n \"\"\"\n return self._create_file(\n self.api.services.emerger.emerge,\n write_intents,\n file_name,\n content_type=content_type,\n file_info=file_info,\n progress_listener=progress_listener,\n continue_large_file_id=continue_large_file_id,\n recommended_upload_part_size=recommended_upload_part_size,\n )\n\n def create_file_stream(\n self,\n write_intents_iterator,\n file_name,\n content_type=None,\n file_info=None,\n progress_listener=None,\n recommended_upload_part_size=None,\n continue_large_file_id=None,\n ):\n \"\"\"\n Creates a new file in this bucket using a stream of multiple remote or local sources.\n\n Source ranges can overlap and remote sources will be prioritized over local sources (when possible).\n For more information and usage examples please see :ref:`Advanced usage patterns `.\n\n :param iterator[b2sdk.v1.WriteIntent] 
write_intents_iterator: iterator of write intents which\n are sorted ascending by ``destination_offset``\n :param str new_file_name: file name of the new file\n :param str,None content_type: content_type for the new file, if ``None`` content_type would be\n automatically determined or it may be copied if it resolves\n as single part remote source copy\n :param dict,None file_info: file_info for the new file, if ``None`` it will be set to empty dict\n or it may be copied if it resolves as single part remote source copy\n :param b2sdk.v1.AbstractProgressListener,None progress_listener: a progress listener object to use,\n or ``None`` to not report progress\n :param int,None recommended_upload_part_size: the recommended part size to use for uploading local sources\n or ``None`` to determine automatically, but remote sources would be copied with\n maximum possible part size\n :param str,None continue_large_file_id: large file id that should be selected to resume file creation\n for multipart upload/copy, if ``None`` in multipart case it would always start a new\n large file\n \"\"\"\n return self._create_file(\n self.api.services.emerger.emerge_stream,\n write_intents_iterator,\n file_name,\n content_type=content_type,\n file_info=file_info,\n progress_listener=progress_listener,\n continue_large_file_id=continue_large_file_id,\n recommended_upload_part_size=recommended_upload_part_size,\n )\n\n def _create_file(\n self,\n emerger_method,\n write_intents_iterable,\n file_name,\n content_type=None,\n file_info=None,\n progress_listener=None,\n recommended_upload_part_size=None,\n continue_large_file_id=None,\n ):\n validate_b2_file_name(file_name)\n progress_listener = progress_listener or DoNothingProgressListener()\n\n return emerger_method(\n self.id_,\n write_intents_iterable,\n file_name,\n content_type,\n file_info,\n progress_listener,\n recommended_upload_part_size=recommended_upload_part_size,\n continue_large_file_id=continue_large_file_id,\n )\n\n def concatenate(\n self,\n outbound_sources,\n file_name,\n content_type=None,\n file_info=None,\n progress_listener=None,\n recommended_upload_part_size=None,\n continue_large_file_id=None,\n ):\n \"\"\"\n Creates a new file in this bucket by concatenating multiple remote or local sources.\n\n :param list[b2sdk.v1.OutboundTransferSource] outbound_sources: list of outbound sources (remote or local)\n :param str new_file_name: file name of the new file\n :param str,None content_type: content_type for the new file, if ``None`` content_type would be\n automatically determined from file name or it may be copied if it resolves\n as single part remote source copy\n :param dict,None file_info: file_info for the new file, if ``None`` it will be set to empty dict\n or it may be copied if it resolves as single part remote source copy\n :param b2sdk.v1.AbstractProgressListener,None progress_listener: a progress listener object to use,\n or ``None`` to not report progress\n :param int,None recommended_upload_part_size: the recommended part size to use for uploading local sources\n or ``None`` to determine automatically, but remote sources would be copied with\n maximum possible part size\n :param str,None continue_large_file_id: large file id that should be selected to resume file creation\n for multipart upload/copy, ``None`` for automatic search for this id\n \"\"\"\n return self.create_file(\n WriteIntent.wrap_sources_iterator(outbound_sources),\n file_name,\n content_type=content_type,\n file_info=file_info,\n progress_listener=progress_listener,\n 
recommended_upload_part_size=recommended_upload_part_size,\n continue_large_file_id=continue_large_file_id,\n )\n\n def concatenate_stream(\n self,\n outbound_sources_iterator,\n file_name,\n content_type=None,\n file_info=None,\n progress_listener=None,\n recommended_upload_part_size=None,\n continue_large_file_id=None,\n ):\n \"\"\"\n Creates a new file in this bucket by concatenating stream of multiple remote or local sources.\n\n :param iterator[b2sdk.v1.OutboundTransferSource] outbound_sources_iterator: iterator of outbound sources\n :param str new_file_name: file name of the new file\n :param str,None content_type: content_type for the new file, if ``None`` content_type would be\n automatically determined or it may be copied if it resolves\n as single part remote source copy\n :param dict,None file_info: file_info for the new file, if ``None`` it will be set to empty dict\n or it may be copied if it resolves as single part remote source copy\n :param b2sdk.v1.AbstractProgressListener,None progress_listener: a progress listener object to use,\n or ``None`` to not report progress\n :param int,None recommended_upload_part_size: the recommended part size to use for uploading local sources\n or ``None`` to determine automatically, but remote sources would be copied with\n maximum possible part size\n :param str,None continue_large_file_id: large file id that should be selected to resume file creation\n for multipart upload/copy, if ``None`` in multipart case it would always start a new\n large file\n \"\"\"\n return self.create_file_stream(\n WriteIntent.wrap_sources_iterator(outbound_sources_iterator),\n file_name,\n content_type=content_type,\n file_info=file_info,\n progress_listener=progress_listener,\n recommended_upload_part_size=recommended_upload_part_size,\n continue_large_file_id=continue_large_file_id,\n )\n\n def get_download_url(self, filename):\n \"\"\"\n Get file download URL.\n\n :param str filename: a file name\n :rtype: str\n \"\"\"\n return \"%s/file/%s/%s\" % (\n self.api.account_info.get_download_url(),\n b2_url_encode(self.name),\n b2_url_encode(filename),\n )\n\n def hide_file(self, file_name):\n \"\"\"\n Hide a file.\n\n :param str file_name: a file name\n :rtype: b2sdk.v1.FileVersionInfo\n \"\"\"\n response = self.api.session.hide_file(self.id_, file_name)\n return FileVersionInfoFactory.from_api_response(response)\n\n def copy(\n self,\n file_id,\n new_file_name,\n content_type=None,\n file_info=None,\n offset=0,\n length=None,\n progress_listener=None\n ):\n \"\"\"\n Creates a new file in this bucket by (server-side) copying from an existing file.\n\n :param str file_id: file ID of existing file to copy from\n :param str new_file_name: file name of the new file\n :param str,None content_type: content_type for the new file, if ``None`` and ``b2_copy_file`` will be used\n content_type will be copied from source file - otherwise content_type would be\n automatically determined\n :param dict,None file_info: file_info for the new file, if ``None`` will and ``b2_copy_file`` will be used\n file_info will be copied from source file - otherwise it will be set to empty dict\n :param int offset: offset of exisiting file that copy should start from\n :param int,None length: number of bytes to copy, if ``None`` then ``offset`` have to be ``0`` and it will\n use ``b2_copy_file`` without ``range`` parameter so it may fail if file is too large.\n For large files length have to be specified to use ``b2_copy_part`` instead.\n :param b2sdk.v1.AbstractProgressListener,None 
progress_listener: a progress listener object to use\n for multipart copy, or ``None`` to not report progress\n \"\"\"\n\n copy_source = CopySource(file_id, offset=offset, length=length)\n if not length:\n # TODO: it feels like this should be checked on lower level - eg. RawApi\n validate_b2_file_name(new_file_name)\n progress_listener = progress_listener or DoNothingProgressListener()\n return self.api.services.copy_manager.copy_file(\n copy_source,\n new_file_name,\n content_type=content_type,\n file_info=file_info,\n destination_bucket_id=self.id_,\n progress_listener=progress_listener,\n ).result()\n else:\n return self.create_file(\n [WriteIntent(copy_source)],\n new_file_name,\n content_type=content_type,\n file_info=file_info,\n progress_listener=progress_listener,\n )\n\n # FIXME: this shold be deprecated\n def copy_file(\n self,\n file_id,\n new_file_name,\n bytes_range=None,\n metadata_directive=None,\n content_type=None,\n file_info=None,\n ):\n \"\"\"\n Creates a new file in this bucket by (server-side) copying from an existing file.\n\n :param str file_id: file ID of existing file\n :param str new_file_name: file name of the new file\n :param tuple[int,int],None bytes_range: start and end offsets (**inclusive!**), default is the entire file\n :param b2sdk.v1.MetadataDirectiveMode,None metadata_directive: default is :py:attr:`b2sdk.v1.MetadataDirectiveMode.COPY`\n :param str,None content_type: content_type for the new file if metadata_directive is set to :py:attr:`b2sdk.v1.MetadataDirectiveMode.REPLACE`, default will copy the content_type of old file\n :param dict,None file_info: file_info for the new file if metadata_directive is set to :py:attr:`b2sdk.v1.MetadataDirectiveMode.REPLACE`, default will copy the file_info of old file\n \"\"\"\n return self.api.session.copy_file(\n file_id,\n new_file_name,\n bytes_range,\n metadata_directive,\n content_type,\n file_info,\n self.id_,\n )\n\n def delete_file_version(self, file_id, file_name):\n \"\"\"\n Delete a file version.\n\n :param str file_id: a file ID\n :param str file_name: a file name\n \"\"\"\n # filename argument is not first, because one day it may become optional\n return self.api.delete_file_version(file_id, file_name)\n\n @disable_trace\n def as_dict(self):\n \"\"\"\n Return bucket representation as a dictionary.\n\n :rtype: dict\n \"\"\"\n result = {\n 'accountId': self.api.account_info.get_account_id(),\n 'bucketId': self.id_,\n }\n if self.name is not None:\n result['bucketName'] = self.name\n if self.type_ is not None:\n result['bucketType'] = self.type_\n result['bucketInfo'] = self.bucket_info\n result['corsRules'] = self.cors_rules\n result['lifecycleRules'] = self.lifecycle_rules\n result['revision'] = self.revision\n result['options'] = self.options_set\n return result\n\n def __repr__(self):\n return 'Bucket<%s,%s,%s>' % (self.id_, self.name, self.type_)\n\n\nclass BucketFactory(object):\n \"\"\"\n This is a factory for creating bucket objects from different kind of objects.\n \"\"\"\n BUCKET_CLASS = staticmethod(Bucket)\n\n @classmethod\n def from_api_response(cls, api, response):\n \"\"\"\n Create a Bucket object from API response.\n\n :param b2sdk.v1.B2Api api: API object\n :param requests.Response response: response object\n :rtype: b2sdk.v1.Bucket\n \"\"\"\n return [cls.from_api_bucket_dict(api, bucket_dict) for bucket_dict in response['buckets']]\n\n @classmethod\n def from_api_bucket_dict(cls, api, bucket_dict):\n \"\"\"\n Turn a dictionary, like this:\n\n .. 
code-block:: python\n\n {\n \"bucketType\": \"allPrivate\",\n \"bucketId\": \"a4ba6a39d8b6b5fd561f0010\",\n \"bucketName\": \"zsdfrtsazsdfafr\",\n \"accountId\": \"4aa9865d6f00\",\n \"bucketInfo\": {},\n \"options\": [],\n \"revision\": 1\n }\n\n into a Bucket object.\n\n :param b2sdk.v1.B2Api api: API lient\n :param dict bucket_dict: a dictionary with bucket properties\n :rtype: b2sdk.v1.Bucket\n\n \"\"\"\n bucket_name = bucket_dict['bucketName']\n bucket_id = bucket_dict['bucketId']\n type_ = bucket_dict['bucketType']\n bucket_info = bucket_dict['bucketInfo']\n cors_rules = bucket_dict['corsRules']\n lifecycle_rules = bucket_dict['lifecycleRules']\n revision = bucket_dict['revision']\n options = set(bucket_dict['options'])\n if type_ is None:\n raise UnrecognizedBucketType(bucket_dict['bucketType'])\n return cls.BUCKET_CLASS(\n api, bucket_id, bucket_name, type_, bucket_info, cors_rules, lifecycle_rules, revision,\n bucket_dict, options\n )\n","sub_path":"b2sdk/bucket.py","file_name":"bucket.py","file_ext":"py","file_size_in_byte":37006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"78743259","text":"from models.original_models import madry_model\nfrom tools.art.model import Model\n\n\nclass MadryModel(Model):\n def __init__(self, checkpoint_path, image_width=28, image_height=28, n_channels=1, n_classes=10):\n super().__init__(image_height=image_height, image_width=image_width, n_channels=n_channels, n_classes=n_classes,\n checkpoint_path=checkpoint_path)\n\n def calculate_logits(self, inputs):\n model = madry_model.MadryModel(n_classes=self.n_classes)\n output = model.fprop(inputs)\n return output['logits']\n","sub_path":"tools/art/madry_model.py","file_name":"madry_model.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"326096273","text":"from sklearn.datasets import load_iris\r\nfrom sciope.models import label_propagation\r\nimport numpy as np\r\nimport pytest\r\n\r\n\r\n@pytest.fixture\r\ndef iris_data():\r\n data = load_iris()\r\n idx = np.random.randint(0, len(data.target), 75)\r\n data.new_target = np.copy(data.target)\r\n data.new_target[idx] = -1 # unlabeled data points\r\n data.idx = idx\r\n return data\r\n\r\n\r\ndef test_lpmodel(iris_data):\r\n model = label_propagation.LPModel()\r\n model.train(iris_data.data, iris_data.new_target)\r\n print(model.gamma)\r\n","sub_path":"sciope/tests/test_lpmodel.py","file_name":"test_lpmodel.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"600869135","text":"# Copyright 2017-present Open Networking Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom xos.exceptions import *\nfrom tenantwithcontainer_decl import *\n\n\nclass TenantWithContainer(TenantWithContainer_decl):\n class Meta:\n proxy = True\n\n def __init__(self, *args, **kwargs):\n super(TenantWithContainer, self).__init__(*args, **kwargs)\n\n # vSG service relies on knowing when instance id has changed\n self.orig_instance_id = self.get_attribute(\"instance_id\")\n\n # vSG service relies on instance_id attribute\n def get_attribute(self, name, default=None):\n if name == \"instance_id\":\n if self.instance:\n return self.instance.id\n else:\n return None\n else:\n return super(TenantWithContainer, self).get_attribute(name, default)\n\n # Services may wish to override the image() function to return different\n # images based on criteria in the tenant object. For example,\n # if (self.has_feature_A):\n # return Instance.object.get(name=\"image_with_feature_a\")\n # elif (self.has_feature_B):\n # return Instance.object.get(name=\"image_with_feature_b\")\n # else:\n # return super(MyTenantClass,self).image()\n\n @property\n def image(self):\n from core.models import Image\n\n # Implement the logic here to pick the image that should be used when\n # instantiating the VM that will hold the container.\n\n slice = self.provider_service.slices.all()\n if not slice:\n raise XOSProgrammingError(\"provider service has no slice\")\n slice = slice[0]\n\n # If slice has default_image set then use it\n if slice.default_image:\n return slice.default_image\n\n raise XOSProgrammingError(\"Please set a default image for %s\" % self.slice.name)\n\n def save(self, *args, **kwargs):\n if (not self.creator) and (hasattr(self, \"caller\")) and (self.caller):\n self.creator = self.caller\n\n super(TenantWithContainer, self).save(*args, **kwargs)\n","sub_path":"xos/core/models/tenantwithcontainer.py","file_name":"tenantwithcontainer.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"242535695","text":"# -*- coding: utf-8 -*-\n\nimport json\n\nfrom flask import Blueprint, request, abort\nfrom jinja2 import Template\n\nfrom app import app\nfrom app.agents.models import Bot\nfrom app.commons import build_response\nfrom app.endpoint.utils import SilentUndefined\nfrom app.endpoint.utils import call_api\nfrom app.endpoint.utils import get_synonyms\nfrom app.endpoint.utils import split_sentence\nfrom app.intents.models import Intent\nfrom app.nlu.classifiers.starspace_intent_classifier import \\\n EmbeddingIntentClassifier\nfrom app.nlu.entity_extractor import EntityExtractor\nfrom app.nlu.tasks import model_updated_signal\n\nendpoint = Blueprint('api', __name__, url_prefix='/api')\n\nsentence_classifier = None\nsynonyms = None\nentity_extraction = None\n\n\n# Request Handler\n@endpoint.route('/v1', methods=['POST'])\ndef api():\n \"\"\"\n Endpoint to converse with chatbot.\n Chat context is maintained by exchanging the payload between client and bot.\n\n sample input/output payload =>\n\n {\n \"currentNode\": \"\",\n \"complete\": false,\n \"parameters\": [],\n \"extractedParameters\": {},\n \"missingParameters\": [],\n \"intent\": {\n },\n \"context\": {},\n \"input\": \"hello\",\n \"speechResponse\": [\n ]\n }\n\n :param json:\n :return json:\n \"\"\"\n request_json = request.get_json(silent=True)\n result_json = request_json\n\n if request_json:\n\n context = {\"context\": request_json[\"context\"]}\n\n if app.config[\"DEFAULT_WELCOME_INTENT_NAME\"] in request_json.get(\n \"input\"):\n intent = Intent.objects(\n intentId=app.config[\"DEFAULT_WELCOME_INTENT_NAME\"]).first()\n result_json[\"complete\"] = True\n result_json[\"intent\"][\"object_id\"] = str(intent.id)\n result_json[\"intent\"][\"id\"] = str(intent.intentId)\n result_json[\"input\"] = request_json.get(\"input\")\n template = Template(\n intent.speechResponse,\n undefined=SilentUndefined)\n result_json[\"speechResponse\"] = split_sentence(template.render(**context))\n\n app.logger.info(request_json.get(\"input\"), extra=result_json)\n return build_response.build_json(result_json)\n\n intent_id, confidence, suggestions = predict(request_json.get(\"input\"))\n app.logger.info(\"intent_id => %s\" % intent_id)\n intent = Intent.objects.get(intentId=intent_id)\n\n if intent.parameters:\n parameters = intent.parameters\n else:\n parameters = []\n\n if ((request_json.get(\"complete\") is None) or (\n request_json.get(\"complete\") is True)):\n result_json[\"intent\"] = {\n \"object_id\": str(intent.id),\n \"confidence\": confidence,\n \"id\": str(intent.intentId.encode('utf8'))\n }\n\n if parameters:\n # Extract NER entities\n extracted_parameters = entity_extraction.predict(\n intent_id, request_json.get(\"input\"))\n\n missing_parameters = []\n result_json[\"missingParameters\"] = []\n result_json[\"extractedParameters\"] = {}\n result_json[\"parameters\"] = []\n for parameter in parameters:\n result_json[\"parameters\"].append({\n \"name\": parameter.name,\n \"type\": parameter.type,\n \"required\": parameter.required\n })\n\n if parameter.required:\n if parameter.name not in extracted_parameters.keys():\n result_json[\"missingParameters\"].append(\n parameter.name)\n missing_parameters.append(parameter)\n\n result_json[\"extractedParameters\"] = extracted_parameters\n\n if missing_parameters:\n result_json[\"complete\"] = False\n current_node = missing_parameters[0]\n result_json[\"currentNode\"] = current_node[\"name\"]\n result_json[\"speechResponse\"] = split_sentence(current_node[\"prompt\"])\n 
else:\n result_json[\"complete\"] = True\n context[\"parameters\"] = extracted_parameters\n else:\n result_json[\"complete\"] = True\n\n elif request_json.get(\"complete\") is False:\n if \"cancel\" not in intent.name:\n intent_id = request_json[\"intent\"][\"id\"]\n intent = Intent.objects.get(intentId=intent_id)\n\n extracted_parameter = entity_extraction.replace_synonyms({\n request_json.get(\"currentNode\"): request_json.get(\"input\")\n })\n\n # replace synonyms for entity values\n result_json[\"extractedParameters\"].update(extracted_parameter)\n\n result_json[\"missingParameters\"].remove(\n request_json.get(\"currentNode\"))\n\n if len(result_json[\"missingParameters\"]) == 0:\n result_json[\"complete\"] = True\n context = {\"parameters\": result_json[\"extractedParameters\"],\n \"context\": request_json[\"context\"]}\n else:\n missing_parameter = result_json[\"missingParameters\"][0]\n result_json[\"complete\"] = False\n current_node = [\n node for node in intent.parameters if missing_parameter in node.name][0]\n result_json[\"currentNode\"] = current_node.name\n result_json[\"speechResponse\"] = split_sentence(current_node.prompt)\n else:\n result_json[\"currentNode\"] = None\n result_json[\"missingParameters\"] = []\n result_json[\"parameters\"] = {}\n result_json[\"intent\"] = {}\n result_json[\"complete\"] = True\n\n if result_json[\"complete\"]:\n if intent.apiTrigger:\n isJson = False\n parameters = result_json[\"extractedParameters\"]\n headers = intent.apiDetails.get_headers()\n app.logger.info(\"headers %s\" % headers)\n url_template = Template(\n intent.apiDetails.url, undefined=SilentUndefined)\n rendered_url = url_template.render(**context)\n if intent.apiDetails.isJson:\n isJson = True\n request_template = Template(\n intent.apiDetails.jsonData, undefined=SilentUndefined)\n parameters = json.loads(request_template.render(**context))\n\n try:\n result = call_api(rendered_url,\n intent.apiDetails.requestType, headers,\n parameters, isJson)\n except Exception as e:\n app.logger.warning(\"API call failed: %s\", e)\n result_json[\"speechResponse\"] = [\"Service is not available. 
Please try again later.\"]\n else:\n context[\"result\"] = result\n template = Template(\n intent.speechResponse, undefined=SilentUndefined)\n result_json[\"speechResponse\"] = split_sentence(template.render(**context))\n else:\n context[\"result\"] = {}\n template = Template(intent.speechResponse,\n undefined=SilentUndefined)\n result_json[\"speechResponse\"] = split_sentence(template.render(**context))\n app.logger.info(request_json.get(\"input\"), extra=result_json)\n return build_response.build_json(result_json)\n else:\n return abort(400)\n\n\ndef update_model(app, message, **extra):\n \"\"\"\n Signal hook to be called after training is completed.\n Reloads ml models and synonyms.\n :param app:\n :param message:\n :param extra:\n :return:\n \"\"\"\n global sentence_classifier\n\n sentence_classifier = EmbeddingIntentClassifier.load(\n app.config[\"MODELS_DIR\"], app.config[\"USE_WORD_VECTORS\"])\n\n synonyms = get_synonyms()\n\n global entity_extraction\n\n entity_extraction = EntityExtractor(synonyms)\n\n app.logger.info(\"Intent Model updated\")\n\n\nwith app.app_context():\n update_model(app, \"Models updated\")\n\nmodel_updated_signal.connect(update_model, app)\n\n\ndef predict(sentence):\n \"\"\"\n Predict Intent using Intent classifier\n :param sentence:\n :return:\n \"\"\"\n bot = Bot.objects.get(name=\"default\")\n predicted, intents = sentence_classifier.process(sentence)\n app.logger.info(\"predicted intent %s\", predicted)\n if predicted[\"confidence\"] < bot.config.get(\"confidence_threshold\", .90):\n intents = Intent.objects(intentId=app.config[\"DEFAULT_FALLBACK_INTENT_NAME\"])\n intents = intents.first().intentId\n return intents, 1.0, []\n else:\n return predicted[\"intent\"], predicted[\"confidence\"], intents[1:]\n","sub_path":"app/endpoint/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":9092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
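A sketch of the client side of the payload exchange documented in the endpoint docstring above; the base URL is an assumption, and the payload mirrors the documented sample:

```python
# Hypothetical client round-trip for the /api/v1 endpoint; the host/port are
# assumptions. The bot fills in `intent`, `complete`, and `speechResponse`;
# resending the returned payload (with a new `input`) keeps the chat context.
import requests

payload = {
    "currentNode": "",
    "complete": None,
    "parameters": [],
    "extractedParameters": {},
    "missingParameters": [],
    "intent": {},
    "context": {},
    "input": "hello",
    "speechResponse": [],
}

response = requests.post("http://localhost:8080/api/v1", json=payload)
response.raise_for_status()
reply = response.json()
print(reply["speechResponse"])
```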
+{"seq_id":"305964357","text":"\"\"\"Script to analyze how many dams found and how many pre-known.\"\"\"\nimport logging\nimport os\nimport pathlib\nimport sqlite3\nimport sys\n\nfrom osgeo import gdal\nfrom osgeo import osr\nfrom osgeo import ogr\nimport rtree\nimport shapely.wkb\n\nTARGET_VECTOR_PATH = r\"C:\\Users\\richp\\Downloads\\known_dams.gpkg\"\nBASE_DAMS_DB_PATH = r\"C:\\Users\\richp\\Documents\\annotated_dams\\natgeo_dams_database_2020_07_01.db\"\n\nlogging.basicConfig(\n stream=sys.stdout,\n level=logging.DEBUG,\n format=(\n '%(asctime)s (%(relativeCreated)d) %(processName)s %(levelname)s '\n '%(name)s [%(funcName)s:%(lineno)d] %(message)s'))\nLOGGER = logging.getLogger(__name__)\n\ndef _execute_sqlite(\n sqlite_command, database_path, argument_list=None,\n mode='read_only', execute='execute', fetch=None):\n \"\"\"Execute SQLite command and attempt retries on a failure.\n\n Parameters:\n sqlite_command (str): a well formatted SQLite command.\n database_path (str): path to the SQLite database to operate on.\n argument_list (list): `execute == 'execute` then this list is passed to\n the internal sqlite3 `execute` call.\n mode (str): must be either 'read_only' or 'modify'.\n execute (str): must be either 'execute', 'many', or 'script'.\n fetch (str): if not `None` can be either 'all' or 'one'.\n If not None the result of a fetch will be returned by this\n function.\n\n Returns:\n result of fetch if `fetch` is not None.\n\n \"\"\"\n cursor = None\n connection = None\n try:\n if mode == 'read_only':\n ro_uri = r'%s?mode=ro' % pathlib.Path(\n os.path.abspath(database_path)).as_uri()\n LOGGER.debug(\n '%s exists: %s', ro_uri, os.path.exists(os.path.abspath(\n database_path)))\n connection = sqlite3.connect(ro_uri, uri=True)\n elif mode == 'modify':\n connection = sqlite3.connect(database_path)\n else:\n raise ValueError('Unknown mode: %s' % mode)\n\n if execute == 'execute':\n cursor = connection.execute(sqlite_command, argument_list)\n elif execute == 'many':\n cursor = connection.executemany(sqlite_command, argument_list)\n elif execute == 'script':\n cursor = connection.executescript(sqlite_command)\n else:\n raise ValueError('Unknown execute mode: %s' % execute)\n\n result = None\n payload = None\n if fetch == 'all':\n payload = (cursor.fetchall())\n elif fetch == 'one':\n payload = (cursor.fetchone())\n elif fetch is not None:\n raise ValueError('Unknown fetch mode: %s' % fetch)\n if payload is not None:\n result = list(payload)\n cursor.close()\n connection.commit()\n connection.close()\n return result\n except Exception:\n LOGGER.exception('Exception on _execute_sqlite: %s', sqlite_command)\n if cursor is not None:\n cursor.close()\n if connection is not None:\n connection.commit()\n connection.close()\n raise\n\n\ndef main():\n \"\"\"Entry point.\"\"\"\n for table_id, field in [\n #('detected_dams', 'probability'),\n ('work_status', 'processed')]:\n LOGGER.info(f'processing {table_id}')\n bounding_box_list = _execute_sqlite(\n f'''\n SELECT lng_min, lat_min, lng_max, lat_max, grid_id, processed\n FROM {table_id}\n GROUP BY lng_min, lat_min, lng_max, lat_max\n ''', BASE_DAMS_DB_PATH, fetch='all', argument_list=[])\n gpkg_driver = ogr.GetDriverByName('GPKG')\n vector = gpkg_driver.CreateDataSource(f'{table_id}.gpkg')\n wgs84_srs = osr.SpatialReference()\n wgs84_srs.ImportFromEPSG(4326)\n layer = vector.CreateLayer(\n 'known_dams', wgs84_srs, geom_type=ogr.wkbPolygon)\n layer.CreateField(ogr.FieldDefn('grid_id', ogr.OFTString))\n layer.CreateField(ogr.FieldDefn('processed', 
ogr.OFTString))\n\n LOGGER.info(f'starting transaction')\n layer.StartTransaction()\n for lng_min, lat_min, lng_max, lat_max, grid_id, processed in (\n bounding_box_list):\n box = shapely.geometry.box(lng_min, lat_min, lng_max, lat_max)\n feature = ogr.Feature(layer.GetLayerDefn())\n feature.SetField('grid_id', grid_id)\n feature.SetField('processed', processed)\n feature.SetGeometry(ogr.CreateGeometryFromWkb(box.wkb))\n layer.CreateFeature(feature)\n\n LOGGER.info(f'committing transaction')\n layer.CommitTransaction()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"utils/make_shapefile_for_lisa.py","file_name":"make_shapefile_for_lisa.py","file_ext":"py","file_size_in_byte":4714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
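`_execute_sqlite` wraps all of the cursor and connection bookkeeping, so callers only pick a mode, an execute style, and a fetch style. A small usage sketch against the `work_status` table queried in `main()`, reusing the module's own constants:

```python
# Read-only query through _execute_sqlite, reusing the schema from main().
rows = _execute_sqlite(
    'SELECT grid_id, processed FROM work_status WHERE processed = ?',
    BASE_DAMS_DB_PATH,
    argument_list=[1],   # bound to the ? placeholder
    mode='read_only',
    execute='execute',
    fetch='all')
for grid_id, processed in rows:
    LOGGER.info('%s processed=%s', grid_id, processed)
```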
+{"seq_id":"262502053","text":"import logging\nfrom typing import Generator\n\nimport pytest\nfrom pymongo import MongoClient\n\nlogger = logging.getLogger(__name__)\n\n\n@pytest.fixture(scope=\"session\")\ndef mongo_example_db() -> Generator:\n \"\"\"Return a connection to the MongoDB example DB\"\"\"\n uri = \"mongodb://mongo_user:mongo_pass@mongodb_example/mongo_test\"\n\n client = MongoClient(uri, serverSelectionTimeoutMS=5000)\n logger.debug(f\"Connecting to MongoDB example database at: {uri}\")\n # Setup above...\n yield client\n # Teardown below...\n client.close()\n\n\n@pytest.mark.integration\ndef test_mongo_example_data(mongo_example_db):\n \"\"\"Confirm that the example database is populated with simulated data\"\"\"\n db = mongo_example_db[\"mongo_test\"]\n assert set(db.collection_names()) == {\n \"payment_card\",\n \"orders\",\n \"customer\",\n \"employee\",\n \"product\",\n \"reports\",\n \"customer_details\",\n }\n assert db.customer.count() == 3\n assert db.payment_card.count() == 2\n assert db.orders.count() == 4\n assert db.employee.count() == 2\n assert db.product.count() == 3\n assert db.reports.count() == 4\n","sub_path":"tests/integration_tests/test_integration_mongodb_example.py","file_name":"test_integration_mongodb_example.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"352750986","text":"# coding:utf-8\n__author__ = 'Administrator'\nfrom Tkinter import * # 导入 Tkinter 库\n\nroot = Tk() # 创建窗口对象的背景色\n\nroot.title('Label')\nroot.geometry('900x600')\nroot.resizable(width=False, height=True)\n\nm = Menu(root)\nm2=Menu(m)\nfor item in ['python', 'perl', 'php', 'ruby']:\n m2.add_command(label=item)\n\nm2.add_separator()\n\nfor item in ['java', 'c++', 'c']:\n m2.add_command(label=item)\nm.add_cascade(label='lan', menu=m2)\nroot['menu'] =m\n\nroot.mainloop()\n\n","sub_path":"GUI/Tkinter/ok/Menu/separator.py","file_name":"separator.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"647360046","text":"import array, time\nfrom machine import Pin\nimport rp2\n\n@rp2.asm_pio(sideset_init=rp2.PIO.OUT_LOW, out_shiftdir=rp2.PIO.SHIFT_LEFT, autopull=True, pull_thresh=24)\ndef ws2812():\n T1 = 2\n T2 = 5\n T3 = 3\n wrap_target()\n label(\"bitloop\")\n out(x, 1) .side(0) [T3 - 1]\n jmp(not_x, \"do_zero\") .side(1) [T1 - 1]\n jmp(\"bitloop\") .side(1) [T2 - 1]\n label(\"do_zero\")\n nop() .side(0) [T2 - 1]\n wrap()\n \nclass ws2812b:\n def __init__(self, num_leds, state_machine, pin):\n self.pixels = array.array(\"I\", [0 for _ in range(num_leds)])\n self.sm = rp2.StateMachine(state_machine, ws2812, freq=8000000, sideset_base=Pin(pin))\n self.sm.active(1)\n self.num_leds = num_leds\n\n def set_pixel(self, pixel_num, red, green, blue):\n self.pixels[pixel_num] = blue | red << 8 | green << 16\n \n def show(self):\n for i in range(self.num_leds):\n self.sm.put(self.pixels[i],8)\n \n def fill(self, red, green, blue):\n for i in range(self.num_leds):\n self.set_pixel(i, red, green, blue)\n \n ","sub_path":"ws2812b.py","file_name":"ws2812b.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"409136932","text":"# -*- coding: utf-8 -*-\n# vim: set fileencoding=utf-8:noet:tabstop=4:softtabstop=4:shiftwidth=8:expandtab\n\n\"\"\" python3 method \"\"\"\n\n# Copyright (c) 2010 - 2020, © Badassops LLC / Luc Suryo\n# All rights reserved.\n# BSD 3-Clause License : http://www.freebsd.org/copyright/freebsd-license.html\n\nimport sys\nimport threading\nfrom time import sleep\n\nclass SpinCursor(threading.Thread):\n \"\"\" class and function so display a wait spinner (dots or wheel)\n \"\"\"\n\n def __init__(self, msg=None, maxspin=0, minspin=10, speed=5, mode=None):\n # Count of a spin\n self.count = 0\n self.out = sys.stdout\n self.flag = False\n self.max = maxspin\n self.min = minspin\n # Any message to print first ?\n self.msg = msg\n # Complete printed string\n self.string = None\n # Speed is given as number of spins a second\n # Use it to calculate spin wait time\n self.waittime = 1.0/float(speed*10)\n if mode == 'dots':\n self.spinchars = (u'◦ ', u'○ ', u'◎ ', u'◉ ')\n if mode == 'count-down':\n self.spinchars = (u'9 ', u'8 ', u'7 ', u'6 ', u'5 ', u'4 ', u'3 ', u'2 ', u'1 ', u'0 ')\n if mode == 'wheel':\n self.spinchars = (u'-', u'\\\\ ', u'| ', u'/ ')\n threading.Thread.__init__(self, None, None, \"Spin Thread\")\n\n def spin(self):\n \"\"\" perform a single spin \"\"\"\n for spinchar in self.spinchars:\n if self.msg:\n self.string = self.msg + '...\\t' + spinchar + '\\r'\n else:\n self.string = '...\\t' + spinchar + '\\r'\n #self.string = self.msg + '...\\t' + spinchar + '\\r'\n self.out.write(self.string)\n self.out.flush()\n sleep(self.waittime)\n\n def run(self):\n \"\"\" run spinning \"\"\"\n while (not self.flag) and ((self.count < self.min) or (self.count < self.max)):\n self.spin()\n self.count += 1\n # Clean up display...\n #self.out.write(' '*(len(self.string) + len('...\\t')))\n self.out.write('\\033[2K')\n\n def stop(self):\n \"\"\" stop spinning \"\"\"\n self.flag = True\n\ndef spin_message(message=None, seconds=None):\n \"\"\" print the given message and wait for the given seconds \"\"\"\n spin = SpinCursor(msg=message, minspin=seconds, speed=1, mode='wheel')\n spin.start()\n sleep(seconds)\n spin.stop()\n\ndef dot_message(message=None, seconds=None):\n \"\"\" print dots while and wait for the given seconds \"\"\"\n spin = SpinCursor(msg=message, minspin=seconds, speed=1, mode='dots')\n spin.start()\n sleep(seconds)\n spin.stop()\n\ndef count_down_message(message=None, seconds=None):\n \"\"\" print the given message and wait for the given seconds \"\"\"\n spin = SpinCursor(msg=message, minspin=seconds, speed=1, mode='count-down')\n spin.start()\n sleep(seconds)\n spin.stop()\n","sub_path":"v2/awsbuild/misc/spinner.py","file_name":"spinner.py","file_ext":"py","file_size_in_byte":2887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"12788103","text":"'''\nPython module with utils used to load Google's NYT annotations (downloadable at https://github.com/dmorr-google/nyt-salience).\nJust invoke get_google_dataset method.\n'''\n\nimport os\nimport logging\nimport csv\n\nfrom dataset_utils import NYT_DIR\n\n\nlogger = logging.getLogger('Google')\n\n\nclass GoogleDataset:\n def __init__(self):\n self.train = None\n self.eval = None\n\n\nclass GoogleEntity:\n def __init__(self):\n self.index = None # int\n self.salience = None # string\n self.mention_counts = None # int\n self.text = None # string\n self.start_byte_offset = None # int\n self.end_byte_offset = None # int\n self.freebase_mid = None # string\n\n\nclass GoogleDocument:\n def __init__(self):\n self.id = None # int\n self.title = None # string\n self.entities = []\n\n\ndef fill_with_first_row(nyt_doc, row):\n nyt_doc = GoogleDocument()\n nyt_doc.id = int(row[0])\n nyt_doc.title = row[1]\n\n\ndef row_google_entity(row):\n google_entity = GoogleEntity()\n google_entity.index = int(row[0])\n google_entity.salience = row[1]\n google_entity.mention_counts = int(row[2])\n google_entity.start_byte_offset = int(row[4])\n google_entity.end_byte_offset = int(row[5])\n google_entity.freebase_mid = row[6]\n\n return google_entity\n\n\ndef get_google_documents(path):\n logger.info('Loading {0} file...'.format(path))\n google_docs = []\n\n with open(path, 'r') as f:\n\n r = csv.reader(f, delimiter='\\t')\n first_line = True\n google_doc = GoogleDocument()\n for row in r:\n\n if len(row):\n if first_line:\n\n fill_with_first_row(google_doc, row)\n first_line = False\n\n else:\n\n google_entity = row_google_entity(row)\n google_doc.entities.append(google_entity)\n else:\n\n google_docs.append(google_doc)\n google_doc = GoogleDocument()\n first_line = True\n\n logger.info('{0} file loaded.'.format(path))\n return google_docs\n\n\ndef get_google_dataset():\n logger.info('Loading Google annotations')\n\n google_dataset = GoogleDataset()\n google_dataset.train = get_google_documents(os.path.join(NYT_DIR, 'nyt-train'))\n google_dataset.eval = get_google_documents(os.path.join(NYT_DIR, 'nyt-eval'))\n\n logger.info('Google annotations loaded.')\n\n return google_dataset","sub_path":"src/main/python/dataset/nyt/google.py","file_name":"google.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"277499093","text":"#\n# Copyright 2021 Ocean Protocol Foundation\n# SPDX-License-Identifier: Apache-2.0\n#\n\"\"\"DID Resolver module.\"\"\"\n# Copyright 2018 Ocean Protocol Foundation\n# SPDX-License-Identifier: Apache-2.0\n\nimport logging\nfrom typing import Optional\n\nfrom enforce_typing import enforce_types\nfrom ocean_lib.assets.asset import Asset\nfrom ocean_lib.common.aquarius.aquarius_provider import AquariusProvider\nfrom ocean_lib.models.data_token import DataToken\nfrom web3.main import Web3\n\nlogger = logging.getLogger(\"keeper\")\n\n\n@enforce_types\ndef resolve_asset(\n did: str,\n metadata_cache_uri: Optional[str] = None,\n web3: Optional[Web3] = None,\n token_address: Optional[str] = None,\n) -> Asset:\n \"\"\"Resolve a DID to an URL/DDO or later an internal/external DID.\n\n :param did: the asset id to resolve, this is part of the ocean\n DID did:op:<32 byte value>\n :param metadata_cache_uri: str the url of the metadata store\n :param web3: Web3 instance\n :param token_address: str the address of the DataToken smart contract\n\n :return Asset: the resolved DID\n \"\"\"\n assert metadata_cache_uri or (\n web3 and token_address\n ), \"Either metadata_cache_uri or (web3 and token_address) is required.\"\n\n if not metadata_cache_uri:\n metadata_cache_uri = DataToken(web3, token_address).get_metadata_url()\n\n logger.debug(f\"found did {did} -> url={metadata_cache_uri}\")\n ddo = AquariusProvider.get_aquarius(metadata_cache_uri).get_asset_ddo(did)\n\n if ddo:\n return Asset(dictionary=ddo.as_dictionary())\n","sub_path":"ocean_lib/assets/asset_resolver.py","file_name":"asset_resolver.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"141198474","text":"import tensorflow as tf\nimport os\n\n# Reset TensorFlow Graph\ntf.reset_default_graph()\n\n# TensorFlow model variable\nlearning_rate = 0.1\nepochs = 500\n\n# Coefficient of quadratic equation\na = 1\nb = -2\nc = 2\n\nx = tf.Variable(0.,name='x_variable')\nstep_x = tf.Variable(0, trainable=False)\nloss = a * x * x - b * x + c\noptimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss, global_step=step_x)\n\n# TensorBoard\nsummary_op = tf.summary.scalar('x',x)\nfile_writer = tf.summary.FileWriter('./log',graph=tf.get_default_graph())\n# tensorboard --logdir=log (run in terminal to launch TensorBoard)\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n for epoch in range(epochs):\n _, step ,result, summary = sess.run([optimizer, step_x, x, summary_op])\n print('Step {}: Computed Result = {}'.format(step,result))\n\n file_writer.add_summary(summary,step)\n file_writer.flush()\n\n print('Final Result of x is {}'.format(result))\n","sub_path":"tensorflow-quad.py","file_name":"tensorflow-quad.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"58982655","text":"\"\"\"\nClass to represent data in a table with headers\n\nHeader:\n header (str} - header ID\n attribute (dict} -\n \"Type\": type of element in cell\n \"CastFunction\": function to cast cell to element type\n \"Label\": label header for the column\n \"Alignment\": alignment for column header label\n \"Width\": width for the column header\n\n [\n \"jobID\",\n {\n \"Type\": \"int\",\n \"CastFunction\": int,\n \"Label\": \"Job ID\",\n \"Alignment\": \"right\",\n \"Width\": 80,\n },\n ],\n\"\"\"\n\n# pylint: disable=too-few-public-methods\n\nimport itertools\n\nclass HeaderAttributeKey:\n\n Alignment = \"Alignment\"\n CastFunction = \"CastFunction\"\n Label = \"Label\"\n Type = \"Type\"\n Width = \"Width\"\n\n\nclass DataKey:\n\n Cell = 0\n ToolTip = 1\n Obj = 2\n\n\nclass HeaderInfo:\n \"\"\"\n Header information\n \"\"\"\n\n header = None\n attribute = None\n headerList = None\n toolTip = None\n\n\nclass DataItem:\n \"\"\"\n Data item information\n \"\"\"\n\n cell = None\n toolTip = None\n obj = None\n\n\nclass Index:\n \"\"\"\n Dummy QModelIndex\n\n Returns:\n Index: Dummy QModelIndex\n \"\"\"\n\n def __init__(self, row, column):\n\n self._row = row\n self._column = column\n\n def row(self):\n return self._row\n\n def column(self):\n return self._column\n\n def isValid(self):\n return True\n\n\nclass TableData:\n \"\"\"\n Class to represent data in a table with columns headers\n\n data[x][y] = DataItem\n\n A header is a list of the form:\n [str, dict]\n str = string representing the column name\n dict = dictionary representing different attributes\n [\n \"Column Name\",\n {\n \"Alignment\": \"center\",\n \"CastFunction\": str,\n \"Label\": \"Column Label\",\n \"ToolTip\": \"Tool Tip string\"\n \"Type\": \"str\",\n \"Width\": 220,\n },\n ]\n\n Raises:\n IndexError: index is out of range\n TypeError: invalid index type\n\n Returns:\n str -- tableData[index] column header\n list - tableData[index,] data row\n object - tableData[row, col] element at position row,col on table\n \"\"\"\n\n def __init__(self, headerList=None, dataList=None):\n\n self.data = [] # 2 dimensional dataset\n self.headers = [] # list of HeaderInfo objects\n self.headerName = [] # list of header/columns names HeaderInfo.header\n\n if headerList is not None:\n for h in headerList:\n self.addHeader(h)\n\n if dataList is not None:\n if len(dataList) == 1:\n self.insertRow(0, dataList)\n else:\n for position, data in enumerate(dataList):\n self.insertRow(position, data)\n\n def __getitem__(self, index):\n\n if isinstance(index, (int, slice)):\n if (index < 0) or (index > len(self.headers) - 1):\n raise IndexError(\"list index [{}] out of range\".format(index))\n\n return self.headers[index].attribute[\"Label\"]\n\n if isinstance(index, tuple):\n col = None\n\n if len(index) == 1:\n row = index[0]\n elif len(index) == 2:\n row, col = index\n else:\n raise IndexError(\"Bad index format: {}\".format(index))\n\n if col is None:\n returnRow = []\n currentRow = self.data[row]\n\n for r in currentRow:\n returnRow.append(r.cell)\n\n return returnRow\n\n return self.data[row][col].cell\n\n raise TypeError(\"Invalid index type\")\n\n def __setitem__(self, index, value):\n\n if isinstance(index, int):\n self.headers[index].attribute[\"Label\"] = value\n\n elif isinstance(index, tuple):\n # Only update members of the data table no headers\n if len(index) == 2:\n row, col = index\n else:\n raise IndexError(\"Bad index format: {}\".format(index))\n\n index = Index(row, col) # Simulate index\n 
self.setData(index, value)\n else:\n raise TypeError(\"Invalid index type\")\n\n def __len__(self):\n return len(self.data)\n\n def addHeader(self, header=None):\n \"\"\"\n Add header information\n\n Keyword Arguments:\n header (list) -- list containing a header (default: {None})\n \"\"\"\n\n if header is not None:\n oHeader = HeaderInfo()\n oHeader.header = header[0]\n oHeader.attribute = header[1]\n oHeader.headerList = header\n self.headerName.append(header[0])\n self.headers.append(oHeader)\n\n def setData(self, index, value):\n \"\"\"\n Set the value of a table cell\n\n Arguments:\n index (Index) -- cell position as a (row, column) index\n value -- new cell value, or a DataItem whose cell, toolTip and obj are copied\n \"\"\"\n\n if (value is not None) and index.isValid():\n # insertRow() also funnels its writes through setData, so only\n # one method mutates cells; better for logging purposes\n row = index.row()\n column = index.column()\n\n if isinstance(value, DataItem):\n self.data[row][column].cell = value.cell\n self.data[row][column].toolTip = value.toolTip\n self.data[row][column].obj = value.obj\n else:\n self.data[row][column].cell = value\n\n return True\n\n return False\n\n def setToolTip(self, index, value):\n \"\"\"\n Set the tooltip of a table cell\n\n Arguments:\n index (Index) -- cell position as a (row, column) index\n value -- new tooltip value\n \"\"\"\n\n if (value is not None) and index.isValid():\n # only the toolTip member of the cell is touched here\n row = index.row()\n column = index.column()\n self.data[row][column].toolTip = value\n\n return True\n\n return False\n\n def insertRow(self, position, row=None):\n \"\"\"\n Insert a data row\n\n Arguments:\n position (int) -- row number where to insert the data\n row (list) -- list with row data\n \"\"\"\n\n if row is not None:\n totalColumns = len(self.headerName)\n emptyRow = []\n for _ in itertools.repeat(None, totalColumns):\n rowItem = DataItem()\n emptyRow.append(rowItem)\n\n self.data.insert(position, emptyRow)\n\n for column, value in enumerate(row):\n if isinstance(value, list):\n newItem = DataItem()\n newItem.cell = value[DataKey.Cell]\n newItem.toolTip = value[DataKey.ToolTip]\n newItem.obj = value[DataKey.Obj]\n index = Index(position, column)\n self.setData(index, newItem)\n else:\n if value is not None:\n raise ValueError(\"Item at index {} is invalid\".format(column))\n else:\n totalColumns = len(self.headerName)\n emptyRow = []\n for _ in itertools.repeat(None, totalColumns):\n emptyRow.append(\"\")\n self.data.insert(position, emptyRow)\n\n def removeRow(self, index):\n \"\"\"\n Delete a data row\n\n Arguments:\n index (int) -- row number to delete 0 based\n\n Returns:\n list -- row deleted\n \"\"\"\n element = self.data.pop(index)\n\n return element\n\n def insertColumn(self, position=0, columnHeader=None, columnData=None):\n \"\"\"\n Insert a data column\n\n Arguments:\n position (int) -- column number where to insert the data\n columnHeader (str) -- header for column to be inserted\n columnData (list) -- data to insert in column cells, one value per row\n \"\"\"\n\n if columnHeader is None:\n self.headers.insert(position, HeaderInfo())\n self.headerName.insert(position, \"\")\n else:\n if isinstance(columnHeader, list):\n oHeader = HeaderInfo()\n oHeader.header = columnHeader[0]\n oHeader.attribute = columnHeader[1]\n oHeader.headerList = columnHeader\n self.headers.insert(position, oHeader)\n self.headerName.insert(position, oHeader.header)\n elif isinstance(columnHeader, HeaderInfo):\n self.headers.insert(position, columnHeader)\n self.headerName.insert(position, columnHeader.header)\n else:\n raise TypeError(\"Invalid column header type.\")\n\n if columnData is None:\n for r in self.data:\n r.insert(position, DataItem())\n else:\n for rowIndex, r in enumerate(self.data):\n element = DataItem()\n # one cell value per row, wrapped in a DataItem before inserting\n element.cell = columnData[rowIndex]\n r.insert(position, element)\n\n def deleteColumn(self, index):\n \"\"\"\n Delete a column from the table\n\n Arguments:\n index (int) -- column to delete\n\n Returns:\n list -- list containing the header ID, header attributes and data rows deleted\n \"\"\"\n\n deletedInfo = []\n deletedRows = []\n deletedInfo.append(self.headers.pop(index))\n deletedInfo.append(self.headerName.pop(index))\n\n for row in self.data:\n deletedRows.append(row.pop(index))\n\n deletedInfo.append(deletedRows)\n\n return deletedInfo\n","sub_path":"MKVBatchMultiplex/dataset/TableData.py","file_name":"TableData.py","file_ext":"py","file_size_in_byte":9748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
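A usage sketch matching the indexing semantics in the class docstring: an int index returns a column label, a `(row,)` tuple a full row of cell values, and `(row, col)` a single cell. The header layout follows the module-level example; all values here are made up:

```python
# Hypothetical two-column table; each cell is a [cell, toolTip, obj] triple
# as indexed by DataKey.
header_list = [
    ["jobID", {"Type": "int", "CastFunction": int, "Label": "Job ID",
               "Alignment": "right", "Width": 80}],
    ["status", {"Type": "str", "CastFunction": str, "Label": "Status",
                "Alignment": "center", "Width": 120}],
]
rows = [
    [[101, "job id", None], ["queued", "job status", None]],
    [[102, "job id", None], ["running", "job status", None]],
]

td = TableData(headerList=header_list, dataList=rows)
print(td[1])       # 'Status' (column label)
print(td[0,])      # [101, 'queued'] (row of cell values)
print(td[1, 1])    # 'running' (single cell)
td[0, 1] = "done"  # routed through setData via a simulated Index
```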
+{"seq_id":"188554724","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 25 21:11:38 2018\n\n@author: Administrator\n\"\"\"\nimport requests\n\nurl = r'http://www.sse.com.cn/disclosure/listedinfo/announcement/c/2018-03-06/600066_20180306_2.pdf'\ntarget_file_name = '600066.pdf'\n\nr = requests.get(url) # create HTTP response object\nprint(r.content)\nwith open(target_file_name,'wb') as f:\n f.write(r.content)\n ","sub_path":"webscrap/pdf/pdf_download.py","file_name":"pdf_download.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"345034775","text":"from django.shortcuts import render\n\n# Create your views here.\n\nfrom django.http import HttpResponse\nfrom django.contrib import messages\nimport requests\nimport json\nfrom django.shortcuts import render\nfrom django.template import loader\n\n\ndef home(request):\n template = loader.get_template('home.html')\n\n response = requests.get(\"http://10.200.24.125:8000/api/worker\")\n\n context = {\"workers\": json.loads(response.text)[\"results\"]}\n\n return HttpResponse(template.render(context, request))\n\ndef posterHome(request):\n template = loader.get_template('posterhome.html')\n\n context = {}\n\n return HttpResponse(template.render(context, request))\n\n\ndef mePoster(request):\n template = loader.get_template('mePoster.html')\n\n response = requests.get(\"http://10.200.26.0:8000/api/poster/3\")\n\n context = json.loads(response.text)\n \n return HttpResponse(template.render(context, request))\n\ndef posterInbox(request, posterID):\n template = loader.get_template('posterinbox.html')\n\n response = requests.get(\"http://10.200.26.0:8000/api/messages/\")\n context = json.loads(response.text)\n messages = context[\"results\"]\n context = {\"messages\":[]}\n\n for message in messages:\n print(message)\n d = {\"task_id\":message[\"task_id\"],\"message\":message[\"message_content\"]}\n workerResponse = requests.get(\"http://10.200.26.0:8000/api/worker/\" + str(message[\"worker_id\"]))\n worker = json.loads(workerResponse.text)\n d[\"worker\"] = worker\n\n taskResponse = requests.get(\"http://10.200.26.0:8000/api/tasks/\" + str(message[\"task_id\"]))\n task = json.loads(taskResponse.text)\n d[\"task\"] = task \n context['messages'].append(d)\n\n return HttpResponse(template.render(context, request))\n\ndef showTask(request, ID):\n template = loader.get_template('showtask.html')\n\n response = requests.get(\"http://10.200.26.0:8000/api/tasks/\" + str(ID))\n\n context = {\"task\":json.loads(response.text)}\n\n return HttpResponse(template.render(context, request))\n\n\ndef workerHome(request):\n template = loader.get_template('workerhome.html')\n\n response = requests.get(\"http://10.200.24.125:8000/api/worker\")\n\n context = {\"workers\": json.loads(response.text)[\"results\"]}\n\n return HttpResponse(template.render(context, request))\n\n\ndef workerProfile(request, profile_id):\n template = loader.get_template('workerProfile.html')\n\n response = requests.get(\n \"http://10.200.26.0:8000/api/worker/\" + str(profile_id))\n\n context = json.loads(response.text)\n\n return HttpResponse(template.render(context, request))\n\n\ndef createJob(request):\n if request.method==\"POST\":\n url = \"http://10.200.26.0:8000/api/tasks/\"\n job = request.POST\n requests.post(url, data = job)\n\n template = loader.get_template('createjob.html')\n\n response = requests.get(\"http://10.200.26.0:8000/api/tasks/\")\n\n context = json.loads(response.text)\n tasks = context[\"results\"]\n print(tasks)\n c = {\"tasks\": tasks}\n\n return HttpResponse(template.render(c, request))\n\n\ndef housekeeping(request):\n template = loader.get_template('housekeeping.html')\n\n response = requests.get(\"http://10.200.26.0:8000/api/tasks/\")\n\n context = json.loads(response.text)[\"results\"]\n\n c = {\"tasks\":[]}\n\n for task in context:\n if task[\"category\"] == \"HK\":\n c[\"tasks\"].append(task)\n\n return HttpResponse(template.render(c, request))\n\ndef education(request):\n template = loader.get_template('education.html')\n\n response = 
requests.get(\"http://10.200.26.0:8000/api/worker/\")\n\n context = json.loads(response.text)\n\n return HttpResponse(template.render(context, request))\n\ndef technology(request):\n template = loader.get_template('technology.html')\n\n response = requests.get(\"http://10.200.26.0:8000/api/worker/\")\n\n context = json.loads(response.text)\n\n return HttpResponse(template.render(context, request))\n\ndef animals(request):\n template = loader.get_template('animals.html')\n\n response = requests.get(\"http://10.200.26.0:8000/api/worker/\")\n\n context = json.loads(response.text)\n\n return HttpResponse(template.render(context, request))\n\ndef repairs(request):\n template = loader.get_template('repairs.html')\n\n response = requests.get(\"http://10.200.26.0:8000/api/worker/\")\n\n context = json.loads(response.text)\n\n return HttpResponse(template.render(context, request))\n\ndef delivery(request):\n template = loader.get_template('delivery.html')\n\n response = requests.get(\"http://10.200.26.0:8000/api/worker/\")\n\n context = json.loads(response.text)\n\n return HttpResponse(template.render(context, request))\n\ndef entertainment(request):\n template = loader.get_template('entertainment.html')\n\n response = requests.get(\"http://10.200.26.0:8000/api/worker/\")\n\n context = json.loads(response.text)\n\n return HttpResponse(template.render(context, request))\n","sub_path":"lifetasks/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"87356594","text":"#!/bin/usr/env python\n# -*- coding: utf-8 -*-\nfrom IO import get_parse\nfrom relink import relink_simple\n\ndef main():\n args = get_parse()\n\n if True:\n relink_simple(**args)\n\n return\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"source/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"446514678","text":"from itertools import chain\nfrom collections import defaultdict\nfrom string import ascii_lowercase, ascii_uppercase\n\nimport qanta\nfrom qanta.datasets.quiz_bowl import QuestionDatabase\nfrom qanta.wikipedia.cached_wikipedia import CachedWikipedia\nfrom qanta import qlogging\nfrom ingestion.page_assigner import PageAssigner\n\nfrom fuzzywuzzy import process\nfrom fuzzywuzzy.fuzz import UWRatio\n\nlog = qlogging.get(__name__)\n\n\ndef scorer(left, right):\n if right.startswith(\"list of\") or \\\n right.endswith(\" topics\") or \\\n right.startswith(\"wikiproject\"):\n val = 0\n else:\n val = UWRatio(left, right)\n return val\n\n\ndef reasonable_case(page):\n \"\"\"\n Checks that a wikipedia page doesn't have crazy capitalization\n (which often leads to bad matches.\n \"\"\"\n\n return len(page) > 2 and page[0] in ascii_uppercase and \\\n all(x in ascii_lowercase for x in page[1:])\n\n\nclass TitleFinder:\n def __init__(self, index, wiki, known_pages,\n normalize=lambda x: x, prune=1500):\n import gzip\n\n self.normalize = normalize\n self._index = defaultdict(set)\n self._wiki = wiki\n self._prune = set()\n self._known = known_pages\n\n # map single words to the relevant wikipedia titles\n with gzip.open(index) as f:\n line = 0\n for title in f:\n line += 1\n if line == 1:\n continue\n\n converted_title = None\n for word in [normalize(x) for x in\n title.decode('utf-8').split(\"_\") if len(x) > 2]:\n if converted_title is None:\n converted_title = title.decode('utf-8').strip()\n if len(word) > 2:\n self._index[word].add(converted_title)\n if line % 5000000 == 0:\n log.info(\"%i %s: %s -> %s\" % (line, title, word, list(self._index[word])[:3]))\n self.prune(prune)\n self.prune(prune)\n\n # Take another pass just to add exact titles\n with gzip.open(index) as f:\n for ii in f:\n title = ii.decode('utf-8').strip()\n if \"(\" not in title and reasonable_case(title):\n norm = normalize(title.replace(\"_\", \"_\"))\n self._index[norm].add(title)\n\n def prune(self, prune):\n self._prune |= set(x for x in self._index\n if len(self._index[x]) > prune)\n log.info(\"Pruning %s\" % str(list(self._prune)[:50]))\n for ii in self._prune:\n if ii in self._index:\n del self._index[ii]\n\n def query(self, text):\n norm = self.normalize(text)\n tokens = norm.split()\n candidates = set(chain.from_iterable(self._index[x] for x in tokens\n if x in self._index))\n\n # try looking for plurals\n if tokens[-1].endswith(\"s\") and tokens[-1][:-1] in self._index:\n candidates |= self._index[tokens[-1][:-1]]\n\n # try looking for exact match\n if norm in self._index:\n candidates |= self._index[norm]\n\n candidates = dict((self.normalize(x.replace(\"_\", \" \")), x) for x in\n candidates)\n\n return candidates\n\n def score(self, text, score_function=scorer):\n candidates = self.query(text)\n\n candidates = process.extract(text, candidates, limit=len(candidates),\n scorer=scorer)\n\n collapsed = defaultdict(int)\n for wiki, val, norm in candidates:\n page = self._wiki.redirect(wiki)\n if scorer(self.normalize(text),\n self.normalize(page.replace(\"_\", \" \"))) != 0:\n collapsed[page] += val\n\n # Give bonus to exact matches\n if self.normalize(page) == self.normalize(text):\n collapsed[page] += 250\n\n if page in self._known:\n collapsed[page] += 250\n\n return collapsed\n\n def best_guess(self, unassigned, min_val=50, delta=5):\n results = {}\n guess_num = 0\n for ii in [x for x in unassigned if len(x) > 2]:\n v = self.score(ii)\n if len(v) >= 2:\n scores = sorted(v, key=v.get, 
reverse=True)\n top = v[scores[0]]\n second = v[scores[1]]\n\n if top - second >= delta and top > min_val:\n results[ii] = scores[0]\n guess_num += 1\n\n if guess_num % 1000 == 0:\n log.info(\"Matching %s -> %s\" % (ii, results.get(ii, None)))\n elif len(v) == 1:\n if max(v.values()) > min_val:\n results[ii] = max(v.keys())\n\n return results\n\n\nif __name__ == \"__main__\":\n import argparse\n from glob import glob\n parser = argparse.ArgumentParser(description='Import questions')\n parser.add_argument('--direct_path', type=str,\n default='data/internal/page_assignment/direct/')\n parser.add_argument('--ambiguous_path', type=str,\n default='data/internal/page_assignment/ambiguous/')\n parser.add_argument('--unambiguous_path', type=str,\n default='data/internal/page_assignment/unambiguous/')\n flags = parser.parse_args()\n\n pa = PageAssigner(QuestionDatabase.normalize_answer)\n for ii in glob(\"%s/*\" % flags.ambiguous_path):\n pa.load_ambiguous(ii)\n for ii in glob(\"%s/*\" % flags.unambiguous_path):\n pa.load_unambiguous(ii)\n for ii in glob(\"%s/*\" % flags.direct_path):\n pa.load_direct(ii)\n\n cw = CachedWikipedia()\n tf = TitleFinder(\"data/enwiki-latest-all-titles-in-ns0.gz\", cw,\n pa.known_pages(),\n normalize=QuestionDatabase.normalize_answer)\n\n\n\n for ii in ['die leiden des jungen werthers', '99 Luftballons', 'saint nicholas of myra', 'édouard roche', 'the mahdi or mohammad ahmed', 'the first vatican council', 'antietam national battlefield', 'cia', 'samuel f b morse', 'the passion according to st matthew or st matthew’s passion or matthäuspassion', 'another world', 'rolling in the deep', 'tony gwynn', 'opal', 'tylenol', 'queues', 'dachau', 'lipoproteins', 'haiku', 'japan', 'zoroastrianism']:\n A = tf.score(ii)\n print(\"--------\")\n num = 0\n for ii in sorted(A, key=A.get, reverse=True):\n num += 1\n print(\"\\t%s\\t%i\" % (ii, A[ii]))\n\n if num > 10:\n break\n","sub_path":"ingestion/title_finder.py","file_name":"title_finder.py","file_ext":"py","file_size_in_byte":6597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"100916065","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/nodeconductor_sugarcrm/migrations/0010_crm_instance_url.py\n# Compiled at: 2016-09-28 11:51:43\nfrom __future__ import unicode_literals\nfrom django.db import models, migrations\n\nclass Migration(migrations.Migration):\n dependencies = [\n ('nodeconductor_sugarcrm', '0009_remove_crm_size_field')]\n operations = [\n migrations.AddField(model_name=b'crm', name=b'instance_url', field=models.URLField(help_text=b'CRMs OpenStack instance URL in NC.', blank=True), preserve_default=True),\n migrations.AlterField(model_name=b'crm', name=b'api_url', field=models.CharField(help_text=b'CRMs OpenStack instance access URL.', max_length=127), preserve_default=True)]","sub_path":"pycfiles/nodeconductor_sugarcrm-0.5.0-py2.7/0010_crm_instance_url.py","file_name":"0010_crm_instance_url.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"513868817","text":"import os\n\nfrom django.conf import settings\nfrom django.contrib.sites.models import Site\nfrom django.core.mail import send_mail\nfrom django.template.loader import render_to_string\nfrom django.utils.html import strip_tags\n\n\ndef get_images_upload_path(instance, filename):\n return os.path.join(\n 'media',\n 'images',\n instance.__class__.__name__.lower(),\n filename\n )\n\n\ndef send_email_template(request, subject, template, recipients, data=None):\n \"\"\"\n This function sends an email using a selected template.\n\n Arguments:\n subject: the subject of the email\n template: the template to be used for the email\n recipient: a list of recipients the email will be sent to\n data: a dictionary to be added as context variables in the email\n \"\"\"\n context = {\n 'current_site': Site.objects.get_current(),\n 'protocol': 'https' if request.is_secure() else 'http'\n }\n context.update(data)\n\n html_content = render_to_string(template, context)\n text_content = strip_tags(html_content)\n\n send_mail(\n subject=f'[Site.objects.get_current().name] {subject}',\n message=text_content,\n from_email=settings.DEFAULT_FROM_EMAIL,\n recipient_list=recipients,\n fail_silently=False,\n html_message=html_content\n )\n","sub_path":"apps/core/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"197463178","text":"# coding: utf-8\nimport requests\nfrom requests.exceptions import HTTPError\nfrom pygbif import species\nfrom bims.models import (\n Taxon,\n TaxonomyField\n)\nfrom bims.models.taxonomy import Taxonomy\nfrom bims.models.vernacular_name import VernacularName\nfrom bims.enums import TaxonomicRank, TaxonomicStatus\n\n\ndef update_taxa():\n \"\"\"Get all taxon, then update the data bimsd on the gbif id.\"\"\"\n taxa = Taxon.objects.all()\n if not taxa:\n print('No taxon found')\n for taxon in taxa:\n print('Update taxon for %s with gbif id %s' % (\n taxon.common_name, taxon.gbif_id))\n try:\n response = species.name_usage(key=taxon.gbif_id)\n if response:\n update_taxonomy_fields(taxon, response)\n print('Taxon updated')\n except HTTPError as e:\n print('Taxon not updated')\n print(e)\n\n\ndef get_species(gbif_id):\n \"\"\"\n Get species by gbif id\n :param gbif_id: gbif id\n :return: species dictionary\n \"\"\"\n api_url = 'http://api.gbif.org/v1/species/' + str(gbif_id)\n try:\n response = requests.get(api_url)\n json_result = response.json()\n return json_result\n except (HTTPError, KeyError) as e:\n print(e)\n return None\n\n\ndef get_vernacular_names(species_id):\n \"\"\"\n Get vernacular names from species id\n :param species_id: taxonomy id\n :return: array of vernacular name\n \"\"\"\n api_url = 'http://api.gbif.org/v1/species/%s/vernacularNames' % (\n str(species_id)\n )\n try:\n response = requests.get(api_url)\n json_result = response.json()\n return json_result\n except (HTTPError, KeyError) as e:\n print(e)\n return None\n\n\ndef get_children(key):\n \"\"\"\n Lists all direct child usages for a name usage\n :return: list of species\n \"\"\"\n api_url = 'http://api.gbif.org/v1/species/{key}/children'.format(\n key=key\n )\n try:\n response = requests.get(api_url)\n json_response = response.json()\n if json_response['results']:\n return json_response['results']\n return None\n except (HTTPError, KeyError) as e:\n print(e)\n return None\n\n\ndef find_species(original_species_name, rank=None):\n \"\"\"\n Find species from gbif with lookup query.\n :param original_species_name: the name of species we want to find\n :param rank: taxonomy rank\n :return: List of species\n \"\"\"\n print('Find species : %s' % original_species_name)\n try:\n response = species.name_lookup(\n q=original_species_name,\n limit=10,\n rank=rank\n )\n if 'results' in response:\n results = response['results']\n for result in results:\n rank = result.get('rank', '')\n rank_key = rank.lower() + 'Key'\n key_found = (\n 'nubKey' in result or rank_key in result)\n if key_found and 'taxonomicStatus' in result:\n if result['taxonomicStatus'] == 'ACCEPTED' or \\\n result['taxonomicStatus'] == 'SYNONYM':\n return result\n except HTTPError:\n print('Species not found')\n\n return None\n\n\ndef search_exact_match(species_name):\n \"\"\"\n Search species detail\n :param species_name: species name\n :return: species detail if found\n \"\"\"\n api_url = 'http://api.gbif.org/v1/species/match?name=' + str(species_name)\n try:\n response = requests.get(api_url)\n json_result = response.json()\n if json_result and 'usageKey' in json_result:\n key = json_result['usageKey']\n return key\n return None\n except (HTTPError, KeyError) as e:\n print(e)\n return None\n\n\ndef update_collection_record(collection):\n \"\"\"\n Update taxon for a collection.\n :param collection: Biological collection record model\n \"\"\"\n\n taxonomy = Taxonomy.objects.filter(\n 
scientific_name__contains=collection.original_species_name\n )\n if taxonomy:\n print('%s exists in Taxonomy' % collection.original_species_name)\n collection.taxonomy = taxonomy[0]\n collection.save()\n return\n\n result = find_species(collection.original_species_name)\n\n if not result:\n return\n\n if 'nubKey' in result:\n taxon_key = result['nubKey']\n elif 'speciesKey' in result:\n taxon_key = result['speciesKey']\n else:\n return\n\n taxonomy = process_taxon_identifier(taxon_key)\n collection.taxonomy = taxonomy\n collection.save()\n\n\ndef update_taxonomy_fields(taxon, response):\n \"\"\"Helper to update taxonomy field of taxon from a response dictionary.\n\n :param taxon: The Taxon object.\n :type taxon: Taxon\n\n :param response: A dictionary contains of Taxonomy value.\n :type response: dict\n \"\"\"\n # Iterate through all fields and update the one which is a\n # field from Taxonomy\n taxon_fields = Taxon._meta.get_fields()\n for field in taxon_fields:\n if isinstance(field, TaxonomyField):\n if field.taxonomy_key in response:\n setattr(\n taxon,\n field.get_attname(),\n response[field.taxonomy_key])\n continue\n\n # Set vernacular names\n try:\n if field.get_attname() == 'vernacular_names':\n vernacular_names = []\n for vernacular_name in response['vernacularNames']:\n if 'vernacularName' in vernacular_name:\n vernacular_names.append(\n vernacular_name['vernacularName']\n )\n taxon.vernacular_names = vernacular_names\n except (AttributeError, KeyError) as e:\n print(e)\n continue\n\n taxon.save()\n\n\ndef process_taxon_identifier(key, fetch_parent=True):\n \"\"\"\n Get taxon detail\n :param key: gbif key\n :param fetch_parent: whether need to fetch parent, default to True\n :return:\n \"\"\"\n # Get taxon\n print('Get taxon identifier for key : %s' % key)\n\n try:\n taxon_identifier = Taxonomy.objects.get(\n gbif_key=key,\n scientific_name__isnull=False\n )\n if taxon_identifier.parent or taxon_identifier.rank == 'KINGDOM':\n return taxon_identifier\n except Taxonomy.DoesNotExist:\n pass\n\n detail = get_species(key)\n taxon_identifier = None\n\n try:\n print('Found detail for %s' % detail['scientificName'])\n taxon_identifier, status = Taxonomy.objects.get_or_create(\n gbif_key=detail['key'],\n scientific_name=detail['scientificName'],\n canonical_name=detail['canonicalName'],\n taxonomic_status=TaxonomicStatus[\n detail['taxonomicStatus']].name,\n rank=TaxonomicRank[\n detail['rank']].name,\n )\n # Get vernacular names\n vernacular_names = get_vernacular_names(detail['key'])\n if vernacular_names:\n print('Found %s vernacular names' % len(\n vernacular_names['results']))\n for result in vernacular_names['results']:\n fields = {}\n if 'source' in result:\n fields['source'] = result['source']\n if 'language' in result:\n fields['language'] = result['language']\n if 'taxonKey' in result:\n fields['taxon_key'] = int(result['taxonKey'])\n vernacular_name, status = VernacularName.objects.get_or_create(\n name=result['vernacularName'],\n **fields\n )\n taxon_identifier.vernacular_names.add(vernacular_name)\n taxon_identifier.save()\n\n if 'parentKey' in detail and fetch_parent:\n print('Found parent')\n taxon_identifier.parent = process_taxon_identifier(\n detail['parentKey']\n )\n taxon_identifier.save()\n except (KeyError, TypeError) as e:\n print(e)\n pass\n\n return taxon_identifier\n\n\ndef search_taxon_identifier(search_query, fetch_parent=True):\n \"\"\"\n Search from gbif api, then create taxon identifier\n :param search_query: string query\n :param fetch_parent: whether 
need to fetch parent, default to True\n :return:\n \"\"\"\n print('Search for %s' % search_query)\n species_detail = None\n key = search_exact_match(search_query)\n\n if not key:\n species_detail = find_species(search_query)\n rank = species_detail.get('rank', '')\n rank_key = rank.lower() + 'Key'\n\n if rank_key in species_detail:\n key = species_detail[rank_key]\n elif 'nubKey' in species_detail:\n key = species_detail['nubKey']\n\n if key:\n species_detail = process_taxon_identifier(key, fetch_parent)\n\n return species_detail\n","sub_path":"bims/utils/gbif.py","file_name":"gbif.py","file_ext":"py","file_size_in_byte":8987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
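End to end, this module resolves a free-text name to a GBIF-backed `Taxonomy` row and links it to its ancestors. A happy-path sketch, assuming Django is configured for the bims app and the GBIF API is reachable:

```python
# Happy-path sketch: search_taxon_identifier returns a Taxonomy instance
# whose parent chain was filled in by process_taxon_identifier.
taxonomy = search_taxon_identifier('Panthera leo')
while taxonomy is not None:
    print(taxonomy.rank, taxonomy.canonical_name, taxonomy.gbif_key)
    taxonomy = taxonomy.parent  # climbs toward KINGDOM via the parent links
```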
+{"seq_id":"559332620","text":"import snap\nf = open ('musae_git_edges.csv','r')\nlinea = f.readline()\nG = snap.TNGraph.New()\nfor x in range(0,50000):\n G.AddNode(x)\nind=[]\nfor x in range(0,50000):\n ind.append(0)\n \nwhile ( 1 ):\n linea = f.readline()\n if linea == \"\": break\n x,y =map(int,linea.split(\",\"))\n print(x,\" \",y)\n G.AddEdge(x,y)\n ind[x]+=1\n\ndef BCF( x ):\n n = G.GetNodes()\n v = []\n for j in range(0,n):\n v.append(0)\n val = 100\n for j in range( 0, n):\n if G.IsEdge(j+1,x) and (ind[j+1]>1):\n G.DelEdge(j+1,x)\n PRankH2 = snap.TIntFltH()\n snap.GetPageRank(G, PRankH2)\n v[j]=PRankH2[x]\n val = min( val,v[j])\n G.AddEdge(j+1,x)\n else:\n v[j]=100\n for j in range(0,n):\n if abs(val-v[j])<1e-8:\n return j+1\n \nprint(BCF(100))\n\n\n","sub_path":"better friend/bf.py","file_name":"bf.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"530078440","text":"from kubernetes.client import CustomObjectsApi, ApiClient, CoreV1Api, Configuration, BatchV1Api,V1PodStatus\nfrom log import logger\nimport collections\nfrom utils import parse_resource, ONE_GIBI, ONE_MEBI\nfrom configparser import ConfigParser\nimport urllib3\nimport datetime\nurllib3.disable_warnings()\n\npod_metric_fields = [\n 'ns',\n 'pod',\n 'status',\n 'cpu',\n 'cpu_requests',\n 'cpu_limits',\n 'memory',\n 'memory_requests',\n 'memory_limits',\n]\nPodMetric = collections.namedtuple('PodMetric', pod_metric_fields)\nnode_metric_fields = [\n 'node', 'cpu', 'memory'\n]\nNodeMetric = collections.namedtuple('NodeMetric', node_metric_fields)\n\n\nclass K8sClient:\n def __init__(self):\n config = ConfigParser()\n config.read(\"config.ini\")\n self.token = config.get(\"kubernetes\", \"token\")\n self.host = \"https://{}:6443\".format(config.get(\"kubernetes\", \"api_host\"))\n self.configuration = Configuration()\n self.configuration.api_key['authorization'] = self.token\n self.configuration.api_key_prefix['authorization'] = 'Bearer'\n self.configuration.host = self.host\n self.configuration.verify_ssl = False\n self.api_client = ApiClient(self.configuration)\n self.api_client.configuration.debug = True\n self.node_list = [i.strip() for i in config.get(\"kubernetes\", \"node\").split(\",\")]\n\n def get_metric(self):\n api_instance = CoreV1Api(self.api_client)\n pods = api_instance.list_pod_for_all_namespaces().items\n node_usages = [self.top_node(node) for node in self.node_list]\n pods_usages = sorted([self.top_pod(pod) for pod in pods], key=lambda x: x.memory, reverse=True)\n return {\"nodes\": node_usages, \"pods\": pods_usages}\n\n @logger.catch\n def top_node(self, node):\n custom = CustomObjectsApi(self.api_client)\n data = custom.get_cluster_custom_object(\"metrics.k8s.io\", \"v1beta1\", \"nodes\", node)\n node = data['metadata']['name']\n cpu = parse_resource(data['usage']['cpu'])\n memory = parse_resource(data['usage']['memory'])\n return NodeMetric(node=node, cpu=cpu, memory=memory / ONE_GIBI)\n\n @logger.catch\n def top_pods(self):\n custom = CustomObjectsApi(self.api_client)\n data = custom.list_cluster_custom_object(\"metrics.k8s.io\", \"v1beta1\", \"pods\")\n usage_by_pod = collections.defaultdict(list)\n for pod_data in data['items']:\n pod_name = pod_data['metadata']['name']\n for container_data in pod_data['containers']:\n usage_by_pod[pod_name].append(\n {\n 'pod': container_data['name'],\n 'cpu': parse_resource(container_data['usage']['cpu']),\n 'memory': parse_resource(container_data['usage']['memory']) / ONE_MEBI,\n }\n )\n return usage_by_pod\n\n @staticmethod\n def aggregate_container_resource(pod):\n values = {\n 'memory_limits': 0,\n 'cpu_limits': 0,\n 'memory_requests': 0,\n 'cpu_requests': 0,\n }\n for container in pod.spec.containers:\n limits = getattr(container.resources, 'limits', None)\n if limits:\n values['memory_limits'] += round(parse_resource(limits.get('memory')) / ONE_GIBI, 1)\n values['cpu_limits'] += parse_resource(limits.get('cpu'))\n requests = getattr(container.resources, 'requests', None)\n if requests:\n values['memory_requests'] += round(parse_resource(requests.get('memory')) / ONE_GIBI, 1)\n values['cpu_requests'] += parse_resource(requests.get('cpu'))\n return values\n\n @logger.catch\n def top_pod(self, pod):\n ns = pod.metadata.namespace\n status = pod.status.phase\n data = self.top_pods().get(pod.metadata.name) or []\n cpu = round(sum(pod_data['cpu'] for pod_data in data), 3)\n memory = 
round(sum(pod_data['memory'] for pod_data in data))\n        return PodMetric(ns=ns, pod=pod.metadata.name, status=status, cpu=cpu, memory=memory,\n                         **self.aggregate_container_resource(pod))\n\n    def get_job(self):\n        api_instance = BatchV1Api(self.api_client)\n        jobs = api_instance.list_job_for_all_namespaces()\n        jobs_status = []\n        for i in jobs.items:\n            name = i.metadata.name\n            ns = i.metadata.namespace\n            start = i.status.start_time\n            if i.status.succeeded == 1:\n                status = \"success\"\n            elif i.status.failed == 1:\n                status = \"failed\"\n            else:\n                status = \"active\"\n            jobs_status.append({\"ns\": ns, \"name\": name, \"start\": start, \"status\": status})\n        return {\"desc\": \"jobs\", \"result\": jobs_status}\n\n    def get_core(self):\n        api_instance = CoreV1Api(self.api_client)\n        component = api_instance.list_component_status()\n        component_list = []\n        for i in component.items:\n            # the condition status is the string \"True\"/\"False\", so compare explicitly\n            status = \"Ready\" if i.conditions[0].status == \"True\" else \"NotReady\"\n            component_list.append({\"status\": status, \"name\": i.metadata.name})\n        return {\"desc\": \"component\", \"result\": component_list}\n\n    def get_readiness(self):\n        api_instance = CoreV1Api(self.api_client)\n        node = api_instance.list_pod_for_all_namespaces()\n        for i in node.items:\n            for x in i.spec.containers:\n                if x.readiness_probe is not None:\n                    print(i.metadata.name, x.readiness_probe.http_get)\n\n    def get_node(self):\n        api_instance = CoreV1Api(self.api_client)\n        nodes = api_instance.list_node()\n        result = []\n        for i in nodes.items:\n            node = dict()\n            node['name'] = i.metadata.name\n            for s in i.status.conditions:\n                if s.type == \"Ready\":\n                    # s.status is also a string, not a boolean\n                    node['status'] = \"Ready\" if s.status == \"True\" else \"NotReady\"\n            node['kernel'] = i.status.node_info.kernel_version\n            node['container_runtime'] = i.status.node_info.container_runtime_version\n            node['cpu'] = i.status.capacity['cpu']\n            node['memory'] = round(parse_resource(i.status.capacity['memory']) / ONE_GIBI)\n            result.append(node)\n        return {\"desc\": \"node\", \"result\": result}\n\n    def get_pod(self):\n        api_instance = CoreV1Api(self.api_client)\n        pods = api_instance.list_pod_for_all_namespaces()\n        result = []\n        for i in pods.items:\n            pod = dict()\n            pod['name'] = i.metadata.name\n            pod['ns'] = i.metadata.namespace\n            pod['status'] = i.status.phase\n            if i.status.container_statuses is not None:\n                pod['restart'] = max([x.restart_count for x in i.status.container_statuses])\n            else:\n                pod['restart'] = None\n            pod['start_time'] = i.status.start_time\n            pod['ip'] = i.status.pod_ip\n            pod['host'] = i.status.host_ip\n            result.append(pod)\n        return {\"desc\": \"pod\", \"result\": result}\n","sub_path":"k8s.py","file_name":"k8s.py","file_ext":"py","file_size_in_byte":7108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"5533761","text":"import asyncio\n\nfrom typing import Any, Dict, List, Tuple\n\nclass EditAtom:\n '''\n A simple utility class to track all the changes for adding a node or setting a property before committing them all\n at once.\n '''\n def __init__(self, allbldgbuids):\n '''\n Args:\n allbldgbuids (Dict[bytes, Node]): a dict that should be shared among all instances of this class for a\n particular cortex.\n '''\n self.mybldgbuids = {} # buid -> node\n self.otherbldgbuids = set()\n self.doneevent = asyncio.Event()\n self.sops: List[Tuple[str, Tuple[bytes, str, str, Dict[str, Any]]]] = []\n self.allbldgbuids = allbldgbuids # buid -> (Node, Event)\n self.notified = False\n self.npvs = [] # List of tuple(Node, prop, val)\n\n def __enter__(self):\n '''\n Implement the context manager convention\n '''\n return self\n\n def getNodeBeingMade(self, buid):\n '''\n Return a node if it is currently being made, mark as a dependency, else None if none found\n '''\n nodeevnt = self.allbldgbuids.get(buid)\n if nodeevnt is None:\n return None\n if buid not in self.mybldgbuids:\n self.otherbldgbuids.add(buid)\n return nodeevnt[0]\n\n def addNode(self, node):\n '''\n Update the shared map with my in-construction node\n '''\n self.mybldgbuids[node.buid] = node\n self.allbldgbuids[node.buid] = (node, self.doneevent)\n\n async def rendevous(self):\n '''\n Wait until all my adjacent editatoms are also at this point\n '''\n self._notifyDone()\n await self._wait()\n\n def _notifyDone(self):\n '''\n Allow any other editatoms waiting on me to complete to resume\n '''\n if self.notified:\n return\n\n self.doneevent.set()\n\n for buid in self.mybldgbuids:\n del self.allbldgbuids[buid]\n\n self.notified = True\n\n async def _wait(self):\n '''\n Wait on the other editatoms who are constructing nodes my new nodes refer to\n '''\n for buid in self.otherbldgbuids:\n nodeevnt = self.allbldgbuids.get(buid)\n if nodeevnt is None:\n continue\n await nodeevnt[1].wait()\n\n def __exit__(self, exc, cls, tb):\n '''\n Regardless of success, wake up any waiters and clean myself up from shared dict\n '''\n self._notifyDone()\n\n async def commit(self, snap):\n '''\n Push the recorded changes to disk, notify all the listeners\n '''\n if not self.npvs: # nothing to do\n return\n\n for node, prop, _, valu in self.npvs:\n node.props[prop.name] = valu\n node.proplayr[prop.name] = snap.wlyr\n\n splices = [snap.splice('node:add', ndef=node.ndef) for node in self.mybldgbuids.values()]\n for node, prop, oldv, valu in self.npvs:\n info = {'ndef': node.ndef, 'prop': prop.name, 'valu': valu}\n if oldv is not None:\n info['oldv'] = oldv\n splices.append(snap.splice('prop:set', **info))\n\n await snap.stor(self.sops, splices)\n\n for node in self.mybldgbuids.values():\n snap.core.pokeFormCount(node.form.name, 1)\n snap.buidcache.append(node)\n snap.livenodes[node.buid] = node\n\n await self.rendevous()\n\n for node in self.mybldgbuids.values():\n await node.form.wasAdded(node)\n\n # fire all his prop sets\n for node, prop, oldv, valu in self.npvs:\n await prop.wasSet(node, oldv)\n\n if prop.univ:\n univ = snap.model.prop(prop.univ)\n await univ.wasSet(node, oldv)\n\n # Finally, fire all the triggers\n for node, prop, oldv, _ in self.npvs:\n await snap.core.triggers.runPropSet(node, prop, oldv)\n","sub_path":"synapse/lib/editatom.py","file_name":"editatom.py","file_ext":"py","file_size_in_byte":3928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"350803905","text":"#\n# Hello World client in Python\n# Connects REQ socket to tcp://localhost:5555\n# Sends \"Hello\" to server, expects \"World\" back\n#\n\nimport zmq\nimport winsound\nimport time\n\ndef main():\n \"\"\" main method \"\"\"\n \n # Prepare our context and publisher\n context = zmq.Context()\n subscriber = context.socket(zmq.SUB)\n subscriber.connect(\"tcp://127.0.0.1:50004\")\n subscriber.setsockopt(zmq.SUBSCRIBE, '')\n\n while True:\n # Read envelope with address\n message = subscriber.recv_string()\n print(message)\n comand,filename,currentTime,time,x,y,z = message.split(';')\n print(filename)\n winsound.PlaySound(filename, winsound.SND_FILENAME)\n \n\n\n # We never get here but clean up anyhow\n subscriber.close()\n context.term()\n\nif __name__ == \"__main__\":\n main()","sub_path":"pythonSoundClient.py","file_name":"pythonSoundClient.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"98041606","text":"from log import add_log\nfrom get_connection import connect\n\n\ndef id_question_whith_answer():\n conn = connect()\n c = conn.cursor()\n log = add_log()\n\n log.info('Sprawdzenie listy id pytań z odpowiedziami')\n\n query_one = \"\"\"\n SELECT id_question FROM \"answers\" GROUP BY id_question;\n \"\"\"\n c.execute(query_one)\n list_of_id_question = c.fetchall()\n # print(list_of_id_question)\n\n return list_of_id_question\n\n\ndef answers_of_question(id_question):\n conn = connect()\n c = conn.cursor()\n log = add_log()\n\n query_two = \"\"\"\n SELECT id_question, answer, question FROM \"answers\" WHERE id_question = ?;\"\"\"\n # INNER JOIN \"questions\" ON answers.id_question = questions.id WHERE id_question = ?;\"\"\"\n\n c.execute(query_two, (id_question,))\n answers_in_data = c.fetchall()\n # print(answers_in_data)\n\n log.info(f'Pobranie odpowiedzi do pytania: {id_question}')\n\n return answers_in_data\n\n\ndef count_answers(answers_y_or_no):\n log = add_log()\n answer_yes, answer_no = 0, 0\n for id_question, answer, question in answers_y_or_no:\n if answer == 'T':\n answer_yes += 1\n if answer == 'N':\n answer_no += 1\n question = question\n every_answer = answer_yes + answer_no\n result_y_n = {'id_question': id_question, 'question': question, 'answer_yes': answer_yes,\n 'answer_no': answer_no, 'every_answer': every_answer}\n # print(result_y_n)\n\n log.info(f'Policzenie odpowiedzi dla {result_y_n}')\n\n return result_y_n\n\n\ndef percentage_share(i):\n log = add_log()\n sum_of_question = i['answer_yes'] + i['answer_no']\n answer_yes = (i['answer_yes'] / sum_of_question) * 100\n answer_yes = f'{answer_yes:.2f} %'\n answer_yes = answer_yes.replace('.', ',')\n id_question = i['id_question']\n answer_no = (i['answer_no'] / sum_of_question) * 100\n answer_no = f'{answer_no:.2f} %'\n answer_no = answer_no.replace('.', ',')\n question = i['question']\n every_answer = i['every_answer']\n result_percent = {'id_pytania': id_question, 'question': question, 'answer_yes': answer_yes,\n 'answer_no': answer_no, 'every_answer': every_answer}\n\n log.info(f'Dokonuję obliczeń procentowych: {result_percent}')\n\n return result_percent\n\n\ndef verify_number_of_every_questions():\n conn = connect()\n c = conn.cursor()\n log = add_log()\n query = \"\"\"\n SELECT id, question FROM \"questions\";\n \"\"\"\n c.execute(query)\n list_of_id = c.fetchall()\n\n log.info('Sprawdznie ilości wszystkich pytań')\n\n return list_of_id\n\n\ndef verify_questions_without_answer(list_of_answers=id_question_whith_answer(),\n list_of_every_questions=verify_number_of_every_questions()):\n log = add_log()\n log.warning('sprawdzenie pytań bez odpowiedzi')\n\n for i in list_of_answers:\n # print(i)\n # print(list_of_answers)\n for element in list_of_every_questions:\n # print(element)\n if i[0] == element[0]:\n list_of_every_questions.remove(element)\n # print('lista do dodania: ', list_of_every_questions)\n\n return list_of_every_questions\n\n\ndef add_to_results_questions_without_answer(list_without_answers=verify_questions_without_answer()):\n results = []\n log = add_log()\n for question in list_without_answers:\n id_question = question[0]\n question = question[1]\n result_no_answer = {'id_question': id_question, 'question': question, 'answer_yes': '0,00 %',\n 'answer_no': '0,00 %'}\n results.append(result_no_answer)\n\n log.warning(f'Dodanie do wyników pytania bez odpowiedzi: {result_no_answer}')\n # print(results)\n return results\n\n\ndef prepare_data_with_every_answers():\n 
id_question_whith_answer()\n\n group_by_list_of_answers = []\n for id in id_question_whith_answer():\n id = id[0]\n answers = answers_of_question(id)\n result = count_answers(answers)\n group_by_list_of_answers.append(result)\n\n results_in_prep = []\n for i in group_by_list_of_answers:\n result = percentage_share(i)\n results_in_prep.append(result)\n\n verify_questions_without_answer()\n\n no_answers_results = add_to_results_questions_without_answer()\n for element in no_answers_results:\n results_in_prep.append(element)\n # print('add: ', results)\n return results_in_prep\n","sub_path":"Python - advanced/The_Form_Project/definitions_of_results.py","file_name":"definitions_of_results.py","file_ext":"py","file_size_in_byte":4434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"624421930","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.views.decorators.http import require_POST\nfrom django.utils.translation import ugettext_lazy as _, ungettext\nfrom django.contrib import messages\nfrom django.conf import settings\nfrom django.contrib.sitemaps import Sitemap\nfrom django.template import TemplateDoesNotExist\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\nfrom haystack.query import SearchQuerySet\n\nfrom froide.foirequest.models import FoiRequest\nfrom froide.helper.utils import render_400, render_403\nfrom froide.helper.cache import cache_anonymous_page\n\nfrom .models import (PublicBody,\n PublicBodyTag, FoiLaw, Jurisdiction)\nfrom .csv_import import CSVImporter\n\n\ndef index(request, jurisdiction=None, topic=None):\n if jurisdiction is not None:\n jurisdiction = get_object_or_404(Jurisdiction, slug=jurisdiction)\n\n if topic is not None:\n topic = get_object_or_404(PublicBodyTag, slug=topic)\n\n query = request.GET.get('q', '')\n if query:\n publicbodies = SearchQuerySet().models(PublicBody).auto_query(query)\n else:\n publicbodies = PublicBody.objects.get_list()\n\n if topic:\n publicbodies = publicbodies.filter(tags=topic.name if query else topic)\n if jurisdiction:\n publicbodies = publicbodies.filter(\n jurisdiction=jurisdiction.name if query else jurisdiction)\n\n page = request.GET.get('page')\n paginator = Paginator(publicbodies, 50)\n try:\n publicbodies = paginator.page(page)\n except PageNotAnInteger:\n publicbodies = paginator.page(1)\n except EmptyPage:\n publicbodies = paginator.page(paginator.num_pages)\n\n return render(request, 'publicbody/list.html', {\n 'object_list': publicbodies,\n 'jurisdictions': Jurisdiction.objects.get_list(),\n 'jurisdiction': jurisdiction,\n 'topic': topic,\n 'topics': PublicBodyTag.objects.get_topic_list(),\n 'query': query,\n })\n\n\n@cache_anonymous_page(15 * 60)\ndef show_jurisdiction(request, slug):\n jurisdiction = get_object_or_404(Jurisdiction, slug=slug)\n context = {\n \"object\": jurisdiction,\n \"pb_count\": PublicBody.objects.filter(jurisdiction=jurisdiction).count(),\n \"laws\": FoiLaw.objects.filter(meta=False,\n jurisdiction=jurisdiction).order_by('priority'),\n \"foirequests\": FoiRequest.published.filter(jurisdiction=jurisdiction)[:5]\n }\n try:\n return render(request,\n 'publicbody/jurisdiction/%s.html' % jurisdiction.slug, context)\n except TemplateDoesNotExist:\n return render(request,\n 'publicbody/jurisdiction.html', context)\n\n\ndef show_foilaw(request, slug):\n law = get_object_or_404(FoiLaw, slug=slug)\n context = {\"object\": law}\n return render(request, 'publicbody/show_foilaw.html', context)\n\n\ndef show_publicbody(request, slug):\n obj = get_object_or_404(PublicBody, slug=slug)\n context = {\n 'object': obj,\n 'foirequests': FoiRequest.published.filter(\n public_body=obj).order_by('-last_message')[:10],\n 'resolutions': FoiRequest.published.get_resolution_count_by_public_body(obj),\n 'foirequest_count': FoiRequest.published.filter(public_body=obj).count()\n }\n return render(request, 'publicbody/show.html', context)\n\n\n@require_POST\ndef confirm(request):\n if not request.user.is_authenticated:\n return render_403(request)\n if not request.user.is_staff and not request.user.is_superuser:\n return render_403(request)\n try:\n pb = get_object_or_404(PublicBody, pk=int(request.POST.get('public_body', '')))\n except ValueError:\n return render_400(request)\n result = pb.confirm()\n if result is None:\n 
messages.add_message(request, messages.ERROR,\n            _('This request was already confirmed.'))\n    else:\n        # a sent-message count is a success notice, not an error\n        messages.add_message(request, messages.SUCCESS,\n                ungettext('%(count)d message was sent.',\n                    '%(count)d messages were sent.', result\n                ) % {\"count\": result})\n    return redirect('admin:publicbody_publicbody_change', pb.id)\n\n\n@require_POST\ndef import_csv(request):\n    if not request.user.is_authenticated:\n        return render_403(request)\n    if not request.user.is_staff and not request.user.is_superuser:\n        return render_403(request)\n    importer = CSVImporter()\n    url = request.POST.get('url')\n    try:\n        if not url:\n            raise ValueError(_('You need to provide a url.'))\n        importer.import_from_url(url)\n    except Exception as e:\n        messages.add_message(request, messages.ERROR, str(e))\n    else:\n        messages.add_message(request, messages.SUCCESS,\n                _('Public Bodies were imported.'))\n    return redirect('admin:publicbody_publicbody_changelist')\n\n\nSITEMAP_PROTOCOL = 'https' if settings.SITE_URL.startswith('https') else 'http'\n\n\nclass PublicBodySitemap(Sitemap):\n    protocol = SITEMAP_PROTOCOL\n    changefreq = \"monthly\"\n    priority = 0.6\n\n    def items(self):\n        return PublicBody.objects.all()\n\n    def lastmod(self, obj):\n        return obj.updated_at\n\n\nclass JurisdictionSitemap(Sitemap):\n    protocol = SITEMAP_PROTOCOL\n    changefreq = \"yearly\"\n    priority = 0.8\n\n    def items(self):\n        return Jurisdiction.objects.all()\n\n\nclass FoiLawSitemap(Sitemap):\n    protocol = SITEMAP_PROTOCOL\n    changefreq = \"yearly\"\n    priority = 0.3\n\n    def items(self):\n        return FoiLaw.objects.all()\n\n    def lastmod(self, obj):\n        return obj.updated\n","sub_path":"froide/publicbody/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"83264907","text":"# CS4102 Fall 2019 -- Homework 8\r\n#################################\r\n# Collaboration Policy: You are encouraged to collaborate with up to 4 other\r\n# students, but all work submitted must be your own independently written\r\n# solution. List the computing ids of all of your collaborators in the comment\r\n# at the top of your java or python file. Do not seek published or online\r\n# solutions for any assignments. If you use any published or online resources\r\n# (which may not include solutions) when completing this assignment, be sure to\r\n# cite them. Do not submit a solution that you are unable to explain orally to a\r\n# member of the course staff.\r\n#################################\r\n# Your Computing ID: er6qt\r\n# Collaborators: zh2yn, zz9ek\r\n# Sources: Introduction to Algorithms, Cormen\r\n#################################\r\nfrom collections import defaultdict\r\n\r\nclass Marriage:\r\n lukePath = []\r\n lorelaiPath = []\r\n\r\n def __init__(self):\r\n return\r\n\r\n def getLukePath(self):\r\n return self.lukePath\r\n\r\n def getLorelaiPath(self):\r\n return self.lorelaiPath\r\n\r\n # This is the method that should set off the computation\r\n # of marriage. It takes as input a list lines of input\r\n # as strings. You should parse that input and then compute\r\n # the shortest paths that both Luke and Lorelai should take.\r\n # The class fields of lukePath and lorelaiPath should be filled\r\n # with their respective paths. The getters above will be called\r\n # by the grader script.\r\n #\r\n # @return the length of the shortest paths (in rooms)\r\n def compute(self, file_data):\r\n # lukePath = []\r\n # lorelaiPath = []\r\n t_nodes = file_data[0] # total nodes\r\n begin_luke, end_luke = file_data[1].split() #get first and last node of Luke from file (1st list)\r\n begin_lorelai, end_lorelai = file_data[2].split() #get first and last node of Lorelai from file (2nd list)\r\n\r\n if (begin_luke == end_luke) and (begin_lorelai == end_lorelai): # check if beginning and end nodes are the same for both people\r\n done = True\r\n self.lukePath.append(end_luke)\r\n print(len(self.lukePath))\r\n print(\"[\", end_luke, \"]\")\r\n print(\"[\", end_lorelai, \"]\")\r\n\r\n # Add starting nodes to their respective paths\r\n self.lukePath.append(int(begin_luke))\r\n self.lorelaiPath.append(int(begin_lorelai))\r\n self.lukePath.append(int(end_luke))\r\n self.lorelaiPath.append(int(end_lorelai))\r\n\r\n #create an Object Graph1 with a list that holds the edges in the graph\r\n g = Graph1()\r\n edges = []\r\n\r\n #gets all adjacency lists(adjacent nodes) given in the input file\r\n for i in file_data[3:]:\r\n edges.append(i.strip('\\n'))\r\n\r\n # form adjacency list\r\n adj_list = []\r\n for i in edges:\r\n if (i != \" \"):\r\n adj_list.append(i.split(\" \"))\r\n\r\n # go through number of vertices\r\n for vertex in range(0, int(t_nodes)):\r\n #each line\r\n for node in adj_list[vertex]:\r\n g.addEdge(vertex, int(node))\r\n g.addEdge(vertex, vertex)\r\n\r\n # call breath first search on graph to get respective paths\r\n self.lukePath, self.lorelaiPath = g.bfs(self.lukePath[0], self.lukePath[1], self.lorelaiPath[0], self.lorelaiPath[1]) #Change\r\n return len(self.lukePath) # getting \"None\" output, so just replacing it with a blank space\r\n\r\nclass Graph1:\r\n def __init__(self): # graph containing list\r\n self.graph = defaultdict(list)\r\n\r\n def addEdge(self, frm, to): #graph can append nodes/edges\r\n self.graph[frm].append(to)\r\n\r\n def 
printList(self): #printing graph\r\n return dict.__repr__(self.graph)\r\n\r\n def adjacent(self, frm, to): # checking to see if graph has an edge\r\n if to in self.graph[frm]:\r\n return True\r\n else:\r\n return False\r\n\r\n # main function where bfs algorithm is implemented to get shortest paths\r\n def bfs(self, stLuke, enLuke, stLore, enLore):\r\n Luke_Queue = [[stLuke]]\r\n Lorelai_Queue = [[stLore]]\r\n\r\n LukePath = list()\r\n LorelaiPath = list()\r\n\r\n done = False #set to false to check if we have gotten to our ending nodes\r\n\r\n # if (stLuke == enLuke) and (stLore == enLore):\r\n # done = True\r\n # LukePath.append(enLuke)\r\n # print(len(LukePath))\r\n # print(\"[\", enLuke, \"]\")\r\n # print(\"[\", enLore, \"]\")\r\n\r\n while not done: # as long as we have not reached end nodes\r\n while Luke_Queue: # while queue for Luke is not empty\r\n Luke_st = Luke_Queue.pop(0) #add first node to path by popping it off\r\n nodeL = Luke_st[-1]\r\n adj_nodes = self.graph[nodeL] #get the neighboring(adjacent) nodes\r\n\r\n for adj in adj_nodes: #add all the adjacent nodes to a new list\r\n path = list(Luke_st)\r\n path.append(adj)\r\n Luke_Queue.append(path)\r\n if adj == enLuke:\r\n current_val = True\r\n LukePath.append(path)\r\n break\r\n\r\n while Lorelai_Queue: # while queue for Lorelai is not empty, do relatively same thing as done for Luke \r\n Lorelai_st = Lorelai_Queue.pop(0)\r\n nodeLo = Lorelai_st[-1]\r\n adj_nodes_Lorel = self.graph[nodeLo]\r\n\r\n for adj in adj_nodes_Lorel:\r\n path = list(Lorelai_st)\r\n path.append(adj)\r\n Lorelai_Queue.append(path)\r\n if adj == enLore:\r\n current_val = True\r\n LorelaiPath.append(path)\r\n break\r\n\r\n #conditions that check/ensure that neither is in the other’s line-of-sight\r\n\r\n if len(LukePath) > 0 and len(LorelaiPath) > 0 and current_val:\r\n current_val = False\r\n\r\n for Luke_vx in LukePath: # for each vertex in Luke's path\r\n for Lorelai_vx in LorelaiPath: # and for each vertex in Lorelai's path\r\n if len(Luke_vx) == len(Lorelai_vx): # check if they are the same\r\n shortest_path = True # True means we have found the shortest length path\r\n for node in range(0, len(Luke_vx)):\r\n if (self.adjacent(Luke_vx[node], Lorelai_vx[node])):\r\n shortest_path = False\r\n break\r\n elif (Luke_vx[node] == Lorelai_vx[node]):\r\n shortest_path = False\r\n break\r\n\r\n if shortest_path: # if shortest path is found, done is changed to True, and the function is completed\r\n done = True\r\n (len(Luke_vx))\r\n return([*Luke_vx], [*Lorelai_vx]) #Change\r\n\r\n\r\n # if there is a case where Luke's path is longer than Lorelai's, append last node in Lorelai's\r\n # path to have both paths be the same length\r\n elif len(Luke_vx) > len(Lorelai_vx):\r\n LorelaiPath.pop(LorelaiPath.index(Lorelai_vx))\r\n break\r\n\r\n # if there is a case where Lorelai's path is longer than Luke's, append last node in Luke's\r\n # path to have both paths be the same length\r\n elif len(Luke_vx) < len(Lorelai_vx):\r\n LukePath.pop(LukePath.index(Luke_vx))\r\n break","sub_path":"marriage.py","file_name":"marriage.py","file_ext":"py","file_size_in_byte":7875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"233934678","text":"\"\"\"Setup file for packaging swigibpy\"\"\"\n\nimport os\nfrom distutils.command.build_ext import build_ext\nfrom distutils.core import setup, Extension\nfrom distutils.util import get_platform\n\n###\n\nIB_DIR = 'IB_967'\nVERSION = '0.4'\n\nlibraries = []\nif(get_platform().startswith('win')):\n libraries.append('ws2_32')\n\nib_module = Extension('_swigibpy',\n sources=[IB_DIR +\n '/PosixSocketClient/EClientSocketBase.cpp',\n IB_DIR +\n '/PosixSocketClient/EPosixClientSocket.cpp',\n IB_DIR + '/swig_wrap.cpp'],\n include_dirs=[IB_DIR,\n IB_DIR + '/PosixSocketClient',\n IB_DIR + '/Shared'],\n define_macros=[('IB_USE_STD_STRING', '1')],\n libraries=libraries\n )\n\n\nclass swigibpy_build_ext(build_ext):\n def build_extensions(self):\n compiler = self.compiler.compiler_type\n if compiler == 'msvc':\n extra = ('/D_CRT_SECURE_NO_DEPRECATE',\n '/EHsc', '/wd4355', '/wd4800')\n else:\n extra = ('-Wno-switch',)\n for ext in self.extensions:\n ext.extra_compile_args += extra\n build_ext.build_extensions(self)\n\n\nreadme = os.path.join(os.path.dirname(__file__), 'README.rst')\nsetup(version=VERSION,\n name='swigibpy',\n author=\"Kieran O'Mahony\",\n author_email=\"kieranom@gmail.com\",\n url=\"https://github.com/Komnomnomnom/swigibpy/\",\n license='New BSD License',\n description=\"\"\"Third party Python API for Interactive Brokers\"\"\",\n long_description=open(readme).read(),\n keywords=[\"interactive brokers\", \"tws\"],\n ext_modules=[ib_module],\n py_modules=[\"swigibpy\"],\n cmdclass={'build_ext': swigibpy_build_ext},\n classifiers=[\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.6\",\n \"Development Status :: 4 - Beta\",\n \"Environment :: Other Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Topic :: Office/Business :: Financial\",\n ],\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"652454704","text":"from django.db.models.signals import post_save\r\nfrom django.dispatch import receiver\r\nfrom django.core.files import File\r\nfrom .models import Papermail\r\n\r\nfrom django.conf import settings\r\n\r\nimport magic\r\nfrom wand.image import Image\r\nfrom os import remove\r\n\r\nfrom uuid import uuid4\r\n\r\nmedia_root = getattr(settings, 'MEDIA_ROOT')\r\n\r\n@receiver(post_save, sender=Papermail)\r\ndef generate_thumbnail(sender,instance, **kwargs):\r\n \"\"\"\r\n generate a thumbnail of the file to display in views\r\n only jpeg png or pdf is supported\r\n thumbnail name is generate with uuid module\r\n \"\"\"\r\n \r\n mime = magic.Magic(mime=True)\r\n type_fichier = mime.from_file(instance.paper_file.path)\r\n nom_thumbnail = media_root + uuid4().hex + '_thumb.jpeg'\r\n\r\n if type_fichier == 'image/png' or type_fichier == 'image/jpeg':\r\n\r\n with Image(filename=instance.paper_file.path) as img:\r\n with img.clone() as converted:\r\n converted.format = 'jpeg'\r\n converted.resize(300,400)\r\n converted.save(filename= nom_thumbnail)\r\n fich = File(open(nom_thumbnail,'rb'))\r\n post_save.disconnect(generate_thumbnail, sender=Papermail)\r\n instance.thumbnail.save(name = uuid4().hex + '_thumb.jpeg', content = fich)\r\n post_save.connect(generate_thumbnail, sender=Papermail)\r\n remove(nom_thumbnail)\r\n \r\n elif type_fichier == 'application/pdf':\r\n\r\n with Image(filename=instance.paper_file.path + '[0]') as img:\r\n with img.clone() as converted:\r\n converted.format = 'jpeg'\r\n converted.save(filename= nom_thumbnail)\r\n fich = File(open(nom_thumbnail,'rb'))\r\n post_save.disconnect(generate_thumbnail, sender=Papermail)\r\n instance.thumbnail.save(name = uuid4().hex + '_thumb.jpeg', content = fich)\r\n post_save.connect(generate_thumbnail, sender=Papermail)\r\n remove(nom_thumbnail)","sub_path":"paperworks/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"182061640","text":"# 打印图形\r\ntamp = [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ']\r\nlst = []\r\ndic = {}\r\nfor i in range(1, 8):\r\n if i == 1:\r\n dic[i] = [7]\r\n else:\r\n temp = []\r\n for x in dic[i-1]:\r\n temp.append(x-1)\r\n temp.append(x+1)\r\n dic[i] = sorted(list(set(temp)))\r\nfor h in dic.keys():\r\n for j in dic[h]:\r\n tamp[j-1] = '*'\r\n lst.append(''.join(tamp))\r\nfor k in lst:\r\n print(k)\r\n","sub_path":"day2_7.py","file_name":"day2_7.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"446510549","text":"\"\"\" Bob (localhost) \"\"\"\nimport socket, pickle, random\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import serialization, asymmetric\nfrom lib.MyCryptoLibrary import MyCryptoLibrary\n\n# Key generation\nbob_private_key = asymmetric.rsa.generate_private_key(\n public_exponent=65537,\n key_size=2048,\n backend=default_backend())\n\n# Assuming that Alice has bob's PK, thus saving it as PEM format to Alice's PC.\nbob_key_pem = bob_private_key.public_key().public_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PublicFormat.SubjectPublicKeyInfo)\n\nwith open(\"PK_bob.pem\", \"wb\") as key:\n key.write(bob_key_pem)\n\n\ndef retrieve_alice_pk():\n with open(\"PK_alice.pem\", \"rb\") as pem_file:\n PK = serialization.load_pem_public_key(\n pem_file.read(),\n backend=default_backend())\n return PK\n\n\ndef decrypt_and_verify(data, PK):\n decrypted_message = MyCryptoLibrary.decrypt_message(data[0], bob_private_key)\n MyCryptoLibrary.verify_message(decrypted_message, data[1], PK)\n return decrypted_message\n\n\ndef send_encrypted_signed_message(msg, PK):\n cipher_text = MyCryptoLibrary.encrypt_message(msg, PK)\n signature_alice = MyCryptoLibrary.sign_message(msg, bob_private_key)\n data = (cipher_text, signature_alice)\n data_string = pickle.dumps(data)\n client_socket.send(data_string)\n\n\ndef compute_dice_throw(b, a):\n dice_throw = bin(int(b) ^ int(a))\n converted_dice_throw = (int(dice_throw, 2) % 6) + 1\n print(\"Bob computes throw to be \", converted_dice_throw)\n return converted_dice_throw\n\n\n# TCP socket with ipv4\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nhost = \"127.0.0.1\"; port = 6677\naddress = (host, port)\nserver.bind(address)\n\n# Handle connections\nserver.listen(2048)\nrunning = True\nprint(f\"[Server started at {host} on port {port}]\")\n\n# Creating the message to send\n\nwhile running:\n # Accept connection from client\n client_socket, address = server.accept()\n print(f\"Connection from {address} has been established...\")\n\n # Get prerequisites\n PK_alice = retrieve_alice_pk()\n\n print(\"********* Alice's dice throw *********\")\n\n # [b1] Message Com(a,r) received from alice\n received_data = pickle.loads(client_socket.recv(2048))\n print(\"Bob received Com(a,r) from Alice and tries to verify\")\n decrypted_hashed_c_from_alice = decrypt_and_verify(received_data, PK_alice)\n\n # [b2] Bob sends random bit b to Alice\n b1 = bytes(format(random.getrandbits(4), \"b\"), encoding=\"utf-8\")\n send_encrypted_signed_message(b1, PK_alice)\n\n # [b3] Receive second message (a,r) from Alice\n received_data2 = pickle.loads((client_socket.recv(2048)))\n print(\"Bob received (a,r) from Alice and tries to verify\")\n decrypted_a_r = decrypt_and_verify(received_data2, PK_alice)\n decoded_split_a_r = decrypted_a_r.decode(\"utf-8\").split(\",\")\n alice_a1 = decoded_split_a_r[0]\n opened_commitment = bytes(decoded_split_a_r[0] + decoded_split_a_r[1], \"utf-8\")\n\n # [b4] Bob is hashing a + r for checking and computing dice throw\n opened_commitment_hashed = MyCryptoLibrary.hash_message(opened_commitment)\n\n if decrypted_hashed_c_from_alice == opened_commitment_hashed:\n print(\"Bob is checking if the hashes match\")\n print(\"[Success] No changes we made to the message\")\n alice_a = decoded_split_a_r[0]\n bob_b = b1.decode(\"utf-8\")\n compute_dice_throw(bob_b, alice_a)\n else:\n print(\"[WARNING] Alice changed her message\")\n\n print()\n print(\"********* Bob's 
dice throw *********\")\n\n # [b1] Bob samples random bit b and random 128 bit string and sends Com(a,r)\n b2 = format(random.getrandbits(4), \"b\")\n r2 = format(random.getrandbits(128), \"b\")\n c2 = bytes(b2 + r2, encoding=\"utf-8\")\n c_hashed2 = MyCryptoLibrary.hash_message(c2)\n send_encrypted_signed_message(c_hashed2, PK_alice)\n print(\"Sending encrypted Com(a,r) to Alice\")\n\n # [b2] Message a received\n received_data3 = pickle.loads(client_socket.recv(2048))\n print(\"Alice received b from Bob and tries to verify\")\n a2 = decrypt_and_verify(received_data3, PK_alice)\n\n # [b3] Bob sends (a,r) to Alice\n b_r = bytes(b2 + \",\" + r2, encoding=\"utf-8\")\n send_encrypted_signed_message(b_r, PK_alice)\n\n # [b4] Compute output B XOR a under mod 6\n compute_dice_throw(b2, a2)\n\n running = False\n\nclient_socket.close()\n\n\n","sub_path":"bob_local.py","file_name":"bob_local.py","file_ext":"py","file_size_in_byte":4440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"490622903","text":"#\n# @contactrika\n#\n# Wrappers for custom envs.\n#\nfrom torchbeast import atari_wrappers\nfrom coinrun import coinrunenv\n\n\nclass CoinRunOneEnv(coinrunenv.CoinRunVecEnv):\n\n def seed(self, seed):\n print('CoinRun ignores seed()')\n\n def step(self, actions):\n self.step_async(actions)\n res = self.step_wait()\n return res\n\n\ndef create_env(env_name, flags):\n if env_name.startswith('Coin'):\n from coinrun import coinrunenv\n from coinrun import setup_utils as coinrun_setup_utils\n coinrun_setup_utils.setup_and_load(\n use_cmd_line_args=False,\n set_statics=flags.set_statics,\n set_dynamics=flags.set_dynamics,\n num_levels=flags.num_levels,\n any_custom_game=flags.any_custom_game,\n use_pytorch=True, paint_vel_info=0,\n is_high_res=flags.is_high_res,\n default_zoom=flags.default_zoom,\n float_obs=False) # torchbeast divides by 255\n return CoinRunOneEnv('platform', 1,\n default_zoom=flags.default_zoom, float_obs=False)\n else:\n return atari_wrappers.wrap_pytorch(\n atari_wrappers.wrap_deepmind(\n atari_wrappers.make_atari(env_name),\n clip_rewards=False,\n frame_stack=True,\n scale=False))\n","sub_path":"torchbeast/env_wrappers.py","file_name":"env_wrappers.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"121470240","text":"from selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport os\n\n\ntry:\n\n chrome_options = Options()\n chrome_options.add_argument('--start-maximized')\n browser = webdriver.Chrome(options=chrome_options)\n browser.get('https://olx.ua')\n wait = WebDriverWait(browser, 10)\n element = wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, '#headerSearch')))\n element.send_keys('Автомобиль')\n element = wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, '#submit-searchmain')))\n element.click()\n\n\n #сделаем скриншот не хедера, а самих объявлений\n element = wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, '#offers_table')))\n screen_elem = element.screenshot_as_png\n with open(os.path.join(os.path.dirname(__file__), 'scr.png'), 'wb') as f:\n f.write(screen_elem)\n\n\nexcept Exception as error_exp:\n print(error_exp)\n \nfinally:\n browser.quit()","sub_path":"HT_10/selenium_olx.py","file_name":"selenium_olx.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"226340549","text":"import allure\nfrom selenium.webdriver.common.by import By\nfrom Web_services.URL import PaidLessonPage\nfrom Web_services.SetUp import StartInterneturokClassMethod\n\n\n@allure.feature(\"Страница урока Тригонометрические функции y = sin t, y = cos t (Алгебра 11 класс)\")\n@allure.story(\"Проверка наличия элементов в Body во вкладке Видеоурок для не авторизованного пользователя\")\nclass ChecksAllElementsInLessonPageTheBodyTabVideoUserNotAuth(StartInterneturokClassMethod):\n @allure.step(\"Перейти на страницу Алгебра 8 класс\")\n def test_000_open_page(self):\n StartInterneturokClassMethod = self.driver\n go_page = PaidLessonPage(StartInterneturokClassMethod)\n go_page.go_lesson_page()\n\n @allure.step(\"На странице урока отображается название урока (Основные понятия)\")\n def test_lesson_title(self):\n self.assertEqual(\"АверНик.Тригонометрические функции y = sin t, y = cos t\",\n self.driver.find_element_by_css_selector(\"h1.lesson-title\").text)\n\n @allure.step(\"На странице урока отображается кнопка перейти на предыдущий урок (Кнопка влево)\")\n def test_lesson_arrow_left(self):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"span.lesson-arrow_left\"))\n\n @allure.step(\"На странице урока отображается кнопка перейти на следующий урок (Кнопка вправо)\")\n def test_lesson_arrow_right(self):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"a.lesson-arrow_right\"))\n\n @allure.step(\"На страни��е урока отображается Вкладки урока (Видеоурок, Текстовый урок и т.д)\")\n def test_lesson_controls_body(self):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"ul.lesson-controls__body\"))\n\n @allure.step(\"На странице урока отображается вкладка (Видеоурок)\")\n def test_button_video(self):\n self.assertEqual(\"Видеоурок\", self.driver.find_element_by_css_selector(\"li.lc-video\").text)\n with allure.step(\"Во вкладке Видеоурок отображается иконка\"):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"span.lesson-controls__icon_vid\"))\n\n @allure.step(\"На странице урока отображается вкладка (Тестовый урок)\")\n def test_button_text_lesson(self):\n self.assertEqual(\"Текстовый урок\", self.driver.find_element_by_css_selector(\"li.lc-txt\").text)\n with allure.step(\"Во вкладке Текстовый урок отображается иконка\"):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"span.lesson-controls__icon_vid\"))\n\n @allure.step(\"На странице урока отображается вкладка (Тренажеры)\")\n def test_button_training(self):\n self.assertEqual(\"Тренажеры\", self.driver.find_element_by_css_selector(\n \".lesson-controls__body:nth-child(1) .lesson-controls__wrap:nth-child(3)\").text)\n with allure.step(\"Во вкладке Тренажеры отображается иконка\"):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"span.lesson-controls__icon_training\"))\n\n @allure.step(\"На странице урока отображается вкладка (Тесты)\")\n def test_button_test(self):\n self.assertEqual(\"Тесты\", self.driver.find_element_by_css_selector(\"li.lc-test\").text)\n with allure.step(\"Во вкладке Тесты отображается иконка\"):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"span.lesson-controls__icon_test\"))\n\n @allure.step(\"На странице урока отображается вкладка (Вопросы к уроку)\")\n def test_button_questions(self):\n self.assertEqual(\"Вопросы к уроку\", self.driver.find_element_by_css_selector(\"li.lc-questions\").text)\n with allure.step(\"Во вкладке Вопросы к уроку отображается иконка\"):\n 
self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"span.lesson-controls__icon_questions\"))\n\n @allure.step(\"На странице урока отображается кнопка (Заметки)\")\n def test_button_note(self):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"div.lesson-note-widget\"))\n\n @allure.step(\"В видеоуроке отображается (Превью видеоурока)\")\n def test_displayed_preview_video(self):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"div.player__block\"))\n\n @allure.step(\"В видеоуроке отображается кнопка (Плей)\")\n def test_displayed_button_play(self):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"div.player__block-play\"))\n\n @allure.step(\"В видеоуроке отображается заглушка (Этот видеоурок доступен по абонементу)\")\n def test_video_blocker(self):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"div.video-blocker\"))\n with allure.step(\"В загулшке в левом углу отображается звезда (платный урок)\"):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"div.video-blocker__body-corner\"))\n with allure.step(\"В заглушка присутствует текст (Этот видеоурок доступен по абонементу)\"):\n self.assertEqual(\"Этот видеоурок доступен по абонементу\",\n self.driver.find_element_by_css_selector(\"h5.video-blocker__content_text-title\").text)\n with allure.step(\"В заглушка присутствует ссылка (Подробнее об абонементе, платных и бесплатных уроках)\"):\n self.assertEqual(\"Подробнее об абонементе, платных и бесплатных уроках\",\n self.driver.find_element_by_css_selector(\"a.video-blocker__content_text-link.link\").text)\n with allure.step(\"В заглушка присутствует текст (У вас уже есть абонемент?)\"):\n self.assertEqual(\"У вас уже есть абонемент? Войти\",\n self.driver.find_element_by_css_selector(\"p.has-abonement\").text)\n with allure.step(\"В заглушка отображается ссылка (Войти)\"):\n self.assertEqual(\"Войти\",\n self.driver.find_element_by_xpath(\"//div[2]/div/div[1]/p/a\").text)\n with allure.step(\"В заглушка отображается кнопка (Оплатить абонемент от 150 руб. в месяц)\"):\n self.assertEqual(\"Оплатить абонемент\\nот 150 руб. в месяц\",\n self.driver.find_element_by_css_selector(\"a.abonement__buy\").text)\n\n @allure.step(\"В конспекте присутствуют ссылки с таймлайнами (Определение и примеры алгебраических дробей)\")\n def test_displayed_lesson_subtitle(self):\n self.assertEqual(\"1. 
Определение функции\",\n self.driver.find_element_by_xpath(\"//h2[1]/a\").text)\n\n @allure.step(\n \"В конспекте присутствуют рекламный баннер ДШ (Решите домашниее задание и получите оценку в Домашней школе InternetUrok)\")\n def test_lesson_footer_error(self):\n self.assertEqual(\"https://files.interneturok.ru/public/undertext_ver1_1.jpg\",\n self.driver.find_element_by_css_selector(\n \".lesson-footer__error:nth-child(1) .lesson-footer__error-img:nth-child(1)\").get_attribute(\n \"src\"))\n\n @allure.step(\"В уроке в конце конспекта отображается ссылка (Информация об уроке)\")\n def test_displayed_lesson_footer_button_info(self):\n self.assertEqual(\"Информация об уроке\",\n self.driver.find_element_by_id(\"info-link\").text)\n with allure.step(\"В кнопке Информация об уроке присутствует иконка \"):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"div.icon-lesson-info\"))\n\n @allure.step(\"В уроке в конце конспекта отображается ссылка (Комментарии (11))\")\n def test_displayed_lesson_footer_button_comment(self):\n self.assertEqual(\"Комментарии (11)\",\n self.driver.find_element_by_id(\"comments-link\").text)\n with allure.step(\"В кнопке Комментарии (8) присутствует иконка \"):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"div.icon-lesson-comments\"))\n\n @allure.step(\"В уроке в конце конспекта отображается ссылка (Поделиться)\")\n def test_displayed_lesson_footer_button_share(self):\n self.assertEqual(\"Поделиться\",\n self.driver.find_element_by_id(\"share-link\").text)\n with allure.step(\"В кнопке Поделиться присутствует иконка \"):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"div.icon-lesson-share\"))\n\n @allure.step(\"В уроке в конце конспекта отображается ссылка (В избранное)\")\n def test_displayed_lesson_footer_button_lesson_add(self):\n self.assertEqual(\"В избранное\",\n self.driver.find_element_by_css_selector(\n \".lesson-footer:nth-child(4) .ember-view:nth-child(4) .lesson-icons__group\").text)\n with allure.step(\"В кнопке В избранное присутствует иконка \"):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"div.icon-lesson-add\"))\n\n @allure.step(\"В уроке в конце конспекта отображается ссылка (Нашли ошибку?)\")\n def test_displayed_lesson_footer_button_lesson_report(self):\n self.assertEqual(\"Нашли ошибку?\",\n self.driver.find_element_by_css_selector(\"a.lesson-icons__group.ember-view\").text)\n with allure.step(\"В кнопке Нашли ошибку? 
присутствует иконка \"):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"div.icon-lesson-report\"))\n\n @allure.step(\"В уроке в конце конспекта отображается текст (Оценить урок:)\")\n def test_displayed_lesson_footer_button_rating(self):\n self.assertEqual(\"Оценить урок:\",\n self.driver.find_element_by_css_selector(\"div.rating\").text)\n with allure.step(\"В уроке в конце конспекта отображается звёзд для оценки урока (5 звезда)\"):\n self.assertEqual(\"star5\",\n self.driver.find_element_by_css_selector(\n \".lesson-footer__row div >label:nth-child(3)\").get_attribute(\"for\"))\n with allure.step(\"В уроке в конце конспекта отображается звёзд для оценки урока (4 звезда)\"):\n self.assertEqual(\"star4\",\n self.driver.find_element_by_css_selector(\n \".lesson-footer__row div >label:nth-child(5)\").get_attribute(\"for\"))\n with allure.step(\"В уроке в конце конспекта отображается звёзд для оценки урока (3 звезда)\"):\n self.assertEqual(\"star3\",\n self.driver.find_element_by_css_selector(\n \".lesson-footer__row div >label:nth-child(7)\").get_attribute(\"for\"))\n with allure.step(\"В уроке в конце конспекта отображается звёзд для оценки урока (3 звезда)\"):\n self.assertEqual(\"star3\",\n self.driver.find_element_by_css_selector(\n \".lesson-footer__row div >label:nth-child(7)\").get_attribute(\"for\"))\n\n with allure.step(\"В уроке в конце конспекта отображается звёзд для оценки урока (2 звезда)\"):\n self.assertEqual(\"star2\",\n self.driver.find_element_by_css_selector(\n \".lesson-footer__row div >label:nth-child(9)\").get_attribute(\"for\"))\n\n with allure.step(\"В уроке в конце конспекта отображается звёзд для оценки урока (1 звезда)\"):\n self.assertEqual(\"star1\",\n self.driver.find_element_by_css_selector(\n\n \".lesson-footer__row div >label:nth-child(11)\").get_attribute(\"for\"))\n\n @allure.step(\n \"В уроке в конце конспекта отображается ссылка Хлебные крошки (Главная > Алгебра, 11 класс > Тригонометрические функции y = sin t, y = cos t)\")\n def test_displayed_link_main(self):\n self.assertEqual(\"Библиотека InternetUrok.ru Алгебра, 11 класс АверНик.Тригонометрические функции y = sin t, y = cos t\",\n self.driver.find_element_by_css_selector(\"ol.breadcrumbs.overflow-h\").text)\n\n @allure.step(\"В уроке в конце конспекта отображается блок оценки урока (Вконтакте:)\")\n def test_displayed_lesson_footer_button_social_vk(self):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"div.social__vk\"))\n with allure.step(\"В кнопке Вконтаке отображается кнопка Лайкнуть\"):\n self.assertTrue(self.is_element_present(By.ID, \"vk_like\"))\n\n @allure.step(\"В уроке в конце конспекта отображается блок оценки урока (Facebook)\")\n def test_displayed_lesson_footer_button_social_facebook(self):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"div.social__fb\"))\n\n @allure.step(\"В уроке в конце конспекта отображается блок оценки урока (Одноклассники)\")\n def test_displayed_lesson_footer_button_social_od(self):\n self.assertTrue(self.is_element_present(By.CSS_SELECTOR, \"div.social__ok\"))\n\n @allure.step(\"Проверка наличия кнопки (Подготовка к ЕГЭ) в 11 классе Алгебра\")\n def test_displayed_button_preparation_the_EGE(self):\n self.assertEqual(u\"Подготовка к ЕГЭ\", self.driver.find_element_by_css_selector(\n \".ember-view > div > ul > li.lesson-controls__wrap.lc-ege > 
a\").text)\n","sub_path":"Web_services/Paid_lesson_page/Сheck_all_elements_on_the_lesson_page/test_пользователь_не_авторизован_проверка_Body_вкладка_Видеоурок.py","file_name":"test_пользователь_не_авторизован_проверка_Body_вкладка_Видеоурок.py","file_ext":"py","file_size_in_byte":15895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"359191109","text":"#修改图片的size小于iphone5的分辨率\nfrom PIL import Image\nimport os\npath = 'E:/PythonProject/Python3/005/pythonimage'\n#size \nsize = 1136,640\n\ndef walkfiles():\n for i in os.listdir(path):\n #过滤文件和目录如果需要判断文件是不是图片的话就需要检测后缀\n if(os.path.isfile(os.path.join(path,i))):\n #print('Path是:' + os.path.join(path,i) + ' name是:' + i)\n thumbnailimage(os.path.join(path,i),i)\n\ndef thumbnailimage(imgpath,name):\n im = Image.open(imgpath)\n #当给定的size大于图片本身的size 则不会生成缩略图\n im.thumbnail(size,Image.ANTIALIAS)\n #这里重命名文件名 用join也能修改格式 哪种方式都一样\n # print(path+'/'+name.replace('.jpg','_thum.jpg'))\n im.save(path+'/'+name.replace('.jpg','_thum.jpg'),'JPEG')\n\nwalkfiles()","sub_path":"005/changeSize.py","file_name":"changeSize.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"109303367","text":"import numpy as np\n\nclass randomff_ext:\n def __init__(self, D1, D2, p, lamda = 0, sigma1 = 1, sigma2 = 1):\n self.D1 = D1\n self.D2 = D2\n self.lamda = lamda\n\n mean = np.zeros(p)\n cov = sigma1 * np.identity(p)\n self.w1 = np.random.multivariate_normal(mean, cov, D1)\n self.bias1 = np.random.uniform(0,2*np.pi,D1)\n\n mean = np.zeros(2*D1)\n cov = sigma2 * np.identity(2*D1)\n self.w2 = np.random.multivariate_normal(mean,cov,D2)\n self.bias2 = np.random.uniform(0,2*np.pi,D2)\n \n def train(self, xdata, ydata):\n print(\"\\nStarting to train the model.\\n\")\n lamda = self.lamda\n z = self.embed_all(xdata)\n self.beta_hat = np.linalg.solve(np.dot(z.T,z) + lamda * np.identity( 2*self.D2 ),\n np.dot( z.T, ydata))\n print(\"\\nThe model has been trained successfully!\\n\")\n\n def predict(self, xdata):\n z = self.embed_all(xdata)\n y_hat = np.dot(z, self.beta_hat)\n return y_hat\n\n def embed(self, xdata, layer = 1):\n n = xdata.shape[0]\n if layer == 1:\n w = self.w1\n D = self.D1\n b = self.bias1\n else:\n w = self.w2\n D = self.D2\n b = self.bias2\n biases = np.array([b for i in range(n)])\n z = np.dot( xdata, w.T ) + biases\n z_cos = np.cos( z ) / np.sqrt( D )\n z_sin = np.sin( z ) / np.sqrt( D )\n z = np.hstack( ( z_cos, z_sin ) )\n return z\n\n def embed_all(self, xdata):\n mu_x = np.zeros(self.D1 * 2)\n for X in xdata:\n z = self.embed(X,layer = 1)\n mean_embedding = np.mean(z, axis=0)\n mu_x = np.vstack((mu_x, mean_embedding))\n mu_x = mu_x[1:,]\n z = self.embed(mu_x, layer = 2)\n return z\n\n def test(self, xdata, ydata):\n y_hat = self.predict(xdata)\n RMSE = np.linalg.norm(y_hat-ydata)/np.sqrt(len(y_hat))\n return RMSE\n","sub_path":"final_files/src/randff_ext.py","file_name":"randff_ext.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"521308909","text":"from threading import Thread\nfrom logger import MyLogger\nfrom time import sleep, time\nfrom numpy import log, polyfit, sqrt, std, subtract\nimport pandas as pd\nfrom sqlalchemy import create_engine\nimport sqlalchemy\nimport numpy as np\nimport math\nimport json\n\nfrom xcoin.bitmexAPI import BitMEXWebsocket\n\n'''\n 고팍스 코인 시세 데이터 저장\n https://www.bitmex.com/app/trade/XBTUSD\n\n 해당 거래소에서 발급받은 key, secret 코드로 secrets.json 파일을 아래와 같이 생성해야 함\n {\n \"key\": \"value\",\n \"secret\": \"value\"\n }\n\n 20180415 by Daesony\n'''\nclass BitmexDataCollector(Thread):\n def __init__(self):\n Thread.__init__(self)\n self.logger = MyLogger.instance().logger()\n self.logger.debug('CoinDataCollector Start!!!!')\n\n self.starttime = time()\n\n with open(\"secrets.json\") as secrets_file:\n self.secrets = json.load(secrets_file)\n secrets_file.close()\n\n self.api = BitMEXWebsocket(endpoint=\"https://www.bitmex.com/api/v1\", symbol=\"XBTUSD\",\n symbolSubs=[\"trade\"], genericSubs=[\"wallet\"],\n api_key=self.secrets['bitmexkey'], api_secret=self.secrets['bitmexsecret'])\n\n def run(self):\n\n try:\n self.api.start()\n\n except Exception as e:\n print('ERROR : ' + str(e) + str(e.message) + str(e.args))\n\n\n\n\n def test(self):\n try:\n\n self.api.get_instrument()\n\n '''\n rst = self.api.getTrades('ETH-KRW')\n print(rst)\n df = pd.DataFrame(rst)\n #df.index = df['id']\n #return df['price']\n df['krtime'] = pd.to_datetime(df['date'], unit='s').dt.tz_localize('UTC').dt.tz_convert('Asia/Seoul')\n\n #data = df[['id', 'price', 'date', 'krtime']]\n data = df[['id', 'price', 'date']]\n data.index = data['id']\n data = data.drop('id', axis=1)\n '''\n\n return 0\n\n\n except Exception as c:\n print('ERROR getMarketHistory:' + str(c))\n sleep(1)\n '''\n def insertData(self, data):\n engine = create_engine('mysql+pymysql://root:Da2$ony2016%@localhost/data', encoding='utf8')\n data.to_sql('ETH_KRW', engine, if_exists='append')\n '''\n\n\nif __name__ == '__main__':\n try:\n BitmexDataCollector().start()\n\n except Exception as e :\n print('ERROR : ' + str(e))\n","sub_path":"xcoin/bitmexDataCollector.py","file_name":"bitmexDataCollector.py","file_ext":"py","file_size_in_byte":2482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"525368168","text":"def post(fposts,insieme):\n ID = set()\n doc = open(fposts)\n text = doc.read()\n #il testo va in rifinitura con l'eliminazione del POST e dello spazio\n stringa =' '.join(text.split())\n lst = stringa.split('')\n for i in lst:\n strpost = i.split()\n for stringa in strpost:\n parola = list(stringa)\n #elimino i caratteri fastidiosi\n for char in parola:\n if char == '!' or char == '*' or char=='[' or char==']':\n char == 'char'\n #elimino tutto ciò che non appartiene ad isalpha\n elif not char.isalpha() == True:\n parola.remove(char)\n clean = ''.join(parola) \n minclean = clean.lower()\n #equivalgo le maiuscole con le minuscole\n for x in insieme:\n nuova= x.lower()\n if minclean == nuova:\n ID.add(strpost[0])\n return ID","sub_path":"students/1792156/homework02/program01.py","file_name":"program01.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"291268860","text":"# this module that takes in the the input parameters, trains a \n# polynomial regression algorithm on the dataset\n# and produces the predictions on the test set and provides the error metrics\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn import metrics\nfrom sklearn.linear_model import LinearRegression\n\nclass poly_engine:\n def __init__(self, filename, col_list, target_y, test_size = 0.2, degree = 2):\n self.filename = filename\n self.col_list = col_list\n self.target_y = target_y\n self.test_size = test_size\n self.degree = degree\n self.dataset = self.load_data()\n self.actual_values = []\n self.predicted_values = []\n self.score = 0.0\n\n def load_data(self):\n data = pd.read_csv(self.filename)\n return data.iloc[:2372, : ]\n\n def run_engine(self):\n y = self.dataset[self.target_y][1:].values\n X = self.dataset[self.col_list][:-1].values\n\n #split the entire set into training and test sets\n \n train_length = round(X.shape[0] * (1 - self.test_size))\n X_train = np.array(X[:train_length])\n y_train = np.array(y[:train_length])\n X_test = np.array(X[train_length:])\n y_test = np.array(y[train_length:])\n \n self.actual_values = y_test\n \n #fit the X parameters of the training set into a polynomial of degree d, best case d = 2\n poly_reg = PolynomialFeatures(degree = self.degree)\n X_poly = poly_reg.fit_transform(X_train)\n\n\t#create the model \n lin_reg2 = LinearRegression()\n\n #fit the prediction model\n lin_reg2.fit(X_poly, y_train)\n \n X_transform = poly_reg.fit_transform(X_test)\n self.score = lin_reg2.score(X_transform, self.actual_values)\n\n\t#make predictions\n self.predict(lin_reg2, X_transform)\n return lin_reg2\n\n def predict(self, lin_reg2, X):\n predictions = lin_reg2.predict(X);\n self.predicted_values = predictions\n \n def get_score(self):\n return self.score\n \n def get_root_mean_squared_error(self):\n rmse = np.sqrt(metrics.mean_squared_error(self.actual_values, self.predicted_values))\n return rmse\n \n def get_mean_absolute_error(self):\n mae = metrics.mean_absolute_error(self.actual_values, self.predicted_values)\n return mae\n \n def get_root_mean_squared_log_error(self):\n rmsle = np.sqrt(metrics.mean_squared_log_error(self.actual_values, self.predicted_values))\n return rmsle\n \n def get_error(self):\n return self.predicted_values - self.actual_values\n\n def get_prediction_accuracy(self):\n #calculate errors for each prediction\n y_error = self.predicted_values - self.actual_values\n #variable count stores errors in prediction where error < 20%\n count = []\n for i in range(len(self.actual_values)):\n if(abs(y_error[i]) < 0.20 * self.actual_values[i]):\n count.append(abs(y_error))\n return len(count) / len(y_error) * 100\n","sub_path":"code/model_evaluation_code/polynomial_reg/poly_engine.py","file_name":"poly_engine.py","file_ext":"py","file_size_in_byte":3118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"275311087","text":"items_in_cart = {}\nclass ShoppinCart:\n def __init__(self,customer_id):\n self.customer_id = customer_id\n def add_item(self,product,price):\n if product in items_in_cart:\n print(\"already exists\")\n else:\n items_in_cart[product] = price\n print(product + \" added\")\n def remove_item(self,product):\n if product in items_in_cart:\n del items_in_cart[product]\n else:\n print(\"product not found\")\n # def remove_item(self,product):\nmy_shopping = ShoppinCart(\"id\")\n# my_shopping.add_item(\"oil\",250)\n# my_shopping.add_item(\"oil\",250)\nmy_shopping.add_item(\"fruits\",250)\nprint(items_in_cart)\nmy_shopping.remove_item(\"fruits\")\n\n\n\nprint(items_in_cart)\n\n\n","sub_path":"submissions/sp_014_santhi-sri/week_13/day_3/coding_1/cart_class.py","file_name":"cart_class.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"49602633","text":"import os\nimport pickle\n\n# Reads the User tweets (specified in seedusers files) from the folder /user_tweets_preprocess\n# returns array of preprocessed, tokenized user tweets\n\ndef read_user_stance_tweets_tokenized_pickles(seedusers_file):\n print('started reading user files for ' + seedusers_file)\n \n #Get the seedusers screen names\n seedusers_screen_names = []\n file = open(seedusers_file, 'r') \n for line in file:\n seedusers_screen_names.append(line.strip())\n \n user_tweets = []\n\n for screen_name in seedusers_screen_names:\n #check if file is in path\n filename = screen_name+'.pkl'\n \n if os.path.isfile('user_tweets_preprocess/'+filename):\n try:\n tweets_user_from_file = pickle.load(open(\"user_tweets_preprocess/\"+filename, \"rb\" ) )\n user_tweets.append(tweets_user_from_file)\n except Exception as e:\n print('exception occurred while trying to read user file: {}'.format(filename))\n print(str(e))\n pass\n \n else:\n print('Tweet File not found for user: ' + screen_name)\n \n print('reading of user files finished for seedusers file ' + seedusers_file)\n return user_tweets","sub_path":"Software/Crawling/Workspace/Crawler/DataManipulator/helper_functions/read_user_stance_tweets_tokenized_pickles.py","file_name":"read_user_stance_tweets_tokenized_pickles.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"243990042","text":"from Acquisition import aq_inner\nfrom Acquisition import aq_parent\nfrom opengever.base.interfaces import IReferenceNumber\nfrom opengever.base.interfaces import IReferenceNumberPrefix\nfrom opengever.bundle.sections.constructor import IDontIssueDossierReferenceNumber\nfrom opengever.dossier.behaviors.dossier import IDossier\nfrom opengever.dossier.indexers import TYPES_WITH_CONTAINING_SUBDOSSIER_INDEX\nfrom opengever.globalindex.handlers.task import sync_task\nfrom opengever.globalindex.handlers.task import TaskSqlSyncer\nfrom opengever.meeting.handlers import ProposalSqlSyncer\nfrom opengever.task.task import ITask\nfrom plone import api\nfrom plone.app.workflow.interfaces import ILocalrolesModifiedEvent\nfrom zope.component import getAdapter\nfrom zope.container.interfaces import IContainerModifiedEvent\nfrom zope.lifecycleevent import IObjectRemovedEvent\n\n\ndef set_former_reference_before_moving(obj, event):\n \"\"\"Temporarily store current reference number before\n moving the dossier.\n \"\"\"\n # make sure obj wasn't just created or deleted\n if not event.oldParent or not event.newParent:\n return\n\n dossier_repr = IDossier(obj)\n ref_no = getAdapter(obj, IReferenceNumber).get_number()\n IDossier['temporary_former_reference_number'].set(dossier_repr, ref_no)\n\n\ndef set_former_reference_after_moving(obj, event):\n \"\"\"Use the (hopefully) stored former reference number\n as the real new former reference number. This has to\n be done after the dossier was moved.\n\n \"\"\"\n # make sure obj wasn't just created or deleted\n if not event.oldParent or not event.newParent:\n return\n\n dossier_repr = IDossier(obj)\n former_ref_no = dossier_repr.temporary_former_reference_number\n IDossier['former_reference_number'].set(dossier_repr, unicode(former_ref_no))\n # reset temporary former reference number\n IDossier['temporary_former_reference_number'].set(dossier_repr, u'')\n\n # setting the new number\n parent = aq_parent(aq_inner(obj))\n prefix_adapter = IReferenceNumberPrefix(parent)\n prefix_adapter.set_number(obj)\n\n obj.reindexObject(idxs=['reference'])\n\n\n# Update reference number when adding / moving content\n# (IObjectAddedEvent inherits from IObjectMovedEvent)\ndef save_reference_number_prefix(obj, event):\n if IDontIssueDossierReferenceNumber.providedBy(obj.REQUEST):\n return\n\n if IObjectRemovedEvent.providedBy(event):\n return\n\n parent = aq_parent(aq_inner(obj))\n prefix_adapter = IReferenceNumberPrefix(parent)\n if not prefix_adapter.get_number(obj):\n prefix_adapter.set_number(obj)\n\n # because we can't control the order of event handlers we have to sync\n # all containing tasks manually\n catalog = api.portal.get_tool('portal_catalog')\n tasks = catalog({\n 'path': '/'.join(obj.getPhysicalPath()),\n 'object_provides': 'opengever.task.task.ITask',\n 'depth': -1})\n for task in tasks:\n TaskSqlSyncer(task.getObject(), None).sync()\n\n # And also proposals\n proposals = catalog({\n 'path': '/'.join(obj.getPhysicalPath()),\n 'object_provides': ['opengever.meeting.proposal.IBaseProposal'],\n 'depth': -1})\n for proposal in proposals:\n ProposalSqlSyncer(proposal.getObject(), None).sync()\n\n obj.reindexObject(idxs=['reference'])\n\n\ndef reindex_containing_subdossier_for_contained_objects(dossier, event):\n \"\"\"When a subdossier is modified, we update the ``containing_subdossier``\n index of all contained objects (documents, mails and tasks) so they don't\n show an outdated title in the ``subdossier`` column\n \"\"\"\n catalog = 
api.portal.get_tool('portal_catalog')\n objects = catalog(path='/'.join(dossier.getPhysicalPath()),\n portal_type=TYPES_WITH_CONTAINING_SUBDOSSIER_INDEX)\n\n for obj in objects:\n obj.getObject().reindexObject(idxs=['containing_subdossier'])\n\n\ndef reindex_containing_dossier_for_contained_objects(dossier, event):\n \"\"\"Reindex the containging_dossier index for all the contained obects.\n \"\"\"\n for brain in dossier.portal_catalog(path='/'.join(dossier.getPhysicalPath())):\n obj = brain.getObject()\n obj.reindexObject(idxs=['containing_dossier'])\n\n if ITask.providedBy(obj):\n sync_task(brain.getObject(), event)\n\n\ndef reindex_contained_objects(dossier, event):\n \"\"\"When a dossier is modified, if the title has changed we reindex\n the corresponding index in all contained object (containing_dossier or\n containing_subdossier)\n \"\"\"\n if ILocalrolesModifiedEvent.providedBy(event) or \\\n IContainerModifiedEvent.providedBy(event):\n return\n\n attrs = tuple(\n attr\n for descr in event.descriptions\n for attr in descr.attributes\n )\n if 'IOpenGeverBase.title' not in attrs:\n return\n\n if dossier.is_subdossier():\n reindex_containing_subdossier_for_contained_objects(dossier, event)\n else:\n reindex_containing_dossier_for_contained_objects(dossier, event)\n\n\ndef reindex_blocked_local_roles(dossier, event):\n \"\"\"Reindex blocked_local_roles upon the acquisition blockedness changing.\"\"\"\n dossier.reindexObject(idxs=['blocked_local_roles'])\n\n\ndef purge_reference_number_mappings(copied_dossier, event):\n \"\"\"Reset the reference number mapping when copying (or actually pasting)\n dossiers.\n \"\"\"\n prefix_adapter = IReferenceNumberPrefix(copied_dossier)\n prefix_adapter.purge_mappings()\n","sub_path":"opengever/dossier/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":5468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"488104704","text":"import numpy as np\r\nimport glob as gb\r\nimport cv2\r\nimport random\r\nimport matplotlib.pyplot as plt\r\nimport keras.backend as K\r\nimport itertools\r\nfrom imgaug import augmenters as iaa\r\nfrom spatial_transformer import SpatialTransformer\r\nfrom keras.layers.core import Dense, Flatten, Dropout\r\nfrom keras.layers.convolutional import Convolution2D, MaxPooling2D\r\nfrom keras.utils import np_utils\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom keras.callbacks import ModelCheckpoint\r\nfrom keras.models import Model\r\nfrom keras.layers import Input\r\nfrom keras.layers.merge import concatenate\r\n\r\n\r\ntrainings = []\r\nlabels = []\r\nDIM = 60\r\nnp.random.seed(1337)\r\nbatch_size = 128\r\nnb_classes = 84\r\nnb_epoch = 12\r\nclass_names = []\r\n\r\n\r\n\r\ndef plot_confusion_matrix(cm, classes,normalize=False,title='Confusion matrix',cmap=plt.cm.Blues):\r\n \"\"\"\r\n Plotting the confusion matrix.\r\n \"\"\"\r\n if normalize:\r\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\r\n print(\"Normalized confusion matrix\")\r\n else:\r\n print('Confusion matrix, without normalization')\r\n\r\n\r\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\r\n plt.title(title)\r\n plt.colorbar()\r\n tick_marks = np.arange(len(classes))\r\n plt.xticks(tick_marks, classes, rotation=45)\r\n plt.yticks(tick_marks, classes)\r\n\r\n fmt = '.2f' if normalize else 'd'\r\n thresh = cm.max() / 2.\r\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\r\n plt.text(j, i, format(cm[i, j], fmt),\r\n horizontalalignment=\"center\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\")\r\n\r\n plt.tight_layout()\r\n plt.ylabel('True label')\r\n plt.xlabel('Predicted label')\r\n\r\ndef listShuffling(trainings, labels):\r\n \"\"\"\r\n Shuffling the training and label lists \r\n \"\"\"\r\n c = list(zip(trainings, labels))\r\n random.shuffle(c)\r\n a, b = zip(*c)\r\n \r\n return a, b\r\n\r\n\r\ndef readImage(picture):\r\n \"\"\"\r\n Reading each image with opencv \r\n \"\"\" \r\n return cv2.imread(picture, 0)\r\n\r\n\r\ndef viewImage(image):\r\n \"\"\"\r\n View Image \r\n \"\"\"\r\n cv2.imshow('image',image)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n\r\n\r\ndef formulation(image, directory):\r\n \r\n container = np.array([[]], dtype='float32')\r\n \r\n container = image.ravel() # Reshaping image in 3600\r\n t = container.astype(np.float32) / 255.0\r\n trainings.append(t) # Creating image training list\r\n labels.append(directory) # Creating image label list\r\n \r\n \r\n\r\n\r\ndef prepareTrainingSet():\r\n \"\"\"\r\n Preparing the image training list and image label list \r\n \"\"\"\r\n \r\n r = random.randint(1,6)\r\n\r\n seq = iaa.Sequential([\r\n iaa.Affine(translate_px={\"x\": (-r, r), \"y\": (-r, r)}),\r\n iaa.Affine(rotate=(-10, 10)),\r\n iaa.Affine(scale={\"x\": (r, r), \"y\": (r, r)}),\r\n iaa.AdditiveGaussianNoise(scale=(0, 0.02 * 255))\r\n ])\r\n\r\n \r\n for directory in range(0, 84):\r\n print(\"---------------------\", directory)\r\n class_names.append(directory)\r\n for picture in gb.glob(\"./cluttered/\"+str(directory)+\"/*.png\"):\r\n\r\n img = readImage(picture) # Reading image of 60 x 60 pixels\r\n formulation(img, directory)\r\n \r\n ## Augmentation\r\n img = img[:, :, np.newaxis] # Adding image channel 1\r\n img_aug = seq.augment_image(img) # Applying augmentation\r\n img_transposed = img_aug.transpose(2,0,1) \r\n img_reshaped = img_transposed.reshape(-1, img_aug.shape[1]) # Reshaping in 60 x 60\r\n 
formulation(img_reshaped, directory)\r\n \r\n \r\n return listShuffling(trainings, labels); # Shuffling lists\r\n\r\n \r\n# Initializing image training list and image label list \r\nX, Y = prepareTrainingSet()\r\nclass_names = np.array(class_names)\r\ntrainings = []\r\nlabels = []\r\n\r\n# Data Split\r\nsamples = len(Y)\r\n\r\ntrain_samples = int(samples * 0.8)\r\nX_train = np.array(X[ : train_samples])\r\ny_train = np.asarray(Y[ : train_samples], dtype=np.int64)\r\n\r\nvalid_samples = int(samples * 0.05)\r\nX_valid = np.array(X[train_samples : train_samples + valid_samples])\r\ny_valid = np.asarray(Y[train_samples : train_samples + valid_samples], dtype=np.int64)\r\n\r\ntest_samples = int(samples * 0.15)\r\nX_test = np.array(X[-test_samples : ])\r\ny_test = np.asarray(Y[-test_samples : ], dtype=np.int64)\r\n\r\n\r\nX = []\r\nY = []\r\n\r\n\r\n# Reshape for convolutions\r\nX_train = X_train.reshape((X_train.shape[0], DIM, DIM, 1))\r\nX_valid = X_valid.reshape((X_valid.shape[0], DIM, DIM, 1))\r\nX_test = X_test.reshape((X_test.shape[0], DIM, DIM, 1))\r\n\r\n\r\ny_train = np_utils.to_categorical(y_train, nb_classes)\r\ny_valid = np_utils.to_categorical(y_valid, nb_classes)\r\ny_test = np_utils.to_categorical(y_test, nb_classes)\r\n\r\ninput_shape = (60, 60, 1)\r\n\r\n# initial weights\r\nb = np.zeros((2, 3), dtype='float32')\r\nb[0, 0] = 1\r\nb[1, 1] = 1\r\nW = np.zeros((50, 6), dtype='float32')\r\nweights = [W, b.flatten()]\r\n\r\n\r\n#Localization\r\n\r\nvisible = Input(shape=input_shape)\r\n\r\n\r\nlocnet = Flatten()(visible)\r\nlocnet = Dense(50)(locnet)\r\nlocnet = Dense(units=50, activation='relu')(locnet)\r\nlocnet = Dense(6, weights=weights)(locnet)\r\nlocnet = Model(input=visible, output=locnet)\r\n\r\n#STN\r\nstn = SpatialTransformer(localization_net=locnet, output_size=(28,28), input_shape=input_shape)(visible)\r\n\r\n\r\n#Convolution\r\nconv1 = Convolution2D(32, kernel_size=(5, 5), activation='relu', padding='same')(stn)\r\nconv2 = Convolution2D(32, kernel_size=(5, 5), activation='relu', padding='same')(conv1)\r\n#Pooling\r\npool1 = MaxPooling2D(pool_size=(2, 2))(conv2)\r\n\r\n#Dropout\r\ndropout1 = Dropout(.25)(pool1)\r\n\r\n#Convolution\r\nconv6 = Convolution2D(64, kernel_size=(5, 5), activation='relu', padding='same')(dropout1)\r\nconv7 = Convolution2D(64, kernel_size=(3, 3), activation='relu', padding='same')(conv6)\r\n\r\nconv8 = Convolution2D(64, kernel_size=(5, 5), activation='relu', padding='same')(dropout1)\r\nconv9 = Convolution2D(64, kernel_size=(3, 3), activation='relu', padding='same')(conv8)\r\n\r\n#Merge\r\nmerge = concatenate([conv7, conv9])\r\n\r\n#Pooling\r\npool2 = MaxPooling2D(pool_size=(2, 2))(merge)\r\n\r\n#Dropout\r\ndropout2 = Dropout(.25)(pool2)\r\n\r\n#Flatten\r\nflatten = Flatten()(dropout2)\r\n\r\n#Dense\r\nfully = Dense(units=1280, activation='relu')(flatten)\r\n\r\n#Final\r\noutput = Dense(units=nb_classes, activation='softmax')(fully)\r\n\r\n#Training\r\nmodel = Model(inputs=visible, outputs=output)\r\n\r\nprint(model.summary())\r\n\r\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\r\n\r\nXX = model.input\r\nYY = model.layers[0].output\r\nF = K.function([XX], [YY])\r\n\r\nnb_epochs = 100\r\nbatch_size = 86\r\nfig = plt.figure()\r\n\r\n\r\ncheckpoint = ModelCheckpoint('stn.h5', monitor='val_acc', verbose=1, save_best_only=True, mode='max')\r\nhistory = model.fit( X_train, y_train, validation_data = (X_test, y_test), epochs = nb_epochs, batch_size = batch_size, callbacks=[checkpoint] 
)\r\n\r\n#model.load_weights('borno.h5')\r\n#model.fit( X_train, y_train, validation_data = (X_test, y_test), epochs = nb_epochs, batch_size = batch_size)\r\n\r\n# Plot training & validation accuracy values\r\nplt.plot(history.history['acc'])\r\nplt.plot(history.history['val_acc'])\r\nplt.title('Model accuracy')\r\nplt.ylabel('Accuracy')\r\nplt.xlabel('Epoch')\r\nplt.legend(['Train', 'Test'], loc='upper left')\r\nplt.show()\r\n\r\n# Plot training & validation loss values\r\nplt.plot(history.history['loss'])\r\nplt.plot(history.history['val_loss'])\r\nplt.title('Model loss')\r\nplt.ylabel('Loss')\r\nplt.xlabel('Epoch')\r\nplt.legend(['Train', 'Test'], loc='upper left')\r\nplt.show()\r\n\r\n# Plotting Confusion Matrix\r\ny_pred = model.predict(X_test)\r\ny_pred = (y_pred > 0.5)\r\n\r\ncm = confusion_matrix(y_test.argmax(axis=1), y_pred.argmax(axis=1))\r\nnp.set_printoptions(precision=2)\r\n\r\n\r\n# Plot non-normalized confusion matrix\r\nplt.figure()\r\nplot_confusion_matrix(cm, classes=class_names, title='Non-Normalized')\r\n\r\n# Plot normalized confusion matrix\r\nplt.figure()\r\nplot_confusion_matrix(cm, classes=class_names, normalize=True, title='Normalized')\r\nplt.show()\r\n","sub_path":"version_11.py","file_name":"version_11.py","file_ext":"py","file_size_in_byte":8033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"60019077","text":"#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport webapp2\nimport os\n\n#import sys to define the path to your tweepy library\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__), \"lib/tweepy\"))\nimport tweepy\n\n#we use the sessions module provided directly by weapp2. In production code make sure to store your secret key in a secure place\n\nfrom webapp2_extras import sessions\n\nconfig = {}\nconfig['webapp2_extras.sessions'] = {\n 'secret_key': 'yoursecretkey',\n} \n\nclass Handler(webapp2.RequestHandler):\n #juste a method to write shorter code\n def write(self, *a, **kw):\n self.response.out.write(*a, **kw)\n\n\n def render(self, template, **kw):\n self.write(self.render_str(template, **kw))\n \n\n #again it would be safer to put our consumer key and consumer secret in a secure place \n def twitterconnect(self):\n consumer_key=\"yourconsumerkey\"\n consumer_secret=\"yourconsumersecret\"\n \n \n return tweepy.OAuthHandler(consumer_key, consumer_secret)\n \n #webapp2 session method\n def dispatch(self):\n # Get a session store for this request.\n self.session_store = sessions.get_store(request=self.request)\n\n try:\n # Dispatch the request.\n webapp2.RequestHandler.dispatch(self)\n finally:\n # Save all sessions.\n self.session_store.save_sessions(self.response)\n\n @webapp2.cached_property\n def session(self):\n # Returns a session using the default cookie key.\n return self.session_store.get_session()\n\n \n\n\n\nclass OauthHandler(Handler):\n def get(self):\n\n \n\n auth = self.twitterconnect()\n #this line makes sure tweepy connects with ssl\n auth.secure = True\n\n try: \n #we specify signin with twitter so twitter doesn't ask for permissions everytime\n redirect_url = auth.get_authorization_url(signin_with_twitter=True)\n #we store the request token in a session because we will need it on the callback\n self.session['request_token'] = (auth.request_token.key,auth.request_token.secret)\n self.redirect(redirect_url)\n except tweepy.TweepError:\n self.write('Error! 
Failed to get request token.')\n return\n\n#the callback URL where the user is directed after the Twitter log in\nclass CallBackHandler(Handler):\n def get(self):\n \n \n \n \n \n token = self.session.get('request_token')\n \n \n \n \n #twitter is sending us the oauth verifier as a get paramater\n verifier = self.request.get('oauth_verifier') \n auth = self.twitterconnect()\n #again make sure to use ssl or it will fail\n auth.secure = True\n if token is not None:\n auth.set_request_token(token[0], token[1])\n else:\n self.write(\"no token found\")\n try:\n auth.get_access_token(verifier)\n except tweepy.TweepError:\n self.write(\"error\")\n return\n \n api = tweepy.API(auth)\n api.verify_credentials()\n if not api:\n self.write(\"connexion failed\")\n return\n \n self.session['username']=api.me().screen_name\n \n self.write(\"Welcome\" + self.session['username'])\n \n \n \n \n \n \n \n\n\n\n\n\n\napp = webapp2.WSGIApplication([ ('/tweepyconnection', OauthHandler),('/tweepyconnection/callback.*', CallBackHandler)], debug=True,config=config)","sub_path":"tweepylogin.py","file_name":"tweepylogin.py","file_ext":"py","file_size_in_byte":4112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"148596106","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n__author__ = 'arkadiy'\nfrom CGRtools.RDFread import RDFread\nfrom rdf_parser import rdf_parser\nfrom reaction_treatment import Add_Reaction\nfrom reaction_treatment import Add_Conditions\nfrom pgtools.models import PGdb\n\nclass Reactions():\n def __init__(self, args):\n inputdata = RDFread(args.input)\n stand = rdf_parser(args.dictionary)\n reactions = Add_Reaction(args.configuration_file)\n conditions = Add_Conditions(args.fields)\n for num, data in enumerate(inputdata.readdata()):\n if num % 1000==0:\n print(\"reaction: %d\" % (num + 1))\n fixed_data = stand.get_standardized(data, args.fields)\n structure_id = reactions.add_Structure(fixed_data)\n if conditions.addConditions(fixed_data, structure_id):\n print(\"Conditions have been added successfuly!\")\n else:\n print(\"There is an error in conditions addition!\")\n","sub_path":"Reactions.py","file_name":"Reactions.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"420813523","text":"#pylint: disable=R0903\nimport factory\n\nimport pyrob.schema.stargus\n\n\nclass CmtsHourCpuStats(factory.alchemy.SQLAlchemyModelFactory):\n class Meta:\n model = pyrob.schema.stargus.CmtsHourCpuStats\n sqlalchemy_session = pyrob.db.SessionS\n\n market = None\n hub = None\n cmts_name = None\n hour = None\n resets = None\n cpu = None\n updated = None\n","sub_path":"pyrob/schema/factory/stargus/cmts_hour_cpu_stats.py","file_name":"cmts_hour_cpu_stats.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"321930922","text":"from flask import Flask, request\nfrom flask_restplus import Resource, Api\n\nimport errors\nimport corpus\n\napp = Flask(__name__)\napi = Api(app)\ncorpus_ns = api.namespace('corpus', description='corpus operations')\nstopwords_ns = api.namespace('stopwords', description='stop words operations')\nsubstwords_ns = api.namespace('substwords', description='word substitutions operations')\n\n@corpus_ns.route('/')\nclass CorpusListing(Resource):\n\n @corpus_ns.doc('list corpora')\n def get(self):\n try:\n status, result = corpus.get_corpora()\n return result, status\n except errors.ApiException as e:\n return e.response()\n\n@corpus_ns.route('/')\nclass Corpus(Resource):\n\n @corpus_ns.doc('gets a corpus')\n def get(self, name):\n try:\n status, result = corpus.get_corpus(name)\n return result, status\n except errors.ApiException as e:\n return e.response()\n\n @corpus_ns.doc('creates a corpus')\n def post(self, name):\n try:\n description = api.payload.get('description', '')\n status, result = corpus.save_corpus(name, description, updating=False)\n return result, status\n except errors.ApiException as e:\n return e.response()\n\n @corpus_ns.doc('updates a corpus')\n def put(self, name):\n try:\n description = api.payload.get('description', '')\n status, result = corpus.save_corpus(name, description, updating=True)\n return result, status\n except errors.ApiException as e:\n return e.response()\n\n @corpus_ns.doc('deletes a corpus')\n def delete(self, name):\n try:\n status, result = corpus.delete_corpus(name)\n return result, status\n except errors.ApiException as e:\n return e.response()\n\n@corpus_ns.route('//docs')\nclass CorpusDocListing(Resource):\n\n @corpus_ns.doc('lists docs in a corpus')\n def get(self, name):\n try:\n limit = int(request.args.get('limit', CORPUS_DOC_LIMIT))\n if limit < 1:\n raise errors.ApiException(400, 'invalid limit')\n after_id = request.args.get(\"after_id\", \"\") \n status, result = corpus.list_docs(name, limit, after_id)\n return result, status\n except errors.ApiException as e:\n return e.response()\n\n@corpus_ns.route('//docs/')\nclass CorpusDoc(Resource):\n\n @corpus_ns.doc('creates a document')\n def post(self, name, doc_id):\n try:\n status, result = corpus.save_doc(name, doc_id, api.payload, updating=False)\n return result, status\n except errors.ApiException as e:\n return e.response()\n\n @corpus_ns.doc('deletes a document')\n def delete(self, name, doc_id):\n try:\n status, result = corpus.delete_doc(name, doc_id)\n return result, status\n except errors.ApiException as e:\n return e.response()\n\n @corpus_ns.doc('creates a document')\n def put(self, name, doc_id):\n try:\n status, result = corpus.save_doc(name, doc_id, api.payload, updating=True)\n return result, status\n except errors.ApiException as e:\n return e.response()\n\n@stopwords_ns.route('/')\nclass StopWords(Resource):\n\n @stopwords_ns.doc('list stop words files')\n def get(self):\n try:\n raise errors.ApiException(400, 'not implemented')\n except errors.ApiException as e:\n return e.response()\n\n@substwords_ns.route('/')\nclass SubstWords(Resource):\n\n @substwords_ns.doc('list word substitution files')\n def get(self):\n try:\n raise errors.ApiException(400, 'not implemented')\n except errors.ApiException as e:\n return e.response()\n\nif __name__ == '__main__':\n 
app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"8154185","text":"# -*- coding: utf-8 -*-\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport proto # type: ignore\n\nfrom google.cloud.gaming_v1.types import common\nfrom google.protobuf import field_mask_pb2 # type: ignore\nfrom google.protobuf import timestamp_pb2 # type: ignore\n\n\n__protobuf__ = proto.module(\n package='google.cloud.gaming.v1',\n manifest={\n 'GameServerClusterView',\n 'ListGameServerClustersRequest',\n 'ListGameServerClustersResponse',\n 'GetGameServerClusterRequest',\n 'CreateGameServerClusterRequest',\n 'PreviewCreateGameServerClusterRequest',\n 'PreviewCreateGameServerClusterResponse',\n 'DeleteGameServerClusterRequest',\n 'PreviewDeleteGameServerClusterRequest',\n 'PreviewDeleteGameServerClusterResponse',\n 'UpdateGameServerClusterRequest',\n 'PreviewUpdateGameServerClusterRequest',\n 'PreviewUpdateGameServerClusterResponse',\n 'GameServerClusterConnectionInfo',\n 'GkeClusterReference',\n 'GameServerCluster',\n 'KubernetesClusterState',\n },\n)\n\n\nclass GameServerClusterView(proto.Enum):\n r\"\"\"A view for GameServerCluster objects.\"\"\"\n GAME_SERVER_CLUSTER_VIEW_UNSPECIFIED = 0\n BASIC = 1\n FULL = 2\n\n\nclass ListGameServerClustersRequest(proto.Message):\n r\"\"\"Request message for\n GameServerClustersService.ListGameServerClusters.\n\n Attributes:\n parent (str):\n Required. The parent resource name, in the\n following form:\n \"projects/{project}/locations/{location}/realms/{realm}\".\n page_size (int):\n Optional. The maximum number of items to return. If\n unspecified, the server will pick an appropriate default.\n The server may return fewer items than requested. A caller\n should only rely on response's\n [next_page_token][google.cloud.gaming.v1.ListGameServerClustersResponse.next_page_token]\n to determine if there are more GameServerClusters left to be\n queried.\n page_token (str):\n Optional. The next_page_token value returned from a previous\n List request, if any.\n filter (str):\n Optional. The filter to apply to list\n results.\n order_by (str):\n Optional. Specifies the ordering of results following syntax\n at\n https://cloud.google.com/apis/design/design_patterns#sorting_order.\n view (google.cloud.gaming_v1.types.GameServerClusterView):\n Optional. View for the returned GameServerCluster objects.\n When ``FULL`` is specified, the ``cluster_state`` field is\n also returned in the GameServerCluster object, which\n includes the state of the referenced Kubernetes cluster such\n as versions and provider info. 
The default/unset value is\n GAME_SERVER_CLUSTER_VIEW_UNSPECIFIED, same as BASIC, which\n does not return the ``cluster_state`` field.\n \"\"\"\n\n parent = proto.Field(\n proto.STRING,\n number=1,\n )\n page_size = proto.Field(\n proto.INT32,\n number=2,\n )\n page_token = proto.Field(\n proto.STRING,\n number=3,\n )\n filter = proto.Field(\n proto.STRING,\n number=4,\n )\n order_by = proto.Field(\n proto.STRING,\n number=5,\n )\n view = proto.Field(\n proto.ENUM,\n number=6,\n enum='GameServerClusterView',\n )\n\n\nclass ListGameServerClustersResponse(proto.Message):\n r\"\"\"Response message for\n GameServerClustersService.ListGameServerClusters.\n\n Attributes:\n game_server_clusters (Sequence[google.cloud.gaming_v1.types.GameServerCluster]):\n The list of game server clusters.\n next_page_token (str):\n Token to retrieve the next page of results,\n or empty if there are no more results in the\n list.\n unreachable (Sequence[str]):\n List of locations that could not be reached.\n \"\"\"\n\n @property\n def raw_page(self):\n return self\n\n game_server_clusters = proto.RepeatedField(\n proto.MESSAGE,\n number=1,\n message='GameServerCluster',\n )\n next_page_token = proto.Field(\n proto.STRING,\n number=2,\n )\n unreachable = proto.RepeatedField(\n proto.STRING,\n number=4,\n )\n\n\nclass GetGameServerClusterRequest(proto.Message):\n r\"\"\"Request message for\n GameServerClustersService.GetGameServerCluster.\n\n Attributes:\n name (str):\n Required. The name of the game server cluster to retrieve,\n in the following form:\n ``projects/{project}/locations/{location}/realms/{realm-id}/gameServerClusters/{cluster}``.\n view (google.cloud.gaming_v1.types.GameServerClusterView):\n Optional. View for the returned GameServerCluster objects.\n When ``FULL`` is specified, the ``cluster_state`` field is\n also returned in the GameServerCluster object, which\n includes the state of the referenced Kubernetes cluster such\n as versions and provider info. The default/unset value is\n GAME_SERVER_CLUSTER_VIEW_UNSPECIFIED, same as BASIC, which\n does not return the ``cluster_state`` field.\n \"\"\"\n\n name = proto.Field(\n proto.STRING,\n number=1,\n )\n view = proto.Field(\n proto.ENUM,\n number=6,\n enum='GameServerClusterView',\n )\n\n\nclass CreateGameServerClusterRequest(proto.Message):\n r\"\"\"Request message for\n GameServerClustersService.CreateGameServerCluster.\n\n Attributes:\n parent (str):\n Required. The parent resource name, in the following form:\n ``projects/{project}/locations/{location}/realms/{realm-id}``.\n game_server_cluster_id (str):\n Required. The ID of the game server cluster\n resource to be created.\n game_server_cluster (google.cloud.gaming_v1.types.GameServerCluster):\n Required. The game server cluster resource to\n be created.\n \"\"\"\n\n parent = proto.Field(\n proto.STRING,\n number=1,\n )\n game_server_cluster_id = proto.Field(\n proto.STRING,\n number=2,\n )\n game_server_cluster = proto.Field(\n proto.MESSAGE,\n number=3,\n message='GameServerCluster',\n )\n\n\nclass PreviewCreateGameServerClusterRequest(proto.Message):\n r\"\"\"Request message for\n GameServerClustersService.PreviewCreateGameServerCluster.\n\n Attributes:\n parent (str):\n Required. The parent resource name, in the following form:\n ``projects/{project}/locations/{location}/realms/{realm}``.\n game_server_cluster_id (str):\n Required. The ID of the game server cluster\n resource to be created.\n game_server_cluster (google.cloud.gaming_v1.types.GameServerCluster):\n Required. 
The game server cluster resource to\n be created.\n preview_time (google.protobuf.timestamp_pb2.Timestamp):\n Optional. The target timestamp to compute the\n preview.\n view (google.cloud.gaming_v1.types.GameServerClusterView):\n Optional. This field is deprecated, preview\n will always return KubernetesClusterState.\n \"\"\"\n\n parent = proto.Field(\n proto.STRING,\n number=1,\n )\n game_server_cluster_id = proto.Field(\n proto.STRING,\n number=2,\n )\n game_server_cluster = proto.Field(\n proto.MESSAGE,\n number=3,\n message='GameServerCluster',\n )\n preview_time = proto.Field(\n proto.MESSAGE,\n number=4,\n message=timestamp_pb2.Timestamp,\n )\n view = proto.Field(\n proto.ENUM,\n number=6,\n enum='GameServerClusterView',\n )\n\n\nclass PreviewCreateGameServerClusterResponse(proto.Message):\n r\"\"\"Response message for\n GameServerClustersService.PreviewCreateGameServerCluster.\n\n Attributes:\n etag (str):\n The ETag of the game server cluster.\n target_state (google.cloud.gaming_v1.types.TargetState):\n The target state.\n cluster_state (google.cloud.gaming_v1.types.KubernetesClusterState):\n Output only. The state of the Kubernetes cluster in preview,\n this will be available if 'view' is set to ``FULL`` in the\n relevant List/Get/Preview request.\n \"\"\"\n\n etag = proto.Field(\n proto.STRING,\n number=2,\n )\n target_state = proto.Field(\n proto.MESSAGE,\n number=3,\n message=common.TargetState,\n )\n cluster_state = proto.Field(\n proto.MESSAGE,\n number=4,\n message='KubernetesClusterState',\n )\n\n\nclass DeleteGameServerClusterRequest(proto.Message):\n r\"\"\"Request message for\n GameServerClustersService.DeleteGameServerCluster.\n\n Attributes:\n name (str):\n Required. The name of the game server cluster to delete, in\n the following form:\n ``projects/{project}/locations/{location}/gameServerClusters/{cluster}``.\n \"\"\"\n\n name = proto.Field(\n proto.STRING,\n number=1,\n )\n\n\nclass PreviewDeleteGameServerClusterRequest(proto.Message):\n r\"\"\"Request message for\n GameServerClustersService.PreviewDeleteGameServerCluster.\n\n Attributes:\n name (str):\n Required. The name of the game server cluster to delete, in\n the following form:\n ``projects/{project}/locations/{location}/gameServerClusters/{cluster}``.\n preview_time (google.protobuf.timestamp_pb2.Timestamp):\n Optional. The target timestamp to compute the\n preview.\n \"\"\"\n\n name = proto.Field(\n proto.STRING,\n number=1,\n )\n preview_time = proto.Field(\n proto.MESSAGE,\n number=2,\n message=timestamp_pb2.Timestamp,\n )\n\n\nclass PreviewDeleteGameServerClusterResponse(proto.Message):\n r\"\"\"Response message for\n GameServerClustersService.PreviewDeleteGameServerCluster.\n\n Attributes:\n etag (str):\n The ETag of the game server cluster.\n target_state (google.cloud.gaming_v1.types.TargetState):\n The target state.\n \"\"\"\n\n etag = proto.Field(\n proto.STRING,\n number=2,\n )\n target_state = proto.Field(\n proto.MESSAGE,\n number=3,\n message=common.TargetState,\n )\n\n\nclass UpdateGameServerClusterRequest(proto.Message):\n r\"\"\"Request message for\n GameServerClustersService.UpdateGameServerCluster.\n\n Attributes:\n game_server_cluster (google.cloud.gaming_v1.types.GameServerCluster):\n Required. The game server cluster to be updated. Only fields\n specified in update_mask are updated.\n update_mask (google.protobuf.field_mask_pb2.FieldMask):\n Required. Mask of fields to update. At least one path must\n be supplied in this field. 
For the ``FieldMask`` definition,\n see\n https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask\n \"\"\"\n\n game_server_cluster = proto.Field(\n proto.MESSAGE,\n number=1,\n message='GameServerCluster',\n )\n update_mask = proto.Field(\n proto.MESSAGE,\n number=2,\n message=field_mask_pb2.FieldMask,\n )\n\n\nclass PreviewUpdateGameServerClusterRequest(proto.Message):\n r\"\"\"Request message for\n GameServerClustersService.UpdateGameServerCluster.\n\n Attributes:\n game_server_cluster (google.cloud.gaming_v1.types.GameServerCluster):\n Required. The game server cluster to be updated. Only fields\n specified in update_mask are updated.\n update_mask (google.protobuf.field_mask_pb2.FieldMask):\n Required. Mask of fields to update. At least one path must\n be supplied in this field. For the ``FieldMask`` definition,\n see\n https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask\n preview_time (google.protobuf.timestamp_pb2.Timestamp):\n Optional. The target timestamp to compute the\n preview.\n \"\"\"\n\n game_server_cluster = proto.Field(\n proto.MESSAGE,\n number=1,\n message='GameServerCluster',\n )\n update_mask = proto.Field(\n proto.MESSAGE,\n number=2,\n message=field_mask_pb2.FieldMask,\n )\n preview_time = proto.Field(\n proto.MESSAGE,\n number=3,\n message=timestamp_pb2.Timestamp,\n )\n\n\nclass PreviewUpdateGameServerClusterResponse(proto.Message):\n r\"\"\"Response message for\n GameServerClustersService.PreviewUpdateGameServerCluster\n\n Attributes:\n etag (str):\n The ETag of the game server cluster.\n target_state (google.cloud.gaming_v1.types.TargetState):\n The target state.\n \"\"\"\n\n etag = proto.Field(\n proto.STRING,\n number=2,\n )\n target_state = proto.Field(\n proto.MESSAGE,\n number=3,\n message=common.TargetState,\n )\n\n\nclass GameServerClusterConnectionInfo(proto.Message):\n r\"\"\"The game server cluster connection information.\n Attributes:\n gke_cluster_reference (google.cloud.gaming_v1.types.GkeClusterReference):\n Reference to the GKE cluster where the game\n servers are installed.\n namespace (str):\n Namespace designated on the game server\n cluster where the Agones game server instances\n will be created. Existence of the namespace will\n be validated during creation.\n \"\"\"\n\n gke_cluster_reference = proto.Field(\n proto.MESSAGE,\n number=7,\n oneof='cluster_reference',\n message='GkeClusterReference',\n )\n namespace = proto.Field(\n proto.STRING,\n number=5,\n )\n\n\nclass GkeClusterReference(proto.Message):\n r\"\"\"A reference to a GKE cluster.\n Attributes:\n cluster (str):\n The full or partial name of a GKE cluster, using one of the\n following forms:\n\n - ``projects/{project}/locations/{location}/clusters/{cluster}``\n - ``locations/{location}/clusters/{cluster}``\n - ``{cluster}`` If project and location are not specified,\n the project and location of the GameServerCluster\n resource are used to generate the full name of the GKE\n cluster.\n \"\"\"\n\n cluster = proto.Field(\n proto.STRING,\n number=1,\n )\n\n\nclass GameServerCluster(proto.Message):\n r\"\"\"A game server cluster resource.\n Attributes:\n name (str):\n Required. 
The resource name of the game server cluster, in\n the following form:\n ``projects/{project}/locations/{location}/realms/{realm}/gameServerClusters/{cluster}``.\n For example,\n ``projects/my-project/locations/{location}/realms/zanzibar/gameServerClusters/my-onprem-cluster``.\n create_time (google.protobuf.timestamp_pb2.Timestamp):\n Output only. The creation time.\n update_time (google.protobuf.timestamp_pb2.Timestamp):\n Output only. The last-modified time.\n labels (Sequence[google.cloud.gaming_v1.types.GameServerCluster.LabelsEntry]):\n The labels associated with this game server\n cluster. Each label is a key-value pair.\n connection_info (google.cloud.gaming_v1.types.GameServerClusterConnectionInfo):\n The game server cluster connection\n information. This information is used to manage\n game server clusters.\n etag (str):\n ETag of the resource.\n description (str):\n Human readable description of the cluster.\n cluster_state (google.cloud.gaming_v1.types.KubernetesClusterState):\n Output only. The state of the Kubernetes cluster, this will\n be available if 'view' is set to ``FULL`` in the relevant\n List/Get/Preview request.\n \"\"\"\n\n name = proto.Field(\n proto.STRING,\n number=1,\n )\n create_time = proto.Field(\n proto.MESSAGE,\n number=2,\n message=timestamp_pb2.Timestamp,\n )\n update_time = proto.Field(\n proto.MESSAGE,\n number=3,\n message=timestamp_pb2.Timestamp,\n )\n labels = proto.MapField(\n proto.STRING,\n proto.STRING,\n number=4,\n )\n connection_info = proto.Field(\n proto.MESSAGE,\n number=5,\n message='GameServerClusterConnectionInfo',\n )\n etag = proto.Field(\n proto.STRING,\n number=6,\n )\n description = proto.Field(\n proto.STRING,\n number=7,\n )\n cluster_state = proto.Field(\n proto.MESSAGE,\n number=11,\n message='KubernetesClusterState',\n )\n\n\nclass KubernetesClusterState(proto.Message):\n r\"\"\"The state of the Kubernetes cluster.\n Attributes:\n agones_version_installed (str):\n Output only. The version of Agones currently\n installed in the registered Kubernetes cluster.\n kubernetes_version_installed (str):\n Output only. The version of Kubernetes that\n is currently used in the registered Kubernetes\n cluster (as detected by the Cloud Game Servers\n service).\n installation_state (google.cloud.gaming_v1.types.KubernetesClusterState.InstallationState):\n Output only. The state for the installed\n versions of Agones/Kubernetes.\n version_installed_error_message (str):\n Output only. The detailed error message for\n the installed versions of Agones/Kubernetes.\n provider (str):\n Output only. The cloud provider type reported\n by the first node's providerID in the list of\n nodes on the Kubernetes endpoint. On Kubernetes\n platforms that support zero-node clusters (like\n GKE-on-GCP), the provider type will be empty.\n agones_version_targeted (str):\n Output only. The version of Agones that is\n targeted to be installed in the cluster.\n \"\"\"\n class InstallationState(proto.Enum):\n r\"\"\"The state of the installed versions of Agones/Kubernetes. 
See\n also https://cloud.google.com/game-servers/docs/versions-and-\n upgrades.\n \"\"\"\n INSTALLATION_STATE_UNSPECIFIED = 0\n AGONES_KUBERNETES_VERSION_SUPPORTED = 1\n AGONES_VERSION_UNSUPPORTED = 2\n AGONES_KUBERNETES_VERSION_UNSUPPORTED = 3\n AGONES_VERSION_UNRECOGNIZED = 4\n KUBERNETES_VERSION_UNRECOGNIZED = 5\n VERSION_VERIFICATION_FAILED = 6\n AGONES_NOT_INSTALLED = 7\n\n agones_version_installed = proto.Field(\n proto.STRING,\n number=1,\n )\n kubernetes_version_installed = proto.Field(\n proto.STRING,\n number=2,\n )\n installation_state = proto.Field(\n proto.ENUM,\n number=3,\n enum=InstallationState,\n )\n version_installed_error_message = proto.Field(\n proto.STRING,\n number=4,\n )\n provider = proto.Field(\n proto.STRING,\n number=5,\n )\n agones_version_targeted = proto.Field(\n proto.STRING,\n number=6,\n )\n\n\n__all__ = tuple(sorted(__protobuf__.manifest))\n","sub_path":"google/cloud/gaming/v1/gaming-v1-py/google/cloud/gaming_v1/types/game_server_clusters.py","file_name":"game_server_clusters.py","file_ext":"py","file_size_in_byte":19938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"607897662","text":"from flask import Flask, render_template\n\napp=Flask(__name__)\n\n@app.route('/')\ndef home():\n\treturn render_template(\"home.html\")\n\n@app.route('/plot/')\ndef plot():\n\tfrom pandas_datareader import data\n\timport datetime\n\tfrom dateutil.relativedelta import relativedelta\n\tfrom bokeh.plotting import figure, show, output_file\n\tfrom bokeh.embed import components\n\tfrom bokeh.resources import CDN\n\n\tstart = datetime.date.today() - relativedelta(months=1)\n\tend = datetime.date.today()\n\n\tdf = data.DataReader(name='AMZN', data_source = 'yahoo', start=start, end=end)\n\n\tdef inc_dec(c, o):\n\t if c > o:\n\t value = 1\n\t elif c <= o:\n\t value = 0\n\t return value\n\n\tdf['status']=[inc_dec(c,o) for c,o in zip(df.Close,df.Open)]\n\tdf['mid']=(df.Open+df.Close)/2\n\tdf['height']=abs(df.Open-df.Close)\n\n\tp=figure(x_axis_type='datetime', width=1000, height=300,\n\t\tresponsive=True, toolbar_location = 'above')\n\tp.title.text='Candlestick Chart: AMZN from %s to %s' % (start,end)\n\tp.grid.grid_line_alpha=0.3\n\n\thour_12 = 12*60*60*1000\n\n\tp.segment(df.index, df.Low, df.index, df.High, line_color='black')\n\n\tp.rect(df.index[df.status==1], df.mid[df.status==1], hour_12, df.height[df.status==1],\n\t\tfill_color='silver', line_color='black')\n\tp.rect(df.index[df.status==0], df.mid[df.status==0], hour_12, df.height[df.status==0],\n\t\tfill_color='tomato', line_color='black')\n\n\tscript, div = components(p)\n\tcdn_js = CDN.js_files[0]\n\tcdn_css = CDN.css_files[0]\n\n\treturn render_template(\"plot.html\", script=script, div=div,\n\t\tcdn_js=cdn_js,cdn_css=cdn_css)\n\n@app.route('/about/')\ndef about():\n\treturn render_template(\"about.html\")\n\nif __name__==\"__main__\":\n\tapp.run(debug=True)","sub_path":"flask_practice.py","file_name":"flask_practice.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"64834580","text":"import sys\r\n\r\n\r\nclass stack_function:\r\n\r\n def __init__(self):\r\n\r\n self.stack = []\r\n\r\n def push(self,X):\r\n\r\n self.stack.append(X)\r\n\r\n def pop(self):\r\n\r\n if len(self.stack) == 0:\r\n\r\n return -1\r\n\r\n else:\r\n\r\n return self.stack.pop()\r\n\r\n def size(self):\r\n\r\n return len(self.stack)\r\n\r\n def empty(self):\r\n\r\n if len(self.stack) == 0:\r\n\r\n return 1\r\n\r\n else:\r\n\r\n return 0\r\n\r\n def top(self):\r\n\r\n if len(self.stack) == 0:\r\n\r\n return -1\r\n\r\n else:\r\n\r\n return self.stack[-1]\r\n\r\n\r\nT = int(sys.stdin.readline())\r\n\r\na = stack_function()\r\n\r\nbox = []\r\n\r\nfor i in range(T):\r\n\r\n func = sys.stdin.readline().split()\r\n\r\n order = func[0]\r\n\r\n if order ==\"push\":\r\n\r\n value = func[1]\r\n\r\n a.push(value)\r\n\r\n elif order ==\"pop\":\r\n\r\n box.append(a.pop())\r\n\r\n elif order ==\"top\":\r\n\r\n box.append(a.top())\r\n\r\n elif order ==\"size\":\r\n\r\n box.append(a.size())\r\n\r\n elif order ==\"empty\":\r\n\r\n box.append(a.empty())\r\n\r\n\r\nfor i in box:\r\n\r\n print(i)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"백준_10828_스택_S4.py","file_name":"백준_10828_스택_S4.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"488668256","text":"import numpy as np\nimport random\n\n\nNUM_TO_TOKEN = {\n 1: 'x',\n -1: 'o',\n 0: ' '\n}\nTOKEN_TO_NUM = {token: num for num, token in NUM_TO_TOKEN.items()}\n\n\nclass Environment(object):\n def __init__(self):\n self.board = np.zeros((3, 3))\n self.winner = 0\n \n def is_empty(self, i, j):\n return self.board[i][j] == 0\n \n def play(self, i, j, num):\n self.board[i][j] = num\n \n def try_play(self, i, j, num):\n self.board[i][j] = num\n h = self.state_hash()\n self.board[i][j] = 0\n return h\n \n def state_hash(self):\n h = 0.\n for i in range(3):\n for j in range(3):\n bit = i * 3 + j\n h += (self.board[i][j] + 1) * (3 ** bit)\n return int(h)\n \n def get_winner(self, force_recalculation=False):\n if self.winner and not force_recalculation:\n return self.winner\n \n self.winner = 0\n \n for i in range(3):\n s = np.sum(self.board[i])\n if s == -3 or s == 3:\n self.winner = s / 3\n \n for j in range(3):\n s = np.sum(self.board[:,j])\n if s == -3 or s == 3:\n self.winner = s / 3\n \n s = np.trace(self.board)\n if s == -3 or s == 3:\n self.winner = s / 3\n \n s = np.trace(np.fliplr(self.board))\n if s == -3 or s == 3:\n self.winner = s / 3\n \n return self.winner\n \n def game_over(self):\n return self.get_winner() or 0 not in self.board\n \n def draw(self):\n for i in range(3):\n print('---------')\n print(' ' \n + ' '.join([NUM_TO_TOKEN[num] \n for num in self.board[i]])\n + ' ')\n print('---------')\n \n def reset(self):\n self.board = np.zeros((3, 3))\n self.winner = 0\n \n def find_winning_positions(self, i=0, j=0, winners=None):\n if winners is None:\n winners = np.zeros(3**9)\n if i == 3:\n winners[self.state_hash()] = self.get_winner(force_recalculation=True)\n return winners\n for fill in range(-1, 2):\n self.board[i][j] = fill\n if j == 2:\n self.find_winning_positions(i+1, 0, winners)\n else:\n self.find_winning_positions(i, j+1, winners)\n return winners\n \n \nclass Agent(object):\n def __init__(self, token):\n self.num = TOKEN_TO_NUM[token]\n self._initialize_values()\n self.history = []\n self.last_state_hash = None\n \n def _initialize_values(self):\n winning_positions = Environment().find_winning_positions()\n agent_wins = np.where(winning_positions == self.num, 1, 0)\n agent_loses = np.where(winning_positions == -self.num, -1, 0)\n wins_or_loses = agent_wins + agent_loses\n self.values = np.where(wins_or_loses == 0, 0.0, 0) + wins_or_loses\n \n def play(self, env, epsilon, verbose=False):\n starting_state_hash = env.state_hash()\n if self.last_state_hash is not None:\n self.history.append((\n self.last_state_hash,\n starting_state_hash\n ))\n r = np.random.random()\n possible_plays = []\n for i in range(3):\n for j in range(3):\n if env.is_empty(i, j):\n possible_plays.append((i, j))\n if r < epsilon:\n if verbose:\n print('AI is making random move, cause why the fuck not')\n i, j = random.choice(possible_plays)\n else:\n play_values = []\n best_play = None\n best_play_value = -float('inf')\n for i, j in possible_plays:\n play_hash = env.try_play(i, j, self.num)\n play_value = self.values[play_hash]\n play_values.append((i, j, play_hash, round(play_value, 3)))\n if play_value > best_play_value:\n best_play = i, j\n best_play_value = play_value\n if verbose:\n print('AI is using the following values: ' + str(play_values))\n i, j = best_play\n env.play(i, j, self.num)\n final_state_hash = env.state_hash()\n self.history.append((\n starting_state_hash,\n final_state_hash\n ))\n self.last_state_hash = final_state_hash\n \n def update(self, env, 
learning_rate):\n final_state_hash = env.state_hash()\n winner = env.get_winner()\n reward = winner / self.num\n reward = reward if reward == 1 else 0\n self.values[final_state_hash] = reward\n if final_state_hash != self.last_state_hash:\n self.history.append((\n self.last_state_hash,\n final_state_hash\n ))\n for start, final in reversed(self.history):\n self.values[start] = self.values[start] + learning_rate * (self.values[final] - self.values[start])\n self.history = []\n \nclass Human(object):\n \n def __init__(self, token):\n self.num = TOKEN_TO_NUM[token]\n \n def play(self, env, *args, **kwargs):\n legal_move = False\n while not legal_move:\n move = input(\"Enter your move in coordinates i,j: \")\n i, j = move.split(',')\n i = int(i.strip())\n j = int(j.strip())\n legal_move = env.is_empty(i, j)\n env.play(i, j, self.num)\n \n def update(self, *args):\n pass\n \n \ndef play_game(env, player1, player2, learning_rate=0.01, epsilon=0.01, verbose=True):\n env.reset()\n current_player = player1\n while not env.game_over():\n if verbose: \n env.draw()\n current_player.play(env, epsilon, verbose=verbose)\n if current_player is player1:\n current_player = player2\n else:\n current_player = player1\n if verbose: \n env.draw()\n print('GAME OVER!')\n player1.update(env, learning_rate)\n player2.update(env, learning_rate)\n \n\nif __name__ == '__main__':\n env = Environment()\n agent1 = Agent('x')\n agent2 = Agent('o')\n for i in range(10000):\n if i and i % 100 == 0:\n print(i)\n play_game(env, agent1, agent2, epsilon=0.2, learning_rate=0.5, verbose=False)\n \"\"\"\n agent3 = Agent('o')\n for i in range(10000):\n if i and i % 100 == 0:\n print(i)\n play_game(env, agent1, agent3, epsilon=0.2, learning_rate=0.5, verbose=False)\n for i in range(10000):\n if i and i % 100 == 0:\n print(i)\n play_game(env, agent1, agent2, epsilon=0.2, learning_rate=0.5, verbose=False)\n \"\"\"\n \n human = Human('o')\n play_another = True\n while play_another:\n play_game(env, agent1, human)\n decision = input('Would you like to play another game? Y/N: ').strip()\n if decision == 'N':\n play_another = False","sub_path":"Reinforcement_Learning_Intro/tic_tac_toe.py","file_name":"tic_tac_toe.py","file_ext":"py","file_size_in_byte":7130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"14852898","text":"\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.dummy import DummyClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import Pipeline\nsns.set()\n\n# Load data set and split to train/test\n\nwith np.load('mnist-6k.npz', allow_pickle=False) as npz_file:\n print(npz_file.keys())\n\nwith np.load('mnist-6k.npz', allow_pickle=False) as npz_file:\n X = npz_file['data']\n y = npz_file['labels']\n\nX_tr, X_te, y_tr, y_te = train_test_split(X, y, stratify=y, test_size=1/6, random_state=0)\nprint(X_tr.shape, X_te.shape, y_tr.shape, y_te.shape)\n\n\n# Dummy classifier\ndummy = DummyClassifier(strategy='most_frequent')\ndummy.fit(X_tr, y_tr)\n\n# Accuracy on test set\naccuracy = dummy.score(X_te, y_te)\nprint('Baseline accuracy: {:.3f}'.format(accuracy))\n\n# k-NN classifier\n\nscaler = StandardScaler()\n\n# grid search for optimal k:\nk_values = np.arange(1, 50, 5)\n\ntest_curve = []\n\nfor k in k_values:\n pipe = Pipeline([\n ('scaler', None), # no scaling of data\n ('knn', KNeighborsClassifier(n_neighbors=k))\n ])\n\n pipe.fit(X_tr, y_tr)\n test_acc = pipe.score(X_te, y_te)\n test_curve.append(test_acc)\n\nplt.plot(k_values, test_curve, label='test')\nplt.legend()\nplt.show()\n\n# Print k with maximum accuracy\nmax_accuracy = max(test_curve)\nbest_k = np.argmax(test_curve) * 5 + 1\nprint('Maximum accuracy: {:.3f} with k={}'.format(max_accuracy, best_k))\n","sub_path":"mnist_knn.py","file_name":"mnist_knn.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"81301921","text":"#!/usr/bin/env python\n# license removed for brevity\nimport numpy as np\nimport rospy\nfrom std_msgs.msg import Float32\nfrom std_msgs.msg import Float32MultiArray \nfrom std_msgs.msg import Int32\nimport time\n\nimport Tkinter as tk\n\nfrom rospy.numpy_msg import numpy_msg\nfrom rospy_tutorials.msg import Floats\nposture_data=[0,0,0]\ndepth_data = 0\nfc = 0\ntc = 0\n\ndef B_onclick():\n global eList\n for i in range(3):\n posture_data[i] = float(eList[i].get())\n depth = float(eList[3].get())\n fc = int(eList[4].get())\n tc = int(eList[5].get())\n vol = float(eList[6].get())\n but = int(eList[7].get())\n t = float(eList[8].get())\n trigger = int(eList[9].get())\n pos=np.array(posture_data, dtype = np.float32)\n pub1.publish(pos)\n pub2.publish(depth)\n pub3.publish(fc)\n pub4.publish(tc)\n pub5.publish(vol)\n pub6.publish(but)\n pub7.publish(t)\n pub8.publish(trigger)\n\nwin = tk.Tk()\nwin.title('Dummy motor')\n\neList = []\ntext = ['row', 'pitch', 'yaw', 'depth', 'forward command', 'turn command', 'voltage', 'button', 'sumi_t', 'trigger']\nfor i in range(len(text)):\n L = tk.Label(win, text = text[i]).grid(row=i, column=0)\n e = tk.Entry(win)\n eList.append(e)\n e.grid(row=i, column=1)\n e.insert('insert', 0)\nb = tk.Button(win, text = 'publish', command = B_onclick).grid(row = len(text), column=0)\n\nrospy.init_node('dummy',anonymous=True)\npub1 = rospy.Publisher('/posture',numpy_msg(Floats),queue_size=10)\npub2 = rospy.Publisher('/depth',Float32,queue_size=10)\npub3 = rospy.Publisher('/forward_command',Int32,queue_size=10)\npub4 = rospy.Publisher('/turn_command',Int32,queue_size=10)\npub5 = rospy.Publisher('/voltage',Float32,queue_size=10)\npub6 = rospy.Publisher('/button',Int32,queue_size=10)\npub7 = rospy.Publisher('/sumi_t',Float32,queue_size=10)\npub8 = rospy.Publisher('/trigger_command',Int32,queue_size=10)\n\nwin.mainloop()\n","sub_path":"src/dummy_sensor.py","file_name":"dummy_sensor.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"609025613","text":"import tensorflow as tf\nimport time\nimport numpy as np\n\nimport helpers.helper_funcs as helpers\n#import helpers.cifar_models as models\n\ndef main():\n print('Loading data...')\n x_train, y_train, x_test, y_test = helpers.get_cifar10_data()\n y_test = tf.squeeze(y_test)\n\n #train_and_save_models(x_train, y_train)\n\n print(\"Loading models...\")\n l1_model = tf.keras.models.load_model('models/cifar/l1_model')\n l2_model = tf.keras.models.load_model('models/cifar/l2_model')\n l3_model = tf.keras.models.load_model('models/cifar/l3_model')\n l4_model = tf.keras.models.load_model('models/cifar/l4_model')\n l5_model = tf.keras.models.load_model('models/cifar/l5_model')\n l6_model = tf.keras.models.load_model('models/cifar/l6_model')\n l7_model = tf.keras.models.load_model('models/cifar/l7_model')\n l8_model = tf.keras.models.load_model('models/cifar/l8_model')\n l9_model = tf.keras.models.load_model('models/cifar/l9_model')\n l10_model = tf.keras.models.load_model('models/cifar/l10_model')\n\n\n # Get dictionary of counts of each class in y_test\n y_test_np = y_test.numpy()\n unique, counts = np.unique(y_test.numpy(), return_counts=True)\n count_dict = dict(zip(unique, counts))\n\n # Set up accuracy grid\n accuracies = np.zeros((10, 10))\n\n # Iterate over all models and get their predicted outputs\n models = [l1_model, l2_model, l3_model, l4_model, l5_model, l6_model, l7_model, l8_model, l9_model, l10_model]\n models_preds = []\n for i in range(10):\n model = models[i]\n\n model_probs = model.predict(x_test)\n model_preds = np.argmax(model_probs, axis=1)\n models_preds.append(model_preds)\n\n\n for i in range(10):\n model1 = models_preds[i]\n\n for j in range(10):\n model2 = models_preds[j]\n\n # Compute the number of times where the two models match predictions\n model_count = np.count_nonzero(model1 == model2)\n accuracies[i][j] = model_count / 10000\n\n\n print(accuracies)\n\n\nif __name__ == '__main__':\n main()","sub_path":"DiffNumModelCombinations/model-analysis/model_prediction_matches.py","file_name":"model_prediction_matches.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"336450096","text":"import tkinter\r\nfrom tkinter import filedialog\r\n\r\ndef openfile():\r\n\tf = filedialog.askopenfile()\r\n\tfilepath.set(f.name)\r\n\r\n\tf.close()\r\n\r\nmywind = tkinter.Tk()\r\nframe = tkinter.Frame(mywind,width = \"500\", height = \"400\", bg = \"green\")\r\nframe.pack()\r\n\r\nfilepath = tkinter.StringVar()\r\nfilepath.set(\"filepath\")\r\n\r\nbutton = tkinter.Button(frame, text = \"file open\", command = openfile)\r\nbutton.grid(row = 0, column = 0)\r\n\r\nlabel_path = tkinter.Label(frame, textvariable = filepath)\r\nlabel_path.grid(row = 1, column = 0)\r\n\r\nmywind.mainloop()","sub_path":"courses/w04_py/source/s05/gui/filedialog1.py","file_name":"filedialog1.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"408333933","text":"from PyQt5.QtWidgets import QApplication, QDialog, QVBoxLayout, QHBoxLayout, QPushButton, QSizePolicy, QLabel, \\\n QDoubleSpinBox, QSpacerItem\nfrom PyQt5.QtCore import Qt, pyqtSlot, QObject\nfrom PyQt5.QtGui import QResizeEvent\n\n\nclass _ScalableImageViewerContext(QObject):\n def __init__(self, label, x_offset_spinbox, y_offset_spinbox, scale_spinbox, reset_button, parent, relative=True):\n super(_ScalableImageViewerContext, self).__init__(parent)\n self.label = label\n self.x_offset_spinbox = x_offset_spinbox\n self.y_offset_spinbox = y_offset_spinbox\n self.scale_spinbox = scale_spinbox\n self.reset_button = reset_button\n x_offset_spinbox.valueChanged.connect(self._on_x_offset_changed)\n y_offset_spinbox.valueChanged.connect(self._on_y_offset_changed)\n scale_spinbox.valueChanged.connect(self._on_scale_changed)\n reset_button.clicked.connect(self._on_reset_button_clicked)\n label._image_viewer_context = self\n self.render = None\n\n self._default_scale = 1.\n self._default_x_offset = 0.\n self._default_y_offset = 0.\n\n self._scale = self._default_scale\n self._x_offset = self._default_x_offset\n self._y_offset = self._default_y_offset\n\n self.relative = relative\n\n def set_image(self, image):\n from Viewer.canvas.align_corner.simple_painter import SimplePainter\n self.render = SimplePainter.create_from_tf_image(image)\n self.update()\n\n def set_painter(self, painter):\n self.render = painter\n self.update()\n\n def update(self):\n if self.render is None:\n self.label.clear()\n return\n if self.relative:\n canvas_w, canvas_h = self.render.get_canvas_size()\n base_scale = min((self.label.width() - 1) / (canvas_w - 1), (self.label.height() - 1) / (canvas_h - 1))\n scale = self._scale * base_scale\n x_offset = self._x_offset * base_scale\n y_offset = self._y_offset * base_scale\n else:\n scale = self._scale\n x_offset = self._x_offset\n y_offset = self._y_offset\n image = self.render.render((self.label.width(), self.label.height()), (scale, scale), translation_target_center=(x_offset, y_offset), with_qpixmap=True)\n self.label.setPixmap(image)\n\n @pyqtSlot(float)\n def _on_scale_changed(self, scale):\n self._scale = scale\n self.update()\n\n @pyqtSlot(float)\n def _on_x_offset_changed(self, x_offset):\n self._x_offset = x_offset\n self.update()\n\n @pyqtSlot(float)\n def _on_y_offset_changed(self, y_offset):\n self._y_offset = y_offset\n self.update()\n\n @pyqtSlot(bool)\n def _on_reset_button_clicked(self, _):\n self.reset()\n\n def set_scale(self, value):\n self.scale_spinbox.setValue(value)\n\n def set_x_offset(self, value):\n self.x_offset_spinbox.setValue(value)\n\n def set_y_offset(self, value):\n self.y_offset_spinbox.setValue(value)\n\n def set(self, scale, x_offset, y_offset):\n self._scale = scale\n self._x_offset = x_offset\n self._y_offset = y_offset\n self.update()\n self.x_offset_spinbox.blockSignals(True)\n self.x_offset_spinbox.setValue(x_offset)\n self.x_offset_spinbox.blockSignals(False)\n self.y_offset_spinbox.blockSignals(True)\n self.y_offset_spinbox.setValue(y_offset)\n self.y_offset_spinbox.blockSignals(False)\n self.scale_spinbox.blockSignals(True)\n self.scale_spinbox.setValue(scale)\n self.scale_spinbox.blockSignals(False)\n self.update()\n\n def set_default(self, scale, x_offset, y_offset):\n self._default_scale = scale\n self._default_x_offset = x_offset\n self._default_y_offset = y_offset\n\n def set_default_scale(self, scale):\n self._default_scale = scale\n\n def set_default_x_offset(self, x_offset):\n 
self._default_x_offset = x_offset\n\n def set_default_y_offset(self, y_offset):\n self._default_y_offset = y_offset\n\n def reset(self):\n self.set(self._default_scale, self._default_x_offset, self._default_y_offset)\n\n\nclass _CanvasLabel(QLabel):\n def __init__(self, *args):\n super(_CanvasLabel, self).__init__(*args)\n self._image_viewer_context = None\n\n def resizeEvent(self, qResizeEvent: QResizeEvent):\n super().resizeEvent(qResizeEvent)\n if self._image_viewer_context is not None:\n self._image_viewer_context.update()\n\n\ndef construct_simple_image_viewer_on_qt_layout(layout):\n image_label = _CanvasLabel()\n image_label.setMinimumSize(1, 1)\n image_label.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.MinimumExpanding)\n\n x_offset_spinbox = QDoubleSpinBox()\n y_offset_spinbox = QDoubleSpinBox()\n\n x_offset_spinbox.setValue(0)\n y_offset_spinbox.setValue(0)\n\n x_offset_spinbox.setMinimum(-65536)\n x_offset_spinbox.setMaximum(65536)\n\n y_offset_spinbox.setMinimum(-65536)\n y_offset_spinbox.setMaximum(65536)\n\n scale_spinbox = QDoubleSpinBox()\n scale_spinbox.setValue(1.)\n scale_spinbox.setSingleStep(0.1)\n\n reset_button = QPushButton()\n reset_button.setText('Reset')\n\n vlayout = QVBoxLayout()\n actor_layout = QHBoxLayout()\n\n spacer = QSpacerItem(0, 0, QSizePolicy.Expanding, QSizePolicy.Minimum)\n actor_layout.addSpacerItem(spacer)\n\n x_offset_label = QLabel()\n x_offset_label.setText('x:')\n actor_layout.addWidget(x_offset_label)\n actor_layout.addWidget(x_offset_spinbox)\n y_offset_label = QLabel()\n y_offset_label.setText('y:')\n actor_layout.addWidget(y_offset_label)\n actor_layout.addWidget(y_offset_spinbox)\n scale_label = QLabel()\n scale_label.setText('scale:')\n actor_layout.addWidget(scale_label)\n actor_layout.addWidget(scale_spinbox)\n\n actor_layout.addWidget(reset_button)\n\n vlayout.addWidget(image_label)\n vlayout.addLayout(actor_layout)\n\n layout.addLayout(vlayout)\n\n return _ScalableImageViewerContext(image_label, x_offset_spinbox, y_offset_spinbox, scale_spinbox, reset_button, layout)\n\n\nclass SimpleViewer:\n def __init__(self, argv=[]):\n app = QApplication(argv)\n\n window = QDialog()\n #window.setWindowState(Qt.WindowMaximized)\n window.setWindowFlags(Qt.WindowMinimizeButtonHint | Qt.WindowMaximizeButtonHint | Qt.WindowCloseButtonHint)\n window.setWindowTitle('Viewer')\n\n layout = QVBoxLayout()\n window.setLayout(layout)\n self.main_layout = layout\n self.app = app\n self.window = window\n\n def addImage(self):\n from data.operator.image.tf.decoder import tf_decode_image\n from Viewer.canvas.align_corner.simple_painter import SimplePainter\n image_viewer_widget = construct_simple_image_viewer_on_qt_layout(self.main_layout)\n image = tf_decode_image(\"K:\\\\dataset\\\\coco\\\\images\\\\train2014\\\\COCO_train2014_000000000009.jpg\")\n painter = SimplePainter.create_from_tf_image(image)\n h, w, c = image.shape\n painter.draw_bounding_box([0, 0, w - 1, h - 1])\n painter.draw_bounding_box_with_label([5,5,10,10], 'a')\n painter.draw_bounding_box([0,0,3,1])\n painter.draw_bounding_box([0,0,2,1])\n painter.draw_bounding_box([0, 0, 1, 1])\n #painter.draw_bounding_box_with_label([2,2,3,3], 'a')\n image_viewer_widget.set_painter(painter)\n\n def setWindowTitle(self, title: str):\n self.window.setWindowTitle(title)\n\n def runEventLoop(self):\n self.window.show()\n return self.app.exec_()\n\n def close(self):\n self.window.close()\n\nif __name__ == '__main__':\n v=SimpleViewer()\n v.addImage()\n 
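# An illustrative (assumed) extra step that uses only the API defined above:\n    # give the demo window a descriptive title before entering the event loop.\n    v.setWindowTitle('Simple image viewer demo')\n    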
v.runEventLoop()\n","sub_path":"Viewer/canvas/align_corner/simple_image_viewer.py","file_name":"simple_image_viewer.py","file_ext":"py","file_size_in_byte":7751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"20142274","text":"from src.common.database import Database\nfrom src.models.alerts.alert import Alert\nDatabase.initialize()\nalerts = Alert.find_needing_update()\n\n\n\nfor alert in alerts:\n alert.load_item_price()\n alert.send_email_if_price_reached()","sub_path":"src/models/alerts/alert_updater.py","file_name":"alert_updater.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"204964977","text":"import socket\nimport asyncio\nimport time\nimport random\nimport json\nimport re\nimport requests\nfrom elasticsearch import Elasticsearch\n\nfrom ioc_finder import find_iocs\nfrom walkoff_app_sdk.app_base import AppBase\n\nclass Tools(AppBase):\n \"\"\"\n An example of a Walkoff App.\n Inherit from the AppBase class to have Redis, logging, and console logging set up behind the scenes.\n \"\"\"\n __version__ = \"1.0.0\"\n app_name = \"Shuffle Tools\" # this needs to match \"name\" in api.yaml for WALKOFF to work\n\n def __init__(self, redis, logger, console_logger=None):\n \"\"\"\n Each app should have this __init__ to set up Redis and logging.\n :param redis:\n :param logger:\n :param console_logger:\n \"\"\"\n super().__init__(redis, logger, console_logger)\n\n # https://github.com/fhightower/ioc-finder\n async def parse_ioc(self, input_string, input_type=\"all\"):\n if input_type == \"\":\n input_type = \"all\"\n\n iocs = find_iocs(input_string)\n newarray = []\n for key, value in iocs.items():\n if input_type != \"all\":\n if key != input_type:\n continue\n\n if len(value) > 0:\n for item in value:\n # If in here: attack techniques. Shouldn't be 3 levels so no\n # recursion necessary\n if isinstance(value, dict):\n for subkey, subvalue in value.items():\n if len(subvalue) > 0:\n for subitem in subvalue:\n data = {\"data\": subitem, \"data_type\": \"%s_%s\" % (key[:-1], subkey)}\n if data not in newarray:\n newarray.append(data)\n else:\n data = {\"data\": item, \"data_type\": key[:-1]}\n if data not in newarray:\n newarray.append(data)\n\n\n # Reformatting IP\n for item in newarray:\n if \"ip\" in item[\"data_type\"]:\n item[\"data_type\"] = \"ip\"\n\n try:\n newarray = json.dumps(newarray)\n except json.decoder.JSONDecodeError as e:\n return \"Failed to parse IOC's: %s\" % e\n\n return newarray\n\n async def parse_list(self, items, splitter=\"\\n\"):\n if splitter == \"\":\n splitter = \"\\n\"\n\n splititems = items.split(splitter)\n\n return str(splititems)\n\n async def get_length(self, item):\n if item.startswith(\"[\") and item.endswith(\"]\"):\n try:\n item = item.replace(\"\\'\", \"\\\"\", -1)\n item = json.loads(item)\n except json.decoder.JSONDecodeError as e:\n print(\"Parse error: %s\" % e) \n pass\n\n return str(len(item))\n\n async def translate_value(self, input_data, translate_from, translate_to):\n splitdata = [translate_from]\n splitvalue = \"\"\n if \", \" in translate_from:\n splitdata = translate_from.split(\", \")\n elif \",\" in translate_from:\n splitdata = translate_from.split(\",\")\n\n for item in splitdata:\n input_data = input_data.replace(item, translate_to)\n\n return input_data\n\n async def execute_python(self, code, shuffle_input):\n print(\"Run with shuffle_data %s\" % shuffle_input)\n print(\"And python code %s\" % code)\n # Write the code to a file, then jdjd\n exec(code)\n\n # May be necessary\n #compile()\n\n return \"Some return: %s\" % shuffle_input\n\n async def parse_json(self, str_input):\n str_input = re.sub('[^A-Za-z0-9\".]', '', str_input)\n list_json = str_input.split(\"\\\"\\\"\")\n #json_object = json.loads(json_strings)\n #return (str_input.split('\"src\": \"')[1]).split('\", \"http_user_agent\":')[0]\n\n #return list_json[list_json.index(\"src\")+1]\n json_object = {\"src\":list_json[list_json.index(\"src\")+1], \"time\":list_json[list_json.index(\"reqtime\")+1]}\n return json_object\n\n async def save_results(self, result, ref_time, analyzer_name):\n es = 
es = Elasticsearch([{'host':'10.88.200.105','port':9200}])\n        # Parse the stored result as JSON (assumes a JSON payload); json.loads\n        # avoids executing arbitrary input the way eval() would\n        res = es.index(index='results', doc_type='result', id=ref_time+analyzer_name, body=json.loads(result))\n\n        return res\n\nif __name__ == \"__main__\":\n    asyncio.run(Tools.run(), debug=True)\n","sub_path":"tools/1.0.0/src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"229322602","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\nimport os\r\nimport json\r\nimport deal_file as ssfile\r\nimport time\r\nfrom datetime import datetime\r\nimport math\r\nimport socket\r\n\r\ndf = ssfile.deal_files()\r\nend_dict = df.deals()\r\n\r\n# 整理每个文件中的内容,将重叠的部分重合计算\r\n# 先分个处理,后几个文件一起处理\r\nclass format_json:\r\n def __init__(self):\r\n self.same_sip_lists = []\r\n\r\n def format_single_file(self,files):\r\n\r\n # 将不同Saddress的dict 先存放到list_tmp中再存放到以当前Saddress为key的字典中\r\n # 如果相同则存到同key字典中的list_tmp列表中\r\n same_sip_list = []\r\n same_sip_dict = []\r\n for td in files:\r\n list_tmp = []\r\n list_tmp2 = []\r\n list_dict = {}\r\n try:\r\n ip = td['Saddress']\r\n except:\r\n continue\r\n\r\n if ip not in same_sip_list:\r\n list_tmp.append(td)\r\n list_dict.setdefault(ip,list_tmp)\r\n same_sip_list.append(ip)\r\n same_sip_dict.append(list_dict)\r\n else:\r\n for i in same_sip_dict:\r\n if ip in i.keys():\r\n list_tmp2 = i[ip]\r\n list_tmp2.append(td)\r\n i[ip] = list_tmp2\r\n else:\r\n pass\r\n pass\r\n self.same_sip_lists = same_sip_list\r\n return same_sip_dict\r\n\r\n def get_avge(self,same_sip_dict):\r\n # print(len(same_sip_dict))\r\n lis = []\r\n for tmp_dict in same_sip_dict:\r\n rtt = 0\r\n cwnd = 0\r\n Retrans = 0\r\n send = 0\r\n dic = {}\r\n tmp_list = list(tmp_dict.values())[0]\r\n # print(tmp_list)\r\n # print(len(tmp_list))\r\n for i in range(len(tmp_list)):\r\n rtt = tmp_list[i]['rtt'] + rtt\r\n cwnd = tmp_list[i]['cwnd'] + cwnd\r\n # print(cwnd)\r\n Retrans = float(tmp_list[i]['Retrans']) + Retrans\r\n send = float(tmp_list[i]['send']) + send\r\n # print(tmp_list[i]['Retrans'])\r\n # print(len(tmp_list))\r\n rtt = round(float(rtt/len(tmp_list)),2)\r\n cwnd = round(float(cwnd/len(tmp_list)),2)\r\n Retrans = math.ceil(float(Retrans/len(tmp_list)))\r\n # if Retrans == 0:\r\n # pass\r\n # else:\r\n # print(Retrans)\r\n # pass\r\n send = round(float(send/len(tmp_list)),2)\r\n # print(Retrans,send)\r\n timestamp = datetime.now().strftime(\"%d/%b/%Y:%X +0800\")\r\n total = len(tmp_list)\r\n hostname = socket.gethostname()\r\n #全变成字符串\r\n # rtt = str(rtt)\r\n # cwnd = str(cwnd)\r\n # Retrans = str(Retrans)\r\n # send = str(send)\r\n\r\n # print(rtt)\r\n dic.setdefault(\"Daddress\",tmp_list[0]['Daddress'])\r\n dic.setdefault(\"Saddress\",tmp_list[0]['Saddress'].split(\":\")[0])\r\n dic.setdefault(\"SPort\",tmp_list[0]['Saddress'].split(\":\")[-1])\r\n # print(tmp_list[0])\r\n dic.setdefault(\"rtt\",rtt)\r\n dic.setdefault(\"cwnd\",cwnd)\r\n dic.setdefault(\"Retrans\",Retrans)\r\n dic.setdefault(\"send\",send)\r\n dic.setdefault(\"total\",total)\r\n # print(len(tmp_list))\r\n dic.setdefault(\"line_state\",tmp_list[0]['line_state'])\r\n dic.setdefault(\"type\",tmp_list[0]['type'])\r\n dic.setdefault(\"node_type\",\"network-attack\")\r\n dic.setdefault(\"timestamp\",timestamp)\r\n dic.setdefault(\"hostname\",hostname)\r\n lis.append(dic)\r\n # print(Retrans)\r\n return lis\r\n\r\n\r\n\r\n def get_dict(self):\r\n newfiles = os.listdir(os.path.join(os.path.dirname (os.path.abspath(__file__)),\"tmpfiles/\"))\r\n list_iterator = []\r\n for i in newfiles:\r\n list_iterator = list_iterator + end_dict[i]\r\n # print(i)\r\n same_sip_dict = self.format_single_file(list_iterator)\r\n lis = self.get_avge(same_sip_dict)\r\n return lis\r\n\r\n def update_json(self):\r\n _str = \"\"\r\n lis = self.get_dict()\r\n for i in lis:\r\n _str_tmp = \"\\n\"+json.dumps(i)\r\n _str = _str+_str_tmp\r\n print(\"'\"+_str[1:]+\"'\")\r\nif __name__ == \"__main__\":\r\n fj = 
if __name__ == \"__main__\":\r\n    fj = format_json()\r\n    # fj.get_dict()\r\n    fj.update_json()\r\n","sub_path":"getss_work/getss/general_json.py","file_name":"general_json.py","file_ext":"py","file_size_in_byte":4563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"516419296","text":"# Q9 - Solving using adaptive step-size control with the fourth-order Runge-Kutta method\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt \r\n\r\ndef f(x,y):\r\n return (y**2+y)/x\r\n\r\nxtemp=1\r\nx1=1\r\nytemp=-2\r\ny1=-2\r\nY=[-2]\r\nX=[1]\r\nh=0.1\r\nh1=0.9\r\nh0=0.09\r\n\r\nfor i in range(50):\r\n while x1<3:\r\n if hh1:\r\n h=h1\r\n else:\r\n pass\r\n for i in range(1):\r\n \r\n k1=f(xtemp,ytemp) \r\n k2=f(xtemp+h/2,ytemp+h*k1/2)\r\n k3=f(xtemp+h/2,ytemp+k2*h/2)\r\n k4=f(xtemp+h,ytemp+k3*h)\r\n y_s=ytemp+(k1+2*(k2+k3)+k4)*h/6\r\n xtemp=xtemp+h\r\n ytemp=y_s\r\n \r\n k11=f(x1,y1) \r\n k21=f(x1+h,y1+h*k11)\r\n k31=f(x1+h,y1+k21*h)\r\n k41=f(x1+2*h,y1+k31*h*2)\r\n y_d=y1+(k11+2*(k21+k31)+k41)*h/6\r\n x1=x1+h\r\n y1=y_d\r\n X.append(xtemp)\r\n Y.append(y_s)\r\n tol=abs(y_s-y_d)\r\n if tol>1e-4:\r\n h=h/2\r\n if tol<0.5e-4:\r\n h=2*h\r\n else:\r\n pass\r\nx=np.array(X)\r\ny=np.array(Y)\r\nplt.plot(x,y,'r',label=\"Numerical Solution\")\r\nplt.legend()\r\nplt.xlabel(\"x\")\r\nplt.ylabel(\"y\")\r\nplt.show() \r\n","sub_path":"Q9.py","file_name":"Q9.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"427366483","text":"#!/usr/bin/env python3\nimport datetime\nimport json\nimport os\nimport sys\nimport time\n\nfrom flask import Flask, render_template, request\n\nfrom classes import Graph\n\napp = Flask('c3nav-wificollect')\n\n\nif 'C3NAVCONF' in os.environ:\n filename = os.environ['C3NAVCONF']\nelif len(sys.argv) > 1:\n filename = sys.argv[1]\nelse:\n print('Please specify filename: run.py or environment variable C3NAVCONF')\n sys.exit(1)\n\nstarttime = time.time()\n\n\n@app.route('/')\ndef map():\n f = open(filename)\n graph = Graph(json.load(f), auto_connect=False)\n return render_template('wificollect.html', graph=graph)\n\n\n@app.route('/add', methods=['POST'])\ndef addroom():\n data = json.load(open(filename))\n position = [int(i) for i in request.form.get('position').split('.')]\n stations = json.loads(request.form.get('stations'))\n data['wifidata'].append({\n 'level': position[0],\n 'x': position[1],\n 'y': position[2],\n 'time': str(datetime.datetime.now()),\n 'stations': stations\n })\n json.dump(data, open(filename, 'w'), indent=4, sort_keys=True)\n return 'ok'\n\n\n@app.route('/locate', methods=['POST'])\ndef locate():\n f = open(filename)\n graph = Graph(json.load(f), auto_connect=False)\n result = graph.wifi.locate(json.loads(request.form.get('stations')))\n return json.dumps(result)\n\napp.run(threaded=True, debug=True)\n","sub_path":"src/wificollect.py","file_name":"wificollect.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"413496163","text":"\"\"\"This module contains classes and functions for high level interaction\nwith AOVs.\n\n\"\"\"\n\n# =============================================================================\n# IMPORTS\n# =============================================================================\n\n# Python Imports\nimport glob\nimport json\nimport os\n\n# Houdini Toolbox Imports\nfrom ht.sohohooks.aovs.aov import AOV, AOVGroup, IntrinsicAOVGroup\n\n# Houdini Imports\nimport hou\n\n# =============================================================================\n# CLASSES\n# =============================================================================\n\nclass AOVManager(object):\n \"\"\"This class is for managing and applying AOVs at render time.\"\"\"\n\n def __init__(self):\n self._aovs = {}\n self._groups = {}\n self._interface = None\n\n self._initFromFiles()\n\n # =========================================================================\n # SPECIAL METHODS\n # =========================================================================\n\n def __repr__(self):\n return \"\".format(\n len(self.aovs),\n len(self.groups),\n )\n\n # =========================================================================\n # NON-PUBLIC METHODS\n # =========================================================================\n\n def _buildIntrinsicGroups(self):\n \"\"\"Build intrinsic groups.\"\"\"\n # Process any AOVs that we have to look for any intrinsic groups.\n for aov in self.aovs.itervalues():\n for intrinsic_name in aov.intrinsics:\n # Intrinsic groups are prefixed with \"i:\".\n name = \"i:\" + intrinsic_name\n\n # Group exists so use it.\n if name in self.groups:\n group = self.groups[name]\n\n # Create the group and add it to our list.\n else:\n group = IntrinsicAOVGroup(name)\n self.addGroup(group)\n\n # Add this AOV to the group.\n group.aovs.append(aov)\n\n def _initFromFiles(self):\n \"\"\"Initialize the manager from files on disk.\"\"\"\n file_paths = _findAOVFiles()\n\n readers = [AOVFile(file_path) for file_path in file_paths]\n\n self._mergeReaders(readers)\n\n self._buildIntrinsicGroups()\n\n def _initGroupMembers(self, group):\n \"\"\"Populate the AOV lists of each group based on available AOVs.\"\"\"\n # Process each of the group's includes.\n for include in group.includes:\n # If the AOV name is available, add it to the group.\n if include in self.aovs:\n group.aovs.append(self.aovs[include])\n\n def _mergeReaders(self, readers):\n \"\"\"Merge the data of multiple AOVFile objects.\"\"\"\n # We need to handle AOVs first since AOVs in other files may overwrite\n # AOVs in group definition files.\n for reader in readers:\n for aov in reader.aovs:\n variable_name = aov.variable\n\n # Check if this AOV has already been seen.\n if variable_name in self.aovs:\n # If this AOV has a higher priority, replace the previous\n # one.\n if aov.priority > self.aovs[variable_name].priority:\n self.addAOV(aov)\n\n # Hasn't been seen, so add it.\n else:\n self.addAOV(aov)\n\n # Now that AOVs have been made available, add them to groups.\n for reader in readers:\n for group in reader.groups:\n self._initGroupMembers(group)\n\n group_name = group.name\n\n # Check if this group has already been seen.\n if group_name in self.groups:\n # If this group has a higher priority, replace the previous\n # one.\n if group.priority > self.groups[group_name].priority:\n self.addGroup(group)\n\n # Hasn't been seen, so add it.\n else:\n self.addGroup(group)\n\n # 
# =========================================================================\n    # PROPERTIES\n    # =========================================================================\n\n    @property\n    def interface(self):\n        \"\"\"Any AOVViewerInterface assigned to the manager.\"\"\"\n        return self._interface\n\n    @property\n    def aovs(self):\n        \"\"\"Dictionary containing all available AOVs.\"\"\"\n        return self._aovs\n\n    @property\n    def groups(self):\n        \"\"\"Dictionary containing all available AOVGroups.\"\"\"\n        return self._groups\n\n    # =========================================================================\n    # STATIC METHODS\n    # =========================================================================\n\n    @staticmethod\n    def addAOVsToIfd(wrangler, cam, now):\n        \"\"\"Add auto_aovs to the ifd.\"\"\"\n        import IFDapi\n        import IFDsettings\n        import soho\n\n        # The parameter that defines which automatic aovs to add.\n        parms = {\n            \"enable\": soho.SohoParm(\n                \"enable_auto_aovs\",\n                \"int\",\n                [1],\n                skipdefault=False\n            ),\n            \"auto_aovs\": soho.SohoParm(\n                \"auto_aovs\",\n                \"str\",\n                [\"\"],\n                skipdefault=False\n            ),\n        }\n\n        # Attempt to evaluate the parameter.\n        plist = cam.wrangle(wrangler, parms, now)\n\n        if plist:\n            # Adding is disabled so bail out.\n            if plist[\"enable_auto_aovs\"].Value[0] == 0:\n                return\n\n            aov_str = plist[\"auto_aovs\"].Value[0]\n\n            # Construct a manager if one does not already exist.\n            manager = findOrCreateSessionAOVManager()\n\n            # Parse the string to get any aovs/groups.\n            aovs = manager.getAOVsFromString(aov_str)\n\n            # Write any found items to the ifd.\n            for aov in aovs:\n                aov.writeToIfd(wrangler, cam, now)\n\n            # If we are generating the \"Op_Id\" plane we will need to tell SOHO\n            # to generate these properties when outputting object. Look for\n            # the \"Op_Id\" variable being exported and if so enable operator id\n            # generation\n            for aov in flattenedList(aovs):\n                if aov.variable == \"Op_Id\":\n                    IFDapi.ray_comment(\"Forcing object id generation\")\n                    IFDsettings._GenerateOpId = True\n\n                    break\n\n    # =========================================================================\n    # METHODS\n    # =========================================================================\n\n    def addAOV(self, aov):\n        \"\"\"Add an AOV to the manager.\"\"\"\n        self._aovs[aov.variable] = aov\n\n        if self.interface is not None:\n            self.interface.aovAddedSignal.emit(aov)\n\n    def addGroup(self, group):\n        \"\"\"Add an AOVGroup to the manager.\"\"\"\n        self.groups[group.name] = group\n\n        if self.interface is not None:\n            self.interface.groupAddedSignal.emit(group)\n\n    def clear(self):\n        \"\"\"Clear all definitions.\"\"\"\n        self._aovs = {}\n        self._groups = {}\n\n    def getAOVsFromString(self, aov_str):\n        \"\"\"Get a list of AOVs and AOVGroups from a string.\"\"\"\n        aovs = []\n\n        aov_str = aov_str.replace(',', ' ')\n\n        for name in aov_str.split():\n            if name.startswith('@'):\n                name = name[1:]\n\n                if name in self.groups:\n                    aovs.append(self.groups[name])\n\n            else:\n                if name in self._aovs:\n                    aovs.append(self._aovs[name])\n\n        return aovs\n\n    def initInterface(self):\n        \"\"\"Initialize an AOVViewerInterface for this manager.\"\"\"\n        from ht.ui.aovs.utils import AOVViewerInterface\n\n        self._interface = AOVViewerInterface()\n\n    def load(self, path):\n        \"\"\"Load a file.\"\"\"\n        readers = [AOVFile(path)]\n\n        self._mergeReaders(readers)\n\n    def reload(self):\n        \"\"\"Reload all definitions.\"\"\"\n        self.clear()\n        self._initFromFiles()\n\n    def removeAOV(self, aov):\n        \"\"\"Remove the specified AOV from the manager.\"\"\"\n        if aov.variable in self.aovs:\n            
self.aovs.pop(aov.variable)\n\n if self.interface is not None:\n self.interface.aovRemovedSignal.emit(aov)\n\n def removeGroup(self, group):\n \"\"\"Remove the specified group from the manager.\"\"\"\n if group.name in self.groups:\n self.groups.pop(group.name)\n\n if self.interface is not None:\n self.interface.groupRemovedSignal.emit(group)\n\n# =============================================================================\n\nclass AOVFile(object):\n \"\"\"Class to handle reading and writing AOV .json files.\"\"\"\n\n def __init__(self, path):\n self._path = path\n\n self._aovs = []\n self._data = {}\n self._groups = []\n\n if self.exists:\n self._initFromFile()\n\n # =========================================================================\n # NON-PUBLIC METHODS\n # =========================================================================\n\n def _initFromFile(self):\n \"\"\"Read data from the file and create the appropriate entities.\"\"\"\n with open(self.path) as handle:\n data = json.load(handle)\n\n if \"definitions\" in data:\n self._createAOVs(data[\"definitions\"])\n\n if \"groups\" in data:\n self._createGroups(data[\"groups\"])\n\n # =========================================================================\n\n def _createAOVs(self, definitions):\n \"\"\"Create AOVs based on definitions.\"\"\"\n for definition in definitions:\n # Insert this file path into the data.\n definition[\"path\"] = self.path\n\n # Construct a new AOV and add it to our list.\n aov = AOV(definition)\n self.aovs.append(aov)\n\n # =========================================================================\n\n def _createGroups(self, definitions):\n \"\"\"Create AOVGroups based on definitions.\"\"\"\n for name, group_data in definitions.iteritems():\n # Create a new AOVGroup.\n group = AOVGroup(name)\n\n # Process its list of AOVs to include.\n if \"include\" in group_data:\n group.includes.extend(group_data[\"include\"])\n\n # Set any comment.\n if \"comment\" in group_data:\n group.comment = group_data[\"comment\"]\n\n if \"priority\" in group_data:\n group.priority = group_data[\"priority\"]\n\n # Set any icon.\n if \"icon\" in group_data:\n group.icon = os.path.expandvars(group_data[\"icon\"])\n\n # Set the path to this file.\n group.path = self.path\n\n # Add the group to the list.\n self.groups.append(group)\n\n # =========================================================================\n # PROPERTIES\n # =========================================================================\n\n @property\n def aovs(self):\n \"\"\"List containing AOVs defined in this file.\"\"\"\n return self._aovs\n\n # =========================================================================\n\n @property\n def groups(self):\n \"\"\"List containing AOVGroups defined in this file.\"\"\"\n return self._groups\n\n # =========================================================================\n\n @property\n def path(self):\n \"\"\"File path on disk.\"\"\"\n return self._path\n\n # =========================================================================\n\n @property\n def exists(self):\n \"\"\"Check if the file actually exists.\"\"\"\n return os.path.isfile(self.path)\n\n # =========================================================================\n # METHODS\n # =========================================================================\n\n def addAOV(self, aov):\n \"\"\"Add an AOV for writing.\"\"\"\n self.aovs.append(aov)\n\n def addGroup(self, group):\n \"\"\"Add An AOVGroup for writing.\"\"\"\n self.groups.append(group)\n\n def 
containsAOV(self, aov):\n \"\"\"Check if this file contains an AOV with the same variable name.\"\"\"\n return aov in self.aovs\n\n def containsGroup(self, group):\n \"\"\"Check if this file contains a group with the same name.\"\"\"\n return group in self.groups\n\n def removeAOV(self, aov):\n \"\"\"Remove an AOV from the file.\"\"\"\n idx = self.aovs.index(aov)\n\n del self.aovs[idx]\n\n def removeGroup(self, group):\n \"\"\"Remove a group from the file.\"\"\"\n idx = self.groups.index(group)\n\n del self.groups[idx]\n\n def replaceAOV(self, aov):\n \"\"\"Replace an AOV in the file.\"\"\"\n idx = self.aovs.index(aov)\n\n self.aovs[idx] = aov\n\n def replaceGroup(self, group):\n \"\"\"Replace a group in the file.\"\"\"\n idx = self.groups.index(group)\n\n self.groups[idx] = group\n\n def writeToFile(self, path=None):\n \"\"\"Write data to file.\"\"\"\n data = {}\n\n for group in self.groups:\n groups = data.setdefault(\"groups\", {})\n\n groups.update(group.getData())\n\n for aov in self.aovs:\n aovs = data.setdefault(\"definitions\", [])\n\n aovs.append(aov.getData())\n\n if path is None:\n path = self.path\n\n with open(path, 'w') as handle:\n json.dump(data, handle, indent=4)\n\n# =============================================================================\n# NON-PUBLIC FUNCTIONS\n# =============================================================================\n\ndef _findAOVFiles():\n \"\"\"Find any .json files that should be read.\"\"\"\n # Look for the specific AOV search path.\n if \"HT_AOV_PATH\" in os.environ:\n # Get the search path.\n search_path = os.environ[\"HT_AOV_PATH\"]\n\n # If '&' is in the path then following Houdini path conventions we'll\n # search through the HOUDINI_PATH as well.\n if '&' in search_path:\n # Find any config/aovs folders in HOUDINI_PATH.\n hpath_dirs = _findHoudiniPathAOVFolders()\n\n # If there are any then we replace the '&' with those paths.\n if hpath_dirs:\n search_path = search_path.replace('&', ':'.join(hpath_dirs))\n\n directories = search_path.split(\":\")\n\n else:\n directories = _findHoudiniPathAOVFolders()\n\n all_files = []\n\n for directory in directories:\n all_files.extend(glob.glob(os.path.join(directory, \"*.json\")))\n\n return all_files\n\n\ndef _findHoudiniPathAOVFolders():\n \"\"\"Look for any config/aovs folders in the HOUDINI_PATH.\"\"\"\n # Try to find HOUDINI_PATH directories.\n try:\n directories = hou.findDirectories(\"config/aovs\")\n\n except hou.OperationFailed:\n directories = ()\n\n return directories\n\n# =============================================================================\n# FUNCTIONS\n# =============================================================================\n\ndef buildMenuScript():\n \"\"\"Build a menu script for choosing AOVs and groups.\"\"\"\n manager = findOrCreateSessionAOVManager()\n\n menu = []\n\n if manager.groups:\n for group in sorted(manager.groups.keys()):\n menu.extend([\"@{}\".format(group), group])\n\n menu.extend([\"_separator_\", \"---------\"])\n\n for aov in sorted(manager.aovs):\n menu.extend([aov, aov])\n\n return menu\n\n\ndef createSessionAOVManager():\n \"\"\"Create an AOVManager stored in hou.session.\"\"\"\n manager = AOVManager()\n hou.session.aov_manager = manager\n\n return manager\n\n\ndef findOrCreateSessionAOVManager(rebuild=False):\n \"\"\"Find or create an AOVManager from hou.session.\"\"\"\n manager = None\n\n if hasattr(hou.session, \"aov_manager\") and not rebuild:\n manager = hou.session.aov_manager\n\n else:\n manager = createSessionAOVManager()\n\n return 
manager\n\n\ndef flattenedList(items):\n \"\"\"Flatten a list that contains AOVs and groups into a list of all AOVs.\"\"\"\n aovs = []\n\n for item in items:\n if isinstance(item, AOVGroup):\n aovs.extend(item.aovs)\n\n else:\n aovs.append(item)\n\n return aovs\n\n\ndef loadJsonFiles():\n \"\"\"Load .json files into the manager.\"\"\"\n result = hou.ui.selectFile(\n pattern=\"*.json\",\n chooser_mode=hou.fileChooserMode.Read,\n multiple_select=True,\n )\n\n paths = result.split(\" ; \")\n\n for path in paths:\n path = os.path.expandvars(path)\n\n if os.path.exists(path):\n MANAGER.load(path)\n\n# =============================================================================\n\nMANAGER = findOrCreateSessionAOVManager(rebuild=True)\n\n","sub_path":"python/ht/sohohooks/aovs/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":16972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"641637173","text":"def add_elipsis(lst,elnum=4):\n for i in lst:\n print('{}{}'.format(i[:elnum],'...'))\ncomments = (\n \"Implementation note\",\n \"Changed\",\n \"ABC for generator\",\n)\nadd_elipsis(comments,elnum=6)\n\n# 这样写,逻辑上也能行,对了 区分下目录哈","sub_path":"P17075-上海-宁明强/homework/add_ellipsis.py","file_name":"add_ellipsis.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"613870306","text":"def merge_lists(list1, list2):\n new_list = []\n for num in list1:\n if not num in new_list:\n new_list.append(num)\n for num in list2:\n if not num in new_list:\n new_list.append(num)\n new_list.sort()\n return new_list\n\nprint(\"Merging [1,2,3],[4,3,2] :\", merge_lists([1,2,3],[4,3,2]))\nprint(\"Merging [3,2,1],[2,6,4,10,4] :\", merge_lists([3,2,1],[2,6,4,10,4]))\nprint(\"Merging [3,1,1],[] :\", merge_lists([3,1,1],[]))\nprint(\"Merging [],[9,7,2,7] :\", merge_lists([],[9,7,2,7]))\n","sub_path":"lab5/lab5_5.py","file_name":"lab5_5.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"218264053","text":"from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin\nfrom django.contrib.auth.models import User\nfrom .models import *\n\nadmin.site.register([Estado,Cidade])\n\nclass PerfilInline(admin.StackedInline):\n model = Perfil\n can_delete = True\n verbose_name_plural = 'Imagens'\n fk_name = 'usuario'\n\nclass ImagemInline(admin.StackedInline):\n model = Imagem\n can_delete = True\n verbose_name_plural = 'Imagens'\n fk_name = 'usuario'\n\nclass CustomUserAdmin(UserAdmin):\n inlines = (PerfilInline, ImagemInline)\n list_display = ('username', 'email', 'first_name', 'last_name', 'is_staff')\n list_select_related = ('perfil', )\n\n def get_inline_instances(self, request, obj=None):\n if not obj:\n return list()\n return super(CustomUserAdmin, self).get_inline_instances(request, obj)\n\nadmin.site.unregister(User)\nadmin.site.register(User, CustomUserAdmin)","sub_path":"perfil/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"350530765","text":"#!/usr/bin/env python\nu\"\"\"\ncompute_tidal_currents.py\nWritten by Tyler Sutterley (08/2023)\nCalculates zonal and meridional tidal currents for an input file\n\nUses OTIS format tidal solutions provided by Ohio State University and ESR\n http://volkov.oce.orst.edu/tides/region.html\n https://www.esr.org/research/polar-tide-models/list-of-polar-tide-models/\n ftp://ftp.esr.org/pub/datasets/tmd/\nor Finite Element Solution (FES) models provided by AVISO\n\nINPUTS:\n csv file with columns for spatial and temporal coordinates\n HDF5 file with variables for spatial and temporal coordinates\n netCDF4 file with variables for spatial and temporal coordinates\n geotiff file with bands in spatial coordinates\n\nCOMMAND LINE OPTIONS:\n -D X, --directory X: Working data directory\n -T X, --tide X: Tide model to use in calculating currents\n --atlas-format X: ATLAS tide model format (OTIS, netcdf)\n --gzip, -G: Tide model files are gzip compressed\n --definition-file X: Model definition file for use in calculating currents\n --format X: input and output data format\n csv (default)\n netCDF4\n HDF5\n geotiff\n --variables X: variable names of data in csv, HDF5 or netCDF4 file\n for csv files: the order of the columns within the file\n for HDF5 and netCDF4 files: time, y, x and data variable names\n -H X, --header X: number of header lines for csv files\n --delimiter X: Delimiter for csv or ascii files\n -t X, --type X: input data type\n drift: drift buoys or satellite/airborne altimetry (time per data point)\n grid: spatial grids or images (single time for all data points)\n -e X, --epoch X: Reference epoch of input time (default Modified Julian Day)\n days since 1858-11-17T00:00:00\n -d X, --deltatime X: Input delta time for files without date information\n can be set to 0 to use exact calendar date from epoch\n -s X, --standard X: Input time standard for delta times or input time type\n UTC: Coordinate Universal Time\n GPS: GPS Time\n LORAN: Long Range Navigator Time\n TAI: International Atomic Time\n datetime: formatted datetime string in UTC\n -P X, --projection X: spatial projection as EPSG code or PROJ4 string\n 4326: latitude and longitude coordinates on WGS84 reference ellipsoid\n -I X, --interpolate X: Interpolation method\n spline\n linear\n nearest\n bilinear\n -E X, --extrapolate X: Extrapolate with nearest-neighbors\n -c X, --cutoff X: Extrapolation cutoff in kilometers\n set to inf to extrapolate for all points\n -V, --verbose: Verbose output of processing run\n -M X, --mode X: Permission mode of output file\n\nPYTHON DEPENDENCIES:\n numpy: Scientific Computing Tools For Python\n https://numpy.org\n https://numpy.org/doc/stable/user/numpy-for-matlab-users.html\n scipy: Scientific Tools for Python\n https://docs.scipy.org/doc/\n h5py: Python interface for Hierarchal Data Format 5 (HDF5)\n https://www.h5py.org/\n netCDF4: Python interface to the netCDF C library\n https://unidata.github.io/netcdf4-python/netCDF4/index.html\n gdal: Pythonic interface to the Geospatial Data Abstraction Library (GDAL)\n https://pypi.python.org/pypi/GDAL\n dateutil: powerful extensions to datetime\n https://dateutil.readthedocs.io/en/stable/\n pyproj: Python interface to PROJ library\n https://pypi.org/project/pyproj/\n\nPROGRAM DEPENDENCIES:\n time.py: utilities for calculating time operations\n spatial: utilities for reading, writing and operating on spatial data\n utilities.py: download and management utilities for syncing files\n arguments.py: load the nodal corrections for 
tidal constituents\n astro.py: computes the basic astronomical mean longitudes\n convert_crs.py: convert points to and from Coordinates Reference Systems\n load_constituent.py: loads parameters for a given tidal constituent\n io/model.py: retrieves tide model parameters for named tide models\n io/OTIS.py: extract tidal harmonic constants from OTIS tide models\n io/ATLAS.py: extract tidal harmonic constants from netcdf models\n io/FES.py: extract tidal harmonic constants from FES tide models\n interpolate.py: interpolation routines for spatial data\n predict.py: predict tidal values using harmonic constants\n\nUPDATE HISTORY:\n Updated 08/2023: changed ESR netCDF4 format to TMD3 format\n Updated 05/2023: use timescale class for time conversion operations\n Updated 04/2023: using pathlib to define and expand paths\n using long_name and description attributes from model class\n Updated 02/2023: added functionality for time series type\n Updated 01/2023: added default field mapping for reading from netCDF4/HDF5\n added data type keyword for netCDF4 output\n Updated 12/2022: single implicit import of pyTMD tools\n Updated 11/2022: place some imports within try/except statements\n use f-strings for formatting verbose or ascii output\n Updated 10/2022: added delimiter option and datetime parsing for ascii files\n Updated 05/2022: added ESR netCDF4 formats to list of model types\n updated keyword arguments to read tide model programs\n Updated 04/2022: use argparse descriptions within documentation\n Updated 03/2022: using static decorators to define available models\n Updated 02/2022: added Arctic 2km model (Arc2kmTM) to list of models\n Updated 01/2022: added option for changing the time standard\n Updated 12/2021: added TPXO9-atlas-v5 to list of available tide models\n Updated 10/2021: using python logging for handling verbose output\n Updated 09/2021: refactor to use model class for files and attributes\n Updated 07/2021: added tide model reference to output attributes\n can use prefix files to define command line arguments\n Updated 06/2021: added new Gr1km-v2 1km Greenland model from ESR\n Updated 05/2021: added option for extrapolation cutoff in kilometers\n Updated 03/2021: added TPXO9-atlas-v4 in binary OTIS format\n simplified netcdf inputs to be similar to binary OTIS read program\n Updated 02/2021: replaced numpy bool to prevent deprecation warning\n Updated 12/2020: added valid data extrapolation with nearest_extrap\n Updated 11/2020: added options to read from and write to geotiff image files\n Updated 10/2020: using argparse to set command line parameters\n Forked 09/2020 from compute_tidal_elevations.py\n Updated 09/2020: can use HDF5 and netCDF4 as inputs and outputs\n Updated 08/2020: using builtin time operations\n Updated 07/2020: added FES2014 and FES2014_load. use merged delta times\n Updated 06/2020: added version 2 of TPXO9-atlas (TPXO9-atlas-v2)\n Updated 02/2020: changed CATS2008 grid to match version on U.S. 
Antarctic\n Program Data Center http://www.usap-dc.org/view/dataset/601235\n Updated 11/2019: added AOTIM-5-2018 tide model (2018 update to 2004 model)\n Updated 09/2019: added TPXO9_atlas reading from netcdf4 tide files\n Updated 07/2018: added GSFC Global Ocean Tides (GOT) models\n Written 10/2017 for public release\n\"\"\"\nfrom __future__ import print_function\n\nimport sys\nimport logging\nimport pathlib\nimport argparse\nimport numpy as np\nimport pyTMD\n\n# attempt imports\ntry:\n import pyproj\nexcept (ImportError, ModuleNotFoundError) as exc:\n logging.critical(\"pyproj not available\")\n\n# PURPOSE: try to get the projection information for the input file\ndef get_projection(attributes, PROJECTION):\n # coordinate reference system string from file\n try:\n crs = pyproj.CRS.from_string(attributes['projection'])\n except (ValueError,KeyError,pyproj.exceptions.CRSError):\n pass\n else:\n return crs\n # EPSG projection code\n try:\n crs = pyproj.CRS.from_epsg(int(PROJECTION))\n except (ValueError,pyproj.exceptions.CRSError):\n pass\n else:\n return crs\n # coordinate reference system string\n try:\n crs = pyproj.CRS.from_string(PROJECTION)\n except (ValueError,pyproj.exceptions.CRSError):\n pass\n else:\n return crs\n # no projection can be made\n raise pyproj.exceptions.CRSError\n\n# PURPOSE: read csv, netCDF or HDF5 data\n# compute tides at points and times using tidal model driver algorithms\ndef compute_tidal_currents(tide_dir, input_file, output_file,\n TIDE_MODEL=None,\n ATLAS_FORMAT='netcdf',\n GZIP=True,\n DEFINITION_FILE=None,\n FORMAT='csv',\n VARIABLES=[],\n HEADER=0,\n DELIMITER=',',\n TYPE='drift',\n TIME_UNITS='days since 1858-11-17T00:00:00',\n TIME=None,\n TIME_STANDARD='UTC',\n PROJECTION='4326',\n METHOD='spline',\n EXTRAPOLATE=False,\n CUTOFF=None,\n VERBOSE=False,\n MODE=0o775):\n\n # create logger for verbosity level\n loglevel = logging.INFO if VERBOSE else logging.CRITICAL\n logging.basicConfig(level=loglevel)\n\n # get parameters for tide model\n if DEFINITION_FILE is not None:\n model = pyTMD.io.model(tide_dir).from_file(DEFINITION_FILE)\n else:\n model = pyTMD.io.model(tide_dir, format=ATLAS_FORMAT,\n compressed=GZIP).current(TIDE_MODEL)\n\n # invalid value\n fill_value = -9999.0\n # output netCDF4 and HDF5 file attributes\n # will be added to YAML header in csv files\n attrib = {}\n # latitude\n attrib['lat'] = {}\n attrib['lat']['long_name'] = 'Latitude'\n attrib['lat']['units'] = 'Degrees_North'\n # longitude\n attrib['lon'] = {}\n attrib['lon']['long_name'] = 'Longitude'\n attrib['lon']['units'] = 'Degrees_East'\n # zonal tidal currents\n attrib['u'] = {}\n attrib['u']['description'] = model.description['u']\n attrib['u']['reference'] = model.reference\n attrib['u']['model'] = model.name\n attrib['u']['units'] = 'cm/s'\n attrib['u']['long_name'] = model.long_name['u']\n attrib['u']['_FillValue'] = fill_value\n # meridional tidal currents\n attrib['v'] = {}\n attrib['v']['description'] = model.description['v']\n attrib['v']['reference'] = model.reference\n attrib['v']['model'] = model.name\n attrib['v']['units'] = 'cm/s'\n attrib['v']['long_name'] = model.long_name['v']\n attrib['v']['_FillValue'] = fill_value\n # time\n attrib['time'] = {}\n attrib['time']['long_name'] = 'Time'\n attrib['time']['units'] = 'days since 1992-01-01T00:00:00'\n attrib['time']['calendar'] = 'standard'\n\n # read input file to extract time, spatial coordinates and data\n if (FORMAT == 'csv'):\n parse_dates = (TIME_STANDARD.lower() == 'datetime')\n dinput = 
pyTMD.spatial.from_ascii(input_file, columns=VARIABLES,\n delimiter=DELIMITER, header=HEADER, parse_dates=parse_dates)\n elif (FORMAT == 'netCDF4'):\n field_mapping = pyTMD.spatial.default_field_mapping(VARIABLES)\n dinput = pyTMD.spatial.from_netCDF4(input_file,\n field_mapping=field_mapping)\n elif (FORMAT == 'HDF5'):\n field_mapping = pyTMD.spatial.default_field_mapping(VARIABLES)\n dinput = pyTMD.spatial.from_HDF5(input_file,\n field_mapping=field_mapping)\n elif (FORMAT == 'geotiff'):\n dinput = pyTMD.spatial.from_geotiff(input_file)\n # copy global geotiff attributes for projection and grid parameters\n for att_name in ['projection','wkt','spacing','extent']:\n attrib[att_name] = dinput['attributes'][att_name]\n # update time variable if entered as argument\n if TIME is not None:\n dinput['time'] = np.copy(TIME)\n\n # converting x,y from projection to latitude/longitude\n crs1 = get_projection(dinput['attributes'], PROJECTION)\n crs2 = pyproj.CRS.from_epsg(4326)\n transformer = pyproj.Transformer.from_crs(crs1, crs2, always_xy=True)\n if (TYPE == 'grid'):\n ny, nx = (len(dinput['y']), len(dinput['x']))\n gridx, gridy = np.meshgrid(dinput['x'], dinput['y'])\n lon, lat = transformer.transform(gridx, gridy)\n elif (TYPE == 'drift'):\n lon, lat = transformer.transform(dinput['x'], dinput['y'])\n elif (TYPE == 'time series'):\n nstation = len(dinput['y'])\n lon, lat = transformer.transform(dinput['x'], dinput['y'])\n\n # extract time units from netCDF4 and HDF5 attributes or from TIME_UNITS\n try:\n time_string = dinput['attributes']['time']['units']\n epoch1, to_secs = pyTMD.time.parse_date_string(time_string)\n except (TypeError, KeyError, ValueError):\n epoch1, to_secs = pyTMD.time.parse_date_string(TIME_UNITS)\n\n # convert delta times or datetimes objects to timescale\n if (TIME_STANDARD.lower() == 'datetime'):\n timescale = pyTMD.time.timescale().from_datetime(\n dinput['time'].flatten())\n else:\n # convert time to seconds\n delta_time = to_secs*dinput['time'].flatten()\n timescale = pyTMD.time.timescale().from_deltatime(delta_time,\n epoch=epoch1, standard=TIME_STANDARD)\n # number of time points\n nt = len(timescale)\n\n # python dictionary with output data\n output = {'time':timescale.tide, 'lon':lon, 'lat':lat}\n # iterate over u and v currents\n for t in model.type:\n # read tidal constants and interpolate to grid points\n if model.format in ('OTIS','ATLAS','TMD3'):\n amp,ph,D,c = pyTMD.io.OTIS.extract_constants(lon.flatten(), lat.flatten(),\n model.grid_file, model.model_file['u'], model.projection,\n type=t, method=METHOD, extrapolate=EXTRAPOLATE, cutoff=CUTOFF,\n grid=model.format)\n deltat = np.zeros((nt))\n elif (model.format == 'netcdf'):\n amp,ph,D,c = pyTMD.io.ATLAS.extract_constants(lon.flatten(), lat.flatten(),\n model.grid_file, model.model_file[t], type=t, method=METHOD,\n extrapolate=EXTRAPOLATE, cutoff=CUTOFF, scale=model.scale,\n compressed=model.compressed)\n deltat = np.zeros((nt))\n elif (model.format == 'FES'):\n amp,ph = pyTMD.io.FES.extract_constants(lon.flatten(), lat.flatten(),\n model.model_file[t], type=t, version=model.version,\n method=METHOD, extrapolate=EXTRAPOLATE, cutoff=CUTOFF,\n scale=model.scale, compressed=model.compressed)\n # available model constituents\n c = model.constituents\n # delta time (TT - UT1)\n deltat = timescale.tt_ut1\n\n # calculate complex phase in radians for Euler's\n cph = -1j*ph*np.pi/180.0\n # calculate constituent oscillation\n hc = amp*np.exp(cph)\n\n # predict tidal currents at time and infer minor corrections\n if 
(TYPE == 'grid'):\n output[t] = np.ma.zeros((ny,nx,nt),fill_value=fill_value)\n output[t].mask = np.zeros((ny,nx,nt),dtype=bool)\n for i in range(nt):\n TIDE = pyTMD.predict.map(timescale.tide[i], hc, c,\n deltat=deltat[i], corrections=model.format)\n MINOR = pyTMD.predict.infer_minor(timescale.tide[i], hc, c,\n deltat=deltat[i], corrections=model.format)\n # add major and minor components and reform grid\n output[t][:,:,i] = np.reshape((TIDE+MINOR), (ny,nx))\n output[t].mask[:,:,i] = np.reshape((TIDE.mask | MINOR.mask),\n (ny,nx))\n elif (TYPE == 'drift'):\n output[t] = np.ma.zeros((nt), fill_value=fill_value)\n output[t].mask = np.any(hc.mask,axis=1)\n output[t].data[:] = pyTMD.predict.drift(timescale.tide, hc, c,\n deltat=deltat, corrections=model.format)\n minor = pyTMD.predict.infer_minor(timescale.tide, hc, c,\n deltat=deltat, corrections=model.format)\n output[t].data[:] += minor.data[:]\n elif (TYPE == 'time series'):\n output[t] = np.ma.zeros((nstation,nt),fill_value=fill_value)\n output[t].mask = np.zeros((nstation,nt),dtype=bool)\n for s in range(nstation):\n # calculate constituent oscillation for station\n TIDE = pyTMD.predict.time_series(timescale.tide, hc[s,None,:], c,\n deltat=deltat, corrections=model.format)\n MINOR = pyTMD.predict.infer_minor(timescale.tide, hc[s,None,:], c,\n deltat=deltat, corrections=model.format)\n output[t].data[s,:] = TIDE.data[:] + MINOR.data[:]\n output[t].mask[s,:] = (TIDE.mask | MINOR.mask)\n # replace invalid values with fill value\n output[t].data[output[t].mask] = output[t].fill_value\n\n # output to file\n if (FORMAT == 'csv'):\n pyTMD.spatial.to_ascii(output, attrib, output_file,\n delimiter=DELIMITER, header=False,\n columns=['time','lat','lon','u','v'])\n elif (FORMAT == 'netCDF4'):\n pyTMD.spatial.to_netCDF4(output, attrib, output_file, data_type=TYPE)\n elif (FORMAT == 'HDF5'):\n pyTMD.spatial.to_HDF5(output, attrib, output_file)\n elif (FORMAT == 'geotiff'):\n # merge current variables into a single variable\n output['data'] = np.concatenate((output['u'],output['v']),axis=-1)\n attrib['data'] = {'_FillValue':fill_value}\n pyTMD.spatial.to_geotiff(output, attrib, output_file,\n varname='data')\n # change the permissions level to MODE\n output_file.chmod(mode=MODE)\n\n# PURPOSE: create argument parser\ndef arguments():\n parser = argparse.ArgumentParser(\n description=\"\"\"Calculates zonal and meridional tidal currents for\n an input file\n \"\"\",\n fromfile_prefix_chars=\"@\"\n )\n parser.convert_arg_line_to_args = pyTMD.utilities.convert_arg_line_to_args\n group = parser.add_mutually_exclusive_group(required=True)\n # command line options\n # input and output file\n parser.add_argument('infile',\n type=pathlib.Path, nargs='?',\n help='Input file to run')\n parser.add_argument('outfile',\n type=pathlib.Path, nargs='?',\n help='Computed output file')\n # set data directory containing the tidal data\n parser.add_argument('--directory','-D',\n type=pathlib.Path,\n help='Working data directory')\n # tide model to use\n choices = sorted(pyTMD.io.model.ocean_current())\n group.add_argument('--tide','-T',\n type=str, choices=choices,\n help='Tide model to use in calculating currents')\n parser.add_argument('--atlas-format',\n type=str, choices=('OTIS','netcdf'), default='netcdf',\n help='ATLAS tide model format')\n parser.add_argument('--gzip','-G',\n default=False, action='store_true',\n help='Tide model files are gzip compressed')\n # tide model definition file to set an undefined model\n group.add_argument('--definition-file',\n 
type=pathlib.Path,\n help='Tide model definition file')\n # input and output data format\n parser.add_argument('--format','-F',\n type=str, default='csv', choices=('csv','netCDF4','HDF5','geotiff'),\n help='Input and output data format')\n # variable names (for csv names of columns)\n parser.add_argument('--variables','-v',\n type=str, nargs='+', default=['time','lat','lon','data'],\n help='Variable names of data in input file')\n # number of header lines for csv files\n parser.add_argument('--header','-H',\n type=int, default=0,\n help='Number of header lines for csv files')\n # delimiter for csv or ascii files\n parser.add_argument('--delimiter',\n type=str, default=',',\n help='Delimiter for csv or ascii files')\n # input data type\n # drift: drift buoys or satellite/airborne altimetry (time per data point)\n # grid: spatial grids or images (single time for all data points)\n # time series: station locations with multiple time values\n parser.add_argument('--type','-t',\n type=str, default='drift',\n choices=('drift','grid','time series'),\n help='Input data type')\n # time epoch (default Modified Julian Days)\n # in form \"time-units since yyyy-mm-dd hh:mm:ss\"\n parser.add_argument('--epoch','-e',\n type=str, default='days since 1858-11-17T00:00:00',\n help='Reference epoch of input time')\n # input delta time for files without date information\n parser.add_argument('--deltatime','-d',\n type=float, nargs='+',\n help='Input delta time for files without date variables')\n # input time standard definition\n parser.add_argument('--standard','-s',\n type=str, choices=('UTC','GPS','TAI','LORAN','datetime'), default='UTC',\n help='Input time standard definition')\n # spatial projection (EPSG code or PROJ4 string)\n parser.add_argument('--projection','-P',\n type=str, default='4326',\n help='Spatial projection as EPSG code or PROJ4 string')\n # interpolation method\n parser.add_argument('--interpolate','-I',\n metavar='METHOD', type=str, default='spline',\n choices=('spline','linear','nearest','bilinear'),\n help='Spatial interpolation method')\n # extrapolate with nearest-neighbors\n parser.add_argument('--extrapolate','-E',\n default=False, action='store_true',\n help='Extrapolate with nearest-neighbors')\n # extrapolation cutoff in kilometers\n # set to inf to extrapolate over all points\n parser.add_argument('--cutoff','-c',\n type=np.float64, default=10.0,\n help='Extrapolation cutoff in kilometers')\n # verbose output of processing run\n # print information about each input and output file\n parser.add_argument('--verbose','-V',\n default=False, action='store_true',\n help='Verbose output of run')\n # permissions mode of the local files (number in octal)\n parser.add_argument('--mode','-M',\n type=lambda x: int(x,base=8), default=0o775,\n help='Permission mode of output file')\n # return the parser\n return parser\n\n# This is the main part of the program that calls the individual functions\ndef main():\n # Read the system arguments listed after the program\n parser = arguments()\n args,_ = parser.parse_known_args()\n\n # set output file from input filename if not entered\n if not args.outfile:\n vars = (args.infile.stem,args.tide,'_currents',args.infile.suffix)\n args.outfile = args.infile.with_name('{0}_{1}{2}{3}'.format(*vars))\n\n # run tidal current program for input file\n compute_tidal_currents(args.directory, args.infile, args.outfile,\n TIDE_MODEL=args.tide,\n ATLAS_FORMAT=args.atlas_format,\n GZIP=args.gzip,\n DEFINITION_FILE=args.definition_file,\n FORMAT=args.format,\n 
VARIABLES=args.variables,\n HEADER=args.header,\n DELIMITER=args.delimiter,\n TYPE=args.type,\n TIME_UNITS=args.epoch,\n TIME=args.deltatime,\n TIME_STANDARD=args.standard,\n PROJECTION=args.projection,\n METHOD=args.interpolate,\n EXTRAPOLATE=args.extrapolate,\n CUTOFF=args.cutoff,\n VERBOSE=args.verbose,\n MODE=args.mode)\n\n# run main program\nif __name__ == '__main__':\n main()\n","sub_path":"scripts/compute_tidal_currents.py","file_name":"compute_tidal_currents.py","file_ext":"py","file_size_in_byte":23140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"67893358","text":"import random\nwinning_num = random.randint(1,5)\nguess = 1\nnum = int(input(\"guess a number betn 1 to 100 :\"))\ngame_over = False\n\nwhile not game_over:\n if num == winning_num:\n print(f\"YOU WIN !!!. and you guess this number in {guess} time\")\n break\n else:\n if num > winning_num:\n print(\"too high\")\n guess += 1\n num = int(input(\"guess again :\"))\n else:\n print(\"too low\")\n guess += 1\n num = int(input(\"guess again :\"))\n\n\n","sub_path":"new_python/begin_1.py","file_name":"begin_1.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"600410853","text":"import os\nimport utilities\nimport gmplot\nfrom entities import Instance, Solution\nfrom algorithm import Algorithm\n\n\nif __name__ == \"__main__\":\n # Create random generator and set seed\n r = utilities.RandGenerator()\n r.set_seed(22)\n\n # Read Instance\n df, instance = utilities.read_instance_csv(\"data_small.csv\")\n #instance = utilities.read_instance(\"instance_small.txt\") # Deprecated\n print(\"Instance has been read\")\n\n # Compute distances\n instance.compute_dist(\"default\")\n print(\"Distances have been calculated\")\n\n # Create a random solution\n solution = Solution(instance)\n algorithm = Algorithm(6, solution)\n algorithm.random_sol()\n print(\"Solution has been created\")\n\n # Get objective functions\n print(solution.get_objvalue(\"sumAllToCenter\"))\n print(solution.get_objvalue(\"sumAllToAll\"))\n print(solution.get_objvalue(\"loadRange\"))\n print(\"Objective function has been calculated\")\n\n # Print clusters\n map = utilities.MapVisualiser()\n # Set key google API\n # Read documentation of ggogle API to get your own key\n #map.set_gmapkey(\"1AIzaSyBrcChgM41NgYRy7FL4oXoxkz6KJbrKyJY\")\n map.draw_cluster(solution)\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Districting/src/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"494362467","text":"import pandas as pd\nimport numpy as np\nimport sys\nimport pickle\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split, RandomizedSearchCV\nfrom sklearn.metrics import accuracy_score, f1_score\n\n\nclass RandomForestTrainer():\n def __init__(self, data_path, target_name):\n self.data = pd.read_csv(data_path)\n print(self.data)\n self.X, self.y = self.data.drop([target_name], axis = 1), self.data[target_name]\n self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.X, self.y, test_size=0.33, random_state=42, stratify = self.y)\n\n def train(self):\n n_estimators = [int(x) for x in np.linspace(start = 200, stop = 2000, num = 10)]\n max_features = [\"auto\", \"sqrt\"]\n max_depth = [int(x) for x in np.linspace(10, 110, num = 11)]\n max_depth.append(None)\n min_samples_split = [2,5,10]\n min_samples_leaf = [1,2,4]\n bootstrap = [True, False]\n\n random_grid = {\"n_estimators\":n_estimators,\n \"max_features\":max_features,\n \"max_depth\":max_depth,\n \"min_samples_split\":min_samples_split,\n \"min_samples_leaf\":min_samples_leaf,\n \"bootstrap\":bootstrap\n }\n\n rf = RandomForestClassifier()\n rf_random = RandomizedSearchCV(estimator = rf, param_distributions = random_grid, n_iter = 100, cv = 3, verbose = 2, random_state = 42, n_jobs = -1)\n\n rf_random.fit(self.X_train, self.y_train)\n self.model = RandomForestClassifier()\n self.model = RandomForestClassifier(**rf_random.best_params_)\n self.model.fit(self.X_train, self.y_train)\n\n def test(self):\n print(\"Accuracy =\", accuracy_score(self.model.predict(self.X_test), self.y_test))\n print(\"F1 Score =\", f1_score(self.model.predict(self.X_test), self.y_test))\n\n def save(self, model_path):\n pickle.dump(self.model, open(model_path, \"wb\"))\n\n def predict(self, model_path):\n \"\"\"\n :param model_path:\n :return:\n \"\"\"\n return None\n\ndef main():\n data_path = sys.argv[1]\n target_name = sys.argv[2]\n model_path = sys.argv[3]\n model = RandomForestTrainer(data_path, target_name)\n model.train()\n model.test()\n model.save(model_path)\n\n\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"133599880","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@Author : LEITENG\n@Version : \n------------------------------------\n@File : p39_VAE_mnist.py\n@Description : \n@CreateTime : 2020/6/23 10:15\n------------------------------------\n@ModifyTime : 手写数字生成\n\n条件式 VAE: CVAE\n\n\"\"\"\nimport p43_framework_muti_gpus as myf\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist.input_data import read_data_sets\nimport numpy as np\nimport cv2\nimport os\n\n\nclass MyConfig(myf.Config):\n def __init__(self):\n super(MyConfig, self).__init__()\n # mac windows下分隔符不同,以下是通用表达\n # self.sample_path = '..{sep}deeplearning_tensorflow_p{sep}MNIST_data'.format(sep=os.sep)\n self.sample_path = './MNIST_data'\n self.vector_size = 4\n # 惯性系数\n self.momentum = 0.99\n self.cols = 20\n self.img_path = './imgs/{name}/test.jpg'.format(name=self.get_name())\n self.batch_size = 500\n self.epoches = 5\n\n def get_name(self):\n return 'p44'\n\n def get_sub_tensors(self, gpu_idx):\n return MySubTensors(self)\n\n def get_tensors(self):\n return MyTensors(self)\n\n\nclass MyTensors(myf.Tensors):\n def get_loss_for_summary(self, loss):\n return tf.sqrt(loss)\n\n\nclass MySubTensors:\n def __init__(self, config: MyConfig):\n self.config = config\n with tf.device('/gpu:0'):\n # 分配第0个gpu,而不是操作系统的第 0 块gpu\n x = tf.placeholder(tf.float32, [None, 784], 'x')\n label = tf.placeholder(tf.int32, [None], 'label')\n self.inputs = [x, label]\n\n x = tf.reshape(x, [-1, 28, 28, 1])\n # [-1, 10, 4]\n self.vec = self.encode(x, config.vector_size) # [-1, 4]\n\n self.process_normal(self.vec) # 注意次序!!!\n self.y = self.decode(self.vec, label) # [-1, 28, 28, 1]\n\n self.losses = [tf.reduce_mean(tf.square(self.y - x))]\n # loss = tf.reduce_mean(tf.square(self.y - x))\n # opt = tf.train.AdamOptimizer(lr)\n # with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n # # loss,assign 在 train_op 之上定义\n # # train_op 依赖 loss,assign\n # self.train_op = opt.minimize(loss)\n # self.summary = tf.summary.scalar('loss', tf.sqrt(loss))\n self.y = tf.reshape(self.y, [-1, 28, 28])\n\n def process_normal(self, vec):\n '''\n 计算平均数(动量法)\n 并在计算图中保存变量(assign)\n :param vec: [-1, vec_size]\n :return:\n '''\n mean = tf.reduce_mean(vec, axis=0) # [vec_size]\n # mean square difference\n msd = tf.reduce_mean(tf.square(vec), axis=0)\n\n vector_size = vec.shape[1].value\n # [10, 4]\n self.final_mean = tf.get_variable('mean', [vector_size], tf.float32, tf.initializers.zeros, trainable=False)\n self.final_msd = tf.get_variable('msd', [vector_size], tf.float32, tf.initializers.zeros, trainable=False)\n\n mom = self.config.momentum\n # final_mean = final_mean * mom + mean * (1 - mom) # 错误做法:变量没有更新\n assign = tf.assign(self.final_mean, self.final_mean * mom + mean * (1 - mom))\n # 建立 assign 与 train_op 的控制依赖,正向传播是走实线和虚线,反向传播不去求控制依赖的值\n tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, assign)\n\n assign = tf.assign(self.final_msd, self.final_msd*mom + msd*(1-mom))\n tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, assign)\n\n def encode(self, x, vec_size):\n '''\n encode the x to vector which size is vec_size\n :param x: input tensor, shape is [-1, 28, 28, 1]\n :param vec_size:\n :return: the semantics vectors which shape is [-1, vec_size]\n '''\n filters = 16\n x = tf.layers.conv2d(x, filters, 3, 1, 'same', activation=tf.nn.relu, name='conv1') # [-1, 28, 28, 16]\n for i in range(2):\n filters *= 2\n # [-1, 28, 28, 32] [-1, 14, 14, 64]\n x = tf.layers.conv2d(x, filters, 3, 1, 'same', activation=tf.nn.relu, name='conv2_%d' % i)\n # 池化操作不产生可训练参数, 
不需要训练参数\n # [-1, 14, 14, 32] [-1, 7, 7, 64]\n x = tf.layers.max_pooling2d(x, 2, 2, 'valid')\n # x: [-1, 7, 7, 64]\n # 卷积或者使用全连接\n # [-1, 1, 1, vec_size]\n x = tf.layers.conv2d(x, vec_size, 7, 1, 'valid', name='conv3')\n return tf.reshape(x, [-1, vec_size])\n\n def decode(self, vec, label):\n '''\n 使用反卷积(上采样),反卷积只能恢复尺寸,不能恢复数值\n the semantics vector\n :param vec: [-1, vec_size]\n :param label: [-1]\n :return: [-1, 28, 28, 1]\n '''\n y = tf.layers.dense(vec, 7 * 7 * 64, activation=tf.nn.relu, name='dens_1') # [-1 ,4] -> [-1, 7*7*64]\n label = tf.one_hot(label, 10)\n l = tf.layers.dense(label, 7 * 7 * 64, name='dense1')\n y += l\n # [-1, 7*7*64] -> [-1, 7, 7, 64]\n y = tf.reshape(y, [-1, 7, 7, 64])\n filters = 64\n size = 7\n for i in range(2):\n filters //= 2\n size *= 2\n # 两次反卷积 :[-1, 14, 14, 32] [-1, 28, 28, 16]\n y = tf.layers.conv2d_transpose(y, filters, 3, 2, 'same', activation=tf.nn.relu, name='deconv1_%d' % i)\n l = tf.layers.dense(label, size * size * filters, name='deconv_l_1_%d' % i)\n l = tf.reshape(l, [-1, size, size, filters])\n y += l\n # [-1, 28, 28, 16]\n y = tf.layers.conv2d_transpose(y, 1, 3, 1, 'same', name='deconv2') # [-1, 28, 28, 1]\n return y\n\n\nclass MyDS:\n def __init__(self, ds, config):\n self.ds = ds\n self.num_examples = ds.num_examples\n\n def next_batch(self, batch_size):\n xs, labels = self.ds.next_batch(batch_size)\n return xs, labels\n\n\nclass App:\n def __init__(self):\n pass\n\n\ndef predict(app, samples, path, cols):\n mean = app.session.run(app.ts.sub_ts[0].final_mean)\n print(mean)\n msd = app.session.run(app.ts.sub_ts[0].final_msd) # 二阶原点矩\n std = np.sqrt(msd - mean ** 2)\n print(std)\n\n vec = np.random.normal(mean, std, [samples, len(std)])\n label = [e % 10 for e in range(samples)]\n # feed_dict 中,任何一个张量局可作为key\n # imgs = app.session.run(app.ts.y, {app.ts.vec: vec, app.ts.inputs[-1]: label}) # 【-1, 28, 28】\n imgs = app.session.run(app.ts.sub_ts[0].y,\n {app.ts.sub_ts[0].vec: vec, app.ts.sub_ts[0].inputs[-1]: label}) # [-1, 28, 28]\n\n # 方法一:\n imgs = np.reshape(imgs, [-1, cols, 28, 28])\n imgs = np.transpose(imgs, [0, 2, 1, 3]) # [-1, 28, 20, 28]\n # imgs = np.reshape(imgs, [-1, 28, cols*28])\n # imgs = np.transpose(imgs, [1, 0, 2]) # [28, -1, 20*28]\n imgs = np.reshape(imgs, [-1, cols*28])\n # 方法二:\n # imgs = np.transpose(imgs, [1, 0, 2])\n # imgs = np.reshape(imgs, [-1, 28, cols * 28])\n # imgs = np.transpose(imgs, [1, 0, 2])\n # imgs = np.reshape(imgs, [-1, cols * 28])\n\n myf.make_dirs(path)\n cv2.imwrite(path, imgs*255)\n print('write image into', path)\n\n\nif __name__ == '__main__':\n cfg = MyConfig()\n cfg.from_cmd()\n print('_'*20)\n print(cfg)\n\n dss = read_data_sets(cfg.sample_path)\n app = myf.App(cfg)\n with app:\n # app.train(MyDS(dss.train, cfg), MyDS(dss.validation, cfg))\n predict(app, cfg.batch_size, cfg.img_path, cfg.cols)\n","sub_path":"deeplearning_tensorflow_p/p44_CVAE_mutigpus.py","file_name":"p44_CVAE_mutigpus.py","file_ext":"py","file_size_in_byte":7669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
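process_normal above keeps exponential moving averages of the latent mean and second raw moment, and predict recovers the std as sqrt(E[x^2] - E[x]^2). The same bookkeeping in plain numpy, with placeholder latent batches:

import numpy as np

mom = 0.99
final_mean = np.zeros(4)
final_msd = np.zeros(4)   # running second raw moment E[x^2]

for _ in range(1000):                               # stand-in for training batches
    batch = np.random.normal(2.0, 0.5, (500, 4))    # placeholder latent vectors
    final_mean = final_mean * mom + batch.mean(axis=0) * (1 - mom)
    final_msd = final_msd * mom + (batch ** 2).mean(axis=0) * (1 - mom)

std = np.sqrt(final_msd - final_mean ** 2)   # as in predict()
print(final_mean.round(2), std.round(2))     # approaches mean 2.0, std 0.5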
+{"seq_id":"338917107","text":"# Import the required module for text \r\n\r\nfrom gtts import gTTS \r\n\r\n \r\n# to play the converted audio \r\nimport os \r\n\r\n# The text that you want to convert to audio \r\nmytext = 'your text is converted to speech!'\r\n\r\n# Language in which you want to convert \r\nlanguage = 'en'\r\n\r\n# Passing the text and language to the engine, \r\n\r\nmyobj = gTTS(text=mytext, lang=language, slow=False) \r\n\r\n# Saving the converted audio in a mp3 file named \r\n\r\nmyobj.save(\"my.mp3\") \r\n\r\n# Playing the converted file \r\nos.system(\"mpg321 my.mp3\") \r\n","sub_path":"text_to_speech.py","file_name":"text_to_speech.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"324466825","text":"\"\"\"\nStack to queue converter.\n\"\"\"\n\nfrom arraystack import ArrayStack\nfrom arrayqueue import ArrayQueue\nimport copy\n\n\ndef stack_to_queue(stack):\n \"\"\"\n Convert queue to a stack\n \"\"\"\n input_stack = copy.deepcopy(stack)\n output_queue = ArrayQueue()\n while True:\n try:\n output_queue.add(input_stack.pop())\n except KeyError:\n break\n return output_queue\n","sub_path":"stack_to_queue.py","file_name":"stack_to_queue.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"352877140","text":"from faker.providers import BaseProvider\nimport random\nimport time\n\n\nclass MetricProvider(BaseProvider):\n def hostname(self):\n validIds = [\n \"doc\",\n \"grumpy\",\n \"sleepy\",\n \"bashful\",\n \"happy\",\n \"sneezy\",\n \"dopey\",\n ]\n return validIds[random.randint(0, len(validIds) - 1)]\n\n def cpu_id(self):\n validIds = [\"cpu1\", \"cpu2\", \"cpu3\", \"cpu4\", \"cpu5\"]\n return validIds[random.randint(0, len(validIds) - 1)]\n\n def usage(self):\n return random.random() * 30 + 70\n\n def produce_msg(self):\n hostname = self.hostname()\n ts = time.time()\n message = {\n \"hostname\": hostname,\n \"cpu\": self.cpu_id(),\n \"usage\": self.usage(),\n \"occurred_at\": int(ts * 1000),\n }\n key = {\"hostname\": hostname}\n return message, key\n","sub_path":"metricproducer.py","file_name":"metricproducer.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"56756678","text":"from app import app\n\nfrom flask import render_template,request,jsonify\n\n\nfrom forms import GetQuestion\n\n\nfrom random import randint\n\n\"\"\"@app.route('/lotto/')\ndef new_numbers():\n game_nums = []\n while len(game_nums) < 6:\n n = randint(1,47)\n if not n in game_nums:\n game_nums.append(n)\n game_nums.sort()\n return jsonify(result1=game_nums)\n \"\"\" \n \n@app.route('/8ball/')\ndef get_answer():\n quest = request.args.get('question')\n choice = randint(1,20)\n #https://en.wikipedia.org/wiki/Magic_8-Ball\n if choice ==1:\n response = \"It is certain\"\n elif choice ==2:\n response = \"It is decidedly so\"\n elif choice ==3:\n response = \"Without a doubt\"\n elif choice ==4:\n response = \"Yes, definitely\"\n elif choice ==5:\n response = \"You may rely on it\"\n elif choice ==6:\n response = \"As I see it, yes\"\n elif choice ==7:\n response = \"Most likely\"\n elif choice ==8:\n response = \"Outlook good\"\n elif choice ==9:\n response = \"Yes\"\n elif choice ==10:\n response = \"Signs point to yes\"\n elif choice ==11:\n response = \"Reply hazy try again\"\n elif choice ==12:\n response = \"Ask again later\"\n elif choice ==13:\n response = \"Better not tell you now\"\n elif choice ==14:\n response = \"Cannot predict now\"\n elif choice ==15:\n response = \"Concentrate and ask again\"\n elif choice ==16:\n response = \"Don't count on it\"\n elif choice ==17:\n response = \"My reply is no\"\n elif choice ==18:\n response = \"My sources say no\"\n elif choice ==19:\n response = \"Outlook not so good\"\n elif choice ==20:\n response = \"Very doubtful\"\n return jsonify(result=response)\n \n@app.route('/')\ndef index():\n game_nums = []\n while len(game_nums) < 6:\n n = randint(1,47)\n if not n in game_nums:\n game_nums.append(n)\n game_nums.sort()\n return render_template('index.html',game_nums = game_nums)\n\n@app.route('/die/')\ndef get_die():\n a = randint(1,6)\n return jsonify(result2 = a) ","sub_path":"project-class/app/.~c9_invoke_cZ5PYS.py","file_name":".~c9_invoke_cZ5PYS.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"10984096","text":"train_sent = open('train_sentiments.txt','r')\ntest_sent = open('test_sentiments.txt','r')\n\nY = []\nfor line in train_sent:\n Y += [line[0:-1]]\n\nZ = []\nfor line in test_sent:\n Z += [line[0:-1]]\n\ntrain_sent.close()\ntest_sent.close()\n\n\n\nimport numpy\nimport codecs\nfrom sklearn.svm import LinearSVC\nfrom sklearn.grid_search import GridSearchCV\nfrom sklearn.feature_extraction.text import CountVectorizer\n\nword_vectorizer = CountVectorizer(analyzer='word')\nX = word_vectorizer.fit_transform(codecs.open('train_tweets.txt','r','utf8'))\n\nclf = LinearSVC()\n\nparams_space = { 'C': numpy.logspace(-6,0,11), 'class_weight':[None,'auto']}\ngscv = GridSearchCV(clf,params_space,cv=3)\n\ngscv.fit(X, Y)\nprint(gscv.best_estimator_, gscv.best_params_, gscv.best_score_)\n\ntestset = word_vectorizer.transform(codecs.open('test_tweets.txt','r','utf8'))\nresults = gscv.predict(testset)\n#print(results[0::])\n\ncorrect = wrong = total = 0\nwl = []\n\nfor i in results:\n total += 1\n if i == Z[total - 1]:\n correct += 1\n else:\n wrong += 1\n wl += [Z[total - 1]]\n\npercent_right = correct/total * 100 \nprint(str(correct) + ' correct \\n' + str(wrong) + ' wrong \\n' + str(total) + ' total \\n' + str(percent_right) + '% correct')\n\ns = a = n = 0\n\nfor i in wl:\n if i == 'skeptical':\n s += 1\n if i == 'neutral':\n n += 1\n if i == 'activist':\n a += 1\n\nprint('\\nWrong breakdown:\\n' + str(s) + ' skeptical\\n' + str(n) + ' neutral\\n' + str(a) + ' activist')\n\n\n\n\n\n\n\n\n'''\n######## helper function ##########\n## determines if the tweet has already been processed\n## will return true if it has been\ndef already_in(twt):\n a = False\n check_file = open('tweets_sentiment3.txt','r')\n for aline in check_file:\n if twt in aline:\n if 'redundant' in aline:\n break\n else:\n a = True\n break\n check_file.close()\n return a\n\n\n\n\n###### main program #########\nclimate_file = open('climate_19_Jul_2015.txt','r')\n#climate_file = codecs.open('climate_19_Jul_2015.txt','r','utf8')\n\nfor line in climate_file:\n sentiment = ''\n tweet = line.split('\":::text: \"')[1].split('\":::id: ')[0]\n if already_in(tweet):\n print('Already have that tweet')\n continue\n \n print('\\n' + tweet + '\\n')\n ans = input('1:skeptical, 2:neutral, 3:activist, 4:redundant --- type \"end\" to quit \\n')\n while (ans != 'end') and (ans != '1') and (ans != '2') and (ans != '3') and (ans != '4'):\n ans = input('1:skeptical, 2:neutral, 3:activist, 4;redundant --- type \"end\" to quit \\n')\n if ans == 'end':\n break\n if ans == '1':\n sentiment = 'skeptical'\n if ans == '2':\n sentiment = 'neutral'\n if ans == '3':\n sentiment = 'activist'\n if ans == '4':\n sentiment = 'redundant'\n tweets_file = open('tweets_sentiment2.txt','w')\n tweets_file.write(tweet)\n tweets_file.close()\n \n tweets_file3 = open('tweets_sentiment3.txt','a')\n tweets_file3.write(sentiment + ' ::---:: ' + tweet + '\\n')\n tweets_file3.close() \n \n test = word_vectorizer.transform(codecs.open('tweets_sentiment2.txt','r','utf8'))\n results = gscv.predict(test)\n print('\\n' + results[0] + '\\n')\n \n \n \n rw_file = open('right_wrong.txt','a') \n \n if results[0] == sentiment:\n print('correct')\n rw_file.write('correct\\n')\n else:\n print('wrong \\n guess: ' + results[0] + '\\n actual: ' + sentiment)\n rw_file.write('wrong\\n')\n \n answer = input('Would you like to add this tweet to the training set? 
(y or n)\\n')\n while answer != 'y' and answer != 'n':\n answer = input('Would you like to add this tweet to the training set? (y or n)\\n') \n \n if answer == 'y':\n add = open('tweets_sentiment_refined.txt','a')\n add.write(sentiment + ' ::---:: ' + tweet)\n add.close()\n \n rw_file.close()\n \nrw_file2 = open('right_wrong.txt','r')\nr = w = 0\n\nfor line in rw_file2:\n if 'correct' in line:\n r += 1\n if 'wrong' in line:\n w += 1\nprint(r)\nprint(w)\nper = (r / (r + w)) * 100 \nprint(str(r) + ' right\\n' + str(w) + ' wrong\\n' + str(per) + ' % correct') \n\nrw_file2.close()\nclimate_file.close()\n'''","sub_path":"Vince_Talbot_Code_Training_Set/work again/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
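The record above wires CountVectorizer into a GridSearchCV over LinearSVC. A runnable miniature of the same pattern on an invented toy corpus (note that modern scikit-learn moved GridSearchCV to sklearn.model_selection and replaced class_weight='auto' with 'balanced'):

import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import GridSearchCV
from sklearn.svm import LinearSVC

texts = ["warming is a hoax", "climate change is real", "not sure about climate",
         "act on climate now", "hoax and lies", "maybe it is real maybe not"]
labels = ["skeptical", "activist", "neutral", "activist", "skeptical", "neutral"]

X = CountVectorizer(analyzer='word').fit_transform(texts)
params = {'C': np.logspace(-6, 0, 11), 'class_weight': [None, 'balanced']}
gscv = GridSearchCV(LinearSVC(), params, cv=2)
gscv.fit(X, labels)
print(gscv.best_params_, gscv.best_score_)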
+{"seq_id":"637751031","text":"#!/usr/bin/python\n\n\"\"\"\nWindows user information\n\"\"\"\n\n# >>> win32net.NetUserGetInfo(None,\"rchateau\",3)\n# {'comment': u'', 'workstations': u'', 'country_code': 0L, 'last_logon': 1480721751L, 'password_expired': 0L, 'full_name': u'', 'parm\n# s': u'', 'code_page': 0L, 'priv': 2L, 'auth_flags': 0L, 'logon_server': u'\\\\\\\\*', 'home_dir': u'', 'home_dir_drive': u'', 'usr_comme\n# nt': u'', 'profile': u'', 'acct_expires': 4294967295L, 'primary_group_id': 513L, 'bad_pw_count': 0L, 'user_id': 1001L, 'logon_hours'\n# : '\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff', 'password': None, 'units_per_week': 168L,\n# 'last_logoff': 0L, 'name': u'rchateau', 'max_storage': 4294967295L, 'num_logons': 15896L, 'password_age': 45314825L, 'flags': 66081L\n# , 'script_path': u''}\n\nimport sys\nimport lib_util\nimport lib_common\nfrom lib_properties import pc\n\nimport win32net\n\nfrom sources_types import Win32_UserAccount as survol_Win32_UserAccount\n\nUsable = lib_util.UsableWindows\n\nCanProcessRemote = True\n\ndef Main():\n\tcgiEnv = lib_common.CgiEnv(can_process_remote = True)\n\n\ttry:\n\t\t# Exception if local machine.\n\t\thostName = cgiEnv.m_entity_id_dict[\"Domain\"]\n\texcept KeyError:\n\t\thostName = None\n\n\tif not hostName or lib_util.IsLocalAddress( hostName ):\n\t\tserverBox = lib_common.gUriGen\n\t\tserverNode = lib_common.nodeMachine\n\t\tservName_or_None = None\n\telse:\n\t\tserverBox = lib_common.RemoteBox(hostName)\n\t\tserverNode = lib_common.gUriGen.HostnameUri(hostName)\n\t\tservName_or_None = hostName\n\n\t\t# hostname = \"Titi\" for example\n\t\t# lib_win32.WNetAddConnect(hostName)\n\n\n\n\n\n\tuserName = cgiEnv.m_entity_id_dict[\"Name\"]\n\n\tgrph = cgiEnv.GetGraph()\n\n\tnodeUser = survol_Win32_UserAccount.MakeUri( userName, hostName )\n\n\ttry:\n\t\tinfoList = win32net.NetUserGetInfo(servName_or_None, userName, 2)\n\texcept:\n\t\tlib_common.ErrorMessageHtml(\"Error:\"+str(sys.exc_info()))\n\n\tfor infoKey in infoList:\n\n\t\ttry:\n\t\t\tinfoVal = infoList[infoKey]\n\t\t\tgrph.add( ( nodeUser, lib_common.MakeProp(infoKey), lib_common.NodeLiteral(infoVal) ) )\n\t\texcept:\n\t\t\ttxtDisp = str( sys.exc_info()[1] )\n\t\t\tgrph.add( ( nodeUser, lib_common.MakeProp(infoKey), lib_common.NodeLiteral(txtDisp) ) )\n\n\n\n\tcgiEnv.OutCgiRdf()\n\nif __name__ == '__main__':\n\tMain()\n\n\n","sub_path":"survol/sources_types/Win32_UserAccount/Win32_NetUserGetInfo.py","file_name":"Win32_NetUserGetInfo.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"462407876","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os.path\n\nfrom OpenGL.GL import *\n\nimport pyassimp as assimp\nfrom mesh import Mesh\n\n\nclass Model(object):\n\n def __init__(self, path, gamma=False):\n self.gammaCorrection = gamma\n self.meshes = []\n self.textures_loaded = []\n self.directory = ''\n\n self.loadModel(path)\n\n def draw(self, shader):\n for mesh in self.meshes:\n mesh.draw(shader)\n\n def loadModel(self, path):\n scene = assimp.load(path, processing=(assimp.postprocess.aiProcess_Triangulate |\n assimp.postprocess.aiProcess_FlipUVs |\n assimp.postprocess.aiProcess_CalcTangentSpace))\n if not scene:\n raise Exception(\"ASSIMP can't load model\")\n\n self.directory = os.path.dirname(path)\n\n for mesh in scene.meshes:\n self.meshes.append(Mesh(mesh, self.directory))\n\n assimp.release(scene)\n\n # self.__processNode(scene)\n #\n # def __processNode(self, scene):\n # for mesh in scene.meshes:\n # self.meshes.append(Mesh(mesh))\n #\n # assimp.release(scene)","sub_path":"pysrc/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"447328804","text":"import cv2\nimport time\nimport numpy as np\nfrom pyardrone import ARDrone, at\nimport threading\n\nimport move\n\n\ndef nothing(x):\n pass\n\n\ndef init():\n uav = ARDrone()\n print(\"Initiating - waiting..\")\n uav.navdata_ready.wait()\n print(\"NavData Ready\")\n uav.send(at.CONFIG('general:navdata_demo', True))\n time.sleep(0.1)\n uav.send(at.CONFIG(\"video:video_channel\", 1))\n time.sleep(0.1)\n print(\"Battery = \", uav.navdata.demo.vbat_flying_percentage)\n\n if uav.state.vbat_low:\n print(\"Battery to low, please replace before flying\")\n exit()\n if uav.state.video_mask == 0:\n print(\"Video Disabled\")\n if uav.state.vision_mask == 0:\n print(\"Vision Disabled\")\n if uav.state.altitude_mask == 0:\n print(\"Altitude control inactive\")\n if uav.state.camera_mask == 0:\n print(\"Camera not ready\")\n if uav.state.travelling_mask == 0:\n print(\"Travelling mask disabled\")\n if uav.state.usb_mask == 0:\n print(\"USB key not ready\")\n if uav.state.navdata_demo_mask == 0:\n print(\"navdata demo not activated\")\n if uav.state.navdata_bootstrap:\n print(\"no navdata options send\")\n if uav.state.motors_mask:\n print(\"Motors problem\")\n if uav.state.com_lost_mask:\n print(\"Communication problem\")\n if uav.state.software_fault:\n print(\"Software fault detected\")\n if uav.state.magneto_needs_calib:\n print(\"Magneto calibration needed\")\n if uav.state.angles_out_of_range:\n print(\"angles_out_of_range\")\n if uav.state.wind_mask:\n print(\"Too much wind\")\n if uav.state.ultrasound_mask:\n print(\"Ultrasonic sensor deaf\")\n if uav.state.cutout_mask:\n print(\"Cutout system detected\")\n\n while uav.state.emergency_mask:\n print(\"Emergency\")\n uav.send(at.REF(0b0100000000))\n time.sleep(1)\n\n # Create windows and sliders\n cv2.namedWindow(\"Image\", cv2.WINDOW_AUTOSIZE)\n\n # cv2.namedWindow('slider', cv2.WINDOW_AUTOSIZE)\n # cv2.moveWindow('slider', 640, 0)\n # cv2.resizeWindow('slider', 560, 400)\n # cv2.createTrackbar('B', 'slider', 0, 255, nothing)\n # cv2.createTrackbar('G', 'slider', 0, 255, nothing)\n # cv2.createTrackbar('R', 'slider', 0, 255, nothing)\n #\n # cv2.createTrackbar('B1', 'slider', 0, 255, nothing)\n # cv2.createTrackbar('G1', 'slider', 0, 255, nothing)\n # cv2.createTrackbar('R1', 'slider', 0, 255, nothing)\n #\n # cv2.createTrackbar('kernel', 'slider', 1, 20, nothing)\n # cv2.setTrackbarPos('kernel', 'slider', 2)\n\n return uav\n\n\ndef filter_image(img, lower_mask, upper_mask):\n # For calibrating for different backgrounds\n # set sliders to start values\n\n # cv2.setTrackbarPos('B', 'slider', lower_mask[0])\n # cv2.setTrackbarPos('G', 'slider', lower_mask[1])\n # cv2.setTrackbarPos('R', 'slider', lower_mask[2])\n # cv2.setTrackbarPos('B1', 'slider', upper_mask[0])\n # cv2.setTrackbarPos('G1', 'slider', upper_mask[1])\n # cv2.setTrackbarPos('R1', 'slider', upper_mask[2])\n\n # # wait a bit to update\n # cv2.waitKey(5)\n\n # # Read slider positions\n # b = cv2.getTrackbarPos('B', 'slider')\n # g = cv2.getTrackbarPos('G', 'slider')\n # r = cv2.getTrackbarPos('R', 'slider')\n # b1 = cv2.getTrackbarPos('B1', 'slider')\n # g1 = cv2.getTrackbarPos('G1', 'slider')\n # r1 = cv2.getTrackbarPos('R1', 'slider')\n kernelsize = 2\n # kernelsize = cv2.getTrackbarPos('kernel', 'slider')\n kernel = np.ones((kernelsize, kernelsize), np.uint8)\n #\n # # Build mask array from sliders\n # lower_unit = np.array([b, g, r])\n # upper_unit = np.array([b1, g1, r1])\n\n lower_unit = lower_mask\n upper_unit = upper_mask\n # Convert image to HSV\n 
hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n\n    # Filter colors\n    mask = cv2.inRange(hsv, lower_unit, upper_unit)\n    res = cv2.bitwise_and(img, img, mask=mask)\n    # cv2.imshow(\"res\", res)\n\n    # Convert to grayscale\n    gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)\n\n    # Make binary image\n    ret, thres = cv2.threshold(gray, 20, 255, 0)\n\n    # Close some holes\n    thres = cv2.morphologyEx(thres, cv2.MORPH_CLOSE, kernel)\n\n    # Return the binary image (the slider handling above is commented out)\n    return thres\n\n\ndef takeoff(drone):\n    print(\"Take-off..\")\n    while not drone.state.fly_mask:\n        drone.takeoff()\n\n    print(\"Hovering\")\n    timeout = time.time() + 6\n    while True:\n        drone.hover()\n        if time.time() > timeout:\n            break\n\n    print(\"Going up\")\n    altitude = drone.navdata.demo.altitude\n    while altitude < 1800:\n        drone.move(up=0.2)\n        altitude = drone.navdata.demo.altitude\n    drone.move(up=0)\n\n    print(\"Hovering\")\n    timeout = time.time() + 3\n    while True:\n        drone.hover()\n\n        if time.time() > timeout:\n            break\n\n\nclass MoveData:\n    def __init__(self, marker, dir_x, speed_x, dir_y, speed_y):\n        self.speed_y = speed_y\n        self.dir_y = dir_y\n        self.speed_x = speed_x\n        self.dir_x = dir_x\n        self.marker = marker\n\n\nprint(\"\\tStarting Program\")\ncam = cv2.VideoCapture('tcp://192.168.1.1:5555')\nprint(\"\\tVideoCapture ready\")\ndrone = init()\n\n# Data for debugging\nlower_mask = np.array([0, 4, 148])\nupper_mask = np.array([255, 255, 255])\ni = 1\nret = True\n\nnextMarker = 1  # first marker\nmaxMarkers = 4  # number of markers +1\nfirstMarker = True\nspeed = 0.1  # speed of drone\n\nlookForNextMarker = False\nMarkerFound = False\n\n# Create things for thread that moves the drone\nmoveData = MoveData(False, 0, speed, 0, speed)\nmovethread = threading.Thread(target=move.droneMove, args=(moveData, drone))\n\n# takeoff(drone)\n\nwhile True:\n    # img = cv2.imread(\"drone/img\" + str(i) + \".jpg\")  # for testing with images\n    tijd = time.time()\n    ret, img = cam.read()  # Get picture from video feed\n    if ret:  # If a frame was grabbed\n\n        # For setting color filtering settings, useful for different backgrounds\n        # thres, b, g, r, b1, g1, r1 = filter_image(img, lower_mask, upper_mask)\n        # lower_mask = [b, g, r]\n        # upper_mask = [b1, g1, r1]\n        thres = filter_image(img, lower_mask, upper_mask)\n        hist = cv2.calcHist([thres], [0], None, [256], [0, 256])\n\n        cv2.imshow(\"thres\", thres)\n        print(\"hist\", hist[255])\n        if hist[255] < 18200:\n            try:\n                im2, contours, hierarchy = cv2.findContours(thres, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n                hierarchy = hierarchy[0]\n                # print(\"hierarchy: \", hierarchy)\n\n            except TypeError:\n                # When findContours doesn't find anything\n                print(\" Well, that was another little error\")\n\n            else:  # If contours found\n                currentMarker = 0\n                # area = np.array([[180, 0], [180, 360], [540, 360], [540, 0]])\n                # cv2.drawContours(img, [area], 0, (255, 0, 0), 2)\n\n                for component in zip(contours, hierarchy):\n                    print(\"contour loop\")\n                    currentContour = component[0]\n                    currentHierarchy = component[1]\n\n                    if 500 < cv2.contourArea(currentContour) < 50000:\n                        print(\"area loop\")\n                        # print(\"currentHierarchy: \", currentHierarchy)\n                        if currentHierarchy[2] >= 0 and currentHierarchy[3] >= 0:  # if contour has a child and a parent\n                            # Draw box around contours\n                            # rect = cv2.minAreaRect(currentContour)\n                            # box = cv2.boxPoints(rect)\n                            # box = np.int0(box)\n\n                            # currentContour should be the black square; check that its parent doesn't have a parent\n                            # and its child doesn't have a child\n                            if hierarchy[currentHierarchy[2]][2] < 0 and hierarchy[currentHierarchy[3]][3] < 0:\n                                print(\"found contour with child and parent\")\n                                # Probably found a marker, yeey!\n                                MarkerContourOutside = contours[currentHierarchy[3]]\n                                MarkerContourInside = currentContour\n                                # print(\"Contour area ratio: \",\n                                #       cv2.contourArea(MarkerContourInside) / cv2.contourArea(MarkerContourOutside))\n\n                                # Draw box around contours\n                                # rect2 = cv2.minAreaRect(MarkerContourOutside)\n                                # box2 = cv2.boxPoints(rect2)\n                                # box2 = np.int0(box2)\n\n                                # Found and printed marker contours above. Now check for circles in it.\n                                circleContour = contours[currentHierarchy[2]]\n                                circleHierarchy = hierarchy[currentHierarchy[2]]\n                                currentMarker = 0\n                                breakNext = False\n\n                                if circleHierarchy[0] == -1:\n                                    breakNext = True\n\n                                while True:\n                                    print(\"counting circles\")\n                                    print(circleHierarchy)\n                                    # rect3 = cv2.minAreaRect(circleContour)\n                                    # box3 = cv2.boxPoints(rect3)\n                                    # box3 = np.int0(box3)\n                                    # cv2.drawContours(img, [box3], 0, (255, 0, 0), 2)\n                                    currentMarker += 1\n                                    circleHierarchy = hierarchy[circleHierarchy[0]]\n                                    circleContour = contours[circleHierarchy[0]]\n\n                                    if breakNext:\n                                        break\n\n                                    if circleHierarchy[0] == -1:\n                                        breakNext = True\n\n                                # If the marker found is the marker we're looking for calculate its distance to the center\n                                # of the image\n                                if currentMarker == nextMarker:\n                                    lookForNextMarker = False\n                                    moveData.marker = True\n                                    firstMarker = False\n                                    moments = cv2.moments(MarkerContourOutside)\n\n                                    cx = int(moments['m10'] / moments['m00'])\n                                    cy = int(moments['m01'] / moments['m00'])\n\n                                    dx = cx - 320\n                                    dy = cy - 180\n                                    distanceToCenter = np.sqrt(dx * dx + dy * dy)\n                                    print(\"calculated line to center\")\n                                    cv2.line(img, (cx, cy), (320, 180), (0, 255, 0), thickness=4)\n                                    # print(\"D: \", distanceToCenter)\n                                    if distanceToCenter < 40:  # If close to center, we are above the marker!\n\n                                        nextMarker = currentMarker + 1\n                                        print(\"TAKE PICTURE OF WALL\")\n                                        # move.takePicture(drone, currentMarker, 1, cam)\n                                        timeout = time.time() + 3\n                                        drone.send(at.CONFIG(\"video:video_channel\", 0))\n                                        while time.time() < timeout:\n                                            drone.hover()\n\n                                        ret, img = cam.read()\n                                        cv2.imshow(\"Image\", img)\n\n                                        ret, muur = cam.read()\n\n                                        if ret:\n                                            string = str(time.ctime()) + \"_\" + str(currentMarker) + \"_1\" + \".jpg\"\n                                            # str.replace returns a new string, so the result must be assigned\n                                            string = string.replace(\" \", \"_\")\n                                            string = string.replace(\":\", \"_\")\n                                            cv2.imwrite(string, muur)\n                                            cv2.imwrite(\"img\" + str(currentMarker) + \".jpg\", muur)\n                                            cv2.imshow(string, muur)\n                                            cv2.waitKey(1)\n                                            print(\"saved image\")\n                                            time.sleep(0.1)\n\n                                        drone.send(at.CONFIG(\"video:video_channel\", 1))\n                                        timeout = time.time() + 3\n                                        while time.time() < timeout:\n                                            drone.hover()\n                                        lookForNextMarker = True\n                                        if nextMarker == maxMarkers:\n                                            nextMarker = 1\n                                            print(\"Landing\")\n                                            while drone.state.fly_mask:\n                                                drone.land()\n                                            exit()\n\n                                    if dx > 0:\n                                        print(\"move right\")\n                                        # move right\n                                        cv2.putText(img, \"Move: Right\", (10, 80), cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 255))\n                                        moveData.dir_y = 0\n\n                                    else:\n                                        print(\"move left\")\n                                        # move left\n                                        cv2.putText(img, \"Move: Left\", (10, 80), cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 255))\n                                        moveData.dir_y = 1\n                                    if dy > 0:\n                                        print(\"move back\")\n\n                                        # move back\n                                        cv2.putText(img, \"Move: back\", (10, 100), cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 255))\n                                        moveData.dir_x = 0\n                                    else:\n                                        print(\"move forward\")\n\n                                        # move forward\n                                        cv2.putText(img, \"Move: forward\", (10, 100), cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 255))\n                                        moveData.dir_x = 1\n\n                                # Print more!\n\n                                cv2.putText(img, str(currentMarker), (10, 50), cv2.FONT_HERSHEY_PLAIN, 3, (0, 255, 255))\n                                print(\"marker = \", currentMarker)\n\n                                # 
cv2.drawContours(img, [box2], 0, (0, 255, 255), 2)\n # cv2.drawContours(img, [box], 0, (255, 255, 255), 2)\n else:\n # cv2.drawContours(img, [box2], 0, (0, 0, 0), 2)\n # cv2.drawContours(img, [box], 0, (0, 0, 0), 2)\n pass\n\n cv2.imshow(\"Image\", img)\n cv2.waitKey(1)\n tijd2 = time.time()\n if not movethread.is_alive() and moveData.marker:\n print(\"start move thread marker = True\")\n movethread = threading.Thread(target=move.droneMove, args=(moveData, drone))\n movethread.start()\n moveData.marker = False\n\n if not movethread.is_alive() and not moveData.marker and not firstMarker:\n print(\"start move thread marker = false\")\n movethread = threading.Thread(target=move.droneMove, args=(moveData, drone))\n movethread.start()\n # time.sleep(3)\n # print(\"Alles: \", time.time() - tijd)\n # print(\"Threads: \",time.time() - tijd2)\n","sub_path":"Nextcloud/1. School/UAV/video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":16129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
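The marker test in the record above hinges on cv2.findContours' RETR_TREE hierarchy, whose rows read [next, previous, first_child, parent]. A synthetic sketch with nested squares (OpenCV 4.x returns two values from findContours while 3.x returned three, which is one reason the record wraps the call in try/except):

import cv2
import numpy as np

img = np.zeros((200, 200), np.uint8)
cv2.rectangle(img, (20, 20), (180, 180), 255, -1)   # white outer square
cv2.rectangle(img, (60, 60), (140, 140), 0, -1)     # black square inside it
cv2.rectangle(img, (90, 90), (110, 110), 255, -1)   # white blob inside the black square

contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for idx, (nxt, prev, child, parent) in enumerate(hierarchy[0]):
    print(f"contour {idx}: first_child={child} parent={parent}")
# the middle contour has both a parent and a child, like the record's marker test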
+{"seq_id":"593988326","text":"@pytest.mark.parametrize('dtype', [None, object])\ndef test_raise_when_saving_timezones(self, engine, ext, dtype, tz_aware_fixture):\n tz = tz_aware_fixture\n data = pd.Timestamp('2019', tz=tz)\n df = DataFrame([data], dtype=dtype)\n with pytest.raises(ValueError, match='Excel does not support'):\n df.to_excel(self.path)\n data = data.to_pydatetime()\n df = DataFrame([data], dtype=dtype)\n with pytest.raises(ValueError, match='Excel does not support'):\n df.to_excel(self.path)","sub_path":"Data Set/bug-fixing-4/06a6b496a4608bdcc54c8e0ad85197437257d9dc--bug.py","file_name":"06a6b496a4608bdcc54c8e0ad85197437257d9dc--bug.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"428718239","text":"import pandas as pd\nimport numpy as np\nfrom dask.distributed import Client\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\nimport time\nimport sys\nsys.path.append('/home/sandeep/Desktop/BankBuddy/Reco-usecases/new_reco/preprocessing_pipeline/')\nfrom DataPreprocess import *\n\nstart_time = time.time()\n\nloan_file = './data/unprocessed/loan_up_sell.csv'\n\nloan_obj = DataPreprocess(loan_file)\nloan_df = loan_obj.get_df()\n\n\"\"\"Setup Dask Client\"\"\"\nworkers = 1\nthr_per_worker = 4\nprocess = False\nmemory = '2GB'\nclient = Client(n_workers=workers, threads_per_worker=thr_per_worker, processes=process, memory_limit=memory)\n\n\"\"\"Format Column Names\"\"\"\nloan_df.columns = loan_obj.format_column_name(loan_df)\n\n\"\"\"Check dtypes\"\"\"\nloan_df_dtypes = loan_obj.check_dtype(loan_df)\nprint(loan_df_dtypes)\n\n\"\"\" Drop gender column\"\"\"\nloan_df = loan_obj.drop_column(loan_df,\"gender\")\n\n\"\"\"Get list of obj/non-obj columns\"\"\"\nloan_obj_cols = loan_obj.get_obj_col_list(loan_df)\nloan_nonobj_cols = loan_obj.get_nonobj_col_list(loan_df)\n\n\"\"\"Convert string (object datatypes) to lowercase\"\"\"\nloan_df = loan_obj.convert_data_to_lowercase(loan_df, loan_obj_cols)\n\n\"\"\"Get df with number of unique values column-wise\"\"\"\nloan_unique_col_values = loan_obj.get_unique_col_values(loan_df)\nprint(loan_unique_col_values)\n\"\"\"unnecessary_cols stores columns with number of unique values either equal to 1 or len(df)\"\"\"\nloan_unnecessary_cols = loan_unique_col_values[loan_unique_col_values['num_unique_values'].isin([1, len(loan_df)])]\nprint(\"columns to be dropped - \", loan_unnecessary_cols.column.unique())\n\ntarget_df = pd.DataFrame()\ntarget_df['loan_availed2'] = loan_df['loan_availed2']\nloan_df.drop('loan_availed2', axis=1, inplace=True)\n\n\"\"\"Update list of obj/non-obj columns\"\"\"\nloan_obj_cols = loan_obj.get_obj_col_list(loan_df)\nloan_nonobj_cols = loan_obj.get_nonobj_col_list(loan_df)\n\n\"\"\"CALCULATING DERIVED DATA\"\"\"\n\n\"\"\"Age from DoB\"\"\"\nloan_df = loan_obj.get_age_col(loan_df, 'dob', '/', 1, 0, 2)\nprint(\"age added\")\n\n\"\"\"Customer Since\"\"\"\nloan_df = loan_obj.get_customer_since(loan_df, 'customer_to_bank', '/', 1, 0, 2)\n\n\"\"\"Customer group - new/old\"\"\"\nloan_df = loan_obj.group_customer(loan_df, 'customer_since_months', 'customer_rel_dur_segment')\nprint(\"grouped\")\n\n\"\"\"\"Zip code retrieval and distance mapping\"\"\"\ndict_add={}\n\naddress = list(loan_df['address'].unique())\n\nfor add in address:\n dict_add[add]=loan_obj.zipcode_distance_retrieval(add,\"IN\")\n\n\nloan_df = loan_obj.address_distance(loan_df,dict_add,\"address\")\n\n\n\n\"\"\"Since we have added some new columns - we have to update our cols_list\"\"\"\nloan_obj_cols = loan_obj.get_obj_col_list(loan_df)\nloan_nonobj_cols = loan_obj.get_nonobj_col_list(loan_df)\n\nloan_temp_df = loan_df.copy()\nloan_temp_df.to_csv('./data/unprocessed/consolidated_for_comparison.csv', index=False)\n\nprint(loan_df.head())\n\n\"\"\"Handling Missing Data\"\"\"\nloan_missing_df = loan_obj.get_missing_df(loan_df)\nloan_missing_df = loan_missing_df[loan_missing_df.percent_missing > 0]\nprint(loan_missing_df)\n\n\"\"\"Handling Categorical Variables\"\"\"\nloan_cat_col_uniques_dict = loan_obj.get_cat_cols_unique_val_dict(loan_df, loan_obj_cols)\nprint(loan_cat_col_uniques_dict)\n\nloan_binary_cols_list = loan_cat_col_uniques_dict.get(2).split(\",\")\nloan_onehot_col_list = loan_cat_col_uniques_dict[4].split(\",\") + 
loan_cat_col_uniques_dict[6].split(\",\")\n\nloan_df = loan_obj.convert_cat_cols_to_binary(loan_df, loan_binary_cols_list)\nloan_df = loan_obj.convert_cat_cols_to_onehot(loan_df, loan_onehot_col_list)\n# # customer_df = customer_obj.group_less_occurring_cat_vars(customer_df, customer_obj_cols)\nprint(loan_df.head())\nprint(\"Categorical data handling complete for Customer data\")\n\n\"\"\"OUTLIER DETECTION\"\"\"\nloan_outlier_df = loan_obj.outlier_detection(loan_df[loan_nonobj_cols])\nloan_outlier_df = loan_obj.outlier_details(loan_outlier_df)\nprint(\"Outliers\")\nprint(loan_outlier_df)\n\n\"\"\"\" log scaling \"\"\"\nlist_cols_scaled = [\"applicant_income\",\"coapplicant_income\",\"loan_amt\"]\nloan_df = loan_obj.log_scaling(loan_df,list_cols_scaled)\n\n\n\"\"\"CORRELATION ANALYSIS\"\"\"\n\n\"\"\"Top Absolute Correlation\"\"\"\nprint(\"Top Absolute Correlation\")\n#print(loan_obj.get_top_abs_correlations(loan_df.iloc[:, 1:], 10))\n\n\"\"\"Highly correlated columns -- exceeding 0.75\"\"\"\nprint(\"Suggested Highly Correlated Columns to be dropped\")\nprint(loan_obj.drop_highly_corr_var(loan_df, 0.75))\n\nloan_df['loan_availed2'] = target_df['loan_availed2']\nprint(loan_df.head())\nloan_df.to_csv(\"./data/processed/loan_upsell_processed.csv\", index=False)\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n","sub_path":"loan/upsell/loan_upsell_preprocess.py","file_name":"loan_upsell_preprocess.py","file_ext":"py","file_size_in_byte":4541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
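The pipeline above relies on project-internal helpers for log scaling and categorical handling; the underlying transforms are one-liners in bare pandas/numpy. A sketch with placeholder column values:

import numpy as np
import pandas as pd

df = pd.DataFrame({
    "applicant_income": [2500, 4000, 120000],
    "education": ["graduate", "not graduate", "graduate"],
})

# log1p handles zeros gracefully and compresses the long right tail
df["applicant_income"] = np.log1p(df["applicant_income"])

# one-hot encode the categorical column
df = pd.get_dummies(df, columns=["education"])
print(df)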
+{"seq_id":"512859860","text":"from framework.interface_drivers.http.HttpLib import HttpLib\nfrom framework.support.Log import log_info\nfrom test_project.api_call_builders.SettingsFiltersApi import SettingsFiltersApi\nfrom test_project.configurations.status_codes import status_code_200, status_code_204\nfrom test_project.models.SettingsFiltersModel import SettingsFiltersModel\n\n\ndef create_filters_model(criteria_label):\n \"\"\"\n Create random model filter\n :param criteria_label: Created label with method label.create\n :return: model\n \"\"\"\n model = SettingsFiltersModel().get_randomly_model(criteria_label)\n log_info(\"Create random model settingsFilters:\\nModel:\\n{model}\".format(model=model))\n return model\n\n\ndef create_filters(user_id, model_filters):\n \"\"\"\n Creates a filter filter\n :param user_id: User's email address. The special value \"me\" can be used to indicate the authenticated user.\n :param model_filters:\n :type: SettingsFiltersModel\n :returns: model \n \"\"\"\n response, model = SettingsFiltersApi().filters_create(user_id, model_filters)\n response_status_code = HttpLib.get_response_status_code(response)\n assert response_status_code == status_code_200, \\\n \"Create filter failed: Status code isn't '200 OK'.\" \\\n \"\\nStatus code = {status_code}\".format(status_code=response_status_code)\n return model\n\n\ndef get_filters(user_id, filter_id):\n \"\"\"\n Gets a filter step\n :param user_id: User's email address. The special value \"me\" can be used to indicate the authenticated user.\n :param filter_id: The server assigned ID of the filter\n :return: model \n \"\"\"\n response, model = SettingsFiltersApi().filters_get(user_id, filter_id)\n response_status_code = HttpLib.get_response_status_code(response)\n assert response_status_code == status_code_200, \\\n \"Get filter failed: Status code isn't '200 OK'.\" \\\n \"\\nStatus code = {status_code}\".format(status_code=response_status_code)\n return model\n\n\ndef list_filters(user_id):\n \"\"\"\n Lists the message filters of a Gmail user step.\n :param user_id: User's email address. The special value \"me\" can be used to indicate the authenticated user.\n :return: list\n \"\"\"\n response, model_list = SettingsFiltersApi().filters_list(user_id)\n response_status_code = HttpLib.get_response_status_code(response)\n assert response_status_code == status_code_200, \\\n \"List filter failed: Status code isn't '200 OK'.\" \\\n \"\\nStatus code = {status_code}\".format(status_code=response_status_code)\n return model_list\n\n\ndef delete_filters(user_id, filter_id):\n \"\"\"\n Deletes a filter step\n :param user_id: User's email address. The special value \"me\" can be used to indicate the authenticated user.\n :param filter_id: The server assigned ID of the filter\n :return: response\n \"\"\"\n response = SettingsFiltersApi().filters_delete(user_id, filter_id)\n response_status_code = HttpLib.get_response_status_code(response)\n assert response_status_code == status_code_204, \\\n \"Delete filter failed: Status code isn't '204 OK'.\" \\\n \"\\nStatus code = {status_code}\".format(status_code=response_status_code)\n\n\ndef check_model(expected_model, actual_model):\n \"\"\"\n Compare two models.\n :param expected_model\n :type: SettingsFiltersModel\n :param actual_model\n :type: SettingsFiltersModel\n \"\"\"\n assert (expected_model == actual_model), \"Not Compare model: Expected model:\\n {0}\\nActual model:\\n {1}\". 
\\\n format(expected_model, actual_model)\n\n\ndef check_model_is_the_list_models(model_list, insert_model):\n \"\"\"\n Checking contains model in the list models\n :param model_list:\n :param insert_model:\n \"\"\"\n for model in model_list:\n if model == insert_model:\n return\n assert False, \"Not Contains model in the list:\\nModel_list:\\n{model_list}\\nInsert model:\\n{insert_model}\" \\\n .format(model_list='\\n'.join(str(item.__dict__) for item in model_list),\n insert_model=str(insert_model.__dict__))\n\n\ndef check_model_is_not_the_list_models(model_list, insert_model):\n \"\"\"\n Checking contains model in the list models\n :param model_list:\n :param insert_model:\n \"\"\"\n for model in model_list:\n if model == insert_model:\n assert False, \"Contains model in the list:\\nModel_list:\\n{model_list}\\nInsert model:\\n{insert_model}\" \\\n .format(model_list='\\n'.join(str(item.__dict__) for item in model_list),\n insert_model=str(insert_model.__dict__))\n","sub_path":"test_project/steps/SettingsFiltersSteps.py","file_name":"SettingsFiltersSteps.py","file_ext":"py","file_size_in_byte":4652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"214977264","text":"import numpy as np\r\nimport torch\r\nimport nibabel as nib\r\n\r\nfrom torchio.data.image import Image\r\nimport torchio\r\n\r\nclass IXI_H5DSImage(Image):\r\n def __init__(self, h5DS=None, lazypatch=True, imtype=torchio.INTENSITY, **kwargs):\r\n kwargs['path'] = ''\r\n kwargs['type'] = imtype\r\n super().__init__(**kwargs)\r\n self.h5DS = h5DS\r\n self.lazypatch = lazypatch\r\n\r\n if not self.lazypatch:\r\n self.load()\r\n\r\n def load(self) -> None:\r\n if self._loaded:\r\n return\r\n if self.lazypatch:\r\n tensor, affine = self.h5DS, np.eye(4)\r\n else:\r\n tensor, affine = self.read_and_check_h5(self.h5DS)\r\n self[torchio.DATA] = tensor\r\n self[torchio.AFFINE] = affine\r\n self._loaded = True\r\n\r\n @property\r\n def spatial_shape(self):\r\n if self.lazypatch:\r\n return self.shape\r\n else:\r\n return self.shape[1:]\r\n\r\n def crop(self, index_ini, index_fin):\r\n new_origin = nib.affines.apply_affine(self.affine, index_ini)\r\n new_affine = self.affine.copy()\r\n new_affine[:3, 3] = new_origin\r\n i0, j0, k0 = index_ini\r\n i1, j1, k1 = index_fin\r\n if len(self.data.shape) == 4:\r\n patch = self.data[:, i0:i1, j0:j1, k0:k1]\r\n else:\r\n patch = np.expand_dims(self.data[i0:i1, j0:j1, k0:k1], 0)\r\n if not isinstance(self.data, torch.Tensor):\r\n patch = torch.from_numpy(patch)\r\n kwargs = dict(\r\n tensor=patch,\r\n affine=new_affine,\r\n type=self.type,\r\n path=self.path,\r\n h5DS=self.h5DS\r\n )\r\n for key, value in self.items():\r\n if key in torchio.data.image.PROTECTED_KEYS: continue\r\n kwargs[key] = value \r\n return self.__class__(**kwargs)\r\n\r\n def read_and_check_h5(self, h5DS):\r\n tensor, affine = torch.from_numpy(h5DS[()]).unsqueeze(0), np.eye(4)\r\n tensor = super().parse_tensor_shape(tensor)\r\n if self.channels_last:\r\n tensor = tensor.permute(3, 0, 1, 2)\r\n if self.check_nans and torch.isnan(tensor).any():\r\n warnings.warn(f'NaNs found in file \"{path}\"')\r\n return tensor, affine","sub_path":"datasets/ixi_torchiowrap.py","file_name":"ixi_torchiowrap.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"145610833","text":"from django.shortcuts import render, get_object_or_404\n\nfrom ticketsub.models import Ticket, TicketForm\n\ndef submit_ticket(request, *args):\n if request.method == 'POST':\n form = TicketForm(request.POST)\n if form.is_valid():\n form.save()\n return render(request, 'success.html')\n\n else:\n if args:\n ticket = get_object_or_404(Ticket, \n pk=args[0]\n )\n form = TicketForm(instance=ticket)\n else:\n form = TicketForm()\n\n return render(request, 'form.html', {\n 'form' : form,\n })\n\ndef ticket_list(request):\n t = Ticket.objects.order_by('id')\n return render(request, 'list.html', {\n 'ticketlist': t,\n })\n","sub_path":"utk_prog_team_2015_04_09/djtest/ticketsub/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"501406655","text":"import logging\nimport random\nfrom struct import pack\nfrom zlib import crc32\n\nfrom pox.core import core\nfrom pox.lib.util import dpidToStr\nimport pox.openflow.libopenflow_01 as of\nfrom pox.lib.revent import EventMixin\nfrom pox.lib.packet.ipv4 import ipv4\nfrom pox.lib.packet.udp import udp\nfrom pox.lib.packet.tcp import tcp\nfrom pox.lib.addresses import IPAddr, EthAddr\nfrom pox.lib.packet.ethernet import ethernet, ETHER_BROADCAST\nfrom pox.lib.packet.arp import arp\nfrom pox.lib.util import str_to_bool, dpid_to_str, str_to_dpid\nfrom ripllib.mn import topos\n\nfrom util import buildTopo, getRouting\nimport time\nimport random\n\nlog = core.getLogger(\"HederaController\")\n#log.setLevel(logging.WARNING)\n\n# Number of bytes to send for packet_ins\nMISS_SEND_LEN = 2000\n\nIDLE_TIMEOUT = 10\nCAPACITY = 1\n\nFLOW_IDLE_TIMEOUT = 10\nFLOW_MEMORY_TIMEOUT = 60 * 5\n\n# Borrowed from pox/forwarding/l2_multi\nclass Switch(object):\n def __init__(self):\n self.connection = None\n self.ports = None\n self.dpid = None\n self._listeners = None\n\n def __repr__(self):\n return dpidToStr(self.dpid)\n\n def disconnect(self):\n if self.connection is not None:\n log.debug(\"Disconnect %s\" % (self.connection,))\n self.connection.removeListeners(self._listeners)\n self.connection = None\n self._listeners = None\n\n def connect(self, connection):\n if self.dpid is None:\n self.dpid = connection.dpid\n assert self.dpid == connection.dpid\n if self.ports is None:\n self.ports = connection.features.ports\n self.disconnect()\n log.debug(\"Connect %s\" % (connection,))\n self.connection = connection\n self._listeners = connection.addListeners(self)\n\n def send_packet_data(self, outport, data=None):\n msg = of.ofp_packet_out(in_port=of.OFPP_NONE, data=data)\n msg.actions.append(of.ofp_action_output(port=outport))\n self.connection.send(msg)\n\n def send_packet_data2(self, outport, server_src, mac_src, data=None):\n msg = of.ofp_packet_out(in_port=of.OFPP_NONE, data=data)\n msg.actions.append(of.ofp_action_nw_addr.set_src(server_src))\n msg.actions.append(of.ofp_action_dl_addr.set_src(mac_src))\n msg.actions.append(of.ofp_action_output(port=outport))\n self.connection.send(msg)\n\n def send_packet_data3(self, outport, server_dst, mac_dst, data=None):\n msg = of.ofp_packet_out(in_port=of.OFPP_NONE, data=data)\n msg.actions.append(of.ofp_action_nw_addr.set_dst(server_dst))\n msg.actions.append(of.ofp_action_dl_addr.set_dst(mac_dst))\n msg.actions.append(of.ofp_action_output(port=outport))\n self.connection.send(msg)\n\n def send_packet_bufid(self, outport, buffer_id=None):\n msg = of.ofp_packet_out(in_port=of.OFPP_NONE)\n msg.actions.append(of.ofp_action_output(port=outport))\n msg.buffer_id = buffer_id\n self.connection.send(msg)\n\n def install(self, port, match, buf=None, idle_timeout=0, hard_timeout=0,\n priority=of.OFP_DEFAULT_PRIORITY):\n msg = of.ofp_flow_mod()\n msg.match = match\n msg.idle_timeout = idle_timeout\n msg.hard_timeout = hard_timeout\n msg.priority = priority\n msg.actions.append(of.ofp_action_output(port=port))\n msg.buffer_id = buf\n self.connection.send(msg)\n\n def install2(self, port, server_src, mac_src, match, buf=None, idle_timeout=0, hard_timeout=0,\n priority=of.OFP_DEFAULT_PRIORITY):\n msg = of.ofp_flow_mod()\n msg.match = match\n msg.idle_timeout = idle_timeout\n msg.hard_timeout = hard_timeout\n msg.priority = priority\n msg.actions.append(of.ofp_action_nw_addr.set_src(server_src))\n msg.actions.append(of.ofp_action_dl_addr.set_src(mac_src))\n 
msg.actions.append(of.ofp_action_output(port=port))\n msg.buffer_id = buf\n self.connection.send(msg)\n\n def install3(self, port, server_dst, mac_dst, match, buf=None, idle_timeout=0, hard_timeout=0,\n priority=of.OFP_DEFAULT_PRIORITY):\n msg = of.ofp_flow_mod()\n msg.match = match\n msg.idle_timeout = idle_timeout\n msg.hard_timeout = hard_timeout\n msg.priority = priority\n msg.actions.append(of.ofp_action_nw_addr.set_dst(server_dst))\n msg.actions.append(of.ofp_action_dl_addr.set_dst(mac_dst))\n msg.actions.append(of.ofp_action_output(port=port))\n msg.buffer_id = buf\n self.connection.send(msg)\n\n def install_multiple(self, actions, match, buf=None, idle_timeout=0,\n hard_timeout=0, priority=of.OFP_DEFAULT_PRIORITY):\n msg = of.ofp_flow_mod()\n msg.match = match\n msg.idle_timeout = idle_timeout\n msg.hard_timeout = hard_timeout\n msg.priority = priority\n for a in actions:\n msg.actions.append(a)\n msg.buffer_id = buf\n self.connection.send(msg)\n\n def _handle_ConnectionDown(self, event):\n self.disconnect()\n pass\n\n\ndef sep():\n log.info(\"************************************************\")\n\nclass MemoryEntry(object):\n \"\"\"\n Record for flows we are balancing\n Table entries in the switch \"remember\" flows for a period of time, but\n rather than set their expirations to some long value (potentially leading\n to lots of rules for dead connections), we let them expire from the\n switch relatively quickly and remember them here in the controller for\n longer.\n Another tactic would be to increase the timeouts on the switch and use\n the Nicira extension which can match packets with FIN set to remove them\n when the connection closes.\n \"\"\"\n\n def __init__(self, server, first_packet, client_port):\n self.server = server\n self.first_packet = first_packet\n self.client_port = client_port\n self.refresh()\n\n def refresh(self):\n self.timeout = time.time() + FLOW_MEMORY_TIMEOUT\n\n @property\n def is_expired(self):\n return time.time() > self.timeout\n\n @property\n def key1(self):\n ethp = self.first_packet\n ipp = ethp.find('ipv4')\n tcpp = ethp.find('tcp')\n\n return ipp.srcip, ipp.dstip, tcpp.srcport, tcpp.dstport\n\n @property\n def key2(self):\n ethp = self.first_packet\n ipp = ethp.find('ipv4')\n tcpp = ethp.find('tcp')\n\n return self.server, ipp.srcip, tcpp.dstport, tcpp.srcport\n\n @property\n def from_client_to_server(self):\n ethp = self.first_packet\n ipp = ethp.find('ipv4')\n tcpp = ethp.find('tcp')\n\n return ipp.srcip, ipp.dstip, tcpp.srcport, tcpp.dstport\n\n @property\n def from_server_to_client(self):\n ethp = self.first_packet\n ipp = ethp.find('ipv4')\n tcpp = ethp.find('tcp')\n\n return self.server, ipp.srcip, tcpp.dstport, tcpp.srcport\n\nclass HederaController(object):\n\n def __init__(self, t, r, service_ip, servers=[]):\n self.switches = {} # Switches seen: [dpid] -> Switch\n self.t = t # Master Topo object, passed in and never modified.\n self.r = r # Master Routing object, passed in and reused.\n self.macTable = {} # [mac] -> (dpid, port)\n self.macTable2 = {}\n self.paths = {}\n self.flows = {}\n self.link_usage = {}\n self.last_server = 0\n\n self.service_ip = IPAddr(service_ip)\n self.servers = [IPAddr(a) for a in servers]\n self.live_servers = {} # IP -> MAC,port\n self.selected_server = None\n try:\n self.log = log.getChild(dpid_to_str(self.con.dpid))\n except:\n # Be nice to Python 2.6 (ugh)\n self.log = log\n\n self.total_connection = {} # IP -> total connection\n for ip in servers:\n self.total_connection[ip] = 0\n self.memory = {} # 
(srcip,dstip,srcport,dstport) -> MemoryEntry\n\n self.outstanding_probes = {} # IP -> expire_time\n # How quickly do we probe?\n self.probe_cycle_time = 5\n\n # How long do we wait for an ARP reply before we consider a server dead?\n self.arp_timeout = 3\n\n\n # TODO: generalize all_switches_up to a more general state machine.\n self.all_switches_up = False # Sequences event handling.\n core.openflow.addListeners(self, priority=0)\n\n\n def _raw_dpids(self, arr):\n \"Convert a list of name strings (from Topo object) to numbers.\"\n return [self.t.id_gen(name=a).dpid for a in arr]\n\n def _flow_key(self, src_ip, dst_ip):\n return str(src_ip) + \"::\" + str(dst_ip)\n\n def _path_key(self, src_sw_name, dst_sw_name):\n return src_sw_name + \"::\" + dst_sw_name\n\n def _link_key(self, sw1_name, sw2_name):\n return sw1_name + \"::\" + sw2_name\n\n def _do_expire(self):\n \"\"\"\n Expire probes and \"memorized\" flows\n Each of these should only have a limited lifetime.\n \"\"\"\n t = time.time()\n\n # Expire probes\n for ip, expire_at in self.outstanding_probes.items():\n if t > expire_at:\n self.outstanding_probes.pop(ip, None)\n if ip in self.live_servers:\n self.log.warn(\"Server %s down\", ip)\n del self.live_servers[ip]\n\n # Expire flow\n c = len(self.memory)\n self.memory = {k: v for k, v in self.memory.items()\n if not v.is_expired}\n if len(self.memory) != c:\n self.log.debug(\"Expired %i flows\", c - len(self.memory))\n\n def _do_probe(self):\n \"\"\"\n Send an ARP to a server to see if it's still up\n \"\"\"\n self._do_expire()\n\n server = self.servers.pop(0)\n self.servers.append(server)\n\n r = arp()\n r.hwtype = r.HW_TYPE_ETHERNET\n r.prototype = r.PROTO_TYPE_IP\n r.opcode = r.REQUEST\n r.hwdst = ETHER_BROADCAST\n r.protodst = server\n r.hwsrc = self.mac\n r.protosrc = self.service_ip\n e = ethernet(type=ethernet.ARP_TYPE, src=self.mac,\n dst=ETHER_BROADCAST)\n e.set_payload(r)\n self.log.debug(\"ARPing for %s\", server)\n msg = of.ofp_packet_out()\n msg.data = e.pack()\n msg.actions.append(of.ofp_action_output(port=of.OFPP_FLOOD))\n msg.in_port = of.OFPP_NONE\n self.con.send(msg)\n\n self.outstanding_probes[server] = time.time() + self.arp_timeout\n core.callDelayed(self._probe_wait_time, self._do_probe)\n\n @property\n def _probe_wait_time(self):\n \"\"\"\n Time to wait between probes\n \"\"\"\n r = self.probe_cycle_time / float(len(self.servers))\n r = max(.25, r) # Cap it at four per second\n return r\n\n def _ecmp_hash(self, packet):\n \"Return an ECMP-style 5-tuple hash for TCP/IP packets, otherwise 0.\"\n hash_input = [0] * 5\n if isinstance(packet.next, ipv4):\n ip = packet.next\n hash_input[0] = ip.srcip.toUnsigned()\n if ip.dstip == self.service_ip:\n hash_input[1] = self.selected_server.toUnsigned()\n else:\n hash_input[1] = ip.dstip.toUnsigned()\n hash_input[2] = ip.protocol\n if isinstance(ip.next, tcp) or isinstance(ip.next, udp):\n l4 = ip.next\n hash_input[3] = l4.srcport\n hash_input[4] = l4.dstport\n return crc32(pack('LLHHH', *hash_input))\n return 0\n\n\n def _install_reactive_path(self, event, out_dpid, final_out_port, packet):\n \"Install entries on route between two switches.\"\n inport = event.port\n mac, port_s = self.live_servers[self.selected_server]\n ip = packet.next\n in_name = self.t.id_gen(dpid=event.dpid).name_str()\n out_name = self.t.id_gen(dpid=out_dpid).name_str()\n hash_ = self._ecmp_hash(packet)\n route = self.r.get_route(in_name, out_name, hash_, False)\n log.info(\"route: %s\" % route)\n # match = of.ofp_match.from_packet(packet)\n for i, 
node in enumerate(route):\n node_dpid = self.t.id_gen(name=node).dpid\n if i < len(route) - 1:\n next_node = route[i + 1]\n out_port, next_in_port = self.t.port(node, next_node)\n if i == 0:\n in_port = inport\n else:\n prev_node = route[i - 1]\n in_port, next_in_port2 = self.t.port(node, prev_node)\n else:\n prev_node = route[i - 1]\n in_port, next_in_port2 = self.t.port(node, prev_node)\n out_port = final_out_port\n if ip.dstip == self.service_ip:\n log.info(\"path to %s , to %s server\" % (node_dpid, mac))\n match = of.ofp_match.from_packet(packet, in_port)\n if i == len(route) - 1:\n self.switches[out_dpid].install3(out_port, self.selected_server, mac, match,\n idle_timeout=IDLE_TIMEOUT)\n else:\n self.switches[node_dpid].install(out_port, match, idle_timeout=IDLE_TIMEOUT)\n else:\n match = of.ofp_match.from_packet(packet, in_port)\n if i == len(route) - 1:\n self.switches[out_dpid].install2(out_port, self.service_ip, self.mac, match,\n idle_timeout=IDLE_TIMEOUT)\n else:\n self.switches[node_dpid].install(out_port, match, idle_timeout=IDLE_TIMEOUT)\n\n def _eth_to_int(self, eth):\n return sum(([ord(x) * 2 ** ((5 - i) * 8) for i, x in enumerate(eth.raw)]))\n\n def _int_to_eth(self, inteth):\n return EthAddr(\"%012x\" % (inteth,))\n\n def _src_dst_str(self, src_dpid, dst_dpid):\n \"Return a hash based on src and dst dpids.\"\n return crc32(pack('QQ', src_dpid, dst_dpid))\n\n def _flood(self, event):\n packet = event.parsed\n dpid = event.dpid\n in_port = event.port\n # log.info(\"flood PacketIn to: %s\" % packet)\n\n t = self.t\n\n # Broadcast to every output port except the input on the input switch.\n # Hub behavior, baby!\n for sw in self._raw_dpids(t.layer_nodes(t.LAYER_EDGE)):\n # log.info(\"considering sw %s\" % sw)\n ports = []\n sw_name = t.id_gen(dpid=sw).name_str()\n for host in t.down_nodes(sw_name):\n sw_port, host_port = t.port(sw_name, host)\n if sw != dpid or (sw == dpid and in_port != sw_port):\n ports.append(sw_port)\n # Send packet out each non-input host port\n # TODO: send one packet only.\n for port in ports:\n # log.info(\"sending to port %s on switch %s\" % (port, sw))\n # buffer_id = event.ofp.buffer_id\n # if sw == dpid:\n # self.switches[sw].send_packet_bufid(port, event.ofp.buffer_id)\n # else:\n self.switches[sw].send_packet_data(port, event.data)\n # buffer_id = None\n\n def _flood2(self, event, server, mac, dpid, in_port):\n\n # log.info(\"flood PacketIn to: %s\" % packet)\n\n t = self.t\n\n # Broadcast to every output port except the input on the input switch.\n # Hub behavior, baby!\n for sw in self._raw_dpids(t.layer_nodes(t.LAYER_EDGE)):\n # log.info(\"considering sw %s\" % sw)\n ports = []\n sw_name = t.id_gen(dpid=sw).name_str()\n for host in t.down_nodes(sw_name):\n sw_port, host_port = t.port(sw_name, host)\n if sw != dpid or (sw == dpid and in_port != sw_port):\n ports.append(sw_port)\n # Send packet out each non-input host port\n # TODO: send one packet only.\n for port in ports:\n log.info(\"sending to port %s on switch %s\" % (port, sw))\n # buffer_id = event.ofp.buffer_id\n # if sw == dpid:\n # self.switches[sw].send_packet_bufid(port, event.ofp.buffer_id)\n # else:\n self.switches[sw].send_packet_data3(port, server, mac, event.data)\n # buffer_id = None\n\n def _pick_server(self, key, in_port):\n \"\"\"\n Pick a server for a (hopefully) new connection\n \"\"\"\n self.last_server = (self.last_server + 1) % len(self.live_servers)\n return self.live_servers.keys()[self.last_server]\n return random.choice(self.live_servers.keys())\n\n\n def 
_handle_packet_reactive(self, event):\n global server\n packet = event.parsed\n dpid = event.dpid\n # log.info(\"PacketIn: %s\" % packet)\n in_port = event.port\n t = self.t\n\n def drop():\n if event.ofp.buffer_id is not None:\n # Kill the buffer\n msg = of.ofp_packet_out(data=event.ofp)\n self.con.send(msg)\n return None\n #log.info(\"mactable: %s\" % self.macTable)\n # self.macTable[packet.src] = (dpid, in_port)\n tcpp = packet.find('tcp')\n if not tcpp:\n arpp = packet.find('arp')\n if arpp:\n # Handle replies to our server-liveness probes\n if arpp.opcode == arpp.REPLY:\n # log.info(\"packetin arp : %s\" %packet)\n if arpp.protosrc in self.outstanding_probes:\n # A server is (still?) up; cool.\n del self.outstanding_probes[arpp.protosrc]\n if (self.live_servers.get(arpp.protosrc, (None, None))\n == (arpp.hwsrc, in_port)):\n\n pass\n else:\n # Ooh, new server.\n self.live_servers[arpp.protosrc] = arpp.hwsrc, in_port\n self.log.info(\"Server %s port %s up\" % (arpp.hwsrc, in_port))\n\n return\n # Not TCP and not ARP. Don't know what to do with this. Drop it.\n return\n ipp = packet.find('ipv4')\n # Learn MAC address of the sender on every packet-in.\n # log.info(\"reacPacketIn: %s\" % packet)\n self.macTable[packet.src] = (dpid, in_port)\n\n if ipp.srcip in self.servers:\n key = ipp.srcip, ipp.dstip, tcpp.srcport, tcpp.dstport\n entry = self.memory.get(key)\n if entry is None:\n # We either didn't install it, or we forgot about it.\n self.log.debug(\"No client for %s\", key)\n return drop()\n\n # Refresh time timeout and reinstall.\n entry.refresh()\n\n log.info(\"packetin dri server :%s\" % packet)\n if packet.dst in self.macTable2:\n out_dpid, out_port = self.macTable2[packet.dst]\n log.info(\"instal path S: %s %s\" % (out_dpid, out_port))\n self._install_reactive_path(event, out_dpid, out_port, packet)\n\n log.info(\"sending to S entry in mactable: %s %s\" % (out_dpid, out_port))\n self.switches[out_dpid].send_packet_data2(out_port, self.service_ip, self.mac, event.data)\n elif packet.dst in self.macTable:\n out_dpid, out_port = self.macTable[packet.dst]\n log.info(\"instal path S: %s %s\" % (out_dpid, out_port))\n self._install_reactive_path(event, out_dpid, out_port, packet)\n\n log.info(\"sending to S entry in mactable: %s %s\" % (out_dpid, out_port))\n self.switches[out_dpid].send_packet_data2(out_port, self.service_ip, self.mac, event.data)\n\n elif ipp.dstip == self.service_ip:\n log.info(\"packetin dri client :%s\" % packet)\n # Ah, it's for our service IP and needs to be load balanced\n\n # Do we already know this flow?\n key = ipp.srcip, ipp.dstip, tcpp.srcport, tcpp.dstport\n entry = self.memory.get(key)\n if entry is None or entry.server not in self.live_servers:\n # Don't know it (hopefully it's new!)\n if len(self.live_servers) == 0:\n self.log.warn(\"No servers!\")\n return drop()\n # Pick a server for this flow\n server = self._pick_server(key, in_port)\n self.log.debug(\"Directing traffic to %s\", server)\n entry = MemoryEntry(server, packet, in_port)\n self.memory[entry.key1] = entry\n self.memory[entry.key2] = entry\n self.selected_server = server\n\n # Update timestamp\n entry.refresh()\n\n # Set up table entry towards selected server\n mac, port = self.live_servers[entry.server]\n dpid_mac = self._eth_to_int(mac)\n # Insert flow, deliver packet directly to destination.\n if mac in self.macTable:\n out_dpid, out_port = self.macTable[mac]\n log.info(\"sending to entry gff: %s %s\" % (out_dpid, out_port))\n\n self._install_reactive_path(event, out_dpid, out_port, 
packet)\n\n log.info(\"sending to entry in mactable: %s %s\" % (out_dpid, out_port))\n self.switches[out_dpid].send_packet_data3(out_port, server, mac, event.data)\n else:\n self._flood2(event, server, mac, dpid_mac, port)\n else:\n self._flood(event)\n\n # Get host index.\n def dpid_port_to_host_index(self, dpid, port):\n node = self.t.id_gen(dpid=dpid)\n return node.pod * ((self.t.k ** 2) / 4) + node.sw * (self.t.k / 2) + ((port - 2) / 2)\n\n def _handle_PacketIn(self, event):\n # log.info(\"Parsing PacketIn.\")\n\n packet = event.parsed\n if not self.all_switches_up:\n log.info(\"Saw PacketIn before all switches were up - ignoring.\")\n #log.info(\"PacketIn: %s\" % packet)\n return\n else:\n self._handle_packet_reactive(event)\n\n def _get_links_from_path(self, path):\n path_len = len(path)\n for i in range(0, path_len - 1):\n link_key = self._link_key(path[i], path[i + 1])\n reverse_link_key = self._link_key(path[i + 1], path[i])\n # if link_key is not in self.link_usage and reverse_link_key is not in self.link_usage:\n self.link_usage[link_key] = 0\n self.link_usage[reverse_link_key] = 0\n\n def _get_equal_cost_routes(self, src, dst):\n src_host_name = self.t.id_gen(dpid=src).name_str()\n src_sw = self.t.up_nodes(src_host_name)\n assert len(src_sw) == 1\n src_sw_name = src_sw[0]\n dst_host_name = self.t.id_gen(dpid=dst).name_str()\n dst_sw = self.t.up_nodes(dst_host_name)\n assert len(dst_sw) == 1\n dst_sw_name = dst_sw[0]\n all_paths = self.r.get_route(src_sw_name, dst_sw_name, None, True)\n for path in all_paths:\n self._get_links_from_path(path)\n self.paths[self._path_key(src_sw_name, dst_sw_name)] = all_paths\n\n def _get_all_paths(self):\n t = self.t\n # Install L2 src/dst flow for every possible pair of hosts.\n for src in sorted(self._raw_dpids(t.layer_nodes(t.LAYER_HOST))):\n for dst in sorted(self._raw_dpids(t.layer_nodes(t.LAYER_HOST))):\n self._get_equal_cost_routes(src, dst)\n\n def _handle_ConnectionUp(self, event):\n sw = self.switches.get(event.dpid)\n sw_str = dpidToStr(event.dpid)\n self.con = event.connection\n self.mac = self.con.eth_addr\n log.info(\"Saw switch come up: %s\", sw_str)\n name_str = self.t.id_gen(dpid=event.dpid).name_str()\n if name_str not in self.t.switches():\n log.warn(\"Ignoring unknown switch %s\" % sw_str)\n return\n if sw is None:\n log.info(\"Added fresh switch %s\" % sw_str)\n sw = Switch()\n self.switches[event.dpid] = sw\n sw.connect(event.connection)\n else:\n log.info(\"Odd - already saw switch %s come up\" % sw_str)\n sw.connect(event.connection)\n sw.connection.send(of.ofp_set_config(miss_send_len=MISS_SEND_LEN))\n\n if len(self.switches) == len(self.t.switches()):\n log.info(\"Woo! 
All switches up\")\n self.all_switches_up = True\n self._get_all_paths()\n if self.all_switches_up == True:\n self._do_probe()\n\ndef launch(topo, ip, servers):\n \"\"\"\n Launch Hedera Controller\n\n topo is in format toponame,arg1,arg2,...\n \"\"\"\n # Boot up ARP Responder\n from proto.arp_responder import launch as arp_launch\n arp_launch(eat_packets=False, **{str(ip): True})\n import logging\n logging.getLogger(\"proto.arp_responder\").setLevel(logging.WARN)\n\n # Instantiate a topo object from the passed-in file.\n if not topo:\n raise Exception(\"please specify topo and args on cmd line\")\n else:\n t = buildTopo(topo, topos)\n r = getRouting('hashed', t)\n servers = servers.replace(\",\", \" \").split()\n servers = [IPAddr(x) for x in servers]\n ip = IPAddr(ip)\n log.info(\"Load Balancer Ready.\")\n core.registerNew(HederaController, t, r, IPAddr(ip), servers)\n\n log.info(\"Hedera running with topo=%s.\" % topo)\n","sub_path":"jogress2.py","file_name":"jogress2.py","file_ext":"py","file_size_in_byte":25447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"93141226","text":"from django.db.models import Field\nfrom django.db.models.expressions import Col\n\nfrom .constants import CPK_SEP\n\n\nclass CompositeCol(Col):\n def __init__(self, alias, target, output_field=None):\n super().__init__(alias, target, output_field)\n self.children = [Col(alias, key, output_field) for key in target.keys]\n\n def as_sql(self, compiler, connection):\n sqls = []\n for child in self.children:\n sql, _ = child.as_sql(compiler, connection)\n sqls.append(sql)\n return \"(%s)\" % \",\".join(sqls), []\n\n\nclass CompositeKey(Field):\n def __init__(self, keys, primary=False):\n names = tuple((f.name for f in keys))\n join_name = CPK_SEP.join(names)\n db_columns = tuple((f.db_column if f.db_column else f.name for f in keys))\n db_join_column = \"(\" + \",\".join(db_columns) + \")\"\n super().__init__(\n name=join_name, \n primary_key=primary,\n unique=True,\n )\n self.keys = keys\n self.attname = join_name\n self.column = join_name\n self.names = names\n self.model = keys[0].model\n\n def get_col(self, alias, output_field=None):\n return CompositeCol(alias, self, output_field)\n","sub_path":"compositepk-model/cpkmodel/compositekey.py","file_name":"compositekey.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"443017947","text":"from flask import Flask\nfrom flask.templating import render_template\n\napp = Flask(__name__)\n\n\n@app.route('/test')\ndef test():\n data = {\n \"Status\": 200,\n \"Message\": \"Up and Running\"\n }\n return data\n\n@app.route('/')\ndef index():\n return render_template(\"index.html\")\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"Day 56/portfolio-website/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"151033166","text":"import datetime\nimport os\nimport tempfile\n\nfrom django.conf import settings as djangoSettings\nfrom django.http import HttpResponse\nfrom django.template.loader import get_template\n\nfrom integrations.google.analytics.utils.charts.pageviews_users import \\\n pageviews_users\nfrom integrations.google.analytics.utils.charts.world_map import world_map\nfrom integrations.google.analytics.utils.create_csv import create_csv\nfrom integrations.mailchimp.utils.list_campaigns import list_campaigns\nfrom integrations.pdfreactor.utils.helper_function import html_file_2_pdf\nfrom issuer.models import Issuer\n\n\ndef traffic_report(request, issuer_pk):\n \"\"\"Generate an external report.\"\"\"\n\n template_file = 'issuer/reports/traffic.html'\n\n issuer_obj = Issuer.objects.get(pk=issuer_pk)\n\n campaigns = list_campaigns(filter=issuer_obj.internal_identifier)\n\n df = create_csv(issuer_obj)\n\n # Replace all Jinja2 variables with relevant data\n context = {\n 'environment': djangoSettings.ENVIRONMENT_MODE,\n 'style': {\n 'top_color': '#37474F'\n },\n 'content': {\n 'top_header': 'traffic report',\n 'report_date': datetime.datetime.now().strftime('%Y-%m-%d'),\n 'report_title': 'Traffic report for ' + issuer_obj.legal_name,\n 'issuer': {\n 'data': issuer_obj,\n },\n 'campaigns': campaigns,\n 'charts': {\n 'world_map': world_map(df, return_base64=True),\n 'pageviews_users': pageviews_users(df, return_base64=True),\n }\n }\n }\n\n # Return formatted as html\n rendered_html = get_template(template_file).render(context)\n\n f = tempfile.NamedTemporaryFile(mode='w+t',\n suffix='.html')\n f.write(rendered_html)\n f.read()\n\n # Generate a unique file name\n target_file = 'Web traffic report for {}, extracted on {} at {}.pdf'.\\\n format(\n issuer_obj.legal_name,\n datetime.datetime.today().strftime('%Y-%m-%d'),\n datetime.datetime.today().strftime('%H%M'), )\n html_file_2_pdf(f.name, target_file)\n\n from document_export.util import compress_pdf\n # Make pdf file much smaller\n compress_pdf(target_file)\n\n response = HttpResponse(content=open(target_file, 'rb'))\n response['Content-Type'] = 'application/pdf'\n response['Content-Disposition'] = 'inline; filename=\"%s\"' % target_file\n\n os.unlink(target_file)\n\n return response\n","sub_path":"ncr_website/document_export/reports/traffic.py","file_name":"traffic.py","file_ext":"py","file_size_in_byte":2500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"470966055","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright (c) 2014, 2015 Adam.Dybbroe\n\n# Author(s):\n\n# Adam.Dybbroe \n# Panu Lahtinen \n\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n\n\"\"\"Conversion between radiances and brightness temperatures for the IR bands of\nvarious satellite sensors\n\"\"\"\n\nimport numpy as np\nfrom pyspectral.blackbody import blackbody, blackbody_wn\nfrom pyspectral.utils import BANDNAMES\n\nimport logging\nLOG = logging.getLogger(__name__)\n\nWAVE_LENGTH = 'wavelength'\nWAVE_NUMBER = 'wavenumber'\n\nEPSILON = 0.01\nTB_MIN = 150.\nTB_MAX = 360.\n\n# Meteosat SEVIRI regression parameters according to documentation\n# (PDF_EFFECT_RAD_TO_BRIGHTNESS.pdf).\n#\n# Tb = C2 * νc/{α * log[C1*νc**3 / L + 1]} - β/α\n#\n# L = C1 * νc**3 / (exp (C2 νc / [αTb + β]) − 1)\n#\n# C1 = 2 * h * c**2 and C2 = hc/k\n#\n# Units are cm-1 for the channel/band central wavenumber, K for the beta\n# parameter, and the alpha parameter is dimensionless:\n#\nSEVIRI = {'IR3.9': {'Meteosat-8': [2567.330, 0.9956, 3.410],\n 'Meteosat-9': [2568.832, 0.9954, 3.438],\n 'Meteosat-10': [],\n },\n 'WV6.2': {'Meteosat-8': [1598.103, 0.9962, 2.218],\n 'Meteosat-9': [1600.548, 0.9963, 2.185],\n },\n 'WV7.3': {'Meteosat-8': [1362.081, 0.9991, 0.478],\n 'Meteosat-9': [1360.330, 0.9991, 0.470],\n },\n 'IR8.7': {'Meteosat-8': [1149.069, 0.9996, 0.179],\n 'Meteosat-9': [1148.620, 0.9996, 0.179],\n },\n 'IR9.7': {'Meteosat-8': [1034.343, 0.9999, 0.060],\n 'Meteosat-9': [1035.289, 0.9999, 0.056],\n },\n 'IR10.8': {'Meteosat-8': [930.647, 0.9983, 0.625],\n 'Meteosat-9': [931.700, 0.9983, 0.640],\n },\n 'IR12.0': {'Meteosat-8': [839.660, 0.9988, 0.397],\n 'Meteosat-9': [836.445, 0.9988, 0.408],\n },\n 'IR13.4': {'Meteosat-8': [752.387, 0.9981, 0.578],\n 'Meteosat-9': [751.792, 0.9981, 0.561],\n },\n }\n\n\nclass RadTbConverter(object):\n\n \"\"\"A radiance to brightness temperature calculator\n\n It can do the conversion either based on direct use of the band relative\n spectral response function, or on officially (by satellite agencies)\n tabulated standard values using non-linear regression methods.\n Methods:\n 1: Spectral response function\n 2: non-linear approximation using tabulated coefficients \n \"\"\"\n\n def __init__(self, platform_name, instrument, bandname, method=1,\n **options):\n \"\"\"E.g.:\n platform_name = 'Meteosat-9'\n instrument = 'seviri'\n \"\"\"\n self.platform_name = platform_name\n self.instrument = instrument\n self.rsr = None\n self.bandname = BANDNAMES.get(bandname, bandname)\n\n if 'detector' in options:\n self.detector = options['detector']\n else:\n self.detector = 'det-1'\n\n if 'wavespace' in options:\n if options['wavespace'] not in [WAVE_LENGTH, WAVE_NUMBER]:\n raise AttributeError('Wave space not %s or %s!' 
% (WAVE_LENGTH,\n WAVE_NUMBER))\n self.wavespace = options['wavespace']\n else:\n self.wavespace = WAVE_LENGTH\n\n self._wave_unit = ''\n self._wave_si_scale = 1.0\n\n if 'tb_resolution' in options:\n self.tb_resolution = options['tb_resolution']\n else:\n self.tb_resolution = 0.1\n self.tb_scale = 1. / self.tb_resolution\n\n if method == 1:\n self.get_rsr()\n\n def get_rsr(self):\n \"\"\"Get all spectral responses for the sensor\"\"\"\n from pyspectral.utils import convert2wavenumber\n from pyspectral.rsr_reader import RelativeSpectralResponse\n\n sensor = RelativeSpectralResponse(self.platform_name,\n self.instrument)\n LOG.debug(\"Wavenumber? %s\", str(self.wavespace))\n if self.wavespace == WAVE_NUMBER:\n LOG.debug(\"Converting to wavenumber...\")\n self.rsr, info = convert2wavenumber(sensor.rsr)\n else:\n self.rsr = sensor.rsr\n info = {'unit': sensor.unit, 'si_scale': sensor.si_scale}\n\n self._wave_unit = info['unit']\n self._wave_si_scale = info['si_scale']\n\n def _getsatname(self):\n \"\"\"Get the satellite name used in the rsr-reader, from the platform\n and number\n \"\"\"\n if self.platform_name.startswith(\"Meteosat\"):\n return self.platform_name\n else:\n raise NotImplementedError('Platform %s not yet supported...' %\n str(self.platform_name))\n\n def tb2radiance(self, tb_, bandname, lut=None):\n \"\"\"Get the radiance from the brightness temperature (Tb) given the\n band name. \n \"\"\"\n from scipy import integrate\n\n if self.wavespace == WAVE_NUMBER:\n unit = 'W/m^2 sr^-1 (m^-1)^-1'\n scale = 1.0\n else:\n unit = 'W/m^2 sr^-1 m^-1'\n scale = 1.0\n\n if not bandname and not np.any(lut):\n raise SyntaxError('Either a band name or a lut needs '\n 'to be provided as input to the function call!')\n\n if lut:\n ntb = (tb_ * self.tb_scale).astype('int16')\n start = int(lut['tb'][0] * self.tb_scale)\n retv = {}\n bounds = 0, lut['radiance'].shape[0] - 1\n index = np.clip(ntb - start, bounds[0], bounds[1])\n retv['radiance'] = lut['radiance'][index]\n retv['unit'] = unit\n retv['scale'] = scale\n return retv\n\n if self.wavespace == WAVE_LENGTH:\n wv_ = (self.rsr[bandname][self.detector]['wavelength'] *\n self._wave_si_scale)\n resp = self.rsr[bandname][self.detector]['response']\n planck = blackbody(wv_, tb_) * resp\n elif self.wavespace == WAVE_NUMBER:\n wv_ = (self.rsr[bandname][self.detector]['wavenumber'] *\n self._wave_si_scale)\n resp = self.rsr[bandname][self.detector]['response']\n planck = blackbody_wn(wv_, tb_) * resp\n else:\n raise NotImplementedError('%s representation of '\n 'rsr data not supported!' %\n str(self.wavespace))\n\n radiance = integrate.trapz(planck, wv_) / np.trapz(resp, wv_)\n\n return {'radiance': radiance,\n 'unit': unit,\n 'scale': scale}\n\n def make_tb2rad_lut(self, bandname, filepath):\n \"\"\"Generate a Tb to radiance look-up table\"\"\"\n tb_ = np.arange(TB_MIN, TB_MAX, self.tb_resolution)\n retv = self.tb2radiance(tb_, bandname)\n rad = retv['radiance']\n np.savez(filepath, tb=tb_, radiance=rad.compressed())\n\n def read_tb2rad_lut(self, filepath):\n \"\"\"Read the Tb to radiance look-up table\"\"\"\n retv = np.load(filepath, 'r')\n return retv\n\n def tb2radiance_simple(self, tb_, bandname):\n \"\"\"Get the radiance from the Tb using the simple non-linear regression\n method. 
SI units of course!\n \"\"\"\n # L = C1 * νc**3 / (exp (C2 νc / [αTb + β]) − 1)\n #\n # C1 = 2 * h * c**2 and C2 = hc/k\n #\n from pyspectral.blackbody import (H_PLANCK, K_BOLTZMANN, C_SPEED)\n\n c_1 = 2 * H_PLANCK * C_SPEED ** 2\n c_2 = H_PLANCK * C_SPEED / K_BOLTZMANN\n\n vc_ = SEVIRI[bandname][self.platform_name][0]\n # Multiply by 100 to get SI units!\n vc_ *= 100.0\n alpha = SEVIRI[bandname][self.platform_name][1]\n beta = SEVIRI[bandname][self.platform_name][2]\n\n radiance = c_1 * vc_ ** 3 / \\\n (np.exp(c_2 * vc_ / (alpha * tb_ + beta)) - 1)\n\n unit = 'W/m^2 sr^-1 (m^-1)^-1'\n scale = 1.0\n #unit = 'mW/m^2 sr^-1 (cm^-1)^-1'\n #scale = 10.0\n return {'radiance': radiance,\n 'unit': unit,\n 'scale': scale}\n\n def radiance2tb_simple(self, rad, bandname):\n \"\"\"Get the Tb from the radiance using the simple non-linear regression\n method. \n rad: Radiance in units = 'mW/m^2 sr^-1 (cm^-1)^-1'\n \"\"\"\n #\n # Tb = C2 * νc/{α * log[C1*νc**3 / L + 1]} - β/α\n #\n # C1 = 2 * h * c**2 and C2 = hc/k\n #\n from pyspectral.blackbody import (H_PLANCK, K_BOLTZMANN, C_SPEED)\n\n c_1 = 2 * H_PLANCK * C_SPEED ** 2\n c_2 = H_PLANCK * C_SPEED / K_BOLTZMANN\n\n vc_ = SEVIRI[bandname][self.platform_name][0]\n # Multiply by 100 to get SI units!\n vc_ *= 100.0\n alpha = SEVIRI[bandname][self.platform_name][1]\n beta = SEVIRI[bandname][self.platform_name][2]\n\n tb_ = c_2 * vc_ / \\\n (alpha * np.log(c_1 * vc_ ** 3 / rad + 1)) - beta / alpha\n\n return tb_\n","sub_path":"pyspectral/radiance_tb_conversion.py","file_name":"radiance_tb_conversion.py","file_ext":"py","file_size_in_byte":9748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
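The `tb2radiance_simple`/`radiance2tb_simple` pair above implements the tabulated SEVIRI regression in both directions. A self-contained round-trip check using the Meteosat-9 IR10.8 coefficients from the SEVIRI table in the record (the physical constants are standard CODATA values and may differ in their last digits from pyspectral's):

    import numpy as np

    H_PLANCK = 6.62607015e-34   # J s
    K_BOLTZMANN = 1.380649e-23  # J/K
    C_SPEED = 2.99792458e8      # m/s

    C1 = 2 * H_PLANCK * C_SPEED ** 2
    C2 = H_PLANCK * C_SPEED / K_BOLTZMANN

    # Meteosat-9 IR10.8: central wavenumber (cm^-1), alpha, beta
    vc, alpha, beta = 931.700, 0.9983, 0.640
    vc *= 100.0  # cm^-1 -> m^-1 to stay in SI units

    def tb2rad(tb):
        return C1 * vc ** 3 / (np.exp(C2 * vc / (alpha * tb + beta)) - 1)

    def rad2tb(rad):
        return C2 * vc / (alpha * np.log(C1 * vc ** 3 / rad + 1)) - beta / alpha

    print(rad2tb(tb2rad(280.0)))  # ~280.0, confirming the two formulas invert each other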
+{"seq_id":"577028704","text":"\"\"\"\nSQL Storage for Policies.\n\"\"\"\n\nimport json\nimport logging\n\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy.orm.exc import FlushError\n\nfrom .model import PolicyModel, PolicyActionModel, PolicyResourceModel, PolicySubjectModel\nfrom ..abc import Storage\nfrom ...checker import StringExactChecker, StringFuzzyChecker, RegexChecker, RulesChecker\nfrom ...exceptions import PolicyExistsError, UnknownCheckerType\nfrom ...policy import TYPE_STRING_BASED, TYPE_RULE_BASED\n\nlog = logging.getLogger(__name__)\n\n\nclass SQLStorage(Storage):\n \"\"\"Stores all policies in SQL Database\"\"\"\n\n def __init__(self, scoped_session):\n \"\"\"\n Initialize SQL Storage\n\n :param scoped_session: SQL Alchemy scoped session\n \"\"\"\n self.session = scoped_session\n\n def add(self, policy):\n try:\n policy_model = PolicyModel.from_policy(policy)\n self.session.add(policy_model)\n self.session.commit()\n except IntegrityError:\n self.session.rollback()\n log.error('Error trying to create already existing policy with UID=%s.', policy.uid)\n raise PolicyExistsError(policy.uid)\n # todo - figure out why FlushError is raised instead of IntegrityError on PyPy tests\n except FlushError as e:\n if 'conflicts with persistent instance' in str(e):\n self.session.rollback()\n log.error('Error trying to create already existing policy with UID=%s.', policy.uid)\n raise PolicyExistsError(policy.uid)\n log.info('Added Policy: %s', policy)\n\n def get(self, uid):\n policy_model = self.session.query(PolicyModel).get(uid)\n if not policy_model:\n return None\n return policy_model.to_policy()\n\n def get_all(self, limit, offset):\n self._check_limit_and_offset(limit, offset)\n cur = self.session.query(PolicyModel).slice(offset, offset + limit)\n for policy_model in cur:\n yield policy_model.to_policy()\n\n def find_for_inquiry(self, inquiry, checker=None):\n cur = self._get_filtered_cursor(inquiry, checker)\n for policy_model in cur:\n yield policy_model.to_policy()\n\n def update(self, policy):\n try:\n policy_model = self.session.query(PolicyModel).get(policy.uid)\n if not policy_model:\n return\n policy_model.update(policy)\n self.session.commit()\n except IntegrityError:\n self.session.rollback()\n raise\n log.info('Updated Policy with UID=%s. 
New value is: %s', policy.uid, policy)\n\n def delete(self, uid):\n self.session.query(PolicyModel).filter(PolicyModel.uid == uid).delete()\n log.info('Deleted Policy with UID=%s.', uid)\n\n def _get_filtered_cursor(self, inquiry, checker):\n \"\"\"\n Returns cursor with proper query-filter based on the checker type.\n \"\"\"\n cur = self.session.query(PolicyModel)\n if isinstance(checker, StringFuzzyChecker):\n return cur.filter(\n PolicyModel.type == TYPE_STRING_BASED,\n PolicyModel.subjects.any(PolicySubjectModel.subject.like(\"%{}%\".format(inquiry.subject))),\n PolicyModel.resources.any(PolicyResourceModel.resource.like(\"%{}%\".format(inquiry.resource))),\n PolicyModel.actions.any(PolicyActionModel.action.like(\"%{}%\".format(inquiry.action))))\n elif isinstance(checker, StringExactChecker):\n # A string is converted to a JSON string before inserting\n return cur.filter(\n PolicyModel.type == TYPE_STRING_BASED,\n PolicyModel.subjects.any(PolicySubjectModel.subject == json.dumps(inquiry.subject)),\n PolicyModel.resources.any(PolicyResourceModel.resource == json.dumps(inquiry.resource)),\n PolicyModel.actions.any(PolicyActionModel.action == json.dumps(inquiry.action)))\n elif isinstance(checker, RegexChecker):\n return cur.filter(\n PolicyModel.type == TYPE_STRING_BASED)\n elif isinstance(checker, RulesChecker):\n return cur.filter(\n PolicyModel.type == TYPE_RULE_BASED)\n elif not checker:\n return cur\n else:\n log.error('Provided Checker type is not supported.')\n raise UnknownCheckerType(checker)\n","sub_path":"vakt/storage/sql/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
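The `StringExactChecker` branch above carries an easy-to-miss detail: policy fields are stored JSON-encoded, so exact matching must compare against `json.dumps(value)`, not the raw string. In isolation:

    import json

    stored = json.dumps('books')   # what ends up in the subject/resource/action column
    print(stored)                  # '"books"' (note the embedded quotes)
    print(stored == 'books')              # False: raw comparison silently misses
    print(stored == json.dumps('books'))  # True: why the filter uses json.dumps(inquiry.resource)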
+{"seq_id":"236240122","text":"#!/usr/bin/python\r\nimport os\r\nimport time\r\nimport sys\r\nimport json \r\nimport argparse\r\nimport csv\r\nimport DTIAtlasBuilder_Preprocess\r\nimport DTIAtlasBuilder_AtlasBuilding\r\nimport DTIAtlasBuilder_Utilities\r\nimport shutil\r\nimport threading\r\nimport traceback\r\nfrom copy import deepcopy\r\n\r\n### load configutation json\r\n\r\ndef unique(list1): \r\n unique_list = [] \r\n for x in list1: \r\n # check if exists in unique_list or not \r\n if x not in unique_list: \r\n unique_list.append(x) \r\n return unique_list\r\n\r\ndef isComponent(seq,name):\r\n comp=list(filter(lambda x : x['name']==name,seq))\r\n if len(comp)>0 :\r\n return comp[0] \r\n else:\r\n return False \r\n\r\ndef find_config_by_nodename(build_sequence,nodename):\r\n for cfg in build_sequence:\r\n if cfg[\"m_NodeName\"]==nodename:\r\n return cfg \r\n\r\n\r\ndef generate_deformation_track(seq,node=\"target\"): #input : initialSequence to generate deformation field tracking information (to concatenate them)\r\n component=isComponent(seq,node)\r\n outseq=[]\r\n\r\n if component != False:\r\n for c in component[\"dataset_ids\"]:\r\n tmpseq=generate_deformation_track(seq,c)\r\n for t in tmpseq:\r\n outseq.append(node+\"/\"+t)\r\n else:\r\n outseq.append(node)\r\n return outseq \r\n return outseq\r\n\r\ndef invert_deformation_track(deformation_seq):\r\n seq=deepcopy(deformation_seq)\r\n outseq=[]\r\n for s in seq:\r\n elm=s\r\n strvec=s['id'].split(\"/\")\r\n strvec.reverse()\r\n elm['id']='/'.join(strvec)\r\n #elm['original_dti_id']=strvec[-1]\r\n arr=[]\r\n for e in s['filelist']:\r\n basedir=os.path.dirname(e)\r\n name=\"_\".join(os.path.basename(e).split('_')[:-1])+\"_InverseGlobalDisplacementField.nrrd\"\r\n inverted_deform_path=os.path.join(basedir,name)\r\n arr.append(inverted_deform_path)\r\n arr.reverse()\r\n elm['filelist']=arr \r\n\r\n output_dir=os.path.dirname(s['output_path'])\r\n output_name=\"_\".join(os.path.basename(s['output_path']).split('_')[:-2])+\"_InverseGlobalDisplacementField_Concatenated.nrrd\"\r\n output_path=os.path.join(output_dir,output_name)\r\n elm['output_path']=output_path\r\n outseq.append(elm)\r\n return outseq \r\n\r\n \r\ndef furnish_deformation_track(seq,project_path,build_sequence,inverse=False): #input deformSequence \r\n res=[]\r\n for d in seq:\r\n tmp={}\r\n tmp['id']=d\r\n compseq=d.split('/')\r\n cfg=find_config_by_nodename(build_sequence,compseq[-2])\r\n originalDTIId=compseq[-1]\r\n originalDTIPath=None\r\n for idx,case in enumerate(zip(cfg[\"m_CasesIDs\"],cfg[\"m_CasesPath\"])):\r\n caseID,casePath=case \r\n if originalDTIId==caseID: \r\n originalDTIPath=casePath\r\n break\r\n\r\n entry=[]\r\n for idx,c in enumerate(compseq[0:-1]):\r\n fpath=\"atlases/\" + c + \"/5_Final_Atlas/FinalDeformationFields/\" + compseq[idx+1] + \"_GlobalDisplacementField.nrrd\"\r\n fpath=os.path.join(project_path,fpath)\r\n entry.append(fpath)\r\n tmp['filelist']=entry\r\n tmp['original_dti_path']=originalDTIPath \r\n tmp['original_dti_id']=originalDTIId\r\n tmp['scalar_measurement']=cfg[\"m_ScalarMeasurement\"]\r\n tmp['nb_loops']=cfg['m_nbLoops']\r\n tmp['nb_loops_dtireg']=cfg['m_nbLoopsDTIReg']\r\n tmp['project_path']=cfg['m_OutputPath']\r\n tmp['need_to_be_cropped']=cfg['m_NeedToBeCropped']\r\n outputDir=os.path.join(project_path,\"displacement_fields\")\r\n hpairList=tmp[\"id\"].split(\"/\")\r\n outFilename=\"_\".join(hpairList) + \"_GlobalDisplacementField_Concatenated.nrrd\"\r\n outFilename=os.path.join(outputDir,outFilename)\r\n 
tmp['output_path']=outFilename\r\n res.append(tmp)\r\n return res \r\n\r\n\r\n\r\n\r\ndef parse_hbuild(hb,root_path,root_node=\"target\"): #hbuild parser to generate build sequence\r\n if root_node is None:\r\n root_node=hb['project']['target_node']\r\n root=hb['build'][root_node]\r\n seq=[]\r\n nodeFiles=[] ## sub node's final atlases\r\n # scalar=hb['config']['m_ScalarMeasurement']\r\n if root[\"type\"]==\"node\": \r\n for c in root[\"components\"]:\r\n seq+=parse_hbuild(hb, root_path=root_path, root_node=c)\r\n nodeAtlasPath=os.path.join(root_path,\"atlases/\"+c+\"/5_Final_Atlas/FinalAtlasDTI.nrrd\")\r\n nodeFiles.append(nodeAtlasPath)\r\n elif root[\"type\"]==\"end_node\":\r\n if root[\"filetype\"]==\"dataset\":\r\n rows=[]\r\n rows_id=[]\r\n with open(str(root['datasetfiles']),'r') as f:\r\n csvreader=csv.reader(f)\r\n next(csvreader,None)\r\n for r in csvreader:\r\n fpath=str(r[1])\r\n fid=os.path.splitext(os.path.basename(fpath))[0]\r\n rows.append(fpath)\r\n rows_id.append(str(fid))\r\n\r\n return [{\"name\" : str(root_node),\r\n \"dataset_files\" : rows,\r\n \"dataset_ids\" : rows_id,\r\n \"project_path\" : str(os.path.join(root_path,\"atlases/\"+root_node))\r\n }]\r\n else:\r\n flist=list(map(str,root[\"datasetfiles\"]))\r\n fids=[]\r\n for e in flist:\r\n fid=os.path.splitext(os.path.basename(e))[0]\r\n fids.append(fid)\r\n\r\n return [{\"name\" : str(root_node),\r\n \"dataset_files\" : flist,\r\n \"dataset_ids\" : fids ,\r\n \"project_path\" : str(os.path.join(root_path,\"atlases/\"+root_node))\r\n }]\r\n\r\n # node type file reading\r\n\r\n seq+=[{\"name\" : str(root_node),\r\n \"dataset_files\" : list(map(str,nodeFiles)),\r\n \"dataset_ids\" : list(map(str,root[\"components\"])),\r\n \"project_path\" : str(os.path.join(root_path,\"atlases/\"+root_node))\r\n\r\n }]\r\n seq=unique(seq)\r\n\r\n ## generate final buildsequence furnished with configuration\r\n\r\n\r\n return seq\r\n\r\ndef furnish_sequence(hb,seq):\r\n bs=[]\r\n for s in seq:\r\n conf=hb[\"config\"].copy()\r\n conf[\"m_OutputPath\"]=s['project_path']\r\n conf[\"m_CasesPath\"]=s['dataset_files']\r\n conf[\"m_CasesIDs\"]=s['dataset_ids']\r\n conf[\"m_NodeInfo\"]=hb[\"build\"][s['name']]\r\n conf[\"m_NodeName\"]=s[\"name\"]\r\n bs.append(conf)\r\n\r\n return bs\r\n\r\ndef generate_directories(project_path,sequence): ## from build sequence, generate directories\r\n atlasesPath=os.path.join(project_path,\"atlases\")\r\n finalAtlasPath=os.path.join(project_path,\"final_atlas\")\r\n if not os.path.isdir(atlasesPath):\r\n print(\"\\n=> Creation of the atlas directory = \" + atlasesPath)\r\n os.mkdir(atlasesPath)\r\n if not os.path.isdir(finalAtlasPath):\r\n print(\"\\n=> Creation of the atlas directory = \" + finalAtlasPath)\r\n os.mkdir(finalAtlasPath)\r\n for s in sequence:\r\n apath=os.path.join(s[\"m_OutputPath\"])\r\n if not os.path.isdir(apath):\r\n print(\"\\n=> Creation of the atlas directory = \" + apath)\r\n os.mkdir(apath)\r\n print(\"Initial directories are generated\")\r\n\r\n\r\ndef dependency_satisfied(hb,node_name,completed_atlases):\r\n if hb[\"build\"][node_name][\"type\"]==\"end_node\": \r\n return True\r\n else:\r\n comps=hb[\"build\"][node_name][\"components\"]\r\n for c in comps:\r\n if c not in completed_atlases: return False \r\n return True\r\n\r\n\r\n\r\ndef generate_results_csv_from_deformation_track(deformation_track,project_path): # generate final result file with deformation track file\r\n\r\n dt=deformation_track\r\n outpath=os.path.join(project_path,\"DTIAtlasBuilderResults.csv\")\r\n \r\n 
m_ScalarMeasurement=dt[0][\"scalar_measurement\"]\r\n m_NeedToBeCropped=dt[0][\"need_to_be_cropped\"]\r\n header=[\"id\", \"Original DTI Image\"]\r\n if m_NeedToBeCropped==1: header + [\"Cropped DTI\"]\r\n tmp=[m_ScalarMeasurement+ \" from original\",\r\n \"Affine transform\", \"Affine Registered DTI\", \r\n \"Affine Registered \"+m_ScalarMeasurement,\r\n \"Diffeomorphic Deformed \" + m_ScalarMeasurement,\r\n \"Diffeomorphic Deformation field to Affine space\",\r\n \"Diffeomorphic Deformation field to Affine space\",\r\n \"Diffeomorphic DTI\",\r\n \"Diffeomorphic Deformation field to Original space\",\r\n \"DTI-Reg Final DTI\"\r\n ]\r\n header+=tmp\r\n with open(outpath,\"w\") as f:\r\n csvwriter=csv.writer(f,delimiter=',')\r\n csvwriter.writerow(header)\r\n for idx,case in enumerate(dt):\r\n caseID,casePath = case[\"original_dti_id\"],case[\"original_dti_path\"]\r\n m_OutputPath=case[\"project_path\"]\r\n m_nbLoops=case[\"nb_loops\"]\r\n m_nbLoopsDTIReg=case[\"nb_loops_dtireg\"]\r\n row=[\r\n idx+1,\r\n casePath]\r\n if m_NeedToBeCropped==1: row+=[m_OutputPath+\"/1_Affine_Registration/\" + caseID+\"_croppedDTI.nrrd\"]\r\n concatenated_displacement_path=case[\"output_path\"]\r\n row+=[\r\n m_OutputPath+\"/1_Affine_Registration/\" + caseID + \"_\" + m_ScalarMeasurement + \".nrrd\",\r\n m_OutputPath+\"/1_Affine_Registration/Loop\" + str(m_nbLoops) + \"/\" + caseID + \"_Loop\" + str(m_nbLoops)+\"_LinearTrans.txt\",\r\n m_OutputPath+\"/1_Affine_Registration/Loop\" + str(m_nbLoops) + \"/\" + caseID + \"_Loop\" + str(m_nbLoops)+\"_LinearTrans_DTI.nrrd\",\r\n m_OutputPath+\"/1_Affine_Registration/Loop\" + str(m_nbLoops) + \"/\" + caseID + \"_Loop\" + str(m_nbLoops)+\"_Final\" + m_ScalarMeasurement +\".nrrd\",\r\n m_OutputPath+\"/2_NonLinear_Registration/\" + caseID + \"_NonLinearTrans_\" + m_ScalarMeasurement + \".mhd\",\r\n m_OutputPath+\"/2_NonLinear_Registration/\" + caseID + \"_HField.mhd\" ,\r\n m_OutputPath+\"/2_NonLinear_Registration/\" + caseID + \"_InverseHField.mhd\" ,\r\n m_OutputPath+\"/3_Diffeomorphic_Atlas/\" + caseID + \"_DiffeomorphicDTI.nrrd\",\r\n concatenated_displacement_path,\r\n m_OutputPath+\"/4_Final_Resampling/FinalTensors/\" + caseID + \"_FinalDeformedDTI.nrrd\"\r\n ]\r\n csvwriter.writerow(row)\r\n\r\ndef generate_results_csv(cfg):\r\n\r\n outpath=os.path.join(cfg[\"m_OutputPath\"],\"DTIAtlasBuilderResults.csv\")\r\n m_OutputPath=cfg[\"m_OutputPath\"]\r\n m_ScalarMeasurement=cfg[\"m_ScalarMeasurement\"]\r\n m_nbLoops=cfg[\"m_nbLoops\"]\r\n m_nbLoopsDTIReg=cfg[\"m_nbLoopsDTIReg\"]\r\n m_NeedToBeCropped=cfg[\"m_NeedToBeCropped\"]\r\n header=[\"id\", \"Original DTI Image\"]\r\n if m_NeedToBeCropped==1: header + [\"Cropped DTI\"]\r\n tmp=[cfg[\"m_ScalarMeasurement\"]+ \" from original\",\r\n \"Affine transform\", \"Affine Registered DTI\", \r\n \"Affine Registered \"+cfg[\"m_ScalarMeasurement\"],\r\n \"Diffeomorphic Deformed \" + cfg[\"m_ScalarMeasurement\"],\r\n \"Diffeomorphic Deformation field to Affine space\",\r\n \"Diffeomorphic Deformation field to Affine space\",\r\n \"Diffeomorphic DTI\",\r\n \"Diffeomorphic Deformation field to Original space\",\r\n \"DTI-Reg Final DTI\"\r\n ]\r\n header+=tmp\r\n with open(outpath,\"w\") as f:\r\n csvwriter=csv.writer(f,delimiter=',')\r\n csvwriter.writerow(header)\r\n for idx,case in enumerate(zip(cfg[\"m_CasesIDs\"],cfg[\"m_CasesPath\"])):\r\n caseID,casePath = case\r\n row=[\r\n idx+1,\r\n casePath]\r\n if m_NeedToBeCropped==1: row+=[m_OutputPath+\"/1_Affine_Registration/\" + caseID+\"_croppedDTI.nrrd\"]\r\n row+=[\r\n 
m_OutputPath+\"/1_Affine_Registration/\" + caseID + \"_\" + m_ScalarMeasurement + \".nrrd\",\r\n m_OutputPath+\"/1_Affine_Registration/Loop\" + str(m_nbLoops) + \"/\" + caseID + \"_Loop\" + str(m_nbLoops)+\"_LinearTrans.txt\",\r\n m_OutputPath+\"/1_Affine_Registration/Loop\" + str(m_nbLoops) + \"/\" + caseID + \"_Loop\" + str(m_nbLoops)+\"_LinearTrans_DTI.nrrd\",\r\n m_OutputPath+\"/1_Affine_Registration/Loop\" + str(m_nbLoops) + \"/\" + caseID + \"_Loop\" + str(m_nbLoops)+\"_Final\" + m_ScalarMeasurement +\".nrrd\",\r\n m_OutputPath+\"/2_NonLinear_Registration/\" + caseID + \"_NonLinearTrans_\" + m_ScalarMeasurement + \".mhd\",\r\n m_OutputPath+\"/2_NonLinear_Registration/\" + caseID + \"_HField.mhd\" ,\r\n m_OutputPath+\"/2_NonLinear_Registration/\" + caseID + \"_InverseHField.mhd\" ,\r\n m_OutputPath+\"/3_Diffeomorphic_Atlas/\" + caseID + \"_DiffeomorphicDTI.nrrd\",\r\n m_OutputPath+\"/4_Final_Resampling/FinalDeformationFields/\" + caseID + \"_GlobalDisplacementField.nrrd\",\r\n m_OutputPath+\"/4_Final_Resampling/FinalTensors/\" + caseID + \"_FinalDeformedDTI.nrrd\"\r\n ]\r\n csvwriter.writerow(row)\r\n\r\n\r\n\r\n\r\ndef main(args):\r\n projectPath=os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)),\"../\"))\r\n scriptPath=os.path.join(projectPath,\"scripts\")\r\n commonPath=os.path.join(projectPath,'common')\r\n configPath=os.path.join(commonPath,\"config.json\")\r\n hbuildPath=os.path.join(commonPath,\"h-build.json\")\r\n \r\n ### generate build sequence\r\n buildSequence=[]\r\n hbuild={}\r\n deformSequence=[]\r\n numThreads=1\r\n if args.buildsequence is None:\r\n hbuild={}\r\n with open(hbuildPath,'r') as f:\r\n hbuild=json.load(f)\r\n config={}\r\n with open(configPath,'r') as f:\r\n config=json.load(f)\r\n numThreads=max(1,int(config[\"m_NbThreadsString\"]))\r\n hbuild[\"config\"]=config\r\n hbuild['config']['m_GreedyAtlasParametersTemplatePath']=str(os.path.join(commonPath,'GreedyAtlasParameters.xml'))\r\n initSequence=parse_hbuild(hbuild,root_path=projectPath,root_node=args.node)\r\n buildSequence=furnish_sequence(hbuild,initSequence)\r\n\r\n # for s in buildSequence:\r\n # print(s)\r\n #save sequence \r\n with open(os.path.join(commonPath,'build_sequence.json'),'w') as f:\r\n json.dump(buildSequence,f,indent=4,sort_keys=True)\r\n\r\n # generate scaffolding directories \r\n generate_directories(projectPath,buildSequence)\r\n else:\r\n with open(args.buildsequence,'r') as f:\r\n buildSequence=json.load(f)\r\n numThreads=max(int(buildSequence[0][\"m_NbThreadsString\"]),1)\r\n\r\n with open(os.path.join(commonPath,'initial_sequence.json'),'w') as f:\r\n json.dump(initSequence,f,indent=4)\r\n\r\n\r\n\r\n ## generate deformation field map\r\n deformInitSequence=generate_deformation_track(initSequence,node=hbuild['project']['target_node'])\r\n deformSequence=furnish_deformation_track(deformInitSequence,projectPath,buildSequence)\r\n inverseDeformSequence=invert_deformation_track(deformSequence)\r\n\r\n with open(os.path.join(commonPath,'deformation_track.json'),'w') as f:\r\n json.dump(deformSequence,f,indent=4)\r\n with open(os.path.join(commonPath,'deformation_track_inverted.json'),'w') as f:\r\n json.dump(inverseDeformSequence,f,indent=4)\r\n\r\n\r\n\r\n\r\n\r\n ### atlas build begins (to be multiprocessed)\r\n print(\"\\nThe current date and time are:\")\r\n print( time.strftime('%x %X %Z') )\r\n print(\"\\n=============== Main Script ================\")\r\n time1=time.time()\r\n\r\n\r\n ## threading\r\n completedAtlases=[] #entry should be the node name 
\r\n runningAtlases=[] # should have length less or equal than numTheads, entry is the node name\r\n\r\n\r\n def buildAtlas(conf,rt,ct): # rt : list of running threads, ct : list of completed threads, nt : number of thread (numThreads)\r\n prjName=conf[\"m_NodeName\"]\r\n rt.append(prjName)\r\n try:\r\n DTIAtlasBuilder_Preprocess.run(conf)\r\n except Exception as e:\r\n raise Exception(\"Error occurred in DTIAtlasBuilder_Preprocess : \" + str(e))\r\n\r\n try:\r\n DTIAtlasBuilder_AtlasBuilding.run(conf)\r\n except Exception as e:\r\n raise Exception(\"Error occurred in DTIAtlasBuilding_DTIAtlasBuilder : \" + str(e)) \r\n rt.remove(prjName)\r\n ct.append(prjName)\r\n\r\n numNodes=len(buildSequence)\r\n while len(completedAtlases) < numNodes:\r\n if len(runningAtlases) < numThreads and len(buildSequence)>0:\r\n if dependency_satisfied(hbuild,buildSequence[0][\"m_NodeName\"],completedAtlases):\r\n cfg=buildSequence.pop(0)\r\n generate_results_csv(cfg)\r\n threading.Thread(target=buildAtlas,args=(cfg,runningAtlases,completedAtlases)).start()\r\n\r\n # print(\"Completed : \" + str(completedAtlases))\r\n # print(\"Running : \" + str(runningAtlases))\r\n # print(\"Pending : \" + str([x[\"m_NodeName\"] for x in buildSequence]))\r\n time.sleep(1.0)\r\n\r\n # print(\"Completed : \" + str(completedAtlases))\r\n # print(\"Running : \" + str(runningAtlases))\r\n # print(\"Pending : \" + str([x[\"m_NodeName\"] for x in buildSequence]))\r\n\r\n ### copy final atals to 'final_atlas' directory\r\n try:\r\n if args.node is None:\r\n src=os.path.join(projectPath,\"atlases/\"+hbuild['project']['target_node'])\r\n else:\r\n src=os.path.join(projectPath,\"atlases/\"+args.node)\r\n dst=os.path.join(projectPath,\"final_atlas\")\r\n print(\"Copying filed from %s to %s\" %(src,dst))\r\n shutil.rmtree(dst)\r\n shutil.copytree(src,dst)\r\n\r\n except Exception as e:\r\n raise Exception(\"Error occurred in copying final atlas directory : \" +str(e))\r\n\r\n print(\"Final atlas copied into %s \"% dst)\r\n\r\n\r\n ### Concatenate the displacement fields\r\n print(\"\\nConcatenating deformation fields\")\r\n try:\r\n DTIAtlasBuilder_Utilities.ITKTransformTools_Concatenate(config,deformSequence)\r\n DTIAtlasBuilder_Utilities.ITKTransformTools_Concatenate_Inverse(config,inverseDeformSequence)\r\n generate_results_csv_from_deformation_track(deformSequence,projectPath)\r\n\r\n except Exception as e:\r\n raise Exception(\"Error occurred in concatenating deformation fields : \" + str(e))\r\n\r\n # Display execution time\r\n time2=time.time()\r\n timeTot=time2-time1\r\n if timeTot<60 : print(\"| Execution time = \" + str(int(timeTot)) + \"s\")\r\n elif timeTot<3600 : print(\"| Execution time = \" + str(int(timeTot)) + \"s = \" + str(int(timeTot/60)) + \"m \" + str( int(timeTot) - (int(timeTot/60)*60) ) + \"s\")\r\n else : print(\"| Execution time = \" + str(int(timeTot)) + \"s = \" + str(int(timeTot/3600)) + \"h \" + str( int( (int(timeTot) - int(timeTot/3600)*3600) /60) ) + \"m \" + str( int(timeTot) - (int(timeTot/60)*60) ) + \"s\")\r\n\r\n\r\n \r\n\r\nif __name__==\"__main__\":\r\n parser=argparse.ArgumentParser(description=\"Argument Parser\")\r\n parser.add_argument('--node',help=\"node to build\",type=str)\r\n parser.add_argument('--buildsequence',help='build sequence file, if this option is inputted then build sequence process will be skipped',type=str)\r\n args=parser.parse_args()\r\n\r\n\r\n try:\r\n main(args)\r\n sys.exit(0)\r\n except Exception as e:\r\n print(str(e))\r\n traceback.print_exc(file=sys.stdout)\r\n 
sys.exit(1)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Scripts/template_main.py","file_name":"template_main.py","file_ext":"py","file_size_in_byte":19068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
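The main loop above gates each atlas build on `dependency_satisfied`, so a parent node only starts once every child atlas is finished. A single-threaded sketch of just that gating logic (node names are invented; appending to `completed` stands in for launching the build thread):

    import time

    def dependency_satisfied(node, deps, completed):
        return all(d in completed for d in deps.get(node, []))

    deps = {'target': ['groupA', 'groupB'], 'groupA': [], 'groupB': []}
    pending = ['target', 'groupA', 'groupB']  # target is listed first but must wait
    completed = []

    while pending:
        for node in list(pending):
            if dependency_satisfied(node, deps, completed):
                pending.remove(node)
                completed.append(node)  # real code starts buildAtlas in a thread here
                break
        else:
            time.sleep(0.1)  # nothing ready; in the real loop, running threads finish here

    print(completed)  # ['groupA', 'groupB', 'target']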
+{"seq_id":"557343019","text":"from selenium import webdriver\nfrom selenium.webdriver.firefox.options import Options\nfrom Config import config\n\nconfig = config\n\n\nclass Driver:\n\n @staticmethod\n def get_driver():\n options = Options()\n options.headless = config.headless\n return webdriver.Firefox(options=options, service_log_path='../Logs/geckodriver.log')\n\n @staticmethod\n def close_driver(driver):\n driver.quit()\n","sub_path":"SeleniumFramework/Driver.py","file_name":"Driver.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"327205963","text":"batch_size = 100\nnum_classes = 10\nepochs = 100\nimport keras\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Flatten, Conv2D, SeparableConv2D, DepthwiseConv2D\nfrom keras.layers import MaxPooling2D, AveragePooling2D, GlobalMaxPooling2D, GlobalAveragePooling2D\nfrom keras import regularizers\nfrom keras import metrics\nfrom keras import callbacks\nfrom apscheduler.schedulers.background import BackgroundScheduler\nimport pickle\nimport psutil\n\nprocess = psutil.Process()\n\nmcpu = 0\nmmem = 0\n\ndef get_info():\n global mmem\n mem = process.memory_info().rss\n if mmem < mem:\n mmem = mem\n\nscheduler = BackgroundScheduler()\nscheduler.add_job(get_info, 'interval', seconds=1)\n\n\ndata_augmentation = True\n\n#load saved data\npkl_file = open('/exports/home/j_liu21/projects/genetic_algorithms/x_train.pkl', 'rb')\nx_train = pickle.load(pkl_file, encoding='latin1')\npkl_file.close()\n\n############################################################\nx_data_len = len(x_train)\n#end = int(.2*x_data_len)\n#x_train_train = x_train[0:end]\n#x_train_valid = x_train[end:]\nprint(x_data_len)\nx_train_train = x_train[0:40000]\nx_train_valid = x_train[40000:50000]\n############################################################\n\npkl_file = open('/exports/home/j_liu21/projects/genetic_algorithms/y_train.pkl', 'rb')\ny_train = pickle.load(pkl_file, encoding='latin1')\npkl_file.close()\n\n############################################################\n#y_train_train = y_train[0:end]\n#y_train_valid = y_train[end:]\ny_train_train = y_train[0:40000]\ny_train_valid = y_train[40000:50000]\n############################################################\n\npkl_file = open('/exports/home/j_liu21/projects/genetic_algorithms/x_test.pkl', 'rb')\nx_test = pickle.load(pkl_file, encoding='latin1')\npkl_file.close()\n\npkl_file = open('/exports/home/j_liu21/projects/genetic_algorithms/y_test.pkl', 'rb')\ny_test = pickle.load(pkl_file, encoding='latin1')\npkl_file.close()\n\n# Convert class vectors to binary class matrices.\ny_train_train = keras.utils.to_categorical(y_train_train, num_classes)\ny_train_valid = keras.utils.to_categorical(y_train_valid, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\n\n\ndef identity_block(model):\n save_model = model\n\n model.add(Conv2D(20, (3, 3), padding='same', activation='relu'))\n model.add(BatchNormalization())\n\n\n\nmodel = Sequential()\n\n\n#print(model.summary())\n\nopt = keras.optimizers.rmsprop(lr=0.01, decay=1e-6)\n#opt = keras.optimizers.SGD(learning_rate = .01, decay=1e-6)\n\nes = keras.callbacks.EarlyStopping(monitor = 'val_loss', patience = 10, verbose = 1)\n\nscheduler.start()\n\nmodel.compile(loss = 'categorical_crossentropy',\n optimizer = opt,\n metrics = ['accuracy'])\n\nx_train_train = x_train_train.astype('float32')\nx_train_valid = x_train_valid.astype('float32')\nx_test = x_test.astype('float32')\nx_train_train /= 255\nx_train_valid /= 255\nx_test /= 255\n\nif not data_augmentation:\n print('Not using data augmentation.')\n model.fit(x_train_train, y_train_train,\n batch_size = batch_size,\n epochs = epochs,\n validation_data = (x_train_valid, y_train_valid),\n shuffle = True)\nelse:\n print('Using real-time data augmentation.')\n # This will do preprocessing and realtime data augmentation:\n datagen = ImageDataGenerator(\n featurewise_center = False, # set input mean to 0 over the dataset\n samplewise_center = False, # set each sample 
mean to 0\n featurewise_std_normalization = False, # divide inputs by std of the dataset\n samplewise_std_normalization = False, # divide each input by its std\n zca_whitening = False, # apply ZCA whitening\n zca_epsilon = 1e-06, # epsilon for ZCA whitening\n rotation_range = 0, # randomly rotate images in the range (degrees, 0 to 180)\n width_shift_range = 0.1, # randomly shift images horizontally (fraction of total width)\n height_shift_range = 0.1, # randomly shift images vertically (fraction of total height)\n shear_range = 0., # set range for random shear\n zoom_range = 0., # set range for random zoom\n channel_shift_range = 0., # set range for random channel shifts\n fill_mode = 'nearest', # set mode for filling points outside the input boundaries\n cval = 0., # value used for fill_mode = \"constant\"\n horizontal_flip = True, # randomly flip images\n vertical_flip = False, # randomly flip images\n rescale = None, # set rescaling factor (applied before any other transformation)\n preprocessing_function = None, # set function that will be applied on each input\n data_format = None, # image data format, either \"channels_first\" or \"channels_last\"\n validation_split = 0.0 ) # fraction of images reserved for validation (strictly between 0 and 1)\n\n\n # Compute quantities required for feature-wise normalization\n # (std, mean, and principal components if ZCA whitening is applied).\n\n datagen.fit(x_train_train)\n\n # Fit the model on the batches generated by datagen.flow().\n\n model.fit_generator(datagen.flow(x_train_train, y_train_train, batch_size = batch_size),\n steps_per_epoch = 100,\n epochs = epochs,\n validation_data = (x_train_valid, y_train_valid),\n workers = 8,\n callbacks = [es] )\n\n #model.fit_generator(datagen.flow(x_train_train, y_train_train, batch_size = batch_size),\n # steps_per_epoch = 100,\n # epochs = epochs,\n # validation_data = (x_train_valid, y_train_valid),\n # workers = 8 )\n\n\n# Score trained model.\nscores_train_train = model.evaluate(x_train_train, y_train_train, verbose = 0)\nscores_test = model.evaluate(x_test, y_test, verbose = 0)\nscheduler.shutdown()\ncpu_time = process.cpu_times().user\n\nprint('Training_loss: {} Test_accuracy: {} Mem: {} CPU: {}'.format(scores_train_train[0], scores_test[1], mmem, cpu_time) )\n\n","sub_path":"sample_networks/ResNet/ResNet.py","file_name":"ResNet.py","file_ext":"py","file_size_in_byte":6582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
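The `identity_block` helper above hints at a residual unit, but Keras's `Sequential` API has no way to merge a skip path back into the main path. Below is a minimal sketch of a true identity block using the Keras functional API; the filter count of 20 echoes the snippet above, while the input size and layer order are illustrative assumptions:

```python
from keras.layers import Input, Conv2D, BatchNormalization, Activation, Add
from keras.models import Model

def identity_block_functional(x, filters=20):
    # main path: conv + batch norm at the same spatial size so shapes match
    shortcut = x
    y = Conv2D(filters, (3, 3), padding='same')(x)
    y = BatchNormalization()(y)
    # merge the skip connection back in, then apply the nonlinearity
    y = Add()([y, shortcut])
    return Activation('relu')(y)

# usage sketch: the input channel count must equal `filters` for Add to work
inp = Input(shape=(32, 32, 20))
out = identity_block_functional(inp)
model = Model(inputs=inp, outputs=out)
```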
+{"seq_id":"13737313","text":"import numpy\nimport pandas\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom sklearn.model_selection import train_test_split\nfrom torch.autograd import Variable\nfrom sklearn.utils import shuffle\n\nMAX_EPOCH = 1000\n\nclass Classifier(nn.Module):\n def __init__(self):\n super(Classifier, self).__init__()\n self.l1 = nn.Linear(7,77)\n self.l2 = nn.Linear(77,77)\n self.l3 = nn.Linear(77,77)\n self.l4 = nn.Linear(77,77)\n self.l5 = nn.Linear(77,77)\n self.l6 = nn.Linear(77,77)\n self.l7 = nn.Linear(77,77)\n self.l8 = nn.Linear(77,2)\n self.l9 = nn.Linear(2,1)\n\t\t\n def forward(self, x):\n x = F.relu(self.l1(x))\n x = F.relu(self.l2(x))\n x = F.relu(self.l3(x))\n x = F.relu(self.l4(x))\n x = F.relu(self.l5(x))\n x = F.relu(self.l6(x))\n x = F.relu(self.l7(x))\n x = F.relu(self.l8(x))\n x = F.relu(self.l9(x))\n return x\n\ndef percentage_correct(pred, labels, threshold = 0.5):\n\tcorrect = 0\n\ttotal = 0\n\tconverted_pred = []\n\tfor p in pred:\n\t\tif (p.data[0] > threshold):\n\t\t\tconverted_pred.append(1)\n\t\telse:\n\t\t\tconverted_pred.append(0)\n \n\tif (len(converted_pred) == len(labels)):\n\t\tfor i in range(len(converted_pred)):\n\t\t\tif (converted_pred[i] == labels[i].data[0]):\n\t\t\t\tcorrect += 1\n\t\t\ttotal += 1\n\treturn correct/total\n\t\t\n## Importing Data ##\ndataset = pandas.read_csv('kickstarter_data_full.csv', low_memory = False)\ndata = dataset[[\n\t'disable_communication',\n\t'country',\n\t'currency',\n\t'staff_pick',\n\t'static_usd_rate',\n\t'category',\n\t'spotlight',\n\t'SuccessfulBool'\n]].dropna().reset_index(drop = True)\n\n## Converting Categorical Columns to Integers and Bools to 0/1 ##\ndata['disable_communication'] = (data['disable_communication']).astype(int)\ndata['staff_pick'] = (data['staff_pick']).astype(int)\ndata['spotlight'] = (data['spotlight']).astype(int)\ndata['country'] = (data['country']).astype('category').cat.codes\ndata['currency'] = (data['currency']).astype('category').cat.codes\ndata['category'] = (data['category']).astype('category').cat.codes\n\n## Initiallizing Testing and Training Data ##\nY = data.iloc[0:int(data.size / 8), 7].as_matrix()\nX = data.iloc[0:int(data.size / 8), data.columns != 'SuccessfulBool'].as_matrix()\n\nX_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = 0.2, random_state = 42)\n\n#shuffle X and y\nX_train, y_train = shuffle(X_train, y_train, random_state=0)\n\nX_train = torch.Tensor(X_train)\nX_test = torch.Tensor(X_test)\ny_train = torch.Tensor(y_train)\ny_test = torch.Tensor(y_test)\n\ntrain_data = Variable(X_train)\ntrain_labels= Variable(y_train)\ntest_data = Variable(X_test)\ntest_labels = Variable(y_test)\n\n## Training the Model ##\nmodel = Classifier()\noptimizer = torch.optim.Adam(model.parameters(), lr=0.03, momentum=0.9)\nloss = nn.BCEWithLogitsLoss()\n\naccuracies = []\nb_size = 100 #batch size\n\nfor epoch in range(MAX_EPOCH):\n model.train()\n for batch in range(0,train_data.size(0),b_size):\n \td = train_data[batch:batch+b_size]\n \tl = train_labels[batch:batch+b_size]\n \toptimizer.zero_grad()\n \tpred = model(d).view(len(l))\n \terror = loss(pred, l)\n \terror.backward()\n \toptimizer.step()\n print ('epoch {} -- percentage correct: {}, error: {}'.format(epoch, percentage_correct(pred,l), error.data[0]))\n\n\n## Testing the Model ## \nmodel.eval()\npred = model(test_data).view(len(test_labels))\nerror = loss(pred, test_labels)\nprint(\"===================================\")\nprint(\"Final 
Accuracy\")\nprint(percentage_correct(pred, test_labels))\nprint(\"===================================\")","sub_path":"neural_networks/old_nns/nn2.py","file_name":"nn2.py","file_ext":"py","file_size_in_byte":3621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"380283574","text":"from splinter import Browser\nfrom bs4 import BeautifulSoup as bs\nfrom selenium import webdriver\nimport os\n\nimport pandas as pd\n\ndef init_browser():\n # @NOTE: Replace the path with your actual path to the chromedriver\n executable_path ={\"executable_path\":\"c:/Users/ai3ca/web-scraping-challenge/Missions_to_Mars/chromedriver\"}\n \n return Browser(\"chrome\", **executable_path, headless=False)\n\n\ndef scrape():\n browser = init_browser()\n mars={}\n\n url = \"https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest\"\n browser.visit(url)\n\n html = browser.html\n soup = bs(html, \"html.parser\")\n\n\n #scrape latest news \n title= soup.find(\"div\", class_=\"content_title\").text\n paragraph= soup.find(\"div\", class_=\"article_teaser_body\").text\n mars[\"News Title\"]=title\n mars[\"News Paragraph\"]=paragraph\n \n # scrape the image\n image_url=\"https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars\"\n browser.visit(image_url)\n #Getting the base url\n from urllib.parse import urlsplit\n base_url = \"{0.scheme}://{0.netloc}/\".format(urlsplit(image_url))\n print(base_url)\n xpath = \"//*[@id=\\\"page\\\"]/section[3]/div/ul/li[1]/a/div/div[2]/img\"\n results = browser.find_by_xpath(xpath)\n img = results[0]\n img.click()\n html_image = browser.html\n soup = bs(html_image, \"html.parser\")\n url_image = soup.find(\"img\", class_=\"fancybox-image\")[\"src\"]\n featured_image_url = base_url + url_image\n print(featured_image_url)\n mars[\"featured_image\"]=featured_image_url\n \n # scrape the weather data\n \n weather_url=\"https://twitter.com/marswxreport?lang=en\"\n browser.visit(weather_url)\n html_weather= browser.html\n soup = bs(html_weather, \"html.parser\")\n mars_weather = soup.find(\"p\", class_=\"TweetTextSize TweetTextSize--normal js-tweet-text tweet-text\").text\n mars[\"weather\"]=mars_weather\n \n # scrape the fact data\n \n fact_url=\"https://space-facts.com/mars/\"\n browser.visit(fact_url)\n mars_table = pd.read_html(fact_url)\n mars_table[0]\n mars_df=mars_table[0]\n mars_df.columns=[\"Parameter\",\"Mars\",\"Earth\"]\n mars_df.set_index([\"Parameter\"], inplace=True)\n\n mars_df\n mars_fact=mars_df.to_html(index=True,header=True)\n mars_fact=mars_fact.replace(\"\\n\",\"\")\n mars[\"fact\"]=mars_fact\n\n # mars hemisphere\n hemis_url=\"https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars\"\n browser.visit(hemis_url)\n #Getting the base url\n base_url = \"{0.scheme}://{0.netloc}/\".format(urlsplit(hemis_url))\n print(base_url)\n\n # create a empty for these hemisphere url\n all_urls=[]\n \n hemisphere_html = browser.html\n soup = bs(hemisphere_html, 'html.parser')\n image_list = soup.find_all('div', class_='item')\n # Loop through list of hemispheres \n for image in image_list:\n hemisphere_dict = {}\n # Find link\n href = image.find('a', class_='itemLink product-item')\n link = base_url + href['href']\n # Visit the link\n browser.visit(link)\n # Parse the html of the new page\n\n hemisphere_html2 = browser.html\n soup2 = bs(hemisphere_html2, 'html.parser')\n \n # Find the title\n img_title = soup2.find('div', class_='content').find('h2', class_='title').text\n # Append to dict\n hemisphere_dict['Title'] = img_title\n # Find image url\n img_url = soup2.find('div', class_='downloads').find('a')['href']\n # Append to dict\n hemisphere_dict['URL_IMG'] = img_url\n\n # Append dict to list\n\n 
all_urls.append(hemisphere_dict)\n\n mars[\"hemisphere_img_url\"] = all_urls\n\n\n return mars\n\n\n\n\n \n \n","sub_path":"Missions_to_Mars/.ipynb_checkpoints/scrape_mars-checkpoint.py","file_name":"scrape_mars-checkpoint.py","file_ext":"py","file_size_in_byte":3773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
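The facts section relies on `pandas.read_html`, which parses every `<table>` it finds and returns a list of DataFrames; the `[0]` indexing above simply takes the first table on the page. A self-contained illustration with inline HTML (recent pandas versions prefer the string wrapped in `io.StringIO`):

```python
import pandas as pd

html = """
<table>
  <tr><td>Diameter:</td><td>6,779 km</td></tr>
  <tr><td>Mass:</td><td>6.39 x 10^23 kg</td></tr>
</table>
"""

tables = pd.read_html(html)   # one DataFrame per <table> found
df = tables[0]
df.columns = ["Parameter", "Value"]
print(df.to_html(index=False))
```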
+{"seq_id":"217420123","text":"# -- coding = 'utf-8' -- \n# Author Kylin\n# Python Version 3.7.3\n# OS macOS\n\"\"\"\nNo.334 递增的三元子序列\n需求:\n 给你一个整数数组nums,判断这个数组中是否存在长度为3的递增子序列\n 如果存在这样的三元组下标 (i, j, k) 且满足 i < j < k ,使得 nums[i] < nums[j] < nums[k] ,返回 true ;否则,返回 false 。\n\n\"\"\"\n\n\ndef increasingTriplet_onePointer(nums):\n \"\"\"\n 利用枚举+单指针,可惜超时了\n 时间复杂度:O(n^2)\n 空间复杂度:O(1)\n :type nums: List[int]\n :rtype: bool\n \"\"\"\n n = len(nums)\n # 固定中间元素\n for i in range(1, n-1):\n left, right = 0, n-1\n flag1, flag2 = False, False\n\n while left < i:\n if nums[left] < nums[i]:\n flag1 = True\n break\n left += 1\n while right > i:\n if nums[right] > nums[i]:\n flag2 = True\n break\n right -= 1\n\n if flag1 and flag2:\n return True\n\n return False\n\ndef increasingTriplet_pointers(nums):\n \"\"\"\n 贪心+快慢指针\n 在a < b的前提下,保证a尽可能小。在b < c的条件下,保证 b 尽可能小。\n 时间复杂度:O(n)\n 空间复杂度:O(1)\n :param nums:\n :return:\n \"\"\"\n a = float('inf')\n b = float('inf')\n\n for num in nums:\n if num <= a:\n # 当前元素比a小,就用当前元素赋值a\n a = num\n elif num <= b:\n # 当前元素比a大,但是比b小,就用该元素赋值b\n b = num\n else:\n # 否则说明该元素比a和b都大,符合要求\n return True\n return False\n\nif __name__ == \"__main__\":\n nums = [20,100,10,12,5,13]\n flag = increasingTriplet_pointers(nums)\n print(flag)","sub_path":"LeetCode/src/search07/increasing_triplet.py","file_name":"increasing_triplet.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"373265798","text":"import numpy as np\nfrom Game import Game as Game\nclass CambiaGame(Game):\n \"\"\"\n This class specifies the base Game class. To define your own game, subclass\n this class and implement the functions below. This works when the game is\n two-player, adversarial and turn-based.\n\n Use 1 for player1 and -1 for player2.\n\n See othello/OthelloGame.py for an example implementation.\n \"\"\"\n def getInitBoard(self):\n \"\"\"\n Returns:\n startBoard: a representation of the board (ideally this is the form\n that will be the input to your neural network)\n \"\"\"\n # the last 4 moves\n # 8 possible cards, 4 for each player, plus one for each player to represent the card they draw\n # 53 channels, one-hot representation for 52 card deck\n # 56 channels here because we convert to 55 channels before predicting - last 2 channels holds whether card is known by player 1 and player 2\n # use 2x2 convolutions to get history\n newBoard = np.zeros(shape=(4, 10, 56))\n for i in range(4):\n # add p1 cards\n # set starting cards\n newDraw = np.random.randint(low=0, high=54)\n newBoard[0][i] = np.zeros(shape=(56))\n newBoard[0][i][newDraw] = 1.\n if i < 2:\n # give vision\n newBoard[0][i][54] = 1.\n\n # add p2 cards\n newDraw = np.random.randint(low=0, high=54)\n newBoard[0][i + 5] = np.zeros(shape=(56))\n newBoard[0][i + 5][newDraw] = 1.\n if i < 2:\n # give vision\n newBoard[0][i + 5][55] = 1.\n\n # p1 starts with draw\n newDraw = np.random.randint(low=0, high=54)\n newBoard[0][4] = np.zeros(shape=(56))\n newBoard[0][4][newDraw] = 1.\n # give vision\n newBoard[0][4][54] = 1.\n\n return newBoard\n\n def getBoardSize(self):\n \"\"\"\n Returns:\n (x,y): a tuple of board dimensions\n \"\"\"\n return (4, 10)\n\n def getActionSize(self):\n \"\"\"\n Returns:\n actionSize: number of all possible actions\n \"\"\"\n # You can only play one of 4 cards, or the card you draw = 5 actions\n return 5\n\n def getNextState(self, board, player, action):\n \"\"\"\n Input:\n board: current board\n player: current player (1 or -1)\n action: action taken by current player\n\n Returns:\n nextBoard: board after applying action\n nextPlayer: player who plays in the next turn (should be -player)\n \"\"\"\n startIndex = 0\n # If player1\n if player == 1:\n startIndex = 0\n else:\n # player2\n startIndex = 5\n # delete the last move in history\n newBoard = np.delete(board, 3, axis=0)\n # insert a new move that's identical to previous move\n # handle playing own card\n a = np.copy(newBoard[0])\n newBoard = np.insert(newBoard, 0, a, axis=0)\n cardPlayed=0\n if action < 4:\n # play own card and swap with drawn card\n\n # get own card\n a = np.where(np.isin(newBoard[0][action + startIndex], [1.]))[0]\n if a.size > 0:\n cardToPlayIndex = a[0]\n # get unknown status - index 51 is player1, 52 is player2\n isCardKnown = newBoard[0][action + startIndex][54]\n if player == -1:\n isCardKnown = newBoard[0][action + startIndex][55]\n\n isCardKnownOpponent = newBoard[0][action + startIndex][54]\n if player == -1:\n isCardKnownOpponent = newBoard[0][action + startIndex][55]\n\n # our card\n # if card is known play it, otherwise get a random card to play\n # we assume a deck composed of infinite decks so we can uniformly draw\n if isCardKnown == 1.:\n cardPlayed = cardToPlayIndex\n else:\n cardPlayed = np.random.randint(low=0, high=54)\n\n # replace the current card with the drawn card and remove the drawn card from the board\n # replace played card slot with the drawn card\n newBoard[0][action + startIndex] = 
np.copy(newBoard[0][4 + startIndex])\n # remove card from drawn card slot\n newBoard[0][4 + startIndex] = np.zeros(shape=(56))\n # remove opponent's vision of new card\n temp = 0\n if player == -1:\n temp = 1\n newBoard[0][action + startIndex][54 + temp] = 0.\n \n if action == 4:\n # play drawn card\n s = np.where(np.isin(newBoard[0][4 + startIndex], [1.]))[0]\n if s.size > 0:\n cardPlayed = s[0]\n # set drawn card slot to nothing\n newBoard[0][4 + startIndex] = np.zeros(shape=(56))\n\n\n temp = 0\n if player == -1:\n temp = 1\n # regardless of action, play out the effects of the played card\n redKing = False\n if cardPlayed == 38 or cardPlayed == 51:\n redKing = True\n cardPlayed += 1\n cardPlayed %= 13\n if cardPlayed == 7 or cardPlayed == 8:\n # look at one of your own unknown cards\n toLookAtRandom = []\n for i in range(4):\n if newBoard[0][i + startIndex][54 + temp] == 0.:\n toLookAtRandom.append(i)\n if len(toLookAtRandom) < 1:\n toLookAtRandom.append(np.random.randint(low=0, high=5))\n rand = np.random.randint(low=0, high=len(toLookAtRandom))\n newBoard[0][toLookAtRandom[rand]][54 + temp] = 1.\n if cardPlayed == 9 or cardPlayed == 10:\n # look at one of opponent's cards that we don't know\n toLookAtRandom = []\n if player == 1:\n s = 5\n else:\n # player2\n s = 0\n for i in range(4):\n if newBoard[0][i + s][54 + temp] == 0.:\n toLookAtRandom.append(i)\n if len(toLookAtRandom) < 1:\n toLookAtRandom.append(np.random.randint(low=0, high=5))\n rand = np.random.randint(low=0, high=len(toLookAtRandom))\n newBoard[0][toLookAtRandom[rand]][54 + temp] = 1.\n if cardPlayed == 11 or cardPlayed == 12:\n # blind swap with opponent's cards - choose one of our unknown and one of opponent's unknown\n myUnknown = 0\n toLookAtRandom = []\n for i in range(4):\n if newBoard[0][i + startIndex][54 + temp] == 0.:\n toLookAtRandom.append(i)\n # if there's no unknown, randomly append one\n if len(toLookAtRandom) < 1:\n toLookAtRandom.append(np.random.randint(low=0, high=5))\n rand = np.random.randint(low=0, high=len(toLookAtRandom))\n myUnknown = toLookAtRandom[rand]\n\n oppUnknown = 5\n toLookAtRandom = []\n if player == 1:\n s = 5\n else:\n # player2\n s = 0\n for i in range(4):\n if newBoard[0][i + s][54 + temp] == 0.:\n toLookAtRandom.append(i)\n if len(toLookAtRandom) < 1:\n toLookAtRandom.append(np.random.randint(low=0, high=5))\n rand = np.random.randint(low=0, high=len(toLookAtRandom))\n oppUnknown = toLookAtRandom[rand]\n\n # swap cards\n oppCard = np.copy(newBoard[0][oppUnknown])\n newBoard[0][oppUnknown] = np.copy(newBoard[0][myUnknown])\n newBoard[0][myUnknown] = np.copy(oppCard)\n\n if cardPlayed == 13:\n # King; check if it's red, if not then we look at an opponent's card and swap\n if not redKing:\n # swap one of our unknown with lowest or random opponent's card\n # blind swap with opponent's cards - choose one of our unknown and one of opponent's unknown\n myUnknown = 0\n toLookAtRandom = []\n for i in range(4):\n if newBoard[0][i + startIndex][54 + temp] == 0.:\n toLookAtRandom.append(i)\n if len(toLookAtRandom) < 1:\n toLookAtRandom.append(np.random.randint(low=0, high=5))\n rand = np.random.randint(low=0, high=len(toLookAtRandom))\n myUnknown = toLookAtRandom[rand]\n\n oppUnknown = 5\n toLookAtRandom = []\n if player == 1:\n s = 5\n else:\n # player2\n s = 0\n for i in range(4):\n if newBoard[0][i + s][54 + temp] == 0.:\n toLookAtRandom.append(i)\n if len(toLookAtRandom) < 1:\n toLookAtRandom.append(np.random.randint(low=0, high=5))\n rand = np.random.randint(low=0, 
high=len(toLookAtRandom))\n oppUnknown = toLookAtRandom[rand]\n\n # swap cards\n oppCard = np.copy(newBoard[0][oppUnknown])\n oppCard[54 + temp] = 1.\n newBoard[0][oppUnknown] = np.copy(newBoard[0][myUnknown])\n newBoard[0][myUnknown] = np.copy(oppCard)\n\n\n # play all identical cards\n for j in range(10):\n # loop through and look for cards that are similar\n for index in np.where(np.isin(newBoard[0][j], [1.]))[0]:\n # don't play red kings or jokers\n if index != 38 and index != 51 and index < 52:\n # if the card is similar to current card, play it\n c = index + 1\n c %= 13\n if c == cardPlayed:\n newBoard[0][j] = np.zeros(shape=(56))\n\n # then draw a card for the opponent and switch turns\n # swap startIndex and temp\n if player == 1:\n startIndex = 5\n else:\n # player2\n startIndex = 0\n\n temp = 1\n if player == -1:\n temp = 0\n \n # set opponent's draw\n newDraw = np.random.randint(low=0, high=54)\n newBoard[0][4 + startIndex] = np.zeros(shape=(56))\n newBoard[0][4 + startIndex][newDraw] = 1.\n # give vision to opponent only\n newBoard[0][4 + startIndex][54 + temp] = 1.\n\n # update turn count\n newBoard[2][0][0] = newBoard[3][0][0] + 1\n return newBoard, player * -1\n\n\n def getValidMoves(self, board, player):\n \"\"\"\n Input:\n board: current board\n player: current player\n\n Returns:\n validMoves: a binary vector of length self.getActionSize(), 1 for\n moves that are valid from the current board and player,\n 0 for invalid moves\n \"\"\"\n outArr = np.zeros(shape=(5), dtype='int')\n if player == 1:\n startIndex = 0\n else:\n # player2\n startIndex = 5\n for i in range(4):\n if np.where(np.isin(board[0][i + startIndex], [1.]))[0].size > 0 and np.where(np.isin(board[0][i + startIndex], [1.]))[0][0] < 54:\n outArr[i] = 1\n outArr[4] = 1\n #print(outArr)\n return outArr\n\n def getGameEnded(self, board, player):\n \"\"\"\n Input:\n board: current board\n player: current player (1 or -1)\n\n Returns:\n r: 0 if game has not ended. 1 if player won, -1 if player lost,\n small non-zero value for draw.\n \n \"\"\"\n # if player1 or player2 is 1 or lower, game is over\n player1, player2 = self.computeScore(board)\n if player1 <= 1:\n return 1\n if player2 <= 1:\n return -1\n if board[2][0][0] > 52:\n # else, game ends after ~54 turns\n if player1 > player2:\n return -1\n else:\n return 1\n return 0\n\n \n def computeScore(self, board):\n \"\"\"\n Input:\n board: current board\n Returns:\n score1: player1's score\n score2: player2's score\n \"\"\"\n player1 = 0\n player2 = 0\n\n startIndex = 0\n for i in range(4):\n s = np.where(np.isin(board[0][i + startIndex], [1.]))[0]\n if s.size > 0:\n player1 += self.getCardScore(s[0])\n\n startIndex = 5\n for i in range(4):\n s = np.where(np.isin(board[0][i + startIndex], [1.]))[0]\n if s.size > 0:\n player2 += self.getCardScore(s[0])\n \n return player1, player2\n\n def getCardScore(self, num):\n \"\"\"\n Input:\n num: an index of a card from 0-53\n Returns:\n value: the card's value\n \"\"\"\n if num == 38 or num == 51:\n return -1\n if num == 52 or num == 53:\n return 0\n num += 1\n num %= 13\n return num\n\n\n def getCanonicalForm(self, board, player):\n \"\"\"\n Input:\n board: current board\n player: current player (1 or -1)\n\n Returns:\n canonicalBoard: returns canonical form of board. The canonical form\n should be independent of player. For e.g. in chess,\n the canonical form can be chosen to be from the pov\n of white. When the player is white, we can return\n board as is. 
When the player is black, we can invert\n                        the colors and return the board.\n        \"\"\"\n        newArr = np.zeros(shape=(4, 10, 56))\n        # swap slots 0-4 with 5-9\n        for i in range(4):\n            for j in range(5):\n                newArr[i][j + 5] = board[i][j]\n        for i in range(4):\n            for j in range(5):\n                newArr[i][j] = board[i][j + 5]\n\n        # swap vision columns\n        for i in range(4):\n            for j in range(10):\n                temp = newArr[i][j][54]\n                newArr[i][j][54] = newArr[i][j][55]\n                newArr[i][j][55] = temp\n\n        return newArr\n\n    def unknownize(self, board, player):\n        \"\"\"\n        Input:\n            board: current full board\n            player: current player (1 or -1)\n        Returns:\n            unknownizedBoard: returns board with unknown spots blanked, shape (4x10x55)\n        \"\"\"\n        # drop the opponent's vision channel and keep this player's own flag\n        index = 55\n        keepFlag = 54\n        if player == -1:\n            index = 54\n            keepFlag = 55\n        newBoard = np.delete(board, index, axis=2)\n        for i in range(newBoard.shape[0]):\n            for j in range(newBoard[i].shape[0]):\n                s = np.where(np.isin(board[i][j], [1.]))\n                # the card is known if this player's vision flag is set\n                # (compare the stored channel indices, not the loop counter)\n                known = keepFlag in s[0]\n                if not known and s[0].size > 0:\n                    if s[0][0] > 54:\n                        s[0][0] = 54\n                    newBoard[i][j][s[0][0]] = 0.\n        return newBoard\n\n    def getSymmetries(self, board, pi):\n        \"\"\"\n        Input:\n            board: current board\n            pi: policy vector of size self.getActionSize()\n\n        Returns:\n            symmForms: a list of [(board,pi)] where each tuple is a symmetrical\n                       form of the board and the corresponding pi vector. This\n                       is used when training the neural network from examples.\n        \"\"\"\n        # We don't need to input symmetries because not having them doesn't prevent convergence\n        # See AlphaZero Reddit AMA\n        return [(board, pi)]\n\n    def stringRepresentation(self, board):\n        \"\"\"\n        Input:\n            board: current board\n\n        Returns:\n            boardString: a quick conversion of board to a string format.\n                         Required by MCTS for hashing.\n        \"\"\"\n        return np.array_str(board)\n","sub_path":"CambiaGame.py","file_name":"CambiaGame.py","file_ext":"py","file_size_in_byte":16236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
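For readers following the board layout: each of the 10 card slots is a one-hot vector over 54 card indices (a 52-card deck plus 2 jokers) followed by two per-player visibility flags at channels 54 and 55, as used throughout `CambiaGame`. A minimal encode/decode sketch of a single slot under that layout:

```python
import numpy as np

N_CARDS = 54               # 52 cards + 2 jokers
P1_SEEN, P2_SEEN = 54, 55  # visibility flags, matching CambiaGame

def encode_slot(card_index, seen_by_p1=False, seen_by_p2=False):
    slot = np.zeros(N_CARDS + 2)
    slot[card_index] = 1.0
    if seen_by_p1:
        slot[P1_SEEN] = 1.0
    if seen_by_p2:
        slot[P2_SEEN] = 1.0
    return slot

def decode_slot(slot):
    hits = np.where(slot[:N_CARDS] == 1.0)[0]
    card = int(hits[0]) if hits.size else None  # None means an empty slot
    return card, bool(slot[P1_SEEN]), bool(slot[P2_SEEN])

print(decode_slot(encode_slot(12, seen_by_p1=True)))  # (12, True, False)
```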
+{"seq_id":"118555101","text":"from PIL import Image\nimport cv2\nimport pytesseract\nfrom .handwriting_extract.src.main import extract\nfrom .field.data.field_data import FieldData, FieldType\nfrom skimage.segmentation import clear_border\nfrom imutils import contours\nimport imutils\nimport os\nimport numpy as np\n\n\"\"\"\nMethod extracts data from given image using Py-Tesseract\n\n@return text the extracted text\n\"\"\"\n\n\ndef extract_data_pytesseract(image):\n filename = \"{}.png\".format(\"temp\")\n cv2.imwrite(filename, image)\n text = pytesseract.image_to_string(Image.open(filename), config='--psm 7')\n\n return text\n\n\"\"\"\nMethod extracts data from given image using handwriting_extract library\n\n@return the text that was extracted\n\"\"\"\n\n\ndef extract_data_handwriting(image):\n return extract(image)\n\n\n\"\"\"\nPerforms account and routing extraction from the provided image. Checks the given\npair parameters' field_type field to see if it wants the routing or the account number, and then\nsets the pair's extracted_data field to the accordingly, and returns the pair. If not successful, blank \nor garbage information is returned, otherwise both the extracted_data for routing and account would be a \nsingle string of digits.\n\n@param img: image to extract the data from - this is a cropped version of full image, containing only the bottom 3rd\n@param pair: the value that contains the type of the field that is requested, and the extracted_data itself to be returned\n\n@return pair regardless of if extraction was successful; difference is only in the accuracy of pair.extracted_data\n\"\"\"\n\n\ndef account_routing_extraction(img, pair: FieldData):\n print(\"Account/Writing extraction\")\n if img is not None:\n filedir = os.path.abspath(os.path.dirname(__file__))\n ref_image_file = os.path.join(\n filedir, '../../resources/images/micr_e13b_reference.png')\n\n # init list of reference character names, in same order as they appear in reference\n # image where the digits, their names and:\n # T = Transit (delimit bank branch routing transit #)\n # U = On-us (delimit customer account number)\n # A = Amount (delimit transaction amount)\n # D = Dash (delimit parts of numbers, such as routing or account)\n charNames = [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\",\n \"7\", \"8\", \"9\", \"0\", \"T\", \"U\", \"A\", \"D\"]\n\n # load ref MICR image, convert to grayscale and threshold it\n # this will cause digits to appear white on black background\n ref = cv2.imread(ref_image_file)\n ref = cv2.cvtColor(ref, cv2.COLOR_BGR2GRAY)\n ref = imutils.resize(ref, width=400)\n ref = cv2.threshold(\n ref, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]\n\n # find contours in the MICR image and sort them left to right\n refCnts = cv2.findContours(\n ref.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n refCnts = imutils.grab_contours(refCnts)\n refCnts = contours.sort_contours(refCnts, method=\"left-to-right\")[0]\n\n # extract digits and symbols from list of contours\n refROIs = extract_digits_and_symbols(ref, refCnts, minW=10, minH=20)[0]\n chars = {}\n\n # loop over reference ROIs\n for (name, roi) in zip(charNames, refROIs):\n # resize the ROI to a fixed size, then update the chars dict,\n # mapping char name to ROI\n roi = cv2.resize(roi, (36, 36))\n chars[name] = roi\n\n # init rectangular kernel along w/an empty list to store output of OCR\n rectKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (17, 7))\n output = []\n\n # load the input image, grab its dimensions, and apply array 
slicing\n        # to keep only the bottom 40% of the image (that's where the account/routing info is)\n        (h, w) = img.shape[:2]\n        delta = int(h - (h * 0.65))\n        height_max = int(h * 0.85)\n        bottom = img[delta:height_max, 0:w]\n\n        # convert bottom image to grayscale, apply blackhat morphological operator\n        # to find dark regions against a light background (the routing/account #s)\n        # gray = cv2.cvtColor(bottom, cv2.COLOR_BGR2GRAY)\n        blackhat = cv2.morphologyEx(bottom, cv2.MORPH_BLACKHAT, rectKernel)\n\n        # compute the Scharr gradient of the blackhat image, then scale\n        # the result back into the range [0, 255]\n        gradX = cv2.Sobel(blackhat, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1)\n        gradX = np.absolute(gradX)\n        (minVal, maxVal) = (np.min(gradX), np.max(gradX))\n        gradX = (255 * ((gradX - minVal) / (maxVal - minVal)))\n        gradX = gradX.astype(\"uint8\")\n\n        # apply a closing operation using rectangular kernel to close gaps\n        # between digits, then apply Otsu's thresholding method to binarize image\n        gradX = cv2.morphologyEx(gradX, cv2.MORPH_CLOSE, rectKernel)\n        thresh = cv2.threshold(\n            gradX, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n\n        # remove any pixels that are touching borders of image (helps us in next\n        # step when pruning contours)\n        thresh = clear_border(thresh)\n\n        # find contours in thresholded image, init list of group locations\n        groupCnts = cv2.findContours(\n            thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n        groupCnts = imutils.grab_contours(groupCnts)\n        groupLocs = []\n\n        # loop over group contours\n        for (i, c) in enumerate(groupCnts):\n            # compute bounding box of contour\n            (x, y, w, h) = cv2.boundingRect(c)\n\n            # only accept contour region as a grouping of chars if ROI sufficiently large\n            if w > 50 and h > 15:\n                groupLocs.append((x, y, w, h))\n\n        # sort the digit locs from left to right\n        groupLocs = sorted(groupLocs, key=lambda x: x[0])\n\n        # loop over group locations\n        for (gX, gY, gW, gH) in groupLocs:\n            # init the group output of chars\n            groupOutput = []\n\n            # extract group ROI of chars from the grayscale image\n            # then apply thresholding to segment the digits from background\n            group = bottom[gY - 5: gY + gH + 5, gX - 5: gX + gW + 5]\n            group = cv2.threshold(\n                group, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]\n\n            # find char contours in the group, then sort from left to right\n            charCnts = cv2.findContours(\n                group.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n            charCnts = imutils.grab_contours(charCnts)\n            charCnts = contours.sort_contours(charCnts, method=\"left-to-right\")[0]\n\n            # find chars and symbols in the group\n            (rois, locs) = extract_digits_and_symbols(group, charCnts)\n\n            # loop over ROIs from group\n            for roi in rois:\n                # init list of template matching scores and resize ROI to fixed size\n                scores = []\n                roi = cv2.resize(roi, (36, 36))\n\n                # loop over ref char name and corresponding ROI\n                for charName in charNames:\n                    # apply correlation-based template matching, take score, update scores list\n                    result = cv2.matchTemplate(roi, chars[charName], cv2.TM_CCOEFF)\n                    (_, score, _, _) = cv2.minMaxLoc(result)\n                    scores.append(score)\n\n                # the classification for char ROI will be ref char name w/largest template matching score\n                groupOutput.append(charNames[np.argmax(scores)])\n\n            # add group output to overall check OCR output\n            output.append(\"\".join(groupOutput))\n\n        # display output check OCR info to screen\n        print(\"Check OCR: {}\".format(\" \".join(output)))\n\n        if pair.field_type == FieldType.FIELD_TYPE_ROUTING:\n            print('routing ' + 
output[0].translate({ord(c): None for c in 'TUAD'}))\n pair.extracted_data = output[0].translate({ord(c): None for c in 'TUAD'})\n elif pair.field_type == FieldType.FIELD_TYPE_ACCOUNT:\n print('account ' + output[1].translate({ord(c): None for c in 'TUAD'}))\n pair.extracted_data = output[1].translate({ord(c): None for c in 'TUAD'})\n return pair\n\n\n\"\"\"\nThis function extracts each digit and symbol from the given image. If it is successful, it returns a tuple containing a\nlist of the roi (regions of interest, regions containing the chars to extract) and a list of locs (the actual locations\nof those rois)\n\n@param image: image to extract the data from - cropped version of full image, containing only an image of group of chars \n@param charCnts: list of character contours (what is used to determine each characters' location and identity)\n@param minW: minimum width of a char for it to count as a character\n@param minH: minimum height of a char for it to count as a character\n\n@return tuple containing a list of rois and a list of locs\n\"\"\"\n\n\ndef extract_digits_and_symbols(image, charCnts, minW=5, minH=15):\n # get Python iterator for character contours, and init ROI and location lists\n charIter = charCnts.__iter__()\n rois = []\n locs = []\n\n # loop over char contours until end of list\n while True:\n try:\n # get next char contour, compute bounding box, init ROI\n c = next(charIter)\n (cX, cY, cW, cH) = cv2.boundingRect(c)\n roi = None\n\n # check width/height if large enough, meaning we found a digit\n if cW >= minW and cH >= minH:\n # extract ROI\n roi = image[cY:cY + cH, cX: cX + cW]\n rois.append(roi)\n locs.append((cX, cY, cX + cW, cY + cH))\n else: # otherwise it is a special symbol\n # MICR special symbols include 3 parts, so\n # need to get next 2 from iterator, then\n # init bounding box coordinates for symbol\n parts = [c, next(charIter), next(charIter)]\n # init to positive and negative infinities\n (sXA, sYA, sXB, sYB) = (np.inf, np.inf, -np.inf, -np.inf)\n\n # loop over parts\n for p in parts:\n # calc bounding box for each part, update bookkeeping variables\n (pX, pY, pW, pH) = cv2.boundingRect(p)\n sXA = min(sXA, pX)\n sYA = min(sYA, pY)\n sXB = max(sXB, pX + pW)\n sYB = max(sYB, pY + pH)\n\n # extract ROI\n roi = image[sYA:sYB, sXA:sXB]\n rois.append(roi)\n locs.append((sXA, sYA, sXB, sYB))\n except StopIteration: # reached end of iterator, break from loop\n break\n\n # return tuple of ROIS and locations\n return rois, locs\n","sub_path":"src/main/backend/data_extraction/extract_methods.py","file_name":"extract_methods.py","file_ext":"py","file_size_in_byte":10806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
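The character classifier above is plain template matching: every candidate ROI is scored against each 36x36 reference glyph with `cv2.matchTemplate`, and the highest-scoring name wins. A stripped-down sketch of that scoring loop on synthetic glyphs (random arrays stand in for the MICR reference characters):

```python
import cv2
import numpy as np

rng = np.random.RandomState(0)
glyph_a = rng.randint(0, 255, (36, 36)).astype(np.uint8)
glyph_b = rng.randint(0, 255, (36, 36)).astype(np.uint8)
templates = {"A": glyph_a, "B": glyph_b}
candidate = glyph_b.copy()  # the candidate is an exact copy of glyph "B"

names = list(templates)
scores = []
for name in names:
    # ROI and template have the same size, so the result is a single score
    result = cv2.matchTemplate(candidate, templates[name], cv2.TM_CCOEFF)
    (_, score, _, _) = cv2.minMaxLoc(result)
    scores.append(score)

print(names[int(np.argmax(scores))])  # "B"
```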
+{"seq_id":"74326145","text":"from WindPy import *\nimport sys\n\nfrom WindPy import w as WindPyGateway\nfrom datetime import datetime\nfrom CommonUtilityFunc import *\n\n\ndef getFirstTradeDayOfMonth(year_n, month_n):\n date_str = '%d-%d-%d'%(year_n, month_n, 1)\n# date_str_next = '%d-%d-%d'%(year_n, month_n, 2)\n if w.tdayscount(date_str, date_str).Data[0][0]==1:\n return date_str \n tradeDay = WindPyGateway.tdaysoffset(1, date_str, \"\")\n return tradeDay.Data[0][0].strftime('%Y-%m-%d')\n\ndef getNearestTradeDay(date):\n date_str = '%d-%d-%d'%(date.year, date.month, date.day)\n tradeDay = WindPyGateway.tdaysoffset(0, date_str, \"\")\n return tradeDay.Data[0][0]\n\ndef getNextTradeDayStr(date_str):\n date = datetime.strptime(date_str,'%Y-%m-%d')\n date_str = '%d-%d-%d'%(date.year, date.month, date.day)\n tradeDay = WindPyGateway.tdaysoffset(1, date_str, \"\")\n return tradeDay.Data[0][0].strftime('%Y-%m-%d')\n \ndef getLastTradeDay(date_str):\n tradeDay = WindPyGateway.tdaysoffset(-1, date_str, \"\")\n return tradeDay.Data[0][0].strftime('%Y-%m-%d')\n \nif __name__=='__main__':\n WindPyGateway.start()\n date_str = '2017-2-19'\n date = datetime.strptime(date_str,'%Y-%m-%d')\n print(getNearestTradeDay(date))\n \n print(getLastTwoTradeDaysOfStock('SH600004', '2017-2-18'))\n print(getNextTradeDayStr('2017-2-18'))\n# print(data, str(data[2], encoding='gbk'))\n print(getLastTradeDayOfStock('IC1703', '2017-2-18'))\n# print(getFirstTradeDayOfMonth(2017, 1))","sub_path":"src/bakup_file/TradeDateCompute.py","file_name":"TradeDateCompute.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"438856764","text":"\"\"\"inition migrate\n\nRevision ID: c06e10d89e95\nRevises: a958748b0a72\nCreate Date: 2018-08-23 14:39:24.230841\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'c06e10d89e95'\ndown_revision = 'a958748b0a72'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('role', schema=None) as batch_op:\n batch_op.add_column(sa.Column('role_name', sa.String(), nullable=True))\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('role', schema=None) as batch_op:\n batch_op.drop_column('role_name')\n\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/c06e10d89e95_inition_migrate.py","file_name":"c06e10d89e95_inition_migrate.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"500511495","text":"#!/usr/bin/env python\n# -*- encoding:utf-8 -*-\nimport json\nfrom web import models\nfrom django.conf import settings\nfrom web.views import Paging\nfrom django.views import View\nfrom bs4 import BeautifulSoup\nfrom django.db import transaction\nfrom django.db.models import Q\nfrom django.http import JsonResponse\nfrom django.shortcuts import render, redirect, HttpResponse\n\n\ndef article_list(request):\n return render(request, 'manager/article_list.html')\n\n\ndef article_list_table(request):\n if request.method == 'GET':\n row = int(request.GET.get('rows'))\n page = int(request.GET.get('page'))\n title = request.GET.get('title', None)\n print(row,page)\n if title:\n article_obj = models.Article.objects.filter(\n title__contains=title).order_by('-id')\n total_count = article_obj.count()\n else:\n article_all = models.Article.objects.all()\n article_obj = article_all.order_by('-id')[(row * page) -\n row:row * page]\n total_count = article_all.count()\n rows = list()\n for obj in article_obj:\n tag = str()\n result = {\n 'id': obj.id,\n 'title': obj.title,\n 'author': obj.user.username,\n 'category': obj.category.title,\n 'is_recommend': obj.is_recommend,\n 'date_publish': obj.date_publish.strftime('%Y-%m-%d %H:%M:%S')\n }\n for i in obj.tag.all():\n tag += i.title + '、'\n result.update({'tag': tag.rstrip('、')})\n rows.append(result)\n return JsonResponse({'total': total_count, 'rows': rows}, safe=False)\n\n\ndef is_recommend(request):\n if request.method == 'POST':\n article_id = request.POST.get('id')\n recommend = request.POST.get('recommend')\n is_data = True if recommend.lower() == 'true' else False\n models.Article.objects.filter(id=article_id).update(\n is_recommend=is_data)\n return JsonResponse({'status': 'success', 'msg': '数据变更成功!'})\n\n\nclass Article_edit(View):\n def get(self, request):\n article_id = request.GET.get('article')\n Category = models.Category.objects.all()\n tag_obj = models.Tag.objects.all()\n article_obj = models.Article.objects.filter(id=article_id).first()\n return render(request, 'manager/article_edit.html', locals())\n\n def post(self, request):\n article_id = request.POST.get('id')\n content = request.POST.get(\"content\")\n title = request.POST.get(\"title\")\n soup = BeautifulSoup(content, \"html.parser\")\n for i in soup.find_all(\"script\"):\n i.decompose()\n\n with transaction.atomic():\n category = models.Category.objects.filter(\n title=request.POST.get(\"category\")).first()\n tag = json.loads(request.POST.get(\"tag\"))\n tag_list = []\n for i in tag:\n c = models.Tag.objects.filter(title=i).first().id\n if not c:\n c = models.Tag.objects.create(title=i).id\n tag_list.append(c)\n models.Article.objects.filter(id=article_id).update(\n title=title,\n desc=soup.text[0:150],\n category=category,\n content=content,\n )\n report_obj = models.Article.objects.filter(id=article_id).first()\n report_obj.tag.set(tag_list)\n return JsonResponse(\"ok\", safe=False)\n\n\ndef article_del(request):\n if request.method == \"POST\":\n article_id = json.loads(request.POST.get('id'))\n models.Article.objects.filter(id__in=article_id).delete()\n return JsonResponse({\n 'status': 'success',\n 'msg': '删除成功!'\n })\n\n\ndef link_list(request):\n if request.method == 'GET':\n return render(request, 'manager/link_list.html')\n\n\ndef link_table(request):\n if request.method == 'GET':\n link_all = models.Links.objects.all()\n result = list()\n for obj in link_all:\n rdict = {\n 'id': obj.id,\n 'title': obj.title,\n 'description': 
obj.description,\n 'callback_url': obj.callback_url,\n 'date_publish': obj.date_publish.strftime('%Y-%m-%d %H:%M:%S')\n }\n result.append(rdict)\n return JsonResponse({\n 'total': link_all.count(),\n 'rows': result\n }, safe=False)\n\n\ndef link_new(request):\n if request.method == 'POST':\n title = request.POST.get('title')\n url = request.POST.get('url')\n desc = request.POST.get('desc')\n models.Links.objects.create(\n title=title, description=desc, callback_url=url)\n return JsonResponse({\n 'status': 'success',\n 'msg': '创建成功!',\n })\n\n\ndef link_del(request):\n if request.method == 'POST':\n link_id = json.loads(request.POST.get('id'))\n links = models.Links.objects.filter(id__in=link_id).delete()\n return JsonResponse({\n 'status': 'success',\n 'msg': '删除成功!',\n })\n\nclass Link_edit(View):\n def get(self, request):\n link_id = request.GET.get('link')\n link_obj = models.Links.objects.filter(id=link_id).first()\n res = {\n 'id': link_obj.id,\n 'title': link_obj.title,\n 'url': link_obj.callback_url,\n 'desc': link_obj.description\n }\n return JsonResponse(res, safe=False)\n\n def post(self, request):\n id = request.GET.get('link')\n title = request.POST.get('title')\n url = request.POST.get('url')\n desc = request.POST.get('desc')\n models.Links.objects.filter(id=id).update(\n title=title, callback_url=url, description=desc)\n return redirect('/manager/link_list/')\n\n\ndef menu_list(request):\n if request.method == \"GET\":\n return render(request, 'manager/menu_list.html')\n\n\ndef menu_table(request):\n if request.method == 'GET':\n menu_all = models.Menu.objects.all().order_by('-index')\n menu_obj = menu_all.values(\n 'id',\n 'title',\n 'icon',\n 'index',\n )\n return JsonResponse({\n 'total': menu_all.count(),\n 'rows': list(menu_obj)\n }, safe=False)\n\n\ndef api_list(request):\n if request.method == \"GET\":\n return render(request, 'manager/api_list.html')\n\n\ndef api_table(request):\n if request.method == 'GET':\n api_all = models.Menu_List.objects.all().order_by('-index')\n api_obj = api_all.values(\n 'id',\n 'title',\n 'url',\n 'menu__title',\n )\n return JsonResponse({\n 'total': api_all.count(),\n 'rows': list(api_obj)\n }, safe=False)\n\n\ndef menu_new(request):\n return render(request, 'manager/menu_new.html', locals())\n\n\ndef profile(request):\n return render(request, 'manager/profile.html', locals())\n\n\ndef gallery_table(request):\n if request.method == 'GET':\n gallery_all = models.Gallery.objects.all().order_by('-id')\n gallery_obj = gallery_all.values(\n 'id',\n 'title',\n 'desc',\n )\n return JsonResponse({\n 'total': gallery_all.count(),\n 'rows': list(gallery_obj)\n }, safe=False)\n\nclass Gallery(View):\n def get(self, request):\n return render(request, 'manager/gallery_list.html')\n\n def post(self, request):\n result = dict()\n try:\n id = request.POST.get('id')\n title = request.POST.get('title')\n desc = request.POST.get('desc')\n if title:\n models.Gallery.objects.filter(id=id).update(title=title)\n elif desc:\n models.Gallery.objects.filter(id=id).update(desc=desc)\n result.update({'status': 'success', 'msg': '数据变更成功!'})\n except:\n result.update({'status': 'error', 'msg': '数据变更失败!'})\n return JsonResponse(result)\n\n\nclass Image_new(View):\n def get(self, request):\n obj = models.Gallery.objects.all()\n return render(request, 'manager/image_new.html', {'obj': obj})\n\n def post(self, request):\n title = request.POST.get('title')\n filedata = request.FILES.getlist('filedata')\n try:\n if title:\n gallery_obj = models.Gallery.objects.filter(\n 
title=title).first()\n for item in filedata:\n models.Image.objects.create(\n parent=gallery_obj,\n image=item,\n )\n except:\n result = {'status': 'error', 'msg': \"上传失败\"}\n else:\n result = {'status': 'success', 'msg': \"上传成功\"}\n return JsonResponse(result)\n\n\nclass Gallery_new(View):\n \"\"\"\n 添加相册\n \"\"\"\n\n def get(self, request):\n pass\n\n def post(self, request):\n title = request.POST.get('title')\n desc = request.POST.get('desc')\n obj = models.Gallery.objects.filter(title=title).exists()\n result = dict()\n if obj:\n result.update({'status': 'success', 'msg': '相册已存在!'})\n else:\n models.Gallery.objects.create(title=title, desc=desc)\n result.update({'status': 'success', 'msg': '相册创建成功!'})\n return JsonResponse(result)\n\n\nclass Comment(View):\n def get(self, request):\n return render(request, 'manager/comment_list.html')\n\n\ndef comment_table(request):\n if request.method == 'GET':\n comment_all = models.Comment.objects.all().order_by('-id')\n result = list()\n for obj in comment_all:\n rdict = {\n 'id': obj.id,\n 'username': obj.username,\n 'email': obj.email,\n 'article__title': obj.article.title,\n 'content': obj.content\n }\n result.append(rdict)\n return JsonResponse({\n 'total': comment_all.count(),\n 'rows': result\n }, safe=False)\n\n\ndef user_list(request):\n if request.method == 'GET':\n return render(request, 'manager/user_list.html')\n\n\ndef user_table(request):\n if request.method == 'GET':\n user_all = models.Userinfo.objects.all().order_by('-id')\n user_obj = user_all.values(\n 'id',\n 'username',\n 'desc',\n 'qq',\n 'email'\n )\n return JsonResponse({\n 'total': user_all.count(),\n 'rows': list(user_obj)\n }, safe=False)\n\n\ndef Category(request):\n return render(request, 'manager/category.html')\n\n\nclass Category_list(View):\n def get(self, request):\n rows = int(request.GET.get('rows'))\n page = int(request.GET.get('page'))\n title = request.GET.get('title', None)\n if title:\n obj = models.Category.objects.filter(title__contains=title)\n rows = obj.order_by('-id')[(rows * page) -\n rows:rows * page].values(\n 'id', 'title', 'pid__title')\n else:\n obj = models.Category.objects.all()\n rows = obj.order_by('-id')[(rows * page) -\n rows:rows * page].values(\n 'id', 'title', 'pid__title')\n return JsonResponse({\n 'total': obj.count(),\n 'rows': list(rows)\n },\n safe=False)\n\n def post(self, request):\n pass\n\n\ndef category_edit(request):\n if request.method == 'POST':\n ids = request.POST.get('id', None)\n value = request.POST.get('value', None)\n field = request.POST.get('field', None)\n result = dict()\n if field == 'pid__title':\n obj = models.Category.objects.filter(title=value)\n if obj.exists():\n if obj.first().pid:\n result.update({'status': 'error', 'msg': '所选父类不能拥有父类!'})\n else:\n models.Category.objects.filter(id=ids).update(\n pid=obj.first())\n result.update({'status': 'success', 'msg': '编辑成功!'})\n else:\n result.update({'status': 'error', 'msg': '没有指定的父分类!'})\n elif field == 'title':\n models.Category.objects.filter(id=ids).update(title=value)\n result.update({'status': 'success', 'msg': '编辑成功!'})\n print(result)\n return JsonResponse(result)\n\n\ndef category_new(request):\n if request.method == \"POST\":\n title = request.POST.get('title')\n pid_title = request.POST.get('pid_title')\n result = dict()\n obj = models.Category.objects.filter(title=title)\n if pid_title:\n ptitle = models.Category.objects.filter(title=pid_title)\n if ptitle.exists and not ptitle.first().pid:\n if obj.exists():\n result.update({'status': 'error', 
'msg': '分类已存在!'})\n else:\n models.Category.objects.create(\n title=title, pid=ptitle.first())\n result.update({'status': 'success', 'msg': '新增成功!'})\n else:\n result.update({'status': 'error', 'msg': '指定父分类不存在或着不是一级菜单!'})\n else:\n if obj.exists():\n result.update({'status': 'error', 'msg': '分类已存在!'})\n else:\n models.Category.objects.create(title=title)\n result.update({'status': 'success', 'msg': '新增成功!'})\n return JsonResponse(result)\n\n\ndef category_del(request):\n if request.method == 'POST':\n ids = json.loads(request.POST.get('id'))\n title = json.loads(request.POST.get('title'))\n result = dict()\n if ids:\n obj = models.Category.objects.filter(Q(id__in=ids) and Q(title__in=title))\n for i in obj:\n if i.article_set.all().exists():\n result.update({\n 'status': 'error',\n 'msg': '数据删除失败,所选分类存在文章!'\n })\n break\n elif i.category_set.all().exists():\n result.update({\n 'status': 'error',\n 'msg': '数据删除失败,所选分类存在子分类!'\n })\n break\n else:\n i.delete()\n result.update({'status': 'success', 'msg': '数据删除成功!'})\n else:\n result.update({'status': 'error', 'msg': '没有找到指定的数据!'})\n return JsonResponse(result)\n\n\ndef tag_list(request):\n if request.method == 'GET':\n return render(request, 'manager/tag_list.html')\n\n\ndef tag_new(request):\n if request.method == 'POST':\n title = request.POST.get('title')\n obj = models.Tag.objects.filter(title=title).exists()\n result = dict()\n if obj:\n result.update({'status': 'error', 'msg': '标签已存在!'})\n else:\n models.Tag.objects.create(title=title)\n result.update({'status': 'success', 'msg': '标签创建成功!'})\n return JsonResponse(result)\n\n\ndef tag_del(request):\n if request.method == 'POST':\n id_list = json.loads(request.POST.get('id'))\n title_list = json.loads(request.POST.get('title'))\n obj = models.Tag.objects.filter(Q(id__in=tag_list) and Q(title__in=title_list))\n result = dict()\n if obj.exists():\n obj.delete()\n result.update({'status': 'success', 'msg': '标签删除成功!'})\n else:\n result.update({'status': 'error', 'msg': '没有指定的数据!'})\n return JsonResponse(result)\n\n\ndef tag_table(request):\n if request.method == 'GET':\n row = int(request.GET.get('rows'))\n page = int(request.GET.get('page'))\n title = request.GET.get('title', None)\n if title:\n tag_obj = models.Tag.objects.filter(title__contains=title).order_by('-id').values('id', 'title')\n total_count = tag_obj.count()\n else:\n tag_all = models.Tag.objects.all()\n tag_obj = tag_all.order_by('-id')[(row * page) - row:row * page].values('id', 'title')\n total_count = tag_all.count()\n return JsonResponse({'total': total_count, 'rows': list(tag_obj)}, safe=False)\n","sub_path":"web/views/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":16916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
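The delete views above combine `Q` filters, which is where the original bug lived: Python's `and` between two `Q` objects short-circuits on truthiness and simply evaluates to the right-hand operand, so only one condition is applied. The intended intersection needs the `&` operator. A standalone check:

```python
from django.db.models import Q

id_q = Q(id__in=[1, 2, 3])
title_q = Q(title__in=['a', 'b'])

broken = id_q and title_q  # truthiness short-circuit: this IS title_q
combined = id_q & title_q  # a real AND of both conditions

print(broken is title_q)   # True -> the id filter was silently dropped
print(combined)            # an AND node holding both lookups
```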
+{"seq_id":"553551041","text":"from Share import List_Dict_Basis_Alg, List_Sheffer_Sets\n\n\ndef basis_construct(basis_len: int, full_set: list):\n \"\"\"\n Generator for construct basis by extending basis with len - 1\n\n :param basis_len: len of basis for construct\n :param full_set: list of operations from which basis construct\n :rtype: list\n :return: basis as sorted list of tuples multi operations or -1\n\n \"\"\"\n List_Dict_Basis_Alg.append({})\n\n count_extended_basis = 0\n\n for basis, alg in List_Dict_Basis_Alg[basis_len - 1].items():\n\n remaining_operations = set(full_set) - set(alg)\n\n if remaining_operations:\n\n remaining_operations = list(remaining_operations)\n\n remaining_operations.sort()\n\n for remaining_operation in remaining_operations:\n\n is_sheffer_basis = False\n\n if basis_len == 2:\n\n new_basis = sorted(list((basis,) + (remaining_operation,)))\n\n else:\n\n new_basis = sorted(list(basis + (remaining_operation,)))\n\n if len(List_Sheffer_Sets) > 0 and any(map(set(new_basis).issuperset, List_Sheffer_Sets)):\n is_sheffer_basis = True\n\n if (is_sheffer_basis is False) and (tuple(new_basis) not in List_Dict_Basis_Alg[basis_len]):\n count_extended_basis += 1\n\n yield new_basis\n else:\n\n continue\n\n if count_extended_basis == 0:\n yield \"NO BASIS FOR EXTEND\"\n","sub_path":"Basis_Construct.py","file_name":"Basis_Construct.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"271119924","text":"# Kornpob Bhirombhakdi 20200331\n\nimport numpy as np\n\ndef make_SIP(coef,x,y,startx=True):\n \"\"\"\n Simple imaging polynomial (SIP) is a conventional method to describe non-linear variation in an image. Ref: https://fits.gsfc.nasa.gov/registry/sip/shupeADASS.pdf.\n ##########\n Assume a SIP model of order 2, i.e., Z = a0 + a1*X + a2*X**2.\n Typically, X is relative to SIP reference system whose origin is corresponding to (xref,yref) in the original image. Therefore, X = x - xref where (x,y) is an image pixel.\n Z is a quantity of interests. In aXe grism reduction, Z can be Y (as y = Y + yref) for trace, or wavelength.\n SIP coefficients are 2D with a given polynomial order. Assume the order is 3. Therefore, ai = ai0 + ai1*X' + ai2*Y' + ai3*X'**2 + ai4*X'*Y' + ai5*Y'**2 + ... + ai9*Y'**3.\n Note X is the leading term (this is specified by startx=True in make_SIP). Set startx=False otherwise.\n Note that X and X' might be different. For aXe reduction, (X',Y') = (xd,yd) as the source location from direct image.\n \"\"\"\n if startx:\n xref,yref = x,y\n else:\n xref,yref = y,x\n n = len(coef)\n d = []\n px,py = 0,0\n a = [(px,py)]\n b = [(xref,yref)]\n p = 0\n q = True\n while(q):\n if px==0:\n p+=1\n px=p\n py=0\n else:\n px-=1\n py+=1\n a.append((px,py))\n b.append((xref,yref))\n if len(a)>=len(coef):\n q = False\n a,b = np.array(a),np.array(b)\n c = b**a\n c = np.sum(c[:,0]*c[:,1]*coef)\n d.append(c)\n d = np.array(d)\n return d \n","sub_path":"axehelper/make_sip.py","file_name":"make_sip.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}